diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..7224dfc
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,23 @@
+name: Test
+
+on:
+  pull_request:
+    branches:
+      - "*"
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: "1.19"
+          check-latest: true
+          cache: true
+
+      - name: Test
+        run: go test -v ./...
diff --git a/auth.go b/auth.go
new file mode 100644
index 0000000..84d1384
--- /dev/null
+++ b/auth.go
@@ -0,0 +1,46 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package auth
+
+const (
+	APIKeyPrefixString            = "AK-"
+	ServiceKeyPrefixString        = "SK-"
+	ServiceKeySessionPrefixString = "SS-"
+)
+
+var (
+	APIKeyPrefix            = []byte(APIKeyPrefixString)
+	ServiceKeySessionPrefix = []byte(ServiceKeySessionPrefixString)
+)
+
+const (
+	SessionContextKey           = "session"
+	APIKeyContextKey            = "apikey"
+	ServiceKeySessionContextKey = "service"
+	UserContextKey              = "user"
+	OrganizationContextKey      = "organization"
+)
+
+type Kind string
+
+const (
+	KindContextKey Kind = "kind"
+
+	KindSession    Kind = "session"
+	KindAPIKey     Kind = "api"
+	KindServiceKey Kind = "service"
+)
diff --git a/go.mod b/go.mod
index 19fded4..d22c41b 100644
--- a/go.mod
+++ b/go.mod
@@ -2,115 +2,57 @@ module github.com/loopholelabs/auth
 
 go 1.18
 
-replace github.com/cli/oauth => github.com/loopholelabs/oauth v0.10.0
-
-replace github.com/dexidp/dex => github.com/loopholelabs/dex v0.0.0-20221115010751-2d5dae79c41f
-
 require (
-	entgo.io/ent v0.11.3
-	github.com/AppsFlyer/go-sundheit v0.5.0
-	github.com/cli/oauth v0.9.0
-	github.com/dexidp/dex v0.0.0-20221011121916-54c9e8231fb8
-	github.com/go-openapi/runtime v0.24.2
-	github.com/gofiber/fiber/v2 v2.39.0
+	entgo.io/ent v0.11.4
+	github.com/gofiber/fiber/v2 v2.40.1
 	github.com/google/uuid v1.3.0
-	github.com/joho/godotenv v1.4.0
+	github.com/grokify/go-pkce v0.2.0
 	github.com/lib/pq v1.10.7
 	github.com/mattn/go-sqlite3 v1.14.16
-	github.com/sirupsen/logrus v1.9.0
-	github.com/stretchr/testify v1.8.1
-	github.com/valyala/fasthttp v1.40.0
-	golang.org/x/crypto v0.1.0
-	golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1
-	gopkg.in/square/go-jose.v2 v2.6.0
+	github.com/rs/zerolog v1.28.0
+	github.com/stretchr/testify v1.8.0
+	github.com/swaggo/swag v1.8.9
+	golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167
+	golang.org/x/oauth2 v0.3.0
 )
 
 require (
-	ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a // indirect
-	cloud.google.com/go/compute v1.10.0 // indirect
-	github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e // indirect
-	github.com/Masterminds/goutils v1.1.1 // indirect
-	github.com/Masterminds/semver/v3 v3.1.1 // indirect
-	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
+	ariga.io/atlas v0.7.3-0.20221011160332-3ca609863edd // indirect
+	github.com/KyleBanks/depth v1.2.1 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/agext/levenshtein v1.2.1 // indirect
 	github.com/andybalholm/brotli v1.0.4 // indirect
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
-	github.com/beevik/etree v1.1.0 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/cli/browser v1.0.0 // indirect
-	github.com/cli/safeexec v1.0.0 // indirect
-	github.com/coreos/go-oidc/v3 v3.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/dexidp/dex/api/v2 v2.1.0 // indirect
-	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
-	github.com/go-ldap/ldap/v3 v3.4.4 // indirect
-	github.com/go-openapi/analysis v0.21.2 // indirect
-	github.com/go-openapi/errors v0.20.2 // indirect
 	github.com/go-openapi/inflect v0.19.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.6 // indirect
-	github.com/go-openapi/loads v0.21.1 // indirect
 	github.com/go-openapi/spec v0.20.4 // indirect
-	github.com/go-openapi/strfmt v0.21.2 // indirect
-	github.com/go-openapi/swag v0.21.1 // indirect
-	github.com/go-openapi/validate v0.21.0 // indirect
-	github.com/go-sql-driver/mysql v1.6.0 // indirect
-	github.com/go-stack/stack v1.8.1 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/go-openapi/swag v0.19.15 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.6.0 // indirect
-	github.com/gorilla/handlers v1.5.1 // indirect
-	github.com/gorilla/mux v1.8.0 // indirect
+	github.com/google/go-cmp v0.5.8 // indirect
 	github.com/hashicorp/hcl/v2 v2.13.0 // indirect
-	github.com/huandu/xstrings v1.3.1 // indirect
-	github.com/imdario/mergo v0.3.11 // indirect
-	github.com/jonboulle/clockwork v0.2.2 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.15.0 // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
+	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/mitchellh/copystructure v1.0.0 // indirect
 	github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/mitchellh/reflectwalk v1.0.0 // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.13.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.37.0 // indirect
-	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/russellhaering/goxmldsig v1.2.0 // indirect
-	github.com/shopspring/decimal v1.2.0 // indirect
-	github.com/spf13/cast v1.4.1 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/fasthttp v1.41.0 // indirect
 	github.com/valyala/tcplisten v1.0.0 // indirect
 	github.com/zclconf/go-cty v1.8.0 // indirect
-	go.mongodb.org/mongo-driver v1.8.3 // indirect
-	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/net v0.1.0 // indirect
-	golang.org/x/sys v0.1.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
-	google.golang.org/api v0.101.0 // indirect
+	golang.org/x/net v0.3.0 // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
+	golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55 // indirect
-	google.golang.org/grpc v1.50.1 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 47d7243..e42e7d2 100644
--- a/go.sum
+++ b/go.sum
@@ -1,172 +1,26 @@
-ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a h1:6/nt4DODfgxzHTTg3tYy7YkVzruGQGZ/kRvXpA45KUo=
-ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
-cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
-cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
-cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
-cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
-cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
-cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
-cloud.google.com/go/compute v1.10.0 h1:aoLIYaA1fX3ywihqpBk2APQKOo20nXsp1GEZQbx5Jk4=
-cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-entgo.io/ent v0.11.3 h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=
-entgo.io/ent v0.11.3/go.mod h1:mvDhvynOzAsOe7anH7ynPPtMjA/eeXP96kAfweevyxc=
-github.com/AppsFlyer/go-sundheit v0.5.0 h1:/VxpyigCfJrq1r97mn9HPiAB2qrhcTFHwNIIDr15CZM=
-github.com/AppsFlyer/go-sundheit v0.5.0/go.mod h1:2ZM0BnfqT/mljBQO224VbL5XH06TgWuQ6Cn+cTtCpTY=
-github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e h1:NeAW1fUYUEWhft7pkxDf6WoUvEZJ/uOKsvtpjLnn8MU=
-github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+ariga.io/atlas v0.7.3-0.20221011160332-3ca609863edd h1:c3F2jvvEZzsoH/KUpDNhTsCVeUPnpXaF8kADZvUSiU0=
+ariga.io/atlas v0.7.3-0.20221011160332-3ca609863edd/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE=
+entgo.io/ent v0.11.4 h1:grwVY0fp31BZ6oEo3YrXenAuv8VJmEw7F/Bi6WqeH3Q=
+entgo.io/ent v0.11.4/go.mod h1:fnQIXL36RYnCk/9nvG4aE7YHBFZhCycfh7wMjY5p7SE=
 github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
-github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
-github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
-github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
-github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=
 github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
-github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cli/browser v1.0.0 h1:RIleZgXrhdiCVgFBSjtWwkLPUCWyhhhN5k5HGSBt1js=
-github.com/cli/browser v1.0.0/go.mod h1:IEWkHYbLjkhtjwwWlwTHW2lGxeS5gezEQBMLTwDHf5Q=
-github.com/cli/safeexec v1.0.0 h1:0VngyaIyqACHdcMNWfo6+KdUYnqEr2Sg+bSP1pdF+dI=
-github.com/cli/safeexec v1.0.0/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-oidc/v3 v3.4.0 h1:xz7elHb/LDwm/ERpwHd+5nb7wFHL32rsr6bBOgaeu6g=
-github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw=
+github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dexidp/dex/api/v2 v2.1.0 h1:V7XTnG2HM2bqWZMABDQpf4EA6F+0jWPsv9pGaUIDo+k=
-github.com/dexidp/dex/api/v2 v2.1.0/go.mod h1:s91/6CI290JhYN1F8aiRifLF71qRGLVZvzq68uC6Ln4=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
-github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-ldap/ldap/v3 v3.4.4 h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs=
-github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
 github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -174,863 +28,146 @@ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUe
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/runtime v0.24.2 h1:yX9HMGQbz32M87ECaAhGpJjBmErO3QLcgdZj9BzGx7c=
-github.com/go-openapi/runtime v0.24.2/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
 github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
 github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
-github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI=
-github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
-github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/gofiber/fiber/v2 v2.39.0 h1:uhWpYQ6EHN8J7FOPYbI2hrdBD/KNZBC5CjbuOd4QUt4=
-github.com/gofiber/fiber/v2 v2.39.0/go.mod h1:Cmuu+elPYGqlvQvdKyjtYsjGMi69PDp8a1AY2I5B2gM=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofiber/fiber/v2 v2.40.1 h1:pc7n9VVpGIqNsvg9IPLQhyFEMJL8gCs1kneH5D1pIl4=
+github.com/gofiber/fiber/v2 v2.40.1/go.mod h1:Gko04sLksnHbzLSRBFWPFdzM9Ws9pRxvvIaohJK1dsk=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
-github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
-github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
-github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
-github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/grokify/go-pkce v0.2.0 h1:IwjppAAUnKZuDo2e3S+EKiwXopAc2UHFzoOFbC24bO0=
+github.com/grokify/go-pkce v0.2.0/go.mod h1:DABMww8Ue+sVrmOBDrt8dH8iFFUtSfmUCKOS3nh4ye8=
 github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc=
 github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
-github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
-github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
-github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
 github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
 github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/loopholelabs/dex v0.0.0-20221115010751-2d5dae79c41f h1:ByEcdnlTYuMFY6cv++0IMfArvp9Tzil6p1hDMycWt3g=
-github.com/loopholelabs/dex v0.0.0-20221115010751-2d5dae79c41f/go.mod h1:46qF7n3I7ZHxHxvfdYRRgB8J6U/hmmGZOB33QJHTHMw=
-github.com/loopholelabs/oauth v0.10.0 h1:ciyX5cUIBRX7yg3y8NdAI0vNQ74eyQ6Sz+9OS9iieN8=
-github.com/loopholelabs/oauth v0.10.0/go.mod h1:qd/FX8ZBD6n1sVNQO3aIdRxeu5LGw9WhKnYhIIoC2A4=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU=
-github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
 github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
 github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/russellhaering/goxmldsig v1.2.0 h1:Y6GTTc9Un5hCxSzVz4UIWQ/zuVwDvzJk80guqzwx6Vg=
-github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw=
+github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY=
+github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/swaggo/swag v1.8.9 h1:kHtaBe/Ob9AZzAANfcn5c6RyCke9gG9QpH0jky0I/sA=
+github.com/swaggo/swag v1.8.9/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1Yc=
-github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
+github.com/valyala/fasthttp v1.41.0 h1:zeR0Z1my1wDHTRiamBCXVglQdbUwgb9uWG3k1HQz6jY=
+github.com/valyala/fasthttp v1.41.0/go.mod h1:f6VbjjoI3z1NDOZOv17o6RvtRSWxC77seBFc2uWtgiY=
 github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
 github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
 github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA=
 github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
-go.mongodb.org/mongo-driver v1.8.3 h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4=
-go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741 h1:fGZugkZk2UgYBxtpKmvub51Yno1LJDeEsRp2xGD+0gY=
-golang.org/x/exp v0.0.0-20221004215720-b9f4876ce741/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0=
+golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
+golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa h1:uKcci2q7Qtp6nMTC/AAvfNUAldFtJuHWV9/5QWiypts=
+golang.org/x/tools v0.1.13-0.20220804200503-81c7dc4e4efa/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
-google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
-google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
-google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
-google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
-google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.101.0 h1:lJPPeEBIRxGpGLwnBTam1NPEM8Z2BmmXEd3z812pjwM=
-google.golang.org/api v0.101.0/go.mod h1:CjxAAWWt3A3VrUE2IGDY2bgK5qhoG/OkyWVlYcP05MY=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
-google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55 h1:U1u4KB2kx6KR/aJDjQ97hZ15wQs8ZPvDcGcRynBhkvg=
-google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55/go.mod h1:45EK0dUbEZ2NHjCeAd2LXmyjAgGUGrpGROgjhC3ADck=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
-gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/aes/aes.go b/internal/aes/aes.go
new file mode 100644
index 0000000..44ec993
--- /dev/null
+++ b/internal/aes/aes.go
@@ -0,0 +1,90 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package aes
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/base64"
+	"errors"
+	"io"
+)
+
var (
	// ErrInvalidNonceSize is returned by Decrypt when the supplied content
	// is too short to contain the AES-GCM nonce.
	ErrInvalidNonceSize = errors.New("invalid nonce size")
	// ErrInvalidContent is returned by Decrypt when the ciphertext fails
	// authentication or the plaintext does not begin with the expected
	// identifier.
	ErrInvalidContent   = errors.New("invalid content")
)
+
// Encrypt encrypts identifier||content with AES-GCM under secretKey. A
// random nonce is generated, prepended to the ciphertext, and the whole
// result is returned as standard base64. Decrypt reverses the operation and
// verifies the identifier prefix.
//
// secretKey must be a valid AES key length (16, 24, or 32 bytes); any error
// from cipher construction or nonce generation is returned unchanged.
func Encrypt(secretKey []byte, identifier []byte, content []byte) (string, error) {
	block, err := aes.NewCipher(secretKey)
	if err != nil {
		return "", err
	}

	aesGCM, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}

	nonce := make([]byte, aesGCM.NonceSize())
	_, err = io.ReadFull(rand.Reader, nonce)
	if err != nil {
		return "", err
	}

	// Build the plaintext in a fresh buffer. The previous
	// append(identifier, content...) could write into identifier's backing
	// array when it had spare capacity, silently mutating the caller's data.
	plaintext := make([]byte, 0, len(identifier)+len(content))
	plaintext = append(plaintext, identifier...)
	plaintext = append(plaintext, content...)

	return base64.StdEncoding.EncodeToString(aesGCM.Seal(nonce, nonce, plaintext, nil)), nil
}
+
+func Decrypt(secretKey []byte, identifier []byte, content string) ([]byte, error) {
+	block, err := aes.NewCipher(secretKey)
+	if err != nil {
+		return nil, err
+	}
+
+	aesGCM, err := cipher.NewGCM(block)
+	if err != nil {
+		return nil, err
+	}
+
+	nonceSize := aesGCM.NonceSize()
+
+	if len(content) < nonceSize {
+		return nil, ErrInvalidNonceSize
+	}
+
+	contentBytes, err := base64.StdEncoding.DecodeString(content)
+	if err != nil {
+		return nil, err
+	}
+
+	contentBytes, err = aesGCM.Open(nil, contentBytes[:nonceSize], contentBytes[nonceSize:], nil)
+	if err != nil {
+		return nil, ErrInvalidContent
+	}
+
+	if len(contentBytes) < len(identifier) {
+		return nil, ErrInvalidContent
+	}
+
+	if !bytes.Equal(contentBytes[:len(identifier)], identifier) {
+		return nil, ErrInvalidContent
+	}
+
+	return contentBytes[len(identifier):], nil
+}
diff --git a/internal/aes/aes_test.go b/internal/aes/aes_test.go
new file mode 100644
index 0000000..b37ebac
--- /dev/null
+++ b/internal/aes/aes_test.go
@@ -0,0 +1,38 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package aes
+
+import (
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func TestAES(t *testing.T) {
+	key := []byte("0123456789abcdef0123456789abcdef")
+	identifier := []byte("test")
+	content := []byte("content")
+
+	encrypted, err := Encrypt(key, identifier, content)
+	require.NoError(t, err)
+
+	decrypted, err := Decrypt(key, identifier, encrypted)
+	require.NoError(t, err)
+	require.Equal(t, content, decrypted)
+
+	_, err = Decrypt([]byte("0123456789abcdef0123456789abcdee"), identifier, encrypted)
+	require.ErrorIs(t, err, ErrInvalidContent)
+}
diff --git a/internal/ent/client.go b/internal/ent/client.go
new file mode 100644
index 0000000..3416111
--- /dev/null
+++ b/internal/ent/client.go
@@ -0,0 +1,326 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/loopholelabs/auth/internal/ent/migrate"
+
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+)
+
+// Client is the client that holds all ent builders.
+type Client struct {
+	config
+	// Schema is the client for creating, migrating and dropping schema.
+	Schema *migrate.Schema
+	// DeviceFlow is the client for interacting with the DeviceFlow builders.
+	DeviceFlow *DeviceFlowClient
+	// GithubFlow is the client for interacting with the GithubFlow builders.
+	GithubFlow *GithubFlowClient
+}
+
+// NewClient creates a new client configured with the given options.
+func NewClient(opts ...Option) *Client {
+	cfg := config{log: log.Println, hooks: &hooks{}}
+	cfg.options(opts...)
+	client := &Client{config: cfg}
+	client.init()
+	return client
+}
+
+func (c *Client) init() {
+	c.Schema = migrate.NewSchema(c.driver)
+	c.DeviceFlow = NewDeviceFlowClient(c.config)
+	c.GithubFlow = NewGithubFlowClient(c.config)
+}
+
+// Open opens a database/sql.DB specified by the driver name and
+// the data source name, and returns a new client attached to it.
+// Optional parameters can be added for configuring the client.
+func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
+	switch driverName {
+	case dialect.MySQL, dialect.Postgres, dialect.SQLite:
+		drv, err := sql.Open(driverName, dataSourceName)
+		if err != nil {
+			return nil, err
+		}
+		return NewClient(append(options, Driver(drv))...), nil
+	default:
+		return nil, fmt.Errorf("unsupported driver: %q", driverName)
+	}
+}
+
+// Tx returns a new transactional client. The provided context
+// is used until the transaction is committed or rolled back.
+func (c *Client) Tx(ctx context.Context) (*Tx, error) {
+	if _, ok := c.driver.(*txDriver); ok {
+		return nil, errors.New("ent: cannot start a transaction within a transaction")
+	}
+	tx, err := newTx(ctx, c.driver)
+	if err != nil {
+		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
+	}
+	cfg := c.config
+	cfg.driver = tx
+	return &Tx{
+		ctx:        ctx,
+		config:     cfg,
+		DeviceFlow: NewDeviceFlowClient(cfg),
+		GithubFlow: NewGithubFlowClient(cfg),
+	}, nil
+}
+
+// BeginTx returns a transactional client with specified options.
+func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+	if _, ok := c.driver.(*txDriver); ok {
+		return nil, errors.New("ent: cannot start a transaction within a transaction")
+	}
+	tx, err := c.driver.(interface {
+		BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
+	}).BeginTx(ctx, opts)
+	if err != nil {
+		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
+	}
+	cfg := c.config
+	cfg.driver = &txDriver{tx: tx, drv: c.driver}
+	return &Tx{
+		ctx:        ctx,
+		config:     cfg,
+		DeviceFlow: NewDeviceFlowClient(cfg),
+		GithubFlow: NewGithubFlowClient(cfg),
+	}, nil
+}
+
+// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
+//
+//	client.Debug().
+//		DeviceFlow.
+//		Query().
+//		Count(ctx)
+func (c *Client) Debug() *Client {
+	if c.debug {
+		return c
+	}
+	cfg := c.config
+	cfg.driver = dialect.Debug(c.driver, c.log)
+	client := &Client{config: cfg}
+	client.init()
+	return client
+}
+
+// Close closes the database connection and prevents new queries from starting.
+func (c *Client) Close() error {
+	return c.driver.Close()
+}
+
+// Use adds the mutation hooks to all the entity clients.
+// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
+func (c *Client) Use(hooks ...Hook) {
+	c.DeviceFlow.Use(hooks...)
+	c.GithubFlow.Use(hooks...)
+}
+
+// DeviceFlowClient is a client for the DeviceFlow schema.
+type DeviceFlowClient struct {
+	config
+}
+
+// NewDeviceFlowClient returns a client for the DeviceFlow from the given config.
+func NewDeviceFlowClient(c config) *DeviceFlowClient {
+	return &DeviceFlowClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `deviceflow.Hooks(f(g(h())))`.
+func (c *DeviceFlowClient) Use(hooks ...Hook) {
+	c.hooks.DeviceFlow = append(c.hooks.DeviceFlow, hooks...)
+}
+
+// Create returns a builder for creating a DeviceFlow entity.
+func (c *DeviceFlowClient) Create() *DeviceFlowCreate {
+	mutation := newDeviceFlowMutation(c.config, OpCreate)
+	return &DeviceFlowCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of DeviceFlow entities.
+func (c *DeviceFlowClient) CreateBulk(builders ...*DeviceFlowCreate) *DeviceFlowCreateBulk {
+	return &DeviceFlowCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for DeviceFlow.
+func (c *DeviceFlowClient) Update() *DeviceFlowUpdate {
+	mutation := newDeviceFlowMutation(c.config, OpUpdate)
+	return &DeviceFlowUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *DeviceFlowClient) UpdateOne(df *DeviceFlow) *DeviceFlowUpdateOne {
+	mutation := newDeviceFlowMutation(c.config, OpUpdateOne, withDeviceFlow(df))
+	return &DeviceFlowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *DeviceFlowClient) UpdateOneID(id int) *DeviceFlowUpdateOne {
+	mutation := newDeviceFlowMutation(c.config, OpUpdateOne, withDeviceFlowID(id))
+	return &DeviceFlowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for DeviceFlow.
+func (c *DeviceFlowClient) Delete() *DeviceFlowDelete {
+	mutation := newDeviceFlowMutation(c.config, OpDelete)
+	return &DeviceFlowDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *DeviceFlowClient) DeleteOne(df *DeviceFlow) *DeviceFlowDeleteOne {
+	return c.DeleteOneID(df.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *DeviceFlowClient) DeleteOneID(id int) *DeviceFlowDeleteOne {
+	builder := c.Delete().Where(deviceflow.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &DeviceFlowDeleteOne{builder}
+}
+
+// Query returns a query builder for DeviceFlow.
+func (c *DeviceFlowClient) Query() *DeviceFlowQuery {
+	return &DeviceFlowQuery{
+		config: c.config,
+	}
+}
+
+// Get returns a DeviceFlow entity by its id.
+func (c *DeviceFlowClient) Get(ctx context.Context, id int) (*DeviceFlow, error) {
+	return c.Query().Where(deviceflow.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *DeviceFlowClient) GetX(ctx context.Context, id int) *DeviceFlow {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// Hooks returns the client hooks.
+func (c *DeviceFlowClient) Hooks() []Hook {
+	return c.hooks.DeviceFlow
+}
+
+// GithubFlowClient is a client for the GithubFlow schema.
+type GithubFlowClient struct {
+	config
+}
+
+// NewGithubFlowClient returns a client for the GithubFlow from the given config.
+func NewGithubFlowClient(c config) *GithubFlowClient {
+	return &GithubFlowClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `githubflow.Hooks(f(g(h())))`.
+func (c *GithubFlowClient) Use(hooks ...Hook) {
+	c.hooks.GithubFlow = append(c.hooks.GithubFlow, hooks...)
+}
+
+// Create returns a builder for creating a GithubFlow entity.
+func (c *GithubFlowClient) Create() *GithubFlowCreate {
+	mutation := newGithubFlowMutation(c.config, OpCreate)
+	return &GithubFlowCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of GithubFlow entities.
+func (c *GithubFlowClient) CreateBulk(builders ...*GithubFlowCreate) *GithubFlowCreateBulk {
+	return &GithubFlowCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for GithubFlow.
+func (c *GithubFlowClient) Update() *GithubFlowUpdate {
+	mutation := newGithubFlowMutation(c.config, OpUpdate)
+	return &GithubFlowUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *GithubFlowClient) UpdateOne(gf *GithubFlow) *GithubFlowUpdateOne {
+	mutation := newGithubFlowMutation(c.config, OpUpdateOne, withGithubFlow(gf))
+	return &GithubFlowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *GithubFlowClient) UpdateOneID(id int) *GithubFlowUpdateOne {
+	mutation := newGithubFlowMutation(c.config, OpUpdateOne, withGithubFlowID(id))
+	return &GithubFlowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for GithubFlow.
+func (c *GithubFlowClient) Delete() *GithubFlowDelete {
+	mutation := newGithubFlowMutation(c.config, OpDelete)
+	return &GithubFlowDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *GithubFlowClient) DeleteOne(gf *GithubFlow) *GithubFlowDeleteOne {
+	return c.DeleteOneID(gf.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *GithubFlowClient) DeleteOneID(id int) *GithubFlowDeleteOne {
+	builder := c.Delete().Where(githubflow.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &GithubFlowDeleteOne{builder}
+}
+
+// Query returns a query builder for GithubFlow.
+func (c *GithubFlowClient) Query() *GithubFlowQuery {
+	return &GithubFlowQuery{
+		config: c.config,
+	}
+}
+
+// Get returns a GithubFlow entity by its id.
+func (c *GithubFlowClient) Get(ctx context.Context, id int) (*GithubFlow, error) {
+	return c.Query().Where(githubflow.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *GithubFlowClient) GetX(ctx context.Context, id int) *GithubFlow {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// Hooks returns the client hooks.
+func (c *GithubFlowClient) Hooks() []Hook {
+	return c.hooks.GithubFlow
+}
diff --git a/pkg/storage/default/ent/config.go b/internal/ent/config.go
similarity index 64%
rename from pkg/storage/default/ent/config.go
rename to internal/ent/config.go
index d585015..c7b085b 100644
--- a/pkg/storage/default/ent/config.go
+++ b/internal/ent/config.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package ent
@@ -24,9 +40,8 @@ type config struct {
 
 // hooks per client, for fast access.
 type hooks struct {
-	APIKey     []ent.Hook
-	ServiceKey []ent.Hook
-	User       []ent.Hook
+	DeviceFlow []ent.Hook
+	GithubFlow []ent.Hook
 }
 
 // Options applies the options on the config object.
diff --git a/pkg/storage/default/ent/context.go b/internal/ent/context.go
similarity index 60%
rename from pkg/storage/default/ent/context.go
rename to internal/ent/context.go
index 7811bfa..14a4517 100644
--- a/pkg/storage/default/ent/context.go
+++ b/internal/ent/context.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package ent
diff --git a/internal/ent/deviceflow.go b/internal/ent/deviceflow.go
new file mode 100644
index 0000000..7edfb35
--- /dev/null
+++ b/internal/ent/deviceflow.go
@@ -0,0 +1,184 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+)
+
+// DeviceFlow is the model entity for the DeviceFlow schema.
+type DeviceFlow struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// LastPoll holds the value of the "last_poll" field.
+	LastPoll time.Time `json:"last_poll,omitempty"`
+	// Identifier holds the value of the "identifier" field.
+	Identifier string `json:"identifier,omitempty"`
+	// DeviceCode holds the value of the "device_code" field.
+	DeviceCode string `json:"device_code,omitempty"`
+	// UserCode holds the value of the "user_code" field.
+	UserCode string `json:"user_code,omitempty"`
+	// Session holds the value of the "session" field.
+	Session string `json:"session,omitempty"`
+	// ExpiresAt holds the value of the "expires_at" field.
+	ExpiresAt time.Time `json:"expires_at,omitempty"`
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*DeviceFlow) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case deviceflow.FieldID:
+			values[i] = new(sql.NullInt64)
+		case deviceflow.FieldIdentifier, deviceflow.FieldDeviceCode, deviceflow.FieldUserCode, deviceflow.FieldSession:
+			values[i] = new(sql.NullString)
+		case deviceflow.FieldCreatedAt, deviceflow.FieldLastPoll, deviceflow.FieldExpiresAt:
+			values[i] = new(sql.NullTime)
+		default:
+			return nil, fmt.Errorf("unexpected column %q for type DeviceFlow", columns[i])
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the DeviceFlow fields.
+func (df *DeviceFlow) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case deviceflow.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			df.ID = int(value.Int64)
+		case deviceflow.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				df.CreatedAt = value.Time
+			}
+		case deviceflow.FieldLastPoll:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field last_poll", values[i])
+			} else if value.Valid {
+				df.LastPoll = value.Time
+			}
+		case deviceflow.FieldIdentifier:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field identifier", values[i])
+			} else if value.Valid {
+				df.Identifier = value.String
+			}
+		case deviceflow.FieldDeviceCode:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field device_code", values[i])
+			} else if value.Valid {
+				df.DeviceCode = value.String
+			}
+		case deviceflow.FieldUserCode:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field user_code", values[i])
+			} else if value.Valid {
+				df.UserCode = value.String
+			}
+		case deviceflow.FieldSession:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field session", values[i])
+			} else if value.Valid {
+				df.Session = value.String
+			}
+		case deviceflow.FieldExpiresAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field expires_at", values[i])
+			} else if value.Valid {
+				df.ExpiresAt = value.Time
+			}
+		}
+	}
+	return nil
+}
+
+// Update returns a builder for updating this DeviceFlow.
+// Note that you need to call DeviceFlow.Unwrap() before calling this method if this DeviceFlow
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (df *DeviceFlow) Update() *DeviceFlowUpdateOne {
+	return (&DeviceFlowClient{config: df.config}).UpdateOne(df)
+}
+
+// Unwrap unwraps the DeviceFlow entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (df *DeviceFlow) Unwrap() *DeviceFlow {
+	_tx, ok := df.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: DeviceFlow is not a transactional entity")
+	}
+	df.config.driver = _tx.drv
+	return df
+}
+
+// String implements the fmt.Stringer.
+func (df *DeviceFlow) String() string {
+	var builder strings.Builder
+	builder.WriteString("DeviceFlow(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", df.ID))
+	builder.WriteString("created_at=")
+	builder.WriteString(df.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("last_poll=")
+	builder.WriteString(df.LastPoll.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("identifier=")
+	builder.WriteString(df.Identifier)
+	builder.WriteString(", ")
+	builder.WriteString("device_code=")
+	builder.WriteString(df.DeviceCode)
+	builder.WriteString(", ")
+	builder.WriteString("user_code=")
+	builder.WriteString(df.UserCode)
+	builder.WriteString(", ")
+	builder.WriteString("session=")
+	builder.WriteString(df.Session)
+	builder.WriteString(", ")
+	builder.WriteString("expires_at=")
+	builder.WriteString(df.ExpiresAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// DeviceFlows is a parsable slice of DeviceFlow.
+type DeviceFlows []*DeviceFlow
+
+func (df DeviceFlows) config(cfg config) {
+	for _i := range df {
+		df[_i].config = cfg
+	}
+}
diff --git a/internal/ent/deviceflow/deviceflow.go b/internal/ent/deviceflow/deviceflow.go
new file mode 100644
index 0000000..13d6d7b
--- /dev/null
+++ b/internal/ent/deviceflow/deviceflow.go
@@ -0,0 +1,81 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package deviceflow
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the deviceflow type in the database.
+	Label = "device_flow"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldLastPoll holds the string denoting the last_poll field in the database.
+	FieldLastPoll = "last_poll"
+	// FieldIdentifier holds the string denoting the identifier field in the database.
+	FieldIdentifier = "identifier"
+	// FieldDeviceCode holds the string denoting the device_code field in the database.
+	FieldDeviceCode = "device_code"
+	// FieldUserCode holds the string denoting the user_code field in the database.
+	FieldUserCode = "user_code"
+	// FieldSession holds the string denoting the session field in the database.
+	FieldSession = "session"
+	// FieldExpiresAt holds the string denoting the expires_at field in the database.
+	FieldExpiresAt = "expires_at"
+	// Table holds the table name of the deviceflow in the database.
+	Table = "device_flows"
+)
+
+// Columns holds all SQL columns for deviceflow fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldLastPoll,
+	FieldIdentifier,
+	FieldDeviceCode,
+	FieldUserCode,
+	FieldSession,
+	FieldExpiresAt,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultLastPoll holds the default value on creation for the "last_poll" field.
+	DefaultLastPoll func() time.Time
+	// IdentifierValidator is a validator for the "identifier" field. It is called by the builders before save.
+	IdentifierValidator func(string) error
+	// DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save.
+	DeviceCodeValidator func(string) error
+	// UserCodeValidator is a validator for the "user_code" field. It is called by the builders before save.
+	UserCodeValidator func(string) error
+)
diff --git a/internal/ent/deviceflow/where.go b/internal/ent/deviceflow/where.go
new file mode 100644
index 0000000..c5d6f06
--- /dev/null
+++ b/internal/ent/deviceflow/where.go
@@ -0,0 +1,794 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package deviceflow
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldID), id))
+	})
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.In(s.C(FieldID), v...))
+	})
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.NotIn(s.C(FieldID), v...))
+	})
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldID), id))
+	})
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldID), id))
+	})
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldID), id))
+	})
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldID), id))
+	})
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// LastPoll applies equality check predicate on the "last_poll" field. It's identical to LastPollEQ.
+func LastPoll(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldLastPoll), v))
+	})
+}
+
+// Identifier applies equality check predicate on the "identifier" field. It's identical to IdentifierEQ.
+func Identifier(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldIdentifier), v))
+	})
+}
+
+// DeviceCode applies equality check predicate on the "device_code" field. It's identical to DeviceCodeEQ.
+func DeviceCode(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldDeviceCode), v))
+	})
+}
+
+// UserCode applies equality check predicate on the "user_code" field. It's identical to UserCodeEQ.
+func UserCode(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUserCode), v))
+	})
+}
+
+// Session applies equality check predicate on the "session" field. It's identical to SessionEQ.
+func Session(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSession), v))
+	})
+}
+
+// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
+func ExpiresAt(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldExpiresAt), v))
+	})
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// LastPollEQ applies the EQ predicate on the "last_poll" field.
+func LastPollEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldLastPoll), v))
+	})
+}
+
+// LastPollNEQ applies the NEQ predicate on the "last_poll" field (i.e. last_poll != v).
+func LastPollNEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldLastPoll), v))
+	})
+}
+
+// LastPollIn applies the In predicate on the "last_poll" field.
+func LastPollIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldLastPoll), v...))
+	})
+}
+
+// LastPollNotIn applies the NotIn predicate on the "last_poll" field.
+func LastPollNotIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldLastPoll), v...))
+	})
+}
+
+// LastPollGT applies the GT predicate on the "last_poll" field.
+func LastPollGT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldLastPoll), v))
+	})
+}
+
+// LastPollGTE applies the GTE predicate on the "last_poll" field.
+func LastPollGTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldLastPoll), v))
+	})
+}
+
+// LastPollLT applies the LT predicate on the "last_poll" field.
+func LastPollLT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldLastPoll), v))
+	})
+}
+
+// LastPollLTE applies the LTE predicate on the "last_poll" field.
+func LastPollLTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldLastPoll), v))
+	})
+}
+
+// IdentifierEQ applies the EQ predicate on the "identifier" field (i.e. identifier = v).
+func IdentifierEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierNEQ applies the NEQ predicate on the "identifier" field.
+func IdentifierNEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierIn applies the In predicate on the "identifier" field.
+func IdentifierIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldIdentifier), v...))
+	})
+}
+
+// IdentifierNotIn applies the NotIn predicate on the "identifier" field.
+func IdentifierNotIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldIdentifier), v...))
+	})
+}
+
+// IdentifierGT applies the GT predicate on the "identifier" field.
+func IdentifierGT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierGTE applies the GTE predicate on the "identifier" field.
+func IdentifierGTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierLT applies the LT predicate on the "identifier" field.
+func IdentifierLT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierLTE applies the LTE predicate on the "identifier" field.
+func IdentifierLTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierContains applies the Contains predicate on the "identifier" field.
+func IdentifierContains(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierHasPrefix applies the HasPrefix predicate on the "identifier" field.
+func IdentifierHasPrefix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierHasSuffix applies the HasSuffix predicate on the "identifier" field.
+func IdentifierHasSuffix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierEqualFold applies the EqualFold predicate on the "identifier" field.
+func IdentifierEqualFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldIdentifier), v))
+	})
+}
+
+// IdentifierContainsFold applies the ContainsFold predicate on the "identifier" field.
+func IdentifierContainsFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldIdentifier), v))
+	})
+}
+
+// DeviceCodeEQ applies the EQ predicate on the "device_code" field (i.e. device_code = v).
+func DeviceCodeEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeNEQ applies the NEQ predicate on the "device_code" field.
+func DeviceCodeNEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeIn applies the In predicate on the "device_code" field.
+func DeviceCodeIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldDeviceCode), v...))
+	})
+}
+
+// DeviceCodeNotIn applies the NotIn predicate on the "device_code" field.
+func DeviceCodeNotIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldDeviceCode), v...))
+	})
+}
+
+// DeviceCodeGT applies the GT predicate on the "device_code" field.
+func DeviceCodeGT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeGTE applies the GTE predicate on the "device_code" field.
+func DeviceCodeGTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeLT applies the LT predicate on the "device_code" field.
+func DeviceCodeLT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeLTE applies the LTE predicate on the "device_code" field.
+func DeviceCodeLTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeContains applies the Contains predicate on the "device_code" field.
+func DeviceCodeContains(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeHasPrefix applies the HasPrefix predicate on the "device_code" field.
+func DeviceCodeHasPrefix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeHasSuffix applies the HasSuffix predicate on the "device_code" field.
+func DeviceCodeHasSuffix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeEqualFold applies the EqualFold predicate on the "device_code" field.
+func DeviceCodeEqualFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldDeviceCode), v))
+	})
+}
+
+// DeviceCodeContainsFold applies the ContainsFold predicate on the "device_code" field.
+func DeviceCodeContainsFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldDeviceCode), v))
+	})
+}
+
+// UserCodeEQ applies the EQ predicate on the "user_code" field (i.e. user_code = v).
+func UserCodeEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeNEQ applies the NEQ predicate on the "user_code" field.
+func UserCodeNEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeIn applies the In predicate on the "user_code" field.
+func UserCodeIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldUserCode), v...))
+	})
+}
+
+// UserCodeNotIn applies the NotIn predicate on the "user_code" field.
+func UserCodeNotIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldUserCode), v...))
+	})
+}
+
+// UserCodeGT applies the GT predicate on the "user_code" field.
+func UserCodeGT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeGTE applies the GTE predicate on the "user_code" field.
+func UserCodeGTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeLT applies the LT predicate on the "user_code" field.
+func UserCodeLT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeLTE applies the LTE predicate on the "user_code" field.
+func UserCodeLTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeContains applies the Contains predicate on the "user_code" field.
+func UserCodeContains(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeHasPrefix applies the HasPrefix predicate on the "user_code" field.
+func UserCodeHasPrefix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeHasSuffix applies the HasSuffix predicate on the "user_code" field.
+func UserCodeHasSuffix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeEqualFold applies the EqualFold predicate on the "user_code" field.
+func UserCodeEqualFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldUserCode), v))
+	})
+}
+
+// UserCodeContainsFold applies the ContainsFold predicate on the "user_code" field.
+func UserCodeContainsFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldUserCode), v))
+	})
+}
+
+// SessionEQ applies the EQ predicate on the "session" field (i.e. session = v).
+func SessionEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSession), v))
+	})
+}
+
+// SessionNEQ applies the NEQ predicate on the "session" field.
+func SessionNEQ(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSession), v))
+	})
+}
+
+// SessionIn applies the In predicate on the "session" field.
+func SessionIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSession), v...))
+	})
+}
+
+// SessionNotIn applies the NotIn predicate on the "session" field.
+func SessionNotIn(vs ...string) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSession), v...))
+	})
+}
+
+// SessionGT applies the GT predicate on the "session" field.
+func SessionGT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSession), v))
+	})
+}
+
+// SessionGTE applies the GTE predicate on the "session" field.
+func SessionGTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSession), v))
+	})
+}
+
+// SessionLT applies the LT predicate on the "session" field.
+func SessionLT(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSession), v))
+	})
+}
+
+// SessionLTE applies the LTE predicate on the "session" field.
+func SessionLTE(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSession), v))
+	})
+}
+
+// SessionContains applies the Contains predicate on the "session" field.
+func SessionContains(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSession), v))
+	})
+}
+
+// SessionHasPrefix applies the HasPrefix predicate on the "session" field.
+func SessionHasPrefix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSession), v))
+	})
+}
+
+// SessionHasSuffix applies the HasSuffix predicate on the "session" field.
+func SessionHasSuffix(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSession), v))
+	})
+}
+
+// SessionIsNil applies the IsNil predicate on the "session" field (matches rows where session IS NULL).
+func SessionIsNil() predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSession)))
+	})
+}
+
+// SessionNotNil applies the NotNil predicate on the "session" field.
+func SessionNotNil() predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSession)))
+	})
+}
+
+// SessionEqualFold applies the EqualFold predicate on the "session" field.
+func SessionEqualFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSession), v))
+	})
+}
+
+// SessionContainsFold applies the ContainsFold predicate on the "session" field.
+func SessionContainsFold(v string) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSession), v))
+	})
+}
+
+// ExpiresAtEQ applies the EQ predicate on the "expires_at" field (i.e. expires_at = v).
+func ExpiresAtEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
+func ExpiresAtNEQ(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtIn applies the In predicate on the "expires_at" field.
+func ExpiresAtIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldExpiresAt), v...))
+	})
+}
+
+// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
+func ExpiresAtNotIn(vs ...time.Time) predicate.DeviceFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldExpiresAt), v...))
+	})
+}
+
+// ExpiresAtGT applies the GT predicate on the "expires_at" field.
+func ExpiresAtGT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
+func ExpiresAtGTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtLT applies the LT predicate on the "expires_at" field.
+func ExpiresAtLT(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
+func ExpiresAtLTE(v time.Time) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldExpiresAt), v))
+	})
+}
+
+// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field.
+func ExpiresAtIsNil() predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldExpiresAt)))
+	})
+}
+
+// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field.
+func ExpiresAtNotNil() predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldExpiresAt)))
+	})
+}
+
+// And groups predicates with the AND operator between them, applying them all to a cloned selector.
+func And(predicates ...predicate.DeviceFlow) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for _, p := range predicates {
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Or groups predicates with the OR operator between them, applying each to a cloned selector.
+func Or(predicates ...predicate.DeviceFlow) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for i, p := range predicates {
+			if i > 0 {
+				s1.Or()
+			}
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Not negates the given predicate by applying it to a NOT-wrapped selector.
+func Not(p predicate.DeviceFlow) predicate.DeviceFlow {
+	return predicate.DeviceFlow(func(s *sql.Selector) {
+		p(s.Not())
+	})
+}
diff --git a/internal/ent/deviceflow_create.go b/internal/ent/deviceflow_create.go
new file mode 100644
index 0000000..012659d
--- /dev/null
+++ b/internal/ent/deviceflow_create.go
@@ -0,0 +1,372 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+)
+
+// DeviceFlowCreate is the builder for creating a DeviceFlow entity; values are staged in its mutation.
+type DeviceFlowCreate struct {
+	config
+	mutation *DeviceFlowMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (dfc *DeviceFlowCreate) SetCreatedAt(t time.Time) *DeviceFlowCreate {
+	dfc.mutation.SetCreatedAt(t)
+	return dfc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (dfc *DeviceFlowCreate) SetNillableCreatedAt(t *time.Time) *DeviceFlowCreate {
+	if t != nil {
+		dfc.SetCreatedAt(*t)
+	}
+	return dfc
+}
+
+// SetLastPoll sets the "last_poll" field.
+func (dfc *DeviceFlowCreate) SetLastPoll(t time.Time) *DeviceFlowCreate {
+	dfc.mutation.SetLastPoll(t)
+	return dfc
+}
+
+// SetNillableLastPoll sets the "last_poll" field if the given value is not nil.
+func (dfc *DeviceFlowCreate) SetNillableLastPoll(t *time.Time) *DeviceFlowCreate {
+	if t != nil {
+		dfc.SetLastPoll(*t)
+	}
+	return dfc
+}
+
+// SetIdentifier sets the "identifier" field.
+func (dfc *DeviceFlowCreate) SetIdentifier(s string) *DeviceFlowCreate {
+	dfc.mutation.SetIdentifier(s)
+	return dfc
+}
+
+// SetDeviceCode sets the "device_code" field.
+func (dfc *DeviceFlowCreate) SetDeviceCode(s string) *DeviceFlowCreate {
+	dfc.mutation.SetDeviceCode(s)
+	return dfc
+}
+
+// SetUserCode sets the "user_code" field.
+func (dfc *DeviceFlowCreate) SetUserCode(s string) *DeviceFlowCreate {
+	dfc.mutation.SetUserCode(s)
+	return dfc
+}
+
+// SetSession sets the "session" field.
+func (dfc *DeviceFlowCreate) SetSession(s string) *DeviceFlowCreate {
+	dfc.mutation.SetSession(s)
+	return dfc
+}
+
+// SetNillableSession sets the "session" field if the given value is not nil.
+func (dfc *DeviceFlowCreate) SetNillableSession(s *string) *DeviceFlowCreate {
+	if s != nil {
+		dfc.SetSession(*s)
+	}
+	return dfc
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (dfc *DeviceFlowCreate) SetExpiresAt(t time.Time) *DeviceFlowCreate {
+	dfc.mutation.SetExpiresAt(t)
+	return dfc
+}
+
+// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
+func (dfc *DeviceFlowCreate) SetNillableExpiresAt(t *time.Time) *DeviceFlowCreate {
+	if t != nil {
+		dfc.SetExpiresAt(*t)
+	}
+	return dfc
+}
+
+// Mutation returns the DeviceFlowMutation object of the builder.
+func (dfc *DeviceFlowCreate) Mutation() *DeviceFlowMutation {
+	return dfc.mutation
+}
+
+// Save creates the DeviceFlow in the database, running defaults, validators, and any registered hooks.
+func (dfc *DeviceFlowCreate) Save(ctx context.Context) (*DeviceFlow, error) {
+	var (
+		err  error
+		node *DeviceFlow
+	)
+	dfc.defaults()
+	if len(dfc.hooks) == 0 {
+		if err = dfc.check(); err != nil {
+			return nil, err
+		}
+		node, err = dfc.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*DeviceFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			if err = dfc.check(); err != nil {
+				return nil, err
+			}
+			dfc.mutation = mutation
+			if node, err = dfc.sqlSave(ctx); err != nil {
+				return nil, err
+			}
+			mutation.id = &node.ID
+			mutation.done = true
+			return node, err
+		})
+		// Wrap the mutator in reverse order so hooks[0] runs outermost.
+		for i := len(dfc.hooks) - 1; i >= 0; i-- {
+			if dfc.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = dfc.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, dfc.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*DeviceFlow)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from DeviceFlowMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (dfc *DeviceFlowCreate) SaveX(ctx context.Context) *DeviceFlow {
+	v, err := dfc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (dfc *DeviceFlowCreate) Exec(ctx context.Context) error {
+	_, err := dfc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfc *DeviceFlowCreate) ExecX(ctx context.Context) {
+	if err := dfc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save (created_at and last_poll).
+func (dfc *DeviceFlowCreate) defaults() {
+	if _, ok := dfc.mutation.CreatedAt(); !ok {
+		v := deviceflow.DefaultCreatedAt()
+		dfc.mutation.SetCreatedAt(v)
+	}
+	if _, ok := dfc.mutation.LastPoll(); !ok {
+		v := deviceflow.DefaultLastPoll()
+		dfc.mutation.SetLastPoll(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder before it hits the database.
+func (dfc *DeviceFlowCreate) check() error {
+	if _, ok := dfc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DeviceFlow.created_at"`)}
+	}
+	if _, ok := dfc.mutation.LastPoll(); !ok {
+		return &ValidationError{Name: "last_poll", err: errors.New(`ent: missing required field "DeviceFlow.last_poll"`)}
+	}
+	if _, ok := dfc.mutation.Identifier(); !ok {
+		return &ValidationError{Name: "identifier", err: errors.New(`ent: missing required field "DeviceFlow.identifier"`)}
+	}
+	if v, ok := dfc.mutation.Identifier(); ok {
+		if err := deviceflow.IdentifierValidator(v); err != nil {
+			return &ValidationError{Name: "identifier", err: fmt.Errorf(`ent: validator failed for field "DeviceFlow.identifier": %w`, err)}
+		}
+	}
+	if _, ok := dfc.mutation.DeviceCode(); !ok {
+		return &ValidationError{Name: "device_code", err: errors.New(`ent: missing required field "DeviceFlow.device_code"`)}
+	}
+	if v, ok := dfc.mutation.DeviceCode(); ok {
+		if err := deviceflow.DeviceCodeValidator(v); err != nil {
+			return &ValidationError{Name: "device_code", err: fmt.Errorf(`ent: validator failed for field "DeviceFlow.device_code": %w`, err)}
+		}
+	}
+	if _, ok := dfc.mutation.UserCode(); !ok {
+		return &ValidationError{Name: "user_code", err: errors.New(`ent: missing required field "DeviceFlow.user_code"`)}
+	}
+	if v, ok := dfc.mutation.UserCode(); ok {
+		if err := deviceflow.UserCodeValidator(v); err != nil {
+			return &ValidationError{Name: "user_code", err: fmt.Errorf(`ent: validator failed for field "DeviceFlow.user_code": %w`, err)}
+		}
+	}
+	return nil
+}
+
+// sqlSave inserts the node and copies the database-assigned integer ID back onto it.
+func (dfc *DeviceFlowCreate) sqlSave(ctx context.Context) (*DeviceFlow, error) {
+	_node, _spec := dfc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, dfc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+// createSpec builds the sqlgraph create spec and the in-memory node from the staged mutation values.
+func (dfc *DeviceFlowCreate) createSpec() (*DeviceFlow, *sqlgraph.CreateSpec) {
+	var (
+		_node = &DeviceFlow{config: dfc.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: deviceflow.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: deviceflow.FieldID,
+			},
+		}
+	)
+	if value, ok := dfc.mutation.CreatedAt(); ok {
+		_spec.SetField(deviceflow.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := dfc.mutation.LastPoll(); ok {
+		_spec.SetField(deviceflow.FieldLastPoll, field.TypeTime, value)
+		_node.LastPoll = value
+	}
+	if value, ok := dfc.mutation.Identifier(); ok {
+		_spec.SetField(deviceflow.FieldIdentifier, field.TypeString, value)
+		_node.Identifier = value
+	}
+	if value, ok := dfc.mutation.DeviceCode(); ok {
+		_spec.SetField(deviceflow.FieldDeviceCode, field.TypeString, value)
+		_node.DeviceCode = value
+	}
+	if value, ok := dfc.mutation.UserCode(); ok {
+		_spec.SetField(deviceflow.FieldUserCode, field.TypeString, value)
+		_node.UserCode = value
+	}
+	if value, ok := dfc.mutation.Session(); ok {
+		_spec.SetField(deviceflow.FieldSession, field.TypeString, value)
+		_node.Session = value
+	}
+	if value, ok := dfc.mutation.ExpiresAt(); ok {
+		_spec.SetField(deviceflow.FieldExpiresAt, field.TypeTime, value)
+		_node.ExpiresAt = value
+	}
+	return _node, _spec
+}
+
+// DeviceFlowCreateBulk is the builder for creating many DeviceFlow entities in bulk in one batch statement.
+type DeviceFlowCreateBulk struct {
+	config
+	builders []*DeviceFlowCreate
+}
+
+// Save creates the DeviceFlow entities in the database by chaining the builders' mutators together.
+func (dfcb *DeviceFlowCreateBulk) Save(ctx context.Context) ([]*DeviceFlow, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(dfcb.builders))
+	nodes := make([]*DeviceFlow, len(dfcb.builders))
+	mutators := make([]Mutator, len(dfcb.builders))
+	for i := range dfcb.builders {
+		func(i int, root context.Context) {
+			builder := dfcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*DeviceFlowMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, dfcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, dfcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, dfcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dfcb *DeviceFlowCreateBulk) SaveX(ctx context.Context) []*DeviceFlow {
+	v, err := dfcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (dfcb *DeviceFlowCreateBulk) Exec(ctx context.Context) error {
+	_, err := dfcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfcb *DeviceFlowCreateBulk) ExecX(ctx context.Context) {
+	if err := dfcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/internal/ent/deviceflow_delete.go b/internal/ent/deviceflow_delete.go
new file mode 100644
index 0000000..8f29226
--- /dev/null
+++ b/internal/ent/deviceflow_delete.go
@@ -0,0 +1,131 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// DeviceFlowDelete is the builder for deleting DeviceFlow entities that match its predicates.
+type DeviceFlowDelete struct {
+	config
+	hooks    []Hook
+	mutation *DeviceFlowMutation
+}
+
+// Where appends a list predicates to the DeviceFlowDelete builder.
+func (dfd *DeviceFlowDelete) Where(ps ...predicate.DeviceFlow) *DeviceFlowDelete {
+	dfd.mutation.Where(ps...)
+	return dfd
+}
+
+// Exec executes the deletion query and returns how many vertices (rows) were deleted.
+func (dfd *DeviceFlowDelete) Exec(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(dfd.hooks) == 0 {
+		affected, err = dfd.sqlExec(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*DeviceFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			dfd.mutation = mutation
+			affected, err = dfd.sqlExec(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		// Wrap the mutator in reverse order so hooks[0] runs outermost.
+		for i := len(dfd.hooks) - 1; i >= 0; i-- {
+			if dfd.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = dfd.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, dfd.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfd *DeviceFlowDelete) ExecX(ctx context.Context) int {
+	n, err := dfd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+// sqlExec builds the delete spec from the mutation's predicates and runs it against the driver.
+func (dfd *DeviceFlowDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := &sqlgraph.DeleteSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table: deviceflow.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: deviceflow.FieldID,
+			},
+		},
+	}
+	if ps := dfd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, dfd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	return affected, err
+}
+
+// DeviceFlowDeleteOne is the builder for deleting a single DeviceFlow entity; it reports NotFoundError when nothing matched.
+type DeviceFlowDeleteOne struct {
+	dfd *DeviceFlowDelete
+}
+
+// Exec executes the deletion query.
+func (dfdo *DeviceFlowDeleteOne) Exec(ctx context.Context) error {
+	n, err := dfdo.dfd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{deviceflow.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfdo *DeviceFlowDeleteOne) ExecX(ctx context.Context) {
+	dfdo.dfd.ExecX(ctx)
+}
diff --git a/internal/ent/deviceflow_query.go b/internal/ent/deviceflow_query.go
new file mode 100644
index 0000000..85cdddd
--- /dev/null
+++ b/internal/ent/deviceflow_query.go
@@ -0,0 +1,564 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// DeviceFlowQuery is the builder for querying DeviceFlow entities.
+type DeviceFlowQuery struct {
+	config
+	limit      *int
+	offset     *int
+	unique     *bool
+	order      []OrderFunc
+	fields     []string
+	predicates []predicate.DeviceFlow
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the DeviceFlowQuery builder.
+func (dfq *DeviceFlowQuery) Where(ps ...predicate.DeviceFlow) *DeviceFlowQuery {
+	dfq.predicates = append(dfq.predicates, ps...)
+	return dfq
+}
+
+// Limit adds a limit step to the query.
+func (dfq *DeviceFlowQuery) Limit(limit int) *DeviceFlowQuery {
+	dfq.limit = &limit
+	return dfq
+}
+
+// Offset adds an offset step to the query.
+func (dfq *DeviceFlowQuery) Offset(offset int) *DeviceFlowQuery {
+	dfq.offset = &offset
+	return dfq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (dfq *DeviceFlowQuery) Unique(unique bool) *DeviceFlowQuery {
+	dfq.unique = &unique
+	return dfq
+}
+
+// Order adds an order step to the query.
+func (dfq *DeviceFlowQuery) Order(o ...OrderFunc) *DeviceFlowQuery {
+	dfq.order = append(dfq.order, o...)
+	return dfq
+}
+
+// First returns the first DeviceFlow entity from the query.
+// Returns a *NotFoundError when no DeviceFlow was found.
+func (dfq *DeviceFlowQuery) First(ctx context.Context) (*DeviceFlow, error) {
+	nodes, err := dfq.Limit(1).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{deviceflow.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) FirstX(ctx context.Context) *DeviceFlow {
+	node, err := dfq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first DeviceFlow ID from the query.
+// Returns a *NotFoundError when no DeviceFlow ID was found.
+func (dfq *DeviceFlowQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = dfq.Limit(1).IDs(ctx); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{deviceflow.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) FirstIDX(ctx context.Context) int {
+	id, err := dfq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single DeviceFlow entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one DeviceFlow entity is found.
+// Returns a *NotFoundError when no DeviceFlow entities are found.
+func (dfq *DeviceFlowQuery) Only(ctx context.Context) (*DeviceFlow, error) {
+	nodes, err := dfq.Limit(2).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{deviceflow.Label}
+	default:
+		return nil, &NotSingularError{deviceflow.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) OnlyX(ctx context.Context) *DeviceFlow {
+	node, err := dfq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only DeviceFlow ID in the query.
+// Returns a *NotSingularError when more than one DeviceFlow ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (dfq *DeviceFlowQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = dfq.Limit(2).IDs(ctx); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{deviceflow.Label}
+	default:
+		err = &NotSingularError{deviceflow.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) OnlyIDX(ctx context.Context) int {
+	id, err := dfq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of DeviceFlows.
+func (dfq *DeviceFlowQuery) All(ctx context.Context) ([]*DeviceFlow, error) {
+	if err := dfq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	return dfq.sqlAll(ctx)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) AllX(ctx context.Context) []*DeviceFlow {
+	nodes, err := dfq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of DeviceFlow IDs.
+func (dfq *DeviceFlowQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := dfq.Select(deviceflow.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) IDsX(ctx context.Context) []int {
+	ids, err := dfq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (dfq *DeviceFlowQuery) Count(ctx context.Context) (int, error) {
+	if err := dfq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return dfq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) CountX(ctx context.Context) int {
+	count, err := dfq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (dfq *DeviceFlowQuery) Exist(ctx context.Context) (bool, error) {
+	if err := dfq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return dfq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (dfq *DeviceFlowQuery) ExistX(ctx context.Context) bool {
+	exist, err := dfq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the DeviceFlowQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (dfq *DeviceFlowQuery) Clone() *DeviceFlowQuery {
+	if dfq == nil {
+		return nil
+	}
+	return &DeviceFlowQuery{
+		config:     dfq.config,
+		limit:      dfq.limit,
+		offset:     dfq.offset,
+		order:      append([]OrderFunc{}, dfq.order...),
+		predicates: append([]predicate.DeviceFlow{}, dfq.predicates...),
+		// clone intermediate query.
+		sql:    dfq.sql.Clone(),
+		path:   dfq.path,
+		unique: dfq.unique,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.DeviceFlow.Query().
+//		GroupBy(deviceflow.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (dfq *DeviceFlowQuery) GroupBy(field string, fields ...string) *DeviceFlowGroupBy {
+	grbuild := &DeviceFlowGroupBy{config: dfq.config}
+	grbuild.fields = append([]string{field}, fields...)
+	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := dfq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return dfq.sqlQuery(ctx), nil
+	}
+	grbuild.label = deviceflow.Label
+	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.DeviceFlow.Query().
+//		Select(deviceflow.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (dfq *DeviceFlowQuery) Select(fields ...string) *DeviceFlowSelect {
+	dfq.fields = append(dfq.fields, fields...)
+	selbuild := &DeviceFlowSelect{DeviceFlowQuery: dfq}
+	selbuild.label = deviceflow.Label
+	selbuild.flds, selbuild.scan = &dfq.fields, selbuild.Scan
+	return selbuild
+}
+
+// Aggregate returns a DeviceFlowSelect configured with the given aggregations.
+func (dfq *DeviceFlowQuery) Aggregate(fns ...AggregateFunc) *DeviceFlowSelect {
+	return dfq.Select().Aggregate(fns...)
+}
+
+func (dfq *DeviceFlowQuery) prepareQuery(ctx context.Context) error {
+	for _, f := range dfq.fields {
+		if !deviceflow.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if dfq.path != nil {
+		prev, err := dfq.path(ctx)
+		if err != nil {
+			return err
+		}
+		dfq.sql = prev
+	}
+	return nil
+}
+
+func (dfq *DeviceFlowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DeviceFlow, error) {
+	var (
+		nodes = []*DeviceFlow{}
+		_spec = dfq.querySpec()
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*DeviceFlow).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &DeviceFlow{config: dfq.config}
+		nodes = append(nodes, node)
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, dfq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	return nodes, nil
+}
+
+func (dfq *DeviceFlowQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := dfq.querySpec()
+	_spec.Node.Columns = dfq.fields
+	if len(dfq.fields) > 0 {
+		_spec.Unique = dfq.unique != nil && *dfq.unique
+	}
+	return sqlgraph.CountNodes(ctx, dfq.driver, _spec)
+}
+
+func (dfq *DeviceFlowQuery) sqlExist(ctx context.Context) (bool, error) {
+	switch _, err := dfq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+func (dfq *DeviceFlowQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := &sqlgraph.QuerySpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   deviceflow.Table,
+			Columns: deviceflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: deviceflow.FieldID,
+			},
+		},
+		From:   dfq.sql,
+		Unique: true,
+	}
+	if unique := dfq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
+	if fields := dfq.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, deviceflow.FieldID)
+		for i := range fields {
+			if fields[i] != deviceflow.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := dfq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := dfq.limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := dfq.offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := dfq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (dfq *DeviceFlowQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(dfq.driver.Dialect())
+	t1 := builder.Table(deviceflow.Table)
+	columns := dfq.fields
+	if len(columns) == 0 {
+		columns = deviceflow.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if dfq.sql != nil {
+		selector = dfq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if dfq.unique != nil && *dfq.unique {
+		selector.Distinct()
+	}
+	for _, p := range dfq.predicates {
+		p(selector)
+	}
+	for _, p := range dfq.order {
+		p(selector)
+	}
+	if offset := dfq.offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := dfq.limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// DeviceFlowGroupBy is the group-by builder for DeviceFlow entities.
+type DeviceFlowGroupBy struct {
+	config
+	selector
+	fields []string
+	fns    []AggregateFunc
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (dfgb *DeviceFlowGroupBy) Aggregate(fns ...AggregateFunc) *DeviceFlowGroupBy {
+	dfgb.fns = append(dfgb.fns, fns...)
+	return dfgb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (dfgb *DeviceFlowGroupBy) Scan(ctx context.Context, v any) error {
+	query, err := dfgb.path(ctx)
+	if err != nil {
+		return err
+	}
+	dfgb.sql = query
+	return dfgb.sqlScan(ctx, v)
+}
+
+func (dfgb *DeviceFlowGroupBy) sqlScan(ctx context.Context, v any) error {
+	for _, f := range dfgb.fields {
+		if !deviceflow.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
+		}
+	}
+	selector := dfgb.sqlQuery()
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := dfgb.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+func (dfgb *DeviceFlowGroupBy) sqlQuery() *sql.Selector {
+	selector := dfgb.sql.Select()
+	aggregation := make([]string, 0, len(dfgb.fns))
+	for _, fn := range dfgb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(dfgb.fields)+len(dfgb.fns))
+		for _, f := range dfgb.fields {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	return selector.GroupBy(selector.Columns(dfgb.fields...)...)
+}
+
+// DeviceFlowSelect is the builder for selecting fields of DeviceFlow entities.
+type DeviceFlowSelect struct {
+	*DeviceFlowQuery
+	selector
+	// intermediate query (i.e. traversal path).
+	sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (dfs *DeviceFlowSelect) Aggregate(fns ...AggregateFunc) *DeviceFlowSelect {
+	dfs.fns = append(dfs.fns, fns...)
+	return dfs
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (dfs *DeviceFlowSelect) Scan(ctx context.Context, v any) error {
+	if err := dfs.prepareQuery(ctx); err != nil {
+		return err
+	}
+	dfs.sql = dfs.DeviceFlowQuery.sqlQuery(ctx)
+	return dfs.sqlScan(ctx, v)
+}
+
+func (dfs *DeviceFlowSelect) sqlScan(ctx context.Context, v any) error {
+	aggregation := make([]string, 0, len(dfs.fns))
+	for _, fn := range dfs.fns {
+		aggregation = append(aggregation, fn(dfs.sql))
+	}
+	switch n := len(*dfs.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		dfs.sql.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		dfs.sql.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := dfs.sql.Query()
+	if err := dfs.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/internal/ent/deviceflow_update.go b/internal/ent/deviceflow_update.go
new file mode 100644
index 0000000..f8b53d7
--- /dev/null
+++ b/internal/ent/deviceflow_update.go
@@ -0,0 +1,400 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// DeviceFlowUpdate is the builder for updating DeviceFlow entities.
+type DeviceFlowUpdate struct {
+	config
+	hooks    []Hook
+	mutation *DeviceFlowMutation
+}
+
+// Where appends a list predicates to the DeviceFlowUpdate builder.
+func (dfu *DeviceFlowUpdate) Where(ps ...predicate.DeviceFlow) *DeviceFlowUpdate {
+	dfu.mutation.Where(ps...)
+	return dfu
+}
+
+// SetLastPoll sets the "last_poll" field.
+func (dfu *DeviceFlowUpdate) SetLastPoll(t time.Time) *DeviceFlowUpdate {
+	dfu.mutation.SetLastPoll(t)
+	return dfu
+}
+
+// SetNillableLastPoll sets the "last_poll" field if the given value is not nil.
+func (dfu *DeviceFlowUpdate) SetNillableLastPoll(t *time.Time) *DeviceFlowUpdate {
+	if t != nil {
+		dfu.SetLastPoll(*t)
+	}
+	return dfu
+}
+
+// SetSession sets the "session" field.
+func (dfu *DeviceFlowUpdate) SetSession(s string) *DeviceFlowUpdate {
+	dfu.mutation.SetSession(s)
+	return dfu
+}
+
+// SetNillableSession sets the "session" field if the given value is not nil.
+func (dfu *DeviceFlowUpdate) SetNillableSession(s *string) *DeviceFlowUpdate {
+	if s != nil {
+		dfu.SetSession(*s)
+	}
+	return dfu
+}
+
+// ClearSession clears the value of the "session" field.
+func (dfu *DeviceFlowUpdate) ClearSession() *DeviceFlowUpdate {
+	dfu.mutation.ClearSession()
+	return dfu
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (dfu *DeviceFlowUpdate) SetExpiresAt(t time.Time) *DeviceFlowUpdate {
+	dfu.mutation.SetExpiresAt(t)
+	return dfu
+}
+
+// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
+func (dfu *DeviceFlowUpdate) SetNillableExpiresAt(t *time.Time) *DeviceFlowUpdate {
+	if t != nil {
+		dfu.SetExpiresAt(*t)
+	}
+	return dfu
+}
+
+// ClearExpiresAt clears the value of the "expires_at" field.
+func (dfu *DeviceFlowUpdate) ClearExpiresAt() *DeviceFlowUpdate {
+	dfu.mutation.ClearExpiresAt()
+	return dfu
+}
+
+// Mutation returns the DeviceFlowMutation object of the builder.
+func (dfu *DeviceFlowUpdate) Mutation() *DeviceFlowMutation {
+	return dfu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (dfu *DeviceFlowUpdate) Save(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(dfu.hooks) == 0 {
+		affected, err = dfu.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*DeviceFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			dfu.mutation = mutation
+			affected, err = dfu.sqlSave(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(dfu.hooks) - 1; i >= 0; i-- {
+			if dfu.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = dfu.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, dfu.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dfu *DeviceFlowUpdate) SaveX(ctx context.Context) int {
+	affected, err := dfu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (dfu *DeviceFlowUpdate) Exec(ctx context.Context) error {
+	_, err := dfu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfu *DeviceFlowUpdate) ExecX(ctx context.Context) {
+	if err := dfu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (dfu *DeviceFlowUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   deviceflow.Table,
+			Columns: deviceflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: deviceflow.FieldID,
+			},
+		},
+	}
+	if ps := dfu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := dfu.mutation.LastPoll(); ok {
+		_spec.SetField(deviceflow.FieldLastPoll, field.TypeTime, value)
+	}
+	if value, ok := dfu.mutation.Session(); ok {
+		_spec.SetField(deviceflow.FieldSession, field.TypeString, value)
+	}
+	if dfu.mutation.SessionCleared() {
+		_spec.ClearField(deviceflow.FieldSession, field.TypeString)
+	}
+	if value, ok := dfu.mutation.ExpiresAt(); ok {
+		_spec.SetField(deviceflow.FieldExpiresAt, field.TypeTime, value)
+	}
+	if dfu.mutation.ExpiresAtCleared() {
+		_spec.ClearField(deviceflow.FieldExpiresAt, field.TypeTime)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, dfu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{deviceflow.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	return n, nil
+}
+
+// DeviceFlowUpdateOne is the builder for updating a single DeviceFlow entity.
+type DeviceFlowUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *DeviceFlowMutation
+}
+
+// SetLastPoll sets the "last_poll" field.
+func (dfuo *DeviceFlowUpdateOne) SetLastPoll(t time.Time) *DeviceFlowUpdateOne {
+	dfuo.mutation.SetLastPoll(t)
+	return dfuo
+}
+
+// SetNillableLastPoll sets the "last_poll" field if the given value is not nil.
+func (dfuo *DeviceFlowUpdateOne) SetNillableLastPoll(t *time.Time) *DeviceFlowUpdateOne {
+	if t != nil {
+		dfuo.SetLastPoll(*t)
+	}
+	return dfuo
+}
+
+// SetSession sets the "session" field.
+func (dfuo *DeviceFlowUpdateOne) SetSession(s string) *DeviceFlowUpdateOne {
+	dfuo.mutation.SetSession(s)
+	return dfuo
+}
+
+// SetNillableSession sets the "session" field if the given value is not nil.
+func (dfuo *DeviceFlowUpdateOne) SetNillableSession(s *string) *DeviceFlowUpdateOne {
+	if s != nil {
+		dfuo.SetSession(*s)
+	}
+	return dfuo
+}
+
+// ClearSession clears the value of the "session" field.
+func (dfuo *DeviceFlowUpdateOne) ClearSession() *DeviceFlowUpdateOne {
+	dfuo.mutation.ClearSession()
+	return dfuo
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (dfuo *DeviceFlowUpdateOne) SetExpiresAt(t time.Time) *DeviceFlowUpdateOne {
+	dfuo.mutation.SetExpiresAt(t)
+	return dfuo
+}
+
+// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
+func (dfuo *DeviceFlowUpdateOne) SetNillableExpiresAt(t *time.Time) *DeviceFlowUpdateOne {
+	if t != nil {
+		dfuo.SetExpiresAt(*t)
+	}
+	return dfuo
+}
+
+// ClearExpiresAt clears the value of the "expires_at" field.
+func (dfuo *DeviceFlowUpdateOne) ClearExpiresAt() *DeviceFlowUpdateOne {
+	dfuo.mutation.ClearExpiresAt()
+	return dfuo
+}
+
+// Mutation returns the DeviceFlowMutation object of the builder.
+func (dfuo *DeviceFlowUpdateOne) Mutation() *DeviceFlowMutation {
+	return dfuo.mutation
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (dfuo *DeviceFlowUpdateOne) Select(field string, fields ...string) *DeviceFlowUpdateOne {
+	dfuo.fields = append([]string{field}, fields...)
+	return dfuo
+}
+
+// Save executes the query and returns the updated DeviceFlow entity.
+func (dfuo *DeviceFlowUpdateOne) Save(ctx context.Context) (*DeviceFlow, error) {
+	var (
+		err  error
+		node *DeviceFlow
+	)
+	if len(dfuo.hooks) == 0 {
+		node, err = dfuo.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*DeviceFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			dfuo.mutation = mutation
+			node, err = dfuo.sqlSave(ctx)
+			mutation.done = true
+			return node, err
+		})
+		for i := len(dfuo.hooks) - 1; i >= 0; i-- {
+			if dfuo.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = dfuo.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, dfuo.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*DeviceFlow)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from DeviceFlowMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dfuo *DeviceFlowUpdateOne) SaveX(ctx context.Context) *DeviceFlow {
+	node, err := dfuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (dfuo *DeviceFlowUpdateOne) Exec(ctx context.Context) error {
+	_, err := dfuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dfuo *DeviceFlowUpdateOne) ExecX(ctx context.Context) {
+	if err := dfuo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (dfuo *DeviceFlowUpdateOne) sqlSave(ctx context.Context) (_node *DeviceFlow, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   deviceflow.Table,
+			Columns: deviceflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: deviceflow.FieldID,
+			},
+		},
+	}
+	id, ok := dfuo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DeviceFlow.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := dfuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, deviceflow.FieldID)
+		for _, f := range fields {
+			if !deviceflow.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != deviceflow.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := dfuo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := dfuo.mutation.LastPoll(); ok {
+		_spec.SetField(deviceflow.FieldLastPoll, field.TypeTime, value)
+	}
+	if value, ok := dfuo.mutation.Session(); ok {
+		_spec.SetField(deviceflow.FieldSession, field.TypeString, value)
+	}
+	if dfuo.mutation.SessionCleared() {
+		_spec.ClearField(deviceflow.FieldSession, field.TypeString)
+	}
+	if value, ok := dfuo.mutation.ExpiresAt(); ok {
+		_spec.SetField(deviceflow.FieldExpiresAt, field.TypeTime, value)
+	}
+	if dfuo.mutation.ExpiresAtCleared() {
+		_spec.ClearField(deviceflow.FieldExpiresAt, field.TypeTime)
+	}
+	_node = &DeviceFlow{config: dfuo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, dfuo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{deviceflow.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	return _node, nil
+}
diff --git a/pkg/storage/default/ent/ent.go b/internal/ent/ent.go
similarity index 93%
rename from pkg/storage/default/ent/ent.go
rename to internal/ent/ent.go
index d4668ef..ab6a134 100644
--- a/pkg/storage/default/ent/ent.go
+++ b/internal/ent/ent.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package ent
@@ -10,9 +26,8 @@ import (
 	"entgo.io/ent"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
 )
 
 // ent aliases to avoid import conflicts in user's code.
@@ -33,9 +48,8 @@ type OrderFunc func(*sql.Selector)
 // columnChecker returns a function indicates if the column exists in the given column.
 func columnChecker(table string) func(string) error {
 	checks := map[string]func(string) bool{
-		apikey.Table:     apikey.ValidColumn,
-		servicekey.Table: servicekey.ValidColumn,
-		user.Table:       user.ValidColumn,
+		deviceflow.Table: deviceflow.ValidColumn,
+		githubflow.Table: githubflow.ValidColumn,
 	}
 	check, ok := checks[table]
 	if !ok {
@@ -267,6 +281,7 @@ func IsConstraintError(err error) bool {
 type selector struct {
 	label string
 	flds  *[]string
+	fns   []AggregateFunc
 	scan  func(context.Context, any) error
 }
 
diff --git a/pkg/storage/default/ent/enttest/enttest.go b/internal/ent/enttest/enttest.go
similarity index 70%
rename from pkg/storage/default/ent/enttest/enttest.go
rename to internal/ent/enttest/enttest.go
index 27054eb..ea67f76 100644
--- a/pkg/storage/default/ent/enttest/enttest.go
+++ b/internal/ent/enttest/enttest.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package enttest
@@ -5,12 +21,12 @@ package enttest
 import (
 	"context"
 
-	"github.com/loopholelabs/auth/pkg/storage/default/ent"
+	"github.com/loopholelabs/auth/internal/ent"
 	// required by schema hooks.
-	_ "github.com/loopholelabs/auth/pkg/storage/default/ent/runtime"
+	_ "github.com/loopholelabs/auth/internal/ent/runtime"
 
 	"entgo.io/ent/dialect/sql/schema"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/migrate"
+	"github.com/loopholelabs/auth/internal/ent/migrate"
 )
 
 type (
diff --git a/pkg/storage/default/test_helpers.go b/internal/ent/generate.go
similarity index 63%
rename from pkg/storage/default/test_helpers.go
rename to internal/ent/generate.go
index ae10db2..8d71690 100644
--- a/pkg/storage/default/test_helpers.go
+++ b/internal/ent/generate.go
@@ -1,5 +1,5 @@
 /*
-	Copyright 2022 Loophole Labs
+	Copyright 2023 Loophole Labs
 
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
@@ -14,17 +14,6 @@
 	limitations under the License.
 */
 
-package database
+package ent
 
-import (
-	"github.com/sirupsen/logrus"
-	"github.com/stretchr/testify/require"
-	"testing"
-)
-
-func NewTestDatabase(t *testing.T) *Default {
-	d, err := New("sqlite3", ":memory:?_fk=1", "sqlite3", ":memory:?_fk=1", logrus.New())
-	require.NoError(t, err)
-
-	return d
-}
+//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema
diff --git a/internal/ent/githubflow.go b/internal/ent/githubflow.go
new file mode 100644
index 0000000..0780f10
--- /dev/null
+++ b/internal/ent/githubflow.go
@@ -0,0 +1,184 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+)
+
+// GithubFlow is the model entity for the GithubFlow schema.
+type GithubFlow struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// State holds the value of the "state" field.
+	State string `json:"state,omitempty"`
+	// Verifier holds the value of the "verifier" field.
+	Verifier string `json:"verifier,omitempty"`
+	// Challenge holds the value of the "challenge" field.
+	Challenge string `json:"challenge,omitempty"`
+	// NextURL holds the value of the "next_url" field.
+	NextURL string `json:"next_url,omitempty"`
+	// Organization holds the value of the "organization" field.
+	Organization string `json:"organization,omitempty"`
+	// DeviceIdentifier holds the value of the "device_identifier" field.
+	DeviceIdentifier string `json:"device_identifier,omitempty"`
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*GithubFlow) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case githubflow.FieldID:
+			values[i] = new(sql.NullInt64)
+		case githubflow.FieldState, githubflow.FieldVerifier, githubflow.FieldChallenge, githubflow.FieldNextURL, githubflow.FieldOrganization, githubflow.FieldDeviceIdentifier:
+			values[i] = new(sql.NullString)
+		case githubflow.FieldCreatedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			return nil, fmt.Errorf("unexpected column %q for type GithubFlow", columns[i])
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the GithubFlow fields.
+func (gf *GithubFlow) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case githubflow.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			gf.ID = int(value.Int64)
+		case githubflow.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				gf.CreatedAt = value.Time
+			}
+		case githubflow.FieldState:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field state", values[i])
+			} else if value.Valid {
+				gf.State = value.String
+			}
+		case githubflow.FieldVerifier:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field verifier", values[i])
+			} else if value.Valid {
+				gf.Verifier = value.String
+			}
+		case githubflow.FieldChallenge:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field challenge", values[i])
+			} else if value.Valid {
+				gf.Challenge = value.String
+			}
+		case githubflow.FieldNextURL:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field next_url", values[i])
+			} else if value.Valid {
+				gf.NextURL = value.String
+			}
+		case githubflow.FieldOrganization:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field organization", values[i])
+			} else if value.Valid {
+				gf.Organization = value.String
+			}
+		case githubflow.FieldDeviceIdentifier:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field device_identifier", values[i])
+			} else if value.Valid {
+				gf.DeviceIdentifier = value.String
+			}
+		}
+	}
+	return nil
+}
+
+// Update returns a builder for updating this GithubFlow.
+// Note that you need to call GithubFlow.Unwrap() before calling this method if this GithubFlow
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (gf *GithubFlow) Update() *GithubFlowUpdateOne {
+	return (&GithubFlowClient{config: gf.config}).UpdateOne(gf)
+}
+
+// Unwrap unwraps the GithubFlow entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (gf *GithubFlow) Unwrap() *GithubFlow {
+	_tx, ok := gf.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: GithubFlow is not a transactional entity")
+	}
+	gf.config.driver = _tx.drv
+	return gf
+}
+
+// String implements the fmt.Stringer.
+func (gf *GithubFlow) String() string {
+	var builder strings.Builder
+	builder.WriteString("GithubFlow(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", gf.ID))
+	builder.WriteString("created_at=")
+	builder.WriteString(gf.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("state=")
+	builder.WriteString(gf.State)
+	builder.WriteString(", ")
+	builder.WriteString("verifier=")
+	builder.WriteString(gf.Verifier)
+	builder.WriteString(", ")
+	builder.WriteString("challenge=")
+	builder.WriteString(gf.Challenge)
+	builder.WriteString(", ")
+	builder.WriteString("next_url=")
+	builder.WriteString(gf.NextURL)
+	builder.WriteString(", ")
+	builder.WriteString("organization=")
+	builder.WriteString(gf.Organization)
+	builder.WriteString(", ")
+	builder.WriteString("device_identifier=")
+	builder.WriteString(gf.DeviceIdentifier)
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// GithubFlows is a parsable slice of GithubFlow.
+type GithubFlows []*GithubFlow
+
+func (gf GithubFlows) config(cfg config) {
+	for _i := range gf {
+		gf[_i].config = cfg
+	}
+}
diff --git a/internal/ent/githubflow/githubflow.go b/internal/ent/githubflow/githubflow.go
new file mode 100644
index 0000000..1cc171e
--- /dev/null
+++ b/internal/ent/githubflow/githubflow.go
@@ -0,0 +1,81 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package githubflow
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the githubflow type in the database.
+	Label = "github_flow"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldState holds the string denoting the state field in the database.
+	FieldState = "state"
+	// FieldVerifier holds the string denoting the verifier field in the database.
+	FieldVerifier = "verifier"
+	// FieldChallenge holds the string denoting the challenge field in the database.
+	FieldChallenge = "challenge"
+	// FieldNextURL holds the string denoting the next_url field in the database.
+	FieldNextURL = "next_url"
+	// FieldOrganization holds the string denoting the organization field in the database.
+	FieldOrganization = "organization"
+	// FieldDeviceIdentifier holds the string denoting the device_identifier field in the database.
+	FieldDeviceIdentifier = "device_identifier"
+	// Table holds the table name of the githubflow in the database.
+	Table = "github_flows"
+)
+
+// Columns holds all SQL columns for githubflow fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldState,
+	FieldVerifier,
+	FieldChallenge,
+	FieldNextURL,
+	FieldOrganization,
+	FieldDeviceIdentifier,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// StateValidator is a validator for the "state" field. It is called by the builders before save.
+	StateValidator func(string) error
+	// VerifierValidator is a validator for the "verifier" field. It is called by the builders before save.
+	VerifierValidator func(string) error
+	// ChallengeValidator is a validator for the "challenge" field. It is called by the builders before save.
+	ChallengeValidator func(string) error
+	// NextURLValidator is a validator for the "next_url" field. It is called by the builders before save.
+	NextURLValidator func(string) error
+)
diff --git a/internal/ent/githubflow/where.go b/internal/ent/githubflow/where.go
new file mode 100644
index 0000000..b4d14e3
--- /dev/null
+++ b/internal/ent/githubflow/where.go
@@ -0,0 +1,864 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package githubflow
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldID), id))
+	})
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.In(s.C(FieldID), v...))
+	})
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.NotIn(s.C(FieldID), v...))
+	})
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldID), id))
+	})
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldID), id))
+	})
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldID), id))
+	})
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldID), id))
+	})
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// State applies equality check predicate on the "state" field. It's identical to StateEQ.
+func State(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldState), v))
+	})
+}
+
+// Verifier applies equality check predicate on the "verifier" field. It's identical to VerifierEQ.
+func Verifier(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldVerifier), v))
+	})
+}
+
+// Challenge applies equality check predicate on the "challenge" field. It's identical to ChallengeEQ.
+func Challenge(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldChallenge), v))
+	})
+}
+
+// NextURL applies equality check predicate on the "next_url" field. It's identical to NextURLEQ.
+func NextURL(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldNextURL), v))
+	})
+}
+
+// Organization applies equality check predicate on the "organization" field. It's identical to OrganizationEQ.
+func Organization(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldOrganization), v))
+	})
+}
+
+// DeviceIdentifier applies equality check predicate on the "device_identifier" field. It's identical to DeviceIdentifierEQ.
+func DeviceIdentifier(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// StateEQ applies the EQ predicate on the "state" field.
+func StateEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldState), v))
+	})
+}
+
+// StateNEQ applies the NEQ predicate on the "state" field.
+func StateNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldState), v))
+	})
+}
+
+// StateIn applies the In predicate on the "state" field.
+func StateIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldState), v...))
+	})
+}
+
+// StateNotIn applies the NotIn predicate on the "state" field.
+func StateNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldState), v...))
+	})
+}
+
+// StateGT applies the GT predicate on the "state" field.
+func StateGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldState), v))
+	})
+}
+
+// StateGTE applies the GTE predicate on the "state" field.
+func StateGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldState), v))
+	})
+}
+
+// StateLT applies the LT predicate on the "state" field.
+func StateLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldState), v))
+	})
+}
+
+// StateLTE applies the LTE predicate on the "state" field.
+func StateLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldState), v))
+	})
+}
+
+// StateContains applies the Contains predicate on the "state" field.
+func StateContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldState), v))
+	})
+}
+
+// StateHasPrefix applies the HasPrefix predicate on the "state" field.
+func StateHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldState), v))
+	})
+}
+
+// StateHasSuffix applies the HasSuffix predicate on the "state" field.
+func StateHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldState), v))
+	})
+}
+
+// StateEqualFold applies the EqualFold predicate on the "state" field.
+func StateEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldState), v))
+	})
+}
+
+// StateContainsFold applies the ContainsFold predicate on the "state" field.
+func StateContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldState), v))
+	})
+}
+
+// VerifierEQ applies the EQ predicate on the "verifier" field.
+func VerifierEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierNEQ applies the NEQ predicate on the "verifier" field.
+func VerifierNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierIn applies the In predicate on the "verifier" field.
+func VerifierIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldVerifier), v...))
+	})
+}
+
+// VerifierNotIn applies the NotIn predicate on the "verifier" field.
+func VerifierNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldVerifier), v...))
+	})
+}
+
+// VerifierGT applies the GT predicate on the "verifier" field.
+func VerifierGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierGTE applies the GTE predicate on the "verifier" field.
+func VerifierGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierLT applies the LT predicate on the "verifier" field.
+func VerifierLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierLTE applies the LTE predicate on the "verifier" field.
+func VerifierLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierContains applies the Contains predicate on the "verifier" field.
+func VerifierContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierHasPrefix applies the HasPrefix predicate on the "verifier" field.
+func VerifierHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierHasSuffix applies the HasSuffix predicate on the "verifier" field.
+func VerifierHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierEqualFold applies the EqualFold predicate on the "verifier" field.
+func VerifierEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldVerifier), v))
+	})
+}
+
+// VerifierContainsFold applies the ContainsFold predicate on the "verifier" field.
+func VerifierContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldVerifier), v))
+	})
+}
+
+// ChallengeEQ applies the EQ predicate on the "challenge" field.
+func ChallengeEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeNEQ applies the NEQ predicate on the "challenge" field.
+func ChallengeNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeIn applies the In predicate on the "challenge" field.
+func ChallengeIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldChallenge), v...))
+	})
+}
+
+// ChallengeNotIn applies the NotIn predicate on the "challenge" field.
+func ChallengeNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldChallenge), v...))
+	})
+}
+
+// ChallengeGT applies the GT predicate on the "challenge" field.
+func ChallengeGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeGTE applies the GTE predicate on the "challenge" field.
+func ChallengeGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeLT applies the LT predicate on the "challenge" field.
+func ChallengeLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeLTE applies the LTE predicate on the "challenge" field.
+func ChallengeLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeContains applies the Contains predicate on the "challenge" field.
+func ChallengeContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeHasPrefix applies the HasPrefix predicate on the "challenge" field.
+func ChallengeHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeHasSuffix applies the HasSuffix predicate on the "challenge" field.
+func ChallengeHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeEqualFold applies the EqualFold predicate on the "challenge" field.
+func ChallengeEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldChallenge), v))
+	})
+}
+
+// ChallengeContainsFold applies the ContainsFold predicate on the "challenge" field.
+func ChallengeContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldChallenge), v))
+	})
+}
+
+// NextURLEQ applies the EQ predicate on the "next_url" field.
+func NextURLEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLNEQ applies the NEQ predicate on the "next_url" field.
+func NextURLNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLIn applies the In predicate on the "next_url" field.
+func NextURLIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldNextURL), v...))
+	})
+}
+
+// NextURLNotIn applies the NotIn predicate on the "next_url" field.
+func NextURLNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldNextURL), v...))
+	})
+}
+
+// NextURLGT applies the GT predicate on the "next_url" field.
+func NextURLGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLGTE applies the GTE predicate on the "next_url" field.
+func NextURLGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLLT applies the LT predicate on the "next_url" field.
+func NextURLLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLLTE applies the LTE predicate on the "next_url" field.
+func NextURLLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLContains applies the Contains predicate on the "next_url" field.
+func NextURLContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLHasPrefix applies the HasPrefix predicate on the "next_url" field.
+func NextURLHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLHasSuffix applies the HasSuffix predicate on the "next_url" field.
+func NextURLHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLEqualFold applies the EqualFold predicate on the "next_url" field.
+func NextURLEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldNextURL), v))
+	})
+}
+
+// NextURLContainsFold applies the ContainsFold predicate on the "next_url" field.
+func NextURLContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldNextURL), v))
+	})
+}
+
+// OrganizationEQ applies the EQ predicate on the "organization" field.
+func OrganizationEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationNEQ applies the NEQ predicate on the "organization" field.
+func OrganizationNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationIn applies the In predicate on the "organization" field.
+func OrganizationIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldOrganization), v...))
+	})
+}
+
+// OrganizationNotIn applies the NotIn predicate on the "organization" field.
+func OrganizationNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldOrganization), v...))
+	})
+}
+
+// OrganizationGT applies the GT predicate on the "organization" field.
+func OrganizationGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationGTE applies the GTE predicate on the "organization" field.
+func OrganizationGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationLT applies the LT predicate on the "organization" field.
+func OrganizationLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationLTE applies the LTE predicate on the "organization" field.
+func OrganizationLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationContains applies the Contains predicate on the "organization" field.
+func OrganizationContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationHasPrefix applies the HasPrefix predicate on the "organization" field.
+func OrganizationHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationHasSuffix applies the HasSuffix predicate on the "organization" field.
+func OrganizationHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationIsNil applies the IsNil predicate on the "organization" field.
+func OrganizationIsNil() predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldOrganization)))
+	})
+}
+
+// OrganizationNotNil applies the NotNil predicate on the "organization" field.
+func OrganizationNotNil() predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldOrganization)))
+	})
+}
+
+// OrganizationEqualFold applies the EqualFold predicate on the "organization" field.
+func OrganizationEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldOrganization), v))
+	})
+}
+
+// OrganizationContainsFold applies the ContainsFold predicate on the "organization" field.
+func OrganizationContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldOrganization), v))
+	})
+}
+
+// DeviceIdentifierEQ applies the EQ predicate on the "device_identifier" field.
+func DeviceIdentifierEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierNEQ applies the NEQ predicate on the "device_identifier" field.
+func DeviceIdentifierNEQ(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierIn applies the In predicate on the "device_identifier" field.
+func DeviceIdentifierIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldDeviceIdentifier), v...))
+	})
+}
+
+// DeviceIdentifierNotIn applies the NotIn predicate on the "device_identifier" field.
+func DeviceIdentifierNotIn(vs ...string) predicate.GithubFlow {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldDeviceIdentifier), v...))
+	})
+}
+
+// DeviceIdentifierGT applies the GT predicate on the "device_identifier" field.
+func DeviceIdentifierGT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierGTE applies the GTE predicate on the "device_identifier" field.
+func DeviceIdentifierGTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierLT applies the LT predicate on the "device_identifier" field.
+func DeviceIdentifierLT(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierLTE applies the LTE predicate on the "device_identifier" field.
+func DeviceIdentifierLTE(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierContains applies the Contains predicate on the "device_identifier" field.
+func DeviceIdentifierContains(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierHasPrefix applies the HasPrefix predicate on the "device_identifier" field.
+func DeviceIdentifierHasPrefix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierHasSuffix applies the HasSuffix predicate on the "device_identifier" field.
+func DeviceIdentifierHasSuffix(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierIsNil applies the IsNil predicate on the "device_identifier" field.
+func DeviceIdentifierIsNil() predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldDeviceIdentifier)))
+	})
+}
+
+// DeviceIdentifierNotNil applies the NotNil predicate on the "device_identifier" field.
+func DeviceIdentifierNotNil() predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldDeviceIdentifier)))
+	})
+}
+
+// DeviceIdentifierEqualFold applies the EqualFold predicate on the "device_identifier" field.
+func DeviceIdentifierEqualFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// DeviceIdentifierContainsFold applies the ContainsFold predicate on the "device_identifier" field.
+func DeviceIdentifierContainsFold(v string) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldDeviceIdentifier), v))
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.GithubFlow) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for _, p := range predicates {
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.GithubFlow) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for i, p := range predicates {
+			if i > 0 {
+				s1.Or()
+			}
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.GithubFlow) predicate.GithubFlow {
+	return predicate.GithubFlow(func(s *sql.Selector) {
+		p(s.Not())
+	})
+}
diff --git a/internal/ent/githubflow_create.go b/internal/ent/githubflow_create.go
new file mode 100644
index 0000000..559489d
--- /dev/null
+++ b/internal/ent/githubflow_create.go
@@ -0,0 +1,365 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+)
+
+// GithubFlowCreate is the builder for creating a GithubFlow entity.
+type GithubFlowCreate struct {
+	config
+	mutation *GithubFlowMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (gfc *GithubFlowCreate) SetCreatedAt(t time.Time) *GithubFlowCreate {
+	gfc.mutation.SetCreatedAt(t)
+	return gfc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (gfc *GithubFlowCreate) SetNillableCreatedAt(t *time.Time) *GithubFlowCreate {
+	if t != nil {
+		gfc.SetCreatedAt(*t)
+	}
+	return gfc
+}
+
+// SetState sets the "state" field.
+func (gfc *GithubFlowCreate) SetState(s string) *GithubFlowCreate {
+	gfc.mutation.SetState(s)
+	return gfc
+}
+
+// SetVerifier sets the "verifier" field.
+func (gfc *GithubFlowCreate) SetVerifier(s string) *GithubFlowCreate {
+	gfc.mutation.SetVerifier(s)
+	return gfc
+}
+
+// SetChallenge sets the "challenge" field.
+func (gfc *GithubFlowCreate) SetChallenge(s string) *GithubFlowCreate {
+	gfc.mutation.SetChallenge(s)
+	return gfc
+}
+
+// SetNextURL sets the "next_url" field.
+func (gfc *GithubFlowCreate) SetNextURL(s string) *GithubFlowCreate {
+	gfc.mutation.SetNextURL(s)
+	return gfc
+}
+
+// SetOrganization sets the "organization" field.
+func (gfc *GithubFlowCreate) SetOrganization(s string) *GithubFlowCreate {
+	gfc.mutation.SetOrganization(s)
+	return gfc
+}
+
+// SetNillableOrganization sets the "organization" field if the given value is not nil.
+func (gfc *GithubFlowCreate) SetNillableOrganization(s *string) *GithubFlowCreate {
+	if s != nil {
+		gfc.SetOrganization(*s)
+	}
+	return gfc
+}
+
+// SetDeviceIdentifier sets the "device_identifier" field.
+func (gfc *GithubFlowCreate) SetDeviceIdentifier(s string) *GithubFlowCreate {
+	gfc.mutation.SetDeviceIdentifier(s)
+	return gfc
+}
+
+// SetNillableDeviceIdentifier sets the "device_identifier" field if the given value is not nil.
+func (gfc *GithubFlowCreate) SetNillableDeviceIdentifier(s *string) *GithubFlowCreate {
+	if s != nil {
+		gfc.SetDeviceIdentifier(*s)
+	}
+	return gfc
+}
+
+// Mutation returns the GithubFlowMutation object of the builder.
+func (gfc *GithubFlowCreate) Mutation() *GithubFlowMutation {
+	return gfc.mutation
+}
+
+// Save creates the GithubFlow in the database.
+func (gfc *GithubFlowCreate) Save(ctx context.Context) (*GithubFlow, error) {
+	var (
+		err  error
+		node *GithubFlow
+	)
+	gfc.defaults()
+	if len(gfc.hooks) == 0 {
+		if err = gfc.check(); err != nil {
+			return nil, err
+		}
+		node, err = gfc.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*GithubFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			if err = gfc.check(); err != nil {
+				return nil, err
+			}
+			gfc.mutation = mutation
+			if node, err = gfc.sqlSave(ctx); err != nil {
+				return nil, err
+			}
+			mutation.id = &node.ID
+			mutation.done = true
+			return node, err
+		})
+		for i := len(gfc.hooks) - 1; i >= 0; i-- {
+			if gfc.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = gfc.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, gfc.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*GithubFlow)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from GithubFlowMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (gfc *GithubFlowCreate) SaveX(ctx context.Context) *GithubFlow {
+	v, err := gfc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (gfc *GithubFlowCreate) Exec(ctx context.Context) error {
+	_, err := gfc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfc *GithubFlowCreate) ExecX(ctx context.Context) {
+	if err := gfc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (gfc *GithubFlowCreate) defaults() {
+	if _, ok := gfc.mutation.CreatedAt(); !ok {
+		v := githubflow.DefaultCreatedAt()
+		gfc.mutation.SetCreatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (gfc *GithubFlowCreate) check() error {
+	if _, ok := gfc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "GithubFlow.created_at"`)}
+	}
+	if _, ok := gfc.mutation.State(); !ok {
+		return &ValidationError{Name: "state", err: errors.New(`ent: missing required field "GithubFlow.state"`)}
+	}
+	if v, ok := gfc.mutation.State(); ok {
+		if err := githubflow.StateValidator(v); err != nil {
+			return &ValidationError{Name: "state", err: fmt.Errorf(`ent: validator failed for field "GithubFlow.state": %w`, err)}
+		}
+	}
+	if _, ok := gfc.mutation.Verifier(); !ok {
+		return &ValidationError{Name: "verifier", err: errors.New(`ent: missing required field "GithubFlow.verifier"`)}
+	}
+	if v, ok := gfc.mutation.Verifier(); ok {
+		if err := githubflow.VerifierValidator(v); err != nil {
+			return &ValidationError{Name: "verifier", err: fmt.Errorf(`ent: validator failed for field "GithubFlow.verifier": %w`, err)}
+		}
+	}
+	if _, ok := gfc.mutation.Challenge(); !ok {
+		return &ValidationError{Name: "challenge", err: errors.New(`ent: missing required field "GithubFlow.challenge"`)}
+	}
+	if v, ok := gfc.mutation.Challenge(); ok {
+		if err := githubflow.ChallengeValidator(v); err != nil {
+			return &ValidationError{Name: "challenge", err: fmt.Errorf(`ent: validator failed for field "GithubFlow.challenge": %w`, err)}
+		}
+	}
+	if _, ok := gfc.mutation.NextURL(); !ok {
+		return &ValidationError{Name: "next_url", err: errors.New(`ent: missing required field "GithubFlow.next_url"`)}
+	}
+	if v, ok := gfc.mutation.NextURL(); ok {
+		if err := githubflow.NextURLValidator(v); err != nil {
+			return &ValidationError{Name: "next_url", err: fmt.Errorf(`ent: validator failed for field "GithubFlow.next_url": %w`, err)}
+		}
+	}
+	return nil
+}
+
+func (gfc *GithubFlowCreate) sqlSave(ctx context.Context) (*GithubFlow, error) {
+	_node, _spec := gfc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, gfc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+func (gfc *GithubFlowCreate) createSpec() (*GithubFlow, *sqlgraph.CreateSpec) {
+	var (
+		_node = &GithubFlow{config: gfc.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: githubflow.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: githubflow.FieldID,
+			},
+		}
+	)
+	if value, ok := gfc.mutation.CreatedAt(); ok {
+		_spec.SetField(githubflow.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := gfc.mutation.State(); ok {
+		_spec.SetField(githubflow.FieldState, field.TypeString, value)
+		_node.State = value
+	}
+	if value, ok := gfc.mutation.Verifier(); ok {
+		_spec.SetField(githubflow.FieldVerifier, field.TypeString, value)
+		_node.Verifier = value
+	}
+	if value, ok := gfc.mutation.Challenge(); ok {
+		_spec.SetField(githubflow.FieldChallenge, field.TypeString, value)
+		_node.Challenge = value
+	}
+	if value, ok := gfc.mutation.NextURL(); ok {
+		_spec.SetField(githubflow.FieldNextURL, field.TypeString, value)
+		_node.NextURL = value
+	}
+	if value, ok := gfc.mutation.Organization(); ok {
+		_spec.SetField(githubflow.FieldOrganization, field.TypeString, value)
+		_node.Organization = value
+	}
+	if value, ok := gfc.mutation.DeviceIdentifier(); ok {
+		_spec.SetField(githubflow.FieldDeviceIdentifier, field.TypeString, value)
+		_node.DeviceIdentifier = value
+	}
+	return _node, _spec
+}
+
+// GithubFlowCreateBulk is the builder for creating many GithubFlow entities in bulk.
+type GithubFlowCreateBulk struct {
+	config
+	builders []*GithubFlowCreate
+}
+
+// Save creates the GithubFlow entities in the database.
+func (gfcb *GithubFlowCreateBulk) Save(ctx context.Context) ([]*GithubFlow, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(gfcb.builders))
+	nodes := make([]*GithubFlow, len(gfcb.builders))
+	mutators := make([]Mutator, len(gfcb.builders))
+	for i := range gfcb.builders {
+		func(i int, root context.Context) {
+			builder := gfcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*GithubFlowMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, gfcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, gfcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, gfcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (gfcb *GithubFlowCreateBulk) SaveX(ctx context.Context) []*GithubFlow {
+	v, err := gfcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (gfcb *GithubFlowCreateBulk) Exec(ctx context.Context) error {
+	_, err := gfcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfcb *GithubFlowCreateBulk) ExecX(ctx context.Context) {
+	if err := gfcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/internal/ent/githubflow_delete.go b/internal/ent/githubflow_delete.go
new file mode 100644
index 0000000..5a06a5b
--- /dev/null
+++ b/internal/ent/githubflow_delete.go
@@ -0,0 +1,131 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// GithubFlowDelete is the builder for deleting a GithubFlow entity.
+type GithubFlowDelete struct {
+	config
+	hooks    []Hook
+	mutation *GithubFlowMutation
+}
+
+// Where appends a list predicates to the GithubFlowDelete builder.
+func (gfd *GithubFlowDelete) Where(ps ...predicate.GithubFlow) *GithubFlowDelete {
+	gfd.mutation.Where(ps...)
+	return gfd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (gfd *GithubFlowDelete) Exec(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(gfd.hooks) == 0 {
+		affected, err = gfd.sqlExec(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*GithubFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			gfd.mutation = mutation
+			affected, err = gfd.sqlExec(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(gfd.hooks) - 1; i >= 0; i-- {
+			if gfd.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = gfd.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, gfd.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfd *GithubFlowDelete) ExecX(ctx context.Context) int {
+	n, err := gfd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (gfd *GithubFlowDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := &sqlgraph.DeleteSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table: githubflow.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: githubflow.FieldID,
+			},
+		},
+	}
+	if ps := gfd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, gfd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	return affected, err
+}
+
+// GithubFlowDeleteOne is the builder for deleting a single GithubFlow entity.
+type GithubFlowDeleteOne struct {
+	gfd *GithubFlowDelete
+}
+
+// Exec executes the deletion query.
+func (gfdo *GithubFlowDeleteOne) Exec(ctx context.Context) error {
+	n, err := gfdo.gfd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{githubflow.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfdo *GithubFlowDeleteOne) ExecX(ctx context.Context) {
+	gfdo.gfd.ExecX(ctx)
+}
diff --git a/internal/ent/githubflow_query.go b/internal/ent/githubflow_query.go
new file mode 100644
index 0000000..a44ba8c
--- /dev/null
+++ b/internal/ent/githubflow_query.go
@@ -0,0 +1,564 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// GithubFlowQuery is the builder for querying GithubFlow entities.
+type GithubFlowQuery struct {
+	config
+	limit      *int
+	offset     *int
+	unique     *bool
+	order      []OrderFunc
+	fields     []string
+	predicates []predicate.GithubFlow
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the GithubFlowQuery builder.
+func (gfq *GithubFlowQuery) Where(ps ...predicate.GithubFlow) *GithubFlowQuery {
+	gfq.predicates = append(gfq.predicates, ps...)
+	return gfq
+}
+
+// Limit adds a limit step to the query.
+func (gfq *GithubFlowQuery) Limit(limit int) *GithubFlowQuery {
+	gfq.limit = &limit
+	return gfq
+}
+
+// Offset adds an offset step to the query.
+func (gfq *GithubFlowQuery) Offset(offset int) *GithubFlowQuery {
+	gfq.offset = &offset
+	return gfq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (gfq *GithubFlowQuery) Unique(unique bool) *GithubFlowQuery {
+	gfq.unique = &unique
+	return gfq
+}
+
+// Order adds an order step to the query.
+func (gfq *GithubFlowQuery) Order(o ...OrderFunc) *GithubFlowQuery {
+	gfq.order = append(gfq.order, o...)
+	return gfq
+}
+
+// First returns the first GithubFlow entity from the query.
+// Returns a *NotFoundError when no GithubFlow was found.
+func (gfq *GithubFlowQuery) First(ctx context.Context) (*GithubFlow, error) {
+	nodes, err := gfq.Limit(1).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{githubflow.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (gfq *GithubFlowQuery) FirstX(ctx context.Context) *GithubFlow {
+	node, err := gfq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first GithubFlow ID from the query.
+// Returns a *NotFoundError when no GithubFlow ID was found.
+func (gfq *GithubFlowQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = gfq.Limit(1).IDs(ctx); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{githubflow.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (gfq *GithubFlowQuery) FirstIDX(ctx context.Context) int {
+	id, err := gfq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single GithubFlow entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one GithubFlow entity is found.
+// Returns a *NotFoundError when no GithubFlow entities are found.
+func (gfq *GithubFlowQuery) Only(ctx context.Context) (*GithubFlow, error) {
+	nodes, err := gfq.Limit(2).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{githubflow.Label}
+	default:
+		return nil, &NotSingularError{githubflow.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (gfq *GithubFlowQuery) OnlyX(ctx context.Context) *GithubFlow {
+	node, err := gfq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only GithubFlow ID in the query.
+// Returns a *NotSingularError when more than one GithubFlow ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (gfq *GithubFlowQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = gfq.Limit(2).IDs(ctx); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{githubflow.Label}
+	default:
+		err = &NotSingularError{githubflow.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (gfq *GithubFlowQuery) OnlyIDX(ctx context.Context) int {
+	id, err := gfq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of GithubFlows.
+func (gfq *GithubFlowQuery) All(ctx context.Context) ([]*GithubFlow, error) {
+	if err := gfq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	return gfq.sqlAll(ctx)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (gfq *GithubFlowQuery) AllX(ctx context.Context) []*GithubFlow {
+	nodes, err := gfq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of GithubFlow IDs.
+func (gfq *GithubFlowQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := gfq.Select(githubflow.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (gfq *GithubFlowQuery) IDsX(ctx context.Context) []int {
+	ids, err := gfq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (gfq *GithubFlowQuery) Count(ctx context.Context) (int, error) {
+	if err := gfq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return gfq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (gfq *GithubFlowQuery) CountX(ctx context.Context) int {
+	count, err := gfq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (gfq *GithubFlowQuery) Exist(ctx context.Context) (bool, error) {
+	if err := gfq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return gfq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (gfq *GithubFlowQuery) ExistX(ctx context.Context) bool {
+	exist, err := gfq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the GithubFlowQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (gfq *GithubFlowQuery) Clone() *GithubFlowQuery {
+	if gfq == nil {
+		return nil
+	}
+	return &GithubFlowQuery{
+		config:     gfq.config,
+		limit:      gfq.limit,
+		offset:     gfq.offset,
+		order:      append([]OrderFunc{}, gfq.order...),
+		predicates: append([]predicate.GithubFlow{}, gfq.predicates...),
+		// clone intermediate query.
+		sql:    gfq.sql.Clone(),
+		path:   gfq.path,
+		unique: gfq.unique,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.GithubFlow.Query().
+//		GroupBy(githubflow.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (gfq *GithubFlowQuery) GroupBy(field string, fields ...string) *GithubFlowGroupBy {
+	grbuild := &GithubFlowGroupBy{config: gfq.config}
+	grbuild.fields = append([]string{field}, fields...)
+	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := gfq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return gfq.sqlQuery(ctx), nil
+	}
+	grbuild.label = githubflow.Label
+	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.GithubFlow.Query().
+//		Select(githubflow.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (gfq *GithubFlowQuery) Select(fields ...string) *GithubFlowSelect {
+	gfq.fields = append(gfq.fields, fields...)
+	selbuild := &GithubFlowSelect{GithubFlowQuery: gfq}
+	selbuild.label = githubflow.Label
+	selbuild.flds, selbuild.scan = &gfq.fields, selbuild.Scan
+	return selbuild
+}
+
+// Aggregate returns a GithubFlowSelect configured with the given aggregations.
+func (gfq *GithubFlowQuery) Aggregate(fns ...AggregateFunc) *GithubFlowSelect {
+	return gfq.Select().Aggregate(fns...)
+}
+
+func (gfq *GithubFlowQuery) prepareQuery(ctx context.Context) error {
+	for _, f := range gfq.fields {
+		if !githubflow.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if gfq.path != nil {
+		prev, err := gfq.path(ctx)
+		if err != nil {
+			return err
+		}
+		gfq.sql = prev
+	}
+	return nil
+}
+
+func (gfq *GithubFlowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*GithubFlow, error) {
+	var (
+		nodes = []*GithubFlow{}
+		_spec = gfq.querySpec()
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*GithubFlow).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &GithubFlow{config: gfq.config}
+		nodes = append(nodes, node)
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, gfq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	return nodes, nil
+}
+
+func (gfq *GithubFlowQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := gfq.querySpec()
+	_spec.Node.Columns = gfq.fields
+	if len(gfq.fields) > 0 {
+		_spec.Unique = gfq.unique != nil && *gfq.unique
+	}
+	return sqlgraph.CountNodes(ctx, gfq.driver, _spec)
+}
+
+func (gfq *GithubFlowQuery) sqlExist(ctx context.Context) (bool, error) {
+	switch _, err := gfq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+func (gfq *GithubFlowQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := &sqlgraph.QuerySpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   githubflow.Table,
+			Columns: githubflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: githubflow.FieldID,
+			},
+		},
+		From:   gfq.sql,
+		Unique: true,
+	}
+	if unique := gfq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
+	if fields := gfq.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, githubflow.FieldID)
+		for i := range fields {
+			if fields[i] != githubflow.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := gfq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := gfq.limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := gfq.offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := gfq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (gfq *GithubFlowQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(gfq.driver.Dialect())
+	t1 := builder.Table(githubflow.Table)
+	columns := gfq.fields
+	if len(columns) == 0 {
+		columns = githubflow.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if gfq.sql != nil {
+		selector = gfq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if gfq.unique != nil && *gfq.unique {
+		selector.Distinct()
+	}
+	for _, p := range gfq.predicates {
+		p(selector)
+	}
+	for _, p := range gfq.order {
+		p(selector)
+	}
+	if offset := gfq.offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := gfq.limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// GithubFlowGroupBy is the group-by builder for GithubFlow entities.
+type GithubFlowGroupBy struct {
+	config
+	selector
+	fields []string
+	fns    []AggregateFunc
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (gfgb *GithubFlowGroupBy) Aggregate(fns ...AggregateFunc) *GithubFlowGroupBy {
+	gfgb.fns = append(gfgb.fns, fns...)
+	return gfgb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (gfgb *GithubFlowGroupBy) Scan(ctx context.Context, v any) error {
+	query, err := gfgb.path(ctx)
+	if err != nil {
+		return err
+	}
+	gfgb.sql = query
+	return gfgb.sqlScan(ctx, v)
+}
+
+func (gfgb *GithubFlowGroupBy) sqlScan(ctx context.Context, v any) error {
+	for _, f := range gfgb.fields {
+		if !githubflow.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
+		}
+	}
+	selector := gfgb.sqlQuery()
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := gfgb.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+func (gfgb *GithubFlowGroupBy) sqlQuery() *sql.Selector {
+	selector := gfgb.sql.Select()
+	aggregation := make([]string, 0, len(gfgb.fns))
+	for _, fn := range gfgb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(gfgb.fields)+len(gfgb.fns))
+		for _, f := range gfgb.fields {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	return selector.GroupBy(selector.Columns(gfgb.fields...)...)
+}
+
+// GithubFlowSelect is the builder for selecting fields of GithubFlow entities.
+type GithubFlowSelect struct {
+	*GithubFlowQuery
+	selector
+	// intermediate query (i.e. traversal path).
+	sql *sql.Selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (gfs *GithubFlowSelect) Aggregate(fns ...AggregateFunc) *GithubFlowSelect {
+	gfs.fns = append(gfs.fns, fns...)
+	return gfs
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (gfs *GithubFlowSelect) Scan(ctx context.Context, v any) error {
+	if err := gfs.prepareQuery(ctx); err != nil {
+		return err
+	}
+	gfs.sql = gfs.GithubFlowQuery.sqlQuery(ctx)
+	return gfs.sqlScan(ctx, v)
+}
+
+func (gfs *GithubFlowSelect) sqlScan(ctx context.Context, v any) error {
+	aggregation := make([]string, 0, len(gfs.fns))
+	for _, fn := range gfs.fns {
+		aggregation = append(aggregation, fn(gfs.sql))
+	}
+	switch n := len(*gfs.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		gfs.sql.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		gfs.sql.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := gfs.sql.Query()
+	if err := gfs.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/internal/ent/githubflow_update.go b/internal/ent/githubflow_update.go
new file mode 100644
index 0000000..e59269a
--- /dev/null
+++ b/internal/ent/githubflow_update.go
@@ -0,0 +1,273 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+)
+
+// GithubFlowUpdate is the builder for updating GithubFlow entities.
+type GithubFlowUpdate struct {
+	config
+	hooks    []Hook
+	mutation *GithubFlowMutation
+}
+
+// Where appends a list of predicates to the GithubFlowUpdate builder.
+func (gfu *GithubFlowUpdate) Where(ps ...predicate.GithubFlow) *GithubFlowUpdate {
+	gfu.mutation.Where(ps...)
+	return gfu
+}
+
+// Mutation returns the GithubFlowMutation object of the builder.
+func (gfu *GithubFlowUpdate) Mutation() *GithubFlowMutation {
+	return gfu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (gfu *GithubFlowUpdate) Save(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(gfu.hooks) == 0 {
+		affected, err = gfu.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*GithubFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			gfu.mutation = mutation
+			affected, err = gfu.sqlSave(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(gfu.hooks) - 1; i >= 0; i-- {
+			if gfu.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = gfu.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, gfu.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (gfu *GithubFlowUpdate) SaveX(ctx context.Context) int {
+	affected, err := gfu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (gfu *GithubFlowUpdate) Exec(ctx context.Context) error {
+	_, err := gfu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfu *GithubFlowUpdate) ExecX(ctx context.Context) {
+	if err := gfu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (gfu *GithubFlowUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   githubflow.Table,
+			Columns: githubflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: githubflow.FieldID,
+			},
+		},
+	}
+	if ps := gfu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if gfu.mutation.OrganizationCleared() {
+		_spec.ClearField(githubflow.FieldOrganization, field.TypeString)
+	}
+	if gfu.mutation.DeviceIdentifierCleared() {
+		_spec.ClearField(githubflow.FieldDeviceIdentifier, field.TypeString)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, gfu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{githubflow.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	return n, nil
+}
+
+// GithubFlowUpdateOne is the builder for updating a single GithubFlow entity.
+type GithubFlowUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *GithubFlowMutation
+}
+
+// Mutation returns the GithubFlowMutation object of the builder.
+func (gfuo *GithubFlowUpdateOne) Mutation() *GithubFlowMutation {
+	return gfuo.mutation
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (gfuo *GithubFlowUpdateOne) Select(field string, fields ...string) *GithubFlowUpdateOne {
+	gfuo.fields = append([]string{field}, fields...)
+	return gfuo
+}
+
+// Save executes the query and returns the updated GithubFlow entity.
+func (gfuo *GithubFlowUpdateOne) Save(ctx context.Context) (*GithubFlow, error) {
+	var (
+		err  error
+		node *GithubFlow
+	)
+	if len(gfuo.hooks) == 0 {
+		node, err = gfuo.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*GithubFlowMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			gfuo.mutation = mutation
+			node, err = gfuo.sqlSave(ctx)
+			mutation.done = true
+			return node, err
+		})
+		for i := len(gfuo.hooks) - 1; i >= 0; i-- {
+			if gfuo.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = gfuo.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, gfuo.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*GithubFlow)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from GithubFlowMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (gfuo *GithubFlowUpdateOne) SaveX(ctx context.Context) *GithubFlow {
+	node, err := gfuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (gfuo *GithubFlowUpdateOne) Exec(ctx context.Context) error {
+	_, err := gfuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (gfuo *GithubFlowUpdateOne) ExecX(ctx context.Context) {
+	if err := gfuo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (gfuo *GithubFlowUpdateOne) sqlSave(ctx context.Context) (_node *GithubFlow, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   githubflow.Table,
+			Columns: githubflow.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: githubflow.FieldID,
+			},
+		},
+	}
+	id, ok := gfuo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "GithubFlow.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := gfuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, githubflow.FieldID)
+		for _, f := range fields {
+			if !githubflow.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != githubflow.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := gfuo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if gfuo.mutation.OrganizationCleared() {
+		_spec.ClearField(githubflow.FieldOrganization, field.TypeString)
+	}
+	if gfuo.mutation.DeviceIdentifierCleared() {
+		_spec.ClearField(githubflow.FieldDeviceIdentifier, field.TypeString)
+	}
+	_node = &GithubFlow{config: gfuo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, gfuo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{githubflow.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	return _node, nil
+}
diff --git a/pkg/storage/default/ent/hook/hook.go b/internal/ent/hook/hook.go
similarity index 79%
rename from pkg/storage/default/ent/hook/hook.go
rename to internal/ent/hook/hook.go
index 4ba2f31..f1493f8 100644
--- a/pkg/storage/default/ent/hook/hook.go
+++ b/internal/ent/hook/hook.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package hook
@@ -6,44 +22,31 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/loopholelabs/auth/pkg/storage/default/ent"
+	"github.com/loopholelabs/auth/internal/ent"
 )
 
-// The APIKeyFunc type is an adapter to allow the use of ordinary
-// function as APIKey mutator.
-type APIKeyFunc func(context.Context, *ent.APIKeyMutation) (ent.Value, error)
-
-// Mutate calls f(ctx, m).
-func (f APIKeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
-	mv, ok := m.(*ent.APIKeyMutation)
-	if !ok {
-		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.APIKeyMutation", m)
-	}
-	return f(ctx, mv)
-}
-
-// The ServiceKeyFunc type is an adapter to allow the use of ordinary
-// function as ServiceKey mutator.
-type ServiceKeyFunc func(context.Context, *ent.ServiceKeyMutation) (ent.Value, error)
+// The DeviceFlowFunc type is an adapter to allow the use of an ordinary
+// function as DeviceFlow mutator.
+type DeviceFlowFunc func(context.Context, *ent.DeviceFlowMutation) (ent.Value, error)
 
 // Mutate calls f(ctx, m).
-func (f ServiceKeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
-	mv, ok := m.(*ent.ServiceKeyMutation)
+func (f DeviceFlowFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+	mv, ok := m.(*ent.DeviceFlowMutation)
 	if !ok {
-		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ServiceKeyMutation", m)
+		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DeviceFlowMutation", m)
 	}
 	return f(ctx, mv)
 }
 
-// The UserFunc type is an adapter to allow the use of ordinary
-// function as User mutator.
-type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)
+// The GithubFlowFunc type is an adapter to allow the use of an ordinary
+// function as GithubFlow mutator.
+type GithubFlowFunc func(context.Context, *ent.GithubFlowMutation) (ent.Value, error)
 
 // Mutate calls f(ctx, m).
-func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
-	mv, ok := m.(*ent.UserMutation)
+func (f GithubFlowFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+	mv, ok := m.(*ent.GithubFlowMutation)
 	if !ok {
-		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
+		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GithubFlowMutation", m)
 	}
 	return f(ctx, mv)
 }
diff --git a/pkg/storage/default/ent/migrate/migrate.go b/internal/ent/migrate/migrate.go
similarity index 80%
rename from pkg/storage/default/ent/migrate/migrate.go
rename to internal/ent/migrate/migrate.go
index 1956a6b..fbf7bb6 100644
--- a/pkg/storage/default/ent/migrate/migrate.go
+++ b/internal/ent/migrate/migrate.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package migrate
diff --git a/internal/ent/migrate/schema.go b/internal/ent/migrate/schema.go
new file mode 100644
index 0000000..aa41a58
--- /dev/null
+++ b/internal/ent/migrate/schema.go
@@ -0,0 +1,69 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package migrate
+
+import (
+	"entgo.io/ent/dialect/sql/schema"
+	"entgo.io/ent/schema/field"
+)
+
+var (
+	// DeviceFlowsColumns holds the columns for the "device_flows" table.
+	DeviceFlowsColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeInt, Increment: true},
+		{Name: "created_at", Type: field.TypeTime},
+		{Name: "last_poll", Type: field.TypeTime},
+		{Name: "identifier", Type: field.TypeString, Unique: true},
+		{Name: "device_code", Type: field.TypeString, Unique: true},
+		{Name: "user_code", Type: field.TypeString, Unique: true},
+		{Name: "session", Type: field.TypeString, Unique: true, Nullable: true},
+		{Name: "expires_at", Type: field.TypeTime, Nullable: true},
+	}
+	// DeviceFlowsTable holds the schema information for the "device_flows" table.
+	DeviceFlowsTable = &schema.Table{
+		Name:       "device_flows",
+		Columns:    DeviceFlowsColumns,
+		PrimaryKey: []*schema.Column{DeviceFlowsColumns[0]},
+	}
+	// GithubFlowsColumns holds the columns for the "github_flows" table.
+	GithubFlowsColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeInt, Increment: true},
+		{Name: "created_at", Type: field.TypeTime},
+		{Name: "state", Type: field.TypeString, Unique: true},
+		{Name: "verifier", Type: field.TypeString, Unique: true},
+		{Name: "challenge", Type: field.TypeString, Unique: true},
+		{Name: "next_url", Type: field.TypeString},
+		{Name: "organization", Type: field.TypeString, Nullable: true},
+		{Name: "device_identifier", Type: field.TypeString, Unique: true, Nullable: true},
+	}
+	// GithubFlowsTable holds the schema information for the "github_flows" table.
+	GithubFlowsTable = &schema.Table{
+		Name:       "github_flows",
+		Columns:    GithubFlowsColumns,
+		PrimaryKey: []*schema.Column{GithubFlowsColumns[0]},
+	}
+	// Tables holds all the tables in the schema.
+	Tables = []*schema.Table{
+		DeviceFlowsTable,
+		GithubFlowsTable,
+	}
+)
+
+func init() {
+}
diff --git a/internal/ent/mutation.go b/internal/ent/mutation.go
new file mode 100644
index 0000000..f4865e4
--- /dev/null
+++ b/internal/ent/mutation.go
@@ -0,0 +1,1398 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/internal/ent/predicate"
+
+	"entgo.io/ent"
+)
+
+const (
+	// Operation types.
+	OpCreate    = ent.OpCreate
+	OpDelete    = ent.OpDelete
+	OpDeleteOne = ent.OpDeleteOne
+	OpUpdate    = ent.OpUpdate
+	OpUpdateOne = ent.OpUpdateOne
+
+	// Node types.
+	TypeDeviceFlow = "DeviceFlow"
+	TypeGithubFlow = "GithubFlow"
+)
+
+// DeviceFlowMutation represents an operation that mutates the DeviceFlow nodes in the graph.
+type DeviceFlowMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	last_poll     *time.Time
+	identifier    *string
+	device_code   *string
+	user_code     *string
+	session       *string
+	expires_at    *time.Time
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*DeviceFlow, error)
+	predicates    []predicate.DeviceFlow
+}
+
+var _ ent.Mutation = (*DeviceFlowMutation)(nil)
+
+// deviceflowOption allows management of the mutation configuration using functional options.
+type deviceflowOption func(*DeviceFlowMutation)
+
+// newDeviceFlowMutation creates new mutation for the DeviceFlow entity.
+func newDeviceFlowMutation(c config, op Op, opts ...deviceflowOption) *DeviceFlowMutation {
+	m := &DeviceFlowMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeDeviceFlow,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withDeviceFlowID sets the ID field of the mutation.
+func withDeviceFlowID(id int) deviceflowOption {
+	return func(m *DeviceFlowMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *DeviceFlow
+		)
+		m.oldValue = func(ctx context.Context) (*DeviceFlow, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().DeviceFlow.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withDeviceFlow sets the old DeviceFlow of the mutation.
+func withDeviceFlow(node *DeviceFlow) deviceflowOption {
+	return func(m *DeviceFlowMutation) {
+		m.oldValue = func(context.Context) (*DeviceFlow, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m DeviceFlowMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m DeviceFlowMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *DeviceFlowMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DeviceFlowMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().DeviceFlow.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *DeviceFlowMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *DeviceFlowMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *DeviceFlowMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetLastPoll sets the "last_poll" field.
+func (m *DeviceFlowMutation) SetLastPoll(t time.Time) {
+	m.last_poll = &t
+}
+
+// LastPoll returns the value of the "last_poll" field in the mutation.
+func (m *DeviceFlowMutation) LastPoll() (r time.Time, exists bool) {
+	v := m.last_poll
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLastPoll returns the old "last_poll" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldLastPoll(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLastPoll is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLastPoll requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLastPoll: %w", err)
+	}
+	return oldValue.LastPoll, nil
+}
+
+// ResetLastPoll resets all changes to the "last_poll" field.
+func (m *DeviceFlowMutation) ResetLastPoll() {
+	m.last_poll = nil
+}
+
+// SetIdentifier sets the "identifier" field.
+func (m *DeviceFlowMutation) SetIdentifier(s string) {
+	m.identifier = &s
+}
+
+// Identifier returns the value of the "identifier" field in the mutation.
+func (m *DeviceFlowMutation) Identifier() (r string, exists bool) {
+	v := m.identifier
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldIdentifier returns the old "identifier" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldIdentifier(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldIdentifier is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldIdentifier requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldIdentifier: %w", err)
+	}
+	return oldValue.Identifier, nil
+}
+
+// ResetIdentifier resets all changes to the "identifier" field.
+func (m *DeviceFlowMutation) ResetIdentifier() {
+	m.identifier = nil
+}
+
+// SetDeviceCode sets the "device_code" field.
+func (m *DeviceFlowMutation) SetDeviceCode(s string) {
+	m.device_code = &s
+}
+
+// DeviceCode returns the value of the "device_code" field in the mutation.
+func (m *DeviceFlowMutation) DeviceCode() (r string, exists bool) {
+	v := m.device_code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldDeviceCode returns the old "device_code" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldDeviceCode(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldDeviceCode requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err)
+	}
+	return oldValue.DeviceCode, nil
+}
+
+// ResetDeviceCode resets all changes to the "device_code" field.
+func (m *DeviceFlowMutation) ResetDeviceCode() {
+	m.device_code = nil
+}
+
+// SetUserCode sets the "user_code" field.
+func (m *DeviceFlowMutation) SetUserCode(s string) {
+	m.user_code = &s
+}
+
+// UserCode returns the value of the "user_code" field in the mutation.
+func (m *DeviceFlowMutation) UserCode() (r string, exists bool) {
+	v := m.user_code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUserCode returns the old "user_code" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldUserCode(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUserCode is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUserCode requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUserCode: %w", err)
+	}
+	return oldValue.UserCode, nil
+}
+
+// ResetUserCode resets all changes to the "user_code" field.
+func (m *DeviceFlowMutation) ResetUserCode() {
+	m.user_code = nil
+}
+
+// SetSession sets the "session" field.
+func (m *DeviceFlowMutation) SetSession(s string) {
+	m.session = &s
+}
+
+// Session returns the value of the "session" field in the mutation.
+func (m *DeviceFlowMutation) Session() (r string, exists bool) {
+	v := m.session
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldSession returns the old "session" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldSession(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldSession is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldSession requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldSession: %w", err)
+	}
+	return oldValue.Session, nil
+}
+
+// ClearSession clears the value of the "session" field.
+func (m *DeviceFlowMutation) ClearSession() {
+	m.session = nil
+	m.clearedFields[deviceflow.FieldSession] = struct{}{}
+}
+
+// SessionCleared returns if the "session" field was cleared in this mutation.
+func (m *DeviceFlowMutation) SessionCleared() bool {
+	_, ok := m.clearedFields[deviceflow.FieldSession]
+	return ok
+}
+
+// ResetSession resets all changes to the "session" field.
+func (m *DeviceFlowMutation) ResetSession() {
+	m.session = nil
+	delete(m.clearedFields, deviceflow.FieldSession)
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (m *DeviceFlowMutation) SetExpiresAt(t time.Time) {
+	m.expires_at = &t
+}
+
+// ExpiresAt returns the value of the "expires_at" field in the mutation.
+func (m *DeviceFlowMutation) ExpiresAt() (r time.Time, exists bool) {
+	v := m.expires_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldExpiresAt returns the old "expires_at" field's value of the DeviceFlow entity.
+// If the DeviceFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceFlowMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldExpiresAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err)
+	}
+	return oldValue.ExpiresAt, nil
+}
+
+// ClearExpiresAt clears the value of the "expires_at" field.
+func (m *DeviceFlowMutation) ClearExpiresAt() {
+	m.expires_at = nil
+	m.clearedFields[deviceflow.FieldExpiresAt] = struct{}{}
+}
+
+// ExpiresAtCleared returns if the "expires_at" field was cleared in this mutation.
+func (m *DeviceFlowMutation) ExpiresAtCleared() bool {
+	_, ok := m.clearedFields[deviceflow.FieldExpiresAt]
+	return ok
+}
+
+// ResetExpiresAt resets all changes to the "expires_at" field.
+func (m *DeviceFlowMutation) ResetExpiresAt() {
+	m.expires_at = nil
+	delete(m.clearedFields, deviceflow.FieldExpiresAt)
+}
+
+// Where appends a list predicates to the DeviceFlowMutation builder.
+func (m *DeviceFlowMutation) Where(ps ...predicate.DeviceFlow) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *DeviceFlowMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (DeviceFlow).
+func (m *DeviceFlowMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *DeviceFlowMutation) Fields() []string {
+	fields := make([]string, 0, 7)
+	if m.created_at != nil {
+		fields = append(fields, deviceflow.FieldCreatedAt)
+	}
+	if m.last_poll != nil {
+		fields = append(fields, deviceflow.FieldLastPoll)
+	}
+	if m.identifier != nil {
+		fields = append(fields, deviceflow.FieldIdentifier)
+	}
+	if m.device_code != nil {
+		fields = append(fields, deviceflow.FieldDeviceCode)
+	}
+	if m.user_code != nil {
+		fields = append(fields, deviceflow.FieldUserCode)
+	}
+	if m.session != nil {
+		fields = append(fields, deviceflow.FieldSession)
+	}
+	if m.expires_at != nil {
+		fields = append(fields, deviceflow.FieldExpiresAt)
+	}
+	return fields
+}
+
// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
// Dispatch is keyed on the column-name constants in package deviceflow.
func (m *DeviceFlowMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case deviceflow.FieldCreatedAt:
		return m.CreatedAt()
	case deviceflow.FieldLastPoll:
		return m.LastPoll()
	case deviceflow.FieldIdentifier:
		return m.Identifier()
	case deviceflow.FieldDeviceCode:
		return m.DeviceCode()
	case deviceflow.FieldUserCode:
		return m.UserCode()
	case deviceflow.FieldSession:
		return m.Session()
	case deviceflow.FieldExpiresAt:
		return m.ExpiresAt()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
// Each per-field OldXxx helper enforces the UpdateOne/ID preconditions itself.
func (m *DeviceFlowMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case deviceflow.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case deviceflow.FieldLastPoll:
		return m.OldLastPoll(ctx)
	case deviceflow.FieldIdentifier:
		return m.OldIdentifier(ctx)
	case deviceflow.FieldDeviceCode:
		return m.OldDeviceCode(ctx)
	case deviceflow.FieldUserCode:
		return m.OldUserCode(ctx)
	case deviceflow.FieldSession:
		return m.OldSession(ctx)
	case deviceflow.FieldExpiresAt:
		return m.OldExpiresAt(ctx)
	}
	return nil, fmt.Errorf("unknown DeviceFlow field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
// Each case type-asserts the dynamic ent.Value before delegating to the typed setter.
func (m *DeviceFlowMutation) SetField(name string, value ent.Value) error {
	switch name {
	case deviceflow.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case deviceflow.FieldLastPoll:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetLastPoll(v)
		return nil
	case deviceflow.FieldIdentifier:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetIdentifier(v)
		return nil
	case deviceflow.FieldDeviceCode:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDeviceCode(v)
		return nil
	case deviceflow.FieldUserCode:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUserCode(v)
		return nil
	case deviceflow.FieldSession:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetSession(v)
		return nil
	case deviceflow.FieldExpiresAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetExpiresAt(v)
		return nil
	}
	return fmt.Errorf("unknown DeviceFlow field %s", name)
}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *DeviceFlowMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *DeviceFlowMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *DeviceFlowMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown DeviceFlow numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *DeviceFlowMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(deviceflow.FieldSession) {
+		fields = append(fields, deviceflow.FieldSession)
+	}
+	if m.FieldCleared(deviceflow.FieldExpiresAt) {
+		fields = append(fields, deviceflow.FieldExpiresAt)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *DeviceFlowMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *DeviceFlowMutation) ClearField(name string) error {
+	switch name {
+	case deviceflow.FieldSession:
+		m.ClearSession()
+		return nil
+	case deviceflow.FieldExpiresAt:
+		m.ClearExpiresAt()
+		return nil
+	}
+	return fmt.Errorf("unknown DeviceFlow nullable field %s", name)
+}
+
// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
// Delegates to the per-field ResetXxx helpers, which also undo ClearXxx bookkeeping.
func (m *DeviceFlowMutation) ResetField(name string) error {
	switch name {
	case deviceflow.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case deviceflow.FieldLastPoll:
		m.ResetLastPoll()
		return nil
	case deviceflow.FieldIdentifier:
		m.ResetIdentifier()
		return nil
	case deviceflow.FieldDeviceCode:
		m.ResetDeviceCode()
		return nil
	case deviceflow.FieldUserCode:
		m.ResetUserCode()
		return nil
	case deviceflow.FieldSession:
		m.ResetSession()
		return nil
	case deviceflow.FieldExpiresAt:
		m.ResetExpiresAt()
		return nil
	}
	return fmt.Errorf("unknown DeviceFlow field %s", name)
}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *DeviceFlowMutation) AddedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *DeviceFlowMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *DeviceFlowMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *DeviceFlowMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *DeviceFlowMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *DeviceFlowMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *DeviceFlowMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown DeviceFlow unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *DeviceFlowMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown DeviceFlow edge %s", name)
+}
+
// GithubFlowMutation represents an operation that mutates the GithubFlow nodes in the graph.
type GithubFlowMutation struct {
	config
	op  Op
	typ string
	// id is set once the target row is known (builder-provided or DB-returned).
	id *int
	// Field pointers: nil means "not changed by this mutation".
	created_at        *time.Time
	state             *string
	verifier          *string
	challenge         *string
	next_url          *string
	organization      *string
	device_identifier *string
	// clearedFields records nullable fields explicitly cleared via ClearXxx.
	clearedFields map[string]struct{}
	// done is set after the mutation executes; querying old values is then disallowed.
	done bool
	// oldValue lazily fetches the pre-mutation entity (UpdateOne only).
	oldValue   func(context.Context) (*GithubFlow, error)
	predicates []predicate.GithubFlow
}

// Compile-time check that GithubFlowMutation satisfies ent.Mutation.
var _ ent.Mutation = (*GithubFlowMutation)(nil)
+
+// githubflowOption allows management of the mutation configuration using functional options.
+type githubflowOption func(*GithubFlowMutation)
+
+// newGithubFlowMutation creates new mutation for the GithubFlow entity.
+func newGithubFlowMutation(c config, op Op, opts ...githubflowOption) *GithubFlowMutation {
+	m := &GithubFlowMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeGithubFlow,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withGithubFlowID sets the ID field of the mutation.
+func withGithubFlowID(id int) githubflowOption {
+	return func(m *GithubFlowMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *GithubFlow
+		)
+		m.oldValue = func(ctx context.Context) (*GithubFlow, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().GithubFlow.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withGithubFlow sets the old GithubFlow of the mutation.
+func withGithubFlow(node *GithubFlow) githubflowOption {
+	return func(m *GithubFlowMutation) {
+		m.oldValue = func(context.Context) (*GithubFlow, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m GithubFlowMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m GithubFlowMutation) Tx() (*Tx, error) {
	// Only a txDriver-backed config indicates the mutation runs inside a transaction.
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *GithubFlowMutation) ID() (id int, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation.
func (m *GithubFlowMutation) IDs(ctx context.Context) ([]int, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []int{id}, nil
		}
		// ID not yet known: fall back to a predicate query, same as bulk ops.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().GithubFlow.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *GithubFlowMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *GithubFlowMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *GithubFlowMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetState sets the "state" field.
+func (m *GithubFlowMutation) SetState(s string) {
+	m.state = &s
+}
+
+// State returns the value of the "state" field in the mutation.
+func (m *GithubFlowMutation) State() (r string, exists bool) {
+	v := m.state
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldState returns the old "state" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldState(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldState is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldState requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldState: %w", err)
+	}
+	return oldValue.State, nil
+}
+
+// ResetState resets all changes to the "state" field.
+func (m *GithubFlowMutation) ResetState() {
+	m.state = nil
+}
+
+// SetVerifier sets the "verifier" field.
+func (m *GithubFlowMutation) SetVerifier(s string) {
+	m.verifier = &s
+}
+
+// Verifier returns the value of the "verifier" field in the mutation.
+func (m *GithubFlowMutation) Verifier() (r string, exists bool) {
+	v := m.verifier
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldVerifier returns the old "verifier" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldVerifier(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldVerifier is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldVerifier requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldVerifier: %w", err)
+	}
+	return oldValue.Verifier, nil
+}
+
+// ResetVerifier resets all changes to the "verifier" field.
+func (m *GithubFlowMutation) ResetVerifier() {
+	m.verifier = nil
+}
+
+// SetChallenge sets the "challenge" field.
+func (m *GithubFlowMutation) SetChallenge(s string) {
+	m.challenge = &s
+}
+
+// Challenge returns the value of the "challenge" field in the mutation.
+func (m *GithubFlowMutation) Challenge() (r string, exists bool) {
+	v := m.challenge
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldChallenge returns the old "challenge" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldChallenge(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldChallenge is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldChallenge requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldChallenge: %w", err)
+	}
+	return oldValue.Challenge, nil
+}
+
+// ResetChallenge resets all changes to the "challenge" field.
+func (m *GithubFlowMutation) ResetChallenge() {
+	m.challenge = nil
+}
+
+// SetNextURL sets the "next_url" field.
+func (m *GithubFlowMutation) SetNextURL(s string) {
+	m.next_url = &s
+}
+
+// NextURL returns the value of the "next_url" field in the mutation.
+func (m *GithubFlowMutation) NextURL() (r string, exists bool) {
+	v := m.next_url
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldNextURL returns the old "next_url" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldNextURL(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldNextURL is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldNextURL requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldNextURL: %w", err)
+	}
+	return oldValue.NextURL, nil
+}
+
+// ResetNextURL resets all changes to the "next_url" field.
+func (m *GithubFlowMutation) ResetNextURL() {
+	m.next_url = nil
+}
+
+// SetOrganization sets the "organization" field.
+func (m *GithubFlowMutation) SetOrganization(s string) {
+	m.organization = &s
+}
+
+// Organization returns the value of the "organization" field in the mutation.
+func (m *GithubFlowMutation) Organization() (r string, exists bool) {
+	v := m.organization
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldOrganization returns the old "organization" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldOrganization(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldOrganization is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldOrganization requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldOrganization: %w", err)
+	}
+	return oldValue.Organization, nil
+}
+
+// ClearOrganization clears the value of the "organization" field.
+func (m *GithubFlowMutation) ClearOrganization() {
+	m.organization = nil
+	m.clearedFields[githubflow.FieldOrganization] = struct{}{}
+}
+
+// OrganizationCleared returns if the "organization" field was cleared in this mutation.
+func (m *GithubFlowMutation) OrganizationCleared() bool {
+	_, ok := m.clearedFields[githubflow.FieldOrganization]
+	return ok
+}
+
+// ResetOrganization resets all changes to the "organization" field.
+func (m *GithubFlowMutation) ResetOrganization() {
+	m.organization = nil
+	delete(m.clearedFields, githubflow.FieldOrganization)
+}
+
+// SetDeviceIdentifier sets the "device_identifier" field.
+func (m *GithubFlowMutation) SetDeviceIdentifier(s string) {
+	m.device_identifier = &s
+}
+
+// DeviceIdentifier returns the value of the "device_identifier" field in the mutation.
+func (m *GithubFlowMutation) DeviceIdentifier() (r string, exists bool) {
+	v := m.device_identifier
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldDeviceIdentifier returns the old "device_identifier" field's value of the GithubFlow entity.
+// If the GithubFlow object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GithubFlowMutation) OldDeviceIdentifier(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldDeviceIdentifier is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldDeviceIdentifier requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldDeviceIdentifier: %w", err)
+	}
+	return oldValue.DeviceIdentifier, nil
+}
+
+// ClearDeviceIdentifier clears the value of the "device_identifier" field.
+func (m *GithubFlowMutation) ClearDeviceIdentifier() {
+	m.device_identifier = nil
+	m.clearedFields[githubflow.FieldDeviceIdentifier] = struct{}{}
+}
+
+// DeviceIdentifierCleared returns if the "device_identifier" field was cleared in this mutation.
+func (m *GithubFlowMutation) DeviceIdentifierCleared() bool {
+	_, ok := m.clearedFields[githubflow.FieldDeviceIdentifier]
+	return ok
+}
+
+// ResetDeviceIdentifier resets all changes to the "device_identifier" field.
+func (m *GithubFlowMutation) ResetDeviceIdentifier() {
+	m.device_identifier = nil
+	delete(m.clearedFields, githubflow.FieldDeviceIdentifier)
+}
+
+// Where appends a list predicates to the GithubFlowMutation builder.
+func (m *GithubFlowMutation) Where(ps ...predicate.GithubFlow) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *GithubFlowMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (GithubFlow).
+func (m *GithubFlowMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *GithubFlowMutation) Fields() []string {
+	fields := make([]string, 0, 7)
+	if m.created_at != nil {
+		fields = append(fields, githubflow.FieldCreatedAt)
+	}
+	if m.state != nil {
+		fields = append(fields, githubflow.FieldState)
+	}
+	if m.verifier != nil {
+		fields = append(fields, githubflow.FieldVerifier)
+	}
+	if m.challenge != nil {
+		fields = append(fields, githubflow.FieldChallenge)
+	}
+	if m.next_url != nil {
+		fields = append(fields, githubflow.FieldNextURL)
+	}
+	if m.organization != nil {
+		fields = append(fields, githubflow.FieldOrganization)
+	}
+	if m.device_identifier != nil {
+		fields = append(fields, githubflow.FieldDeviceIdentifier)
+	}
+	return fields
+}
+
// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
// Dispatch is keyed on the column-name constants in package githubflow.
func (m *GithubFlowMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case githubflow.FieldCreatedAt:
		return m.CreatedAt()
	case githubflow.FieldState:
		return m.State()
	case githubflow.FieldVerifier:
		return m.Verifier()
	case githubflow.FieldChallenge:
		return m.Challenge()
	case githubflow.FieldNextURL:
		return m.NextURL()
	case githubflow.FieldOrganization:
		return m.Organization()
	case githubflow.FieldDeviceIdentifier:
		return m.DeviceIdentifier()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
// Each per-field OldXxx helper enforces the UpdateOne/ID preconditions itself.
func (m *GithubFlowMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case githubflow.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case githubflow.FieldState:
		return m.OldState(ctx)
	case githubflow.FieldVerifier:
		return m.OldVerifier(ctx)
	case githubflow.FieldChallenge:
		return m.OldChallenge(ctx)
	case githubflow.FieldNextURL:
		return m.OldNextURL(ctx)
	case githubflow.FieldOrganization:
		return m.OldOrganization(ctx)
	case githubflow.FieldDeviceIdentifier:
		return m.OldDeviceIdentifier(ctx)
	}
	return nil, fmt.Errorf("unknown GithubFlow field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
// Each case type-asserts the dynamic ent.Value before delegating to the typed setter.
func (m *GithubFlowMutation) SetField(name string, value ent.Value) error {
	switch name {
	case githubflow.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case githubflow.FieldState:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetState(v)
		return nil
	case githubflow.FieldVerifier:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetVerifier(v)
		return nil
	case githubflow.FieldChallenge:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetChallenge(v)
		return nil
	case githubflow.FieldNextURL:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetNextURL(v)
		return nil
	case githubflow.FieldOrganization:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetOrganization(v)
		return nil
	case githubflow.FieldDeviceIdentifier:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDeviceIdentifier(v)
		return nil
	}
	return fmt.Errorf("unknown GithubFlow field %s", name)
}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *GithubFlowMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *GithubFlowMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *GithubFlowMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown GithubFlow numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *GithubFlowMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(githubflow.FieldOrganization) {
+		fields = append(fields, githubflow.FieldOrganization)
+	}
+	if m.FieldCleared(githubflow.FieldDeviceIdentifier) {
+		fields = append(fields, githubflow.FieldDeviceIdentifier)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *GithubFlowMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *GithubFlowMutation) ClearField(name string) error {
+	switch name {
+	case githubflow.FieldOrganization:
+		m.ClearOrganization()
+		return nil
+	case githubflow.FieldDeviceIdentifier:
+		m.ClearDeviceIdentifier()
+		return nil
+	}
+	return fmt.Errorf("unknown GithubFlow nullable field %s", name)
+}
+
// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
// Delegates to the per-field ResetXxx helpers, which also undo ClearXxx bookkeeping.
func (m *GithubFlowMutation) ResetField(name string) error {
	switch name {
	case githubflow.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case githubflow.FieldState:
		m.ResetState()
		return nil
	case githubflow.FieldVerifier:
		m.ResetVerifier()
		return nil
	case githubflow.FieldChallenge:
		m.ResetChallenge()
		return nil
	case githubflow.FieldNextURL:
		m.ResetNextURL()
		return nil
	case githubflow.FieldOrganization:
		m.ResetOrganization()
		return nil
	case githubflow.FieldDeviceIdentifier:
		m.ResetDeviceIdentifier()
		return nil
	}
	return fmt.Errorf("unknown GithubFlow field %s", name)
}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *GithubFlowMutation) AddedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *GithubFlowMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *GithubFlowMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *GithubFlowMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *GithubFlowMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *GithubFlowMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *GithubFlowMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown GithubFlow unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *GithubFlowMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown GithubFlow edge %s", name)
+}
diff --git a/internal/ent/predicate/predicate.go b/internal/ent/predicate/predicate.go
new file mode 100644
index 0000000..2993737
--- /dev/null
+++ b/internal/ent/predicate/predicate.go
@@ -0,0 +1,29 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package predicate
+
+import (
+	"entgo.io/ent/dialect/sql"
+)
+
// DeviceFlow is the predicate function for deviceflow builders.
// It filters a query by mutating the underlying SQL selector.
type DeviceFlow func(*sql.Selector)

// GithubFlow is the predicate function for githubflow builders.
// It filters a query by mutating the underlying SQL selector.
type GithubFlow func(*sql.Selector)
diff --git a/internal/ent/runtime.go b/internal/ent/runtime.go
new file mode 100644
index 0000000..88dafa9
--- /dev/null
+++ b/internal/ent/runtime.go
@@ -0,0 +1,77 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"time"
+
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/internal/ent/schema"
+)
+
// The init function reads all schema descriptors with runtime code
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
//
// NOTE: descriptors are read positionally (Fields()[0], Fields()[1], ...),
// so this generated file must stay in sync with the field order declared
// in the schema package — regenerate via ent rather than editing by hand.
func init() {
	deviceflowFields := schema.DeviceFlow{}.Fields()
	_ = deviceflowFields
	// deviceflowDescCreatedAt is the schema descriptor for created_at field.
	deviceflowDescCreatedAt := deviceflowFields[0].Descriptor()
	// deviceflow.DefaultCreatedAt holds the default value on creation for the created_at field.
	deviceflow.DefaultCreatedAt = deviceflowDescCreatedAt.Default.(func() time.Time)
	// deviceflowDescLastPoll is the schema descriptor for last_poll field.
	deviceflowDescLastPoll := deviceflowFields[1].Descriptor()
	// deviceflow.DefaultLastPoll holds the default value on creation for the last_poll field.
	deviceflow.DefaultLastPoll = deviceflowDescLastPoll.Default.(func() time.Time)
	// deviceflowDescIdentifier is the schema descriptor for identifier field.
	deviceflowDescIdentifier := deviceflowFields[2].Descriptor()
	// deviceflow.IdentifierValidator is a validator for the "identifier" field. It is called by the builders before save.
	deviceflow.IdentifierValidator = deviceflowDescIdentifier.Validators[0].(func(string) error)
	// deviceflowDescDeviceCode is the schema descriptor for device_code field.
	deviceflowDescDeviceCode := deviceflowFields[3].Descriptor()
	// deviceflow.DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save.
	deviceflow.DeviceCodeValidator = deviceflowDescDeviceCode.Validators[0].(func(string) error)
	// deviceflowDescUserCode is the schema descriptor for user_code field.
	deviceflowDescUserCode := deviceflowFields[4].Descriptor()
	// deviceflow.UserCodeValidator is a validator for the "user_code" field. It is called by the builders before save.
	deviceflow.UserCodeValidator = deviceflowDescUserCode.Validators[0].(func(string) error)
	githubflowFields := schema.GithubFlow{}.Fields()
	_ = githubflowFields
	// githubflowDescCreatedAt is the schema descriptor for created_at field.
	githubflowDescCreatedAt := githubflowFields[0].Descriptor()
	// githubflow.DefaultCreatedAt holds the default value on creation for the created_at field.
	githubflow.DefaultCreatedAt = githubflowDescCreatedAt.Default.(func() time.Time)
	// githubflowDescState is the schema descriptor for state field.
	githubflowDescState := githubflowFields[1].Descriptor()
	// githubflow.StateValidator is a validator for the "state" field. It is called by the builders before save.
	githubflow.StateValidator = githubflowDescState.Validators[0].(func(string) error)
	// githubflowDescVerifier is the schema descriptor for verifier field.
	githubflowDescVerifier := githubflowFields[2].Descriptor()
	// githubflow.VerifierValidator is a validator for the "verifier" field. It is called by the builders before save.
	githubflow.VerifierValidator = githubflowDescVerifier.Validators[0].(func(string) error)
	// githubflowDescChallenge is the schema descriptor for challenge field.
	githubflowDescChallenge := githubflowFields[3].Descriptor()
	// githubflow.ChallengeValidator is a validator for the "challenge" field. It is called by the builders before save.
	githubflow.ChallengeValidator = githubflowDescChallenge.Validators[0].(func(string) error)
	// githubflowDescNextURL is the schema descriptor for next_url field.
	githubflowDescNextURL := githubflowFields[4].Descriptor()
	// githubflow.NextURLValidator is a validator for the "next_url" field. It is called by the builders before save.
	githubflow.NextURLValidator = githubflowDescNextURL.Validators[0].(func(string) error)
}
diff --git a/internal/ent/runtime/runtime.go b/internal/ent/runtime/runtime.go
new file mode 100644
index 0000000..d82c8b3
--- /dev/null
+++ b/internal/ent/runtime/runtime.go
@@ -0,0 +1,26 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Code generated by ent, DO NOT EDIT.
+
+package runtime
+
+// The schema-stitching logic is generated in github.com/loopholelabs/auth/internal/ent/runtime.go
+
// Version and Sum record the exact ent codegen module used to generate
// this package; they change only when the code is regenerated.
const (
	Version = "v0.11.4"                                         // Version of ent codegen.
	Sum     = "h1:grwVY0fp31BZ6oEo3YrXenAuv8VJmEw7F/Bi6WqeH3Q=" // Sum of ent codegen.
)
diff --git a/internal/ent/schema/deviceFlow.go b/internal/ent/schema/deviceFlow.go
new file mode 100644
index 0000000..a4892c4
--- /dev/null
+++ b/internal/ent/schema/deviceFlow.go
@@ -0,0 +1,46 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"time"
+)
+
// DeviceFlow holds the schema definition for the Device Flow entity.
// It stores the transient state of a device-code login attempt
// (device code, user code, poll timestamps, and the eventual session).
type DeviceFlow struct {
	ent.Schema
}
+
// Fields of the DeviceFlow.
//
// NOTE: field order is significant — internal/ent/runtime.go reads the
// descriptors positionally (Fields()[0] ... Fields()[4]); appending new
// fields is safe, reordering existing ones is not.
func (DeviceFlow) Fields() []ent.Field {
	return []ent.Field{
		field.Time("created_at").Immutable().Default(time.Now),
		// last_poll is updated on each poll; its default is the creation time.
		field.Time("last_poll").Default(time.Now),
		field.String("identifier").Unique().Immutable().NotEmpty(),
		field.String("device_code").Unique().Immutable().NotEmpty(),
		field.String("user_code").Unique().Immutable().NotEmpty(),
		// session and expires_at are filled in once the flow is authorized.
		field.String("session").Unique().Optional(),
		field.Time("expires_at").Optional(),
	}
}
+
// Edges of the DeviceFlow. The entity is standalone and defines no edges.
func (DeviceFlow) Edges() []ent.Edge {
	return nil
}
diff --git a/internal/ent/schema/githubFlow.go b/internal/ent/schema/githubFlow.go
new file mode 100644
index 0000000..13e2c95
--- /dev/null
+++ b/internal/ent/schema/githubFlow.go
@@ -0,0 +1,46 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"time"
+)
+
// GithubFlow holds the schema definition for the Flow entity.
// It stores the transient state of a GitHub OAuth login attempt
// (state, verifier and challenge values — presumably the OAuth CSRF
// state plus PKCE parameters; confirm against the provider code).
type GithubFlow struct {
	ent.Schema
}
+
// Fields of the GithubFlow.
//
// NOTE: field order is significant — internal/ent/runtime.go reads the
// descriptors positionally (Fields()[0] ... Fields()[4]); appending new
// fields is safe, reordering existing ones is not.
func (GithubFlow) Fields() []ent.Field {
	return []ent.Field{
		field.Time("created_at").Immutable().Default(time.Now),
		field.String("state").Unique().Immutable().NotEmpty(),
		field.String("verifier").Unique().Immutable().NotEmpty(),
		field.String("challenge").Unique().Immutable().NotEmpty(),
		field.String("next_url").Immutable().NotEmpty(),
		// organization and device_identifier are optional flow metadata.
		field.String("organization").Immutable().Optional(),
		field.String("device_identifier").Unique().Immutable().Optional(),
	}
}
+
// Edges of the GithubFlow. The entity is standalone and defines no edges.
func (GithubFlow) Edges() []ent.Edge {
	return nil
}
diff --git a/pkg/storage/default/ent/tx.go b/internal/ent/tx.go
similarity index 79%
rename from pkg/storage/default/ent/tx.go
rename to internal/ent/tx.go
index 2fc5cd5..6f0eb35 100644
--- a/pkg/storage/default/ent/tx.go
+++ b/internal/ent/tx.go
@@ -1,3 +1,19 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 // Code generated by ent, DO NOT EDIT.
 
 package ent
@@ -12,22 +28,14 @@ import (
 // Tx is a transactional client that is created by calling Client.Tx().
 type Tx struct {
 	config
-	// APIKey is the client for interacting with the APIKey builders.
-	APIKey *APIKeyClient
-	// ServiceKey is the client for interacting with the ServiceKey builders.
-	ServiceKey *ServiceKeyClient
-	// User is the client for interacting with the User builders.
-	User *UserClient
+	// DeviceFlow is the client for interacting with the DeviceFlow builders.
+	DeviceFlow *DeviceFlowClient
+	// GithubFlow is the client for interacting with the GithubFlow builders.
+	GithubFlow *GithubFlowClient
 
 	// lazily loaded.
 	client     *Client
 	clientOnce sync.Once
-
-	// completion callbacks.
-	mu         sync.Mutex
-	onCommit   []CommitHook
-	onRollback []RollbackHook
-
 	// ctx lives for the life of the transaction. It is
 	// the same context used by the underlying connection.
 	ctx context.Context
@@ -72,9 +80,9 @@ func (tx *Tx) Commit() error {
 	var fn Committer = CommitFunc(func(context.Context, *Tx) error {
 		return txDriver.tx.Commit()
 	})
-	tx.mu.Lock()
-	hooks := append([]CommitHook(nil), tx.onCommit...)
-	tx.mu.Unlock()
+	txDriver.mu.Lock()
+	hooks := append([]CommitHook(nil), txDriver.onCommit...)
+	txDriver.mu.Unlock()
 	for i := len(hooks) - 1; i >= 0; i-- {
 		fn = hooks[i](fn)
 	}
@@ -83,9 +91,10 @@ func (tx *Tx) Commit() error {
 
 // OnCommit adds a hook to call on commit.
 func (tx *Tx) OnCommit(f CommitHook) {
-	tx.mu.Lock()
-	defer tx.mu.Unlock()
-	tx.onCommit = append(tx.onCommit, f)
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onCommit = append(txDriver.onCommit, f)
+	txDriver.mu.Unlock()
 }
 
 type (
@@ -127,9 +136,9 @@ func (tx *Tx) Rollback() error {
 	var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
 		return txDriver.tx.Rollback()
 	})
-	tx.mu.Lock()
-	hooks := append([]RollbackHook(nil), tx.onRollback...)
-	tx.mu.Unlock()
+	txDriver.mu.Lock()
+	hooks := append([]RollbackHook(nil), txDriver.onRollback...)
+	txDriver.mu.Unlock()
 	for i := len(hooks) - 1; i >= 0; i-- {
 		fn = hooks[i](fn)
 	}
@@ -138,9 +147,10 @@ func (tx *Tx) Rollback() error {
 
 // OnRollback adds a hook to call on rollback.
 func (tx *Tx) OnRollback(f RollbackHook) {
-	tx.mu.Lock()
-	defer tx.mu.Unlock()
-	tx.onRollback = append(tx.onRollback, f)
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onRollback = append(txDriver.onRollback, f)
+	txDriver.mu.Unlock()
 }
 
 // Client returns a Client that binds to current transaction.
@@ -153,9 +163,8 @@ func (tx *Tx) Client() *Client {
 }
 
 func (tx *Tx) init() {
-	tx.APIKey = NewAPIKeyClient(tx.config)
-	tx.ServiceKey = NewServiceKeyClient(tx.config)
-	tx.User = NewUserClient(tx.config)
+	tx.DeviceFlow = NewDeviceFlowClient(tx.config)
+	tx.GithubFlow = NewGithubFlowClient(tx.config)
 }
 
 // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
@@ -165,7 +174,7 @@ func (tx *Tx) init() {
 // of them in order to commit or rollback the transaction.
 //
 // If a closed transaction is embedded in one of the generated entities, and the entity
-// applies a query, for example: APIKey.QueryXXX(), the query will be executed
+// applies a query, for example: DeviceFlow.QueryXXX(), the query will be executed
 // through the driver which created this transaction.
 //
 // Note that txDriver is not goroutine safe.
@@ -174,6 +183,10 @@ type txDriver struct {
 	drv dialect.Driver
 	// tx is the underlying transaction.
 	tx dialect.Tx
+	// completion hooks.
+	mu         sync.Mutex
+	onCommit   []CommitHook
+	onRollback []RollbackHook
 }
 
 // newTx creates a new transactional driver.
diff --git a/main.go b/main.go
deleted file mode 100644
index daa39bf..0000000
--- a/main.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package main
-
-import (
-	"errors"
-	dexStorage "github.com/dexidp/dex/storage"
-	"github.com/loopholelabs/auth/pkg/config"
-	"github.com/loopholelabs/auth/pkg/options"
-	"github.com/loopholelabs/auth/pkg/providers"
-	"github.com/loopholelabs/auth/pkg/server"
-	database "github.com/loopholelabs/auth/pkg/storage/default"
-	"github.com/sirupsen/logrus"
-)
-
-func main() {
-	logger := logrus.New()
-	logger.Info("Starting Auth Server")
-	conf := config.New()
-
-	logger.Infof("Config: %+v", conf)
-
-	d, err := database.New(conf.Database.Type, conf.Database.URL, conf.DexDatabase.Type, conf.DexDatabase.URL, logrus.NewEntry(logger).WithField("COMPONENT", "DATABASE"))
-	if err != nil {
-		panic(err)
-	}
-
-	var gh *providers.GithubProvider
-	if conf.OAuth.GithubOAuth.Enabled {
-		gh = &providers.GithubProvider{
-			ID:           "github",
-			ClientID:     conf.OAuth.GithubOAuth.ClientID,
-			ClientSecret: conf.OAuth.GithubOAuth.ClientSecret,
-			RedirectURI:  conf.OAuth.GithubOAuth.RedirectURI,
-		}
-	}
-	err = server.BootstrapConnectors(d, gh)
-	if err != nil {
-		panic(err)
-	}
-
-	o := &options.Options{
-		Issuer:         conf.Issuer,
-		AllowedOrigins: []string{"*"},
-		Storage:        d,
-		Logger:         logrus.New(),
-		NewUser:        d.NewUser,
-	}
-
-	s, err := server.New(o)
-	if err != nil {
-		panic(err)
-	}
-
-	for _, c := range conf.Clients {
-		err = server.CreateClient(d, c.ID, c.Secret, []string{c.RedirectURI}, c.Public, c.ID, c.Logo)
-		if err != nil {
-			if errors.Is(err, dexStorage.ErrAlreadyExists) {
-				logger.Infof("Client %s already exists, checking if it needs to be updated", c.ID)
-				cl, err := server.GetClient(d, c.ID)
-				if err != nil {
-					panic(err)
-				}
-				if cl.Secret != c.Secret || (len(cl.RedirectURIs) == 1 && cl.RedirectURIs[0] != c.RedirectURI) || cl.Public != c.Public || cl.LogoURL != c.Logo {
-					logger.Infof("Client %s needs to be updated", c.ID)
-					err = server.UpdateClient(d, c.ID, c.Secret, []string{c.RedirectURI}, c.Public, c.ID, c.Logo)
-					if err != nil {
-						panic(err)
-					}
-					logger.Infof("Client %s updated", c.ID)
-				} else {
-					logger.Infof("Client %s does not need to be updated", c.ID)
-				}
-			} else {
-				panic(err)
-			}
-		}
-	}
-
-	err = s.App().Listen(conf.Listen)
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/pkg/api/api.go b/pkg/api/api.go
new file mode 100644
index 0000000..e77753f
--- /dev/null
+++ b/pkg/api/api.go
@@ -0,0 +1,98 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package api
+
+import (
+	"github.com/gofiber/fiber/v2"
+	"github.com/gofiber/fiber/v2/middleware/cors"
+	v1 "github.com/loopholelabs/auth/pkg/api/v1"
+	v1Docs "github.com/loopholelabs/auth/pkg/api/v1/docs"
+	v1Options "github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+	"net"
+)
+
const (
	// V1Path is the mount point for the v1 API sub-app.
	V1Path = "/v1"
)
+
// API is the top-level auth API server: a fiber app with CORS enabled
// and the v1 API mounted under V1Path.
type API struct {
	logger    *zerolog.Logger    // logger tagged AUTH=API
	app       *fiber.App         // root fiber app
	v1Options *v1Options.Options // options forwarded to the v1 router
}
+
+func New(v1Options *v1Options.Options, logger *zerolog.Logger) *API {
+	l := logger.With().Str("AUTH", "API").Logger()
+	s := &API{
+		logger:    &l,
+		app:       utils.DefaultFiberApp(),
+		v1Options: v1Options,
+	}
+
+	s.init()
+
+	return s
+}
+
// init installs global middleware and mounts the versioned sub-APIs.
func (s *API) init() {
	s.logger.Debug().Msg("initializing")
	// CORS must be registered before the sub-apps are mounted so it
	// applies to every route.
	s.app.Use(cors.New())
	s.app.Mount(V1Path, v1.New(s.v1Options, s.logger).App())
}
+
+func (s *API) Start(addr string, host string, tls bool) error {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+	v1Docs.SwaggerInfoapi.Host = host
+	v1Docs.SwaggerInfoapi.Schemes = []string{"http"}
+	if tls {
+		v1Docs.SwaggerInfoapi.Schemes = []string{"https"}
+	}
+	return s.app.Listener(listener)
+}
+
+func (s *API) Stop() error {
+	err := s.app.Shutdown()
+	if err != nil {
+		return err
+	}
+
+	err = s.v1Options.Manager().Stop()
+	if err != nil {
+		return err
+	}
+
+	if s.v1Options.Github() != nil {
+		err = s.v1Options.Github().Stop()
+		if err != nil {
+			return err
+		}
+	}
+
+	if s.v1Options.Device() != nil {
+		err = s.v1Options.Device().Stop()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/api/v1/config/config.go b/pkg/api/v1/config/config.go
new file mode 100644
index 0000000..a5b3640
--- /dev/null
+++ b/pkg/api/v1/config/config.go
@@ -0,0 +1,72 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package config
+
+import (
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth/pkg/api/v1/models"
+	"github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+)
+
// Config is the fiber router that serves the public API configuration.
type Config struct {
	logger  *zerolog.Logger // logger tagged ROUTER=CONFIG
	app     *fiber.App      // sub-app exposing this router's routes
	options *options.Options
}
+
+func New(options *options.Options, logger *zerolog.Logger) *Config {
+	l := logger.With().Str("ROUTER", "CONFIG").Logger()
+	i := &Config{
+		logger:  &l,
+		app:     utils.DefaultFiberApp(),
+		options: options,
+	}
+
+	i.init()
+
+	return i
+}
+
// init registers the configuration endpoint on the router's fiber app.
func (a *Config) init() {
	a.logger.Debug().Msg("initializing")
	a.app.Get("/", a.Config)
}
+
// App returns the underlying fiber app so a parent router can mount it.
func (a *Config) App() *fiber.App {
	return a.app
}
+
+// Config godoc
+// @Summary      Config gets the public configuration of the API
+// @Description  Config gets the public configuration of the API
+// @Tags         config
+// @Accept       json
+// @Produce      json
+// @Success      200  {array} models.ConfigResponse
+// @Failure      401  {string} string
+// @Failure      500  {string} string
+// @Router       /config [get]
+func (a *Config) Config(ctx *fiber.Ctx) error {
+	a.logger.Debug().Msgf("received Config from %s", ctx.IP())
+	res := new(models.ConfigResponse)
+	if a.options.Github() != nil {
+		res.GithubEnabled = true
+	}
+	return ctx.JSON(res)
+}
diff --git a/pkg/api/v1/device/device.go b/pkg/api/v1/device/device.go
new file mode 100644
index 0000000..acf1cf4
--- /dev/null
+++ b/pkg/api/v1/device/device.go
@@ -0,0 +1,177 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package device
+
+import (
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth/internal/ent"
+	"github.com/loopholelabs/auth/pkg/api/v1/models"
+	"github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+	"time"
+)
+
const (
	// DefaultPollingRate is the minimum number of seconds a client must
	// wait between successive polls. It is both advertised to clients in
	// the DeviceFlowResponse and enforced in DevicePoll.
	DefaultPollingRate = 5 // 5 seconds
)
+
// Device is the fiber router that implements the device code login flow
// (flow start, callback, and poll endpoints).
type Device struct {
	logger  *zerolog.Logger // logger tagged ROUTER=DEVICE
	app     *fiber.App      // sub-app exposing this router's routes
	options *options.Options
}
+
+func New(options *options.Options, logger *zerolog.Logger) *Device {
+	l := logger.With().Str("ROUTER", "DEVICE").Logger()
+	i := &Device{
+		logger:  &l,
+		app:     utils.DefaultFiberApp(),
+		options: options,
+	}
+
+	i.init()
+
+	return i
+}
+
// init registers the device flow endpoints on the router's fiber app.
func (d *Device) init() {
	d.logger.Debug().Msg("initializing")
	d.app.Post("/flow", d.DeviceFlow)
	d.app.Post("/callback", d.DeviceCallback)
	d.app.Post("/poll", d.DevicePoll)
}
+
// App returns the underlying fiber app so a parent router can mount it.
func (d *Device) App() *fiber.App {
	return d.app
}
+
+// DeviceFlow godoc
+// @Summary      DeviceFlow starts the device code flow
+// @Description  DeviceFlow starts the device code flow
+// @Tags         device, login
+// @Accept       json
+// @Produce      json
+// @Success      200 {object} models.DeviceFlowResponse
+// @Failure      401 {string} string
+// @Failure      500 {string} string
+// @Router       /device/flow [post]
+func (d *Device) DeviceFlow(ctx *fiber.Ctx) error {
+	d.logger.Debug().Msgf("received DeviceFlow from %s", ctx.IP())
+	if d.options.Device() == nil {
+		return ctx.Status(fiber.StatusUnauthorized).SendString("device code provider is not enabled")
+	}
+
+	deviceCode, userCode, err := d.options.Device().StartFlow(ctx.Context())
+	if err != nil {
+		d.logger.Error().Err(err).Msg("failed to get device code and user code")
+		return ctx.Status(fiber.StatusInternalServerError).SendString("failed to get device code and user code")
+	}
+
+	return ctx.JSON(&models.DeviceFlowResponse{
+		DeviceCode:  deviceCode,
+		UserCode:    userCode,
+		PollingRate: DefaultPollingRate,
+	})
+}
+
+// DeviceCallback godoc
+// @Summary      DeviceCallback validates the device code and returns the flow identifier
+// @Description  DeviceCallback validates the device code and returns the flow identifier
+// @Tags         device, callback
+// @Accept       json
+// @Produce      json
+// @Param        code query string true "device code"
+// @Success      200 {object} models.DeviceCallbackResponse
+// @Failure      400 {string} string
+// @Failure      401 {string} string
+// @Failure      500 {string} string
+// @Router       /device/callback [post]
+func (d *Device) DeviceCallback(ctx *fiber.Ctx) error {
+	d.logger.Debug().Msgf("received DeviceCallback from %s", ctx.IP())
+	if d.options.Device() == nil {
+		return ctx.Status(fiber.StatusUnauthorized).SendString("device code provider is not enabled")
+	}
+
+	code := ctx.Query("code")
+	if code == "" {
+		return ctx.Status(fiber.StatusBadRequest).SendString("code is required")
+	}
+
+	identifier, err := d.options.Device().ValidateFlow(ctx.Context(), code)
+	if err != nil {
+		if ent.IsNotFound(err) {
+			return ctx.Status(fiber.StatusUnauthorized).SendString("invalid code")
+		}
+		d.logger.Error().Err(err).Msg("failed to validate device code")
+		return ctx.Status(fiber.StatusInternalServerError).SendString("failed to validate device code")
+	}
+
+	return ctx.JSON(&models.DeviceCallbackResponse{
+		Identifier: identifier,
+	})
+}
+
+// DevicePoll godoc
+// @Summary      DevicePoll polls the device code flow using the user code
+// @Description  DevicePoll polls the device code flow using the user code
+// @Tags         device, poll
+// @Accept       json
+// @Produce      json
+// @Param        code query string true "user code"
+// @Success      200 {string} string
+// @Failure      400 {string} string
+// @Failure      401 {string} string
+// @Failure      403 {string} string
+// @Failure      500 {string} string
+// @Router       /device/poll [post]
+func (d *Device) DevicePoll(ctx *fiber.Ctx) error {
+	d.logger.Debug().Msgf("received DevicePoll from %s", ctx.IP())
+	if d.options.Device() == nil {
+		return ctx.Status(fiber.StatusUnauthorized).SendString("device code provider is not enabled")
+	}
+
+	code := ctx.Query("code")
+	if code == "" {
+		return ctx.Status(fiber.StatusBadRequest).SendString("code is required")
+	}
+
+	session, expires, lastPoll, err := d.options.Device().PollFlow(ctx.Context(), code)
+	if err != nil {
+		if ent.IsNotFound(err) {
+			return ctx.Status(fiber.StatusUnauthorized).SendString("invalid code")
+		}
+		d.logger.Error().Err(err).Msg("failed to poll device code")
+		return ctx.Status(fiber.StatusInternalServerError).SendString("failed to poll device code")
+	}
+
+	if lastPoll.Add(DefaultPollingRate * time.Second).After(time.Now()) {
+		return ctx.Status(fiber.StatusUnauthorized).SendString("polling rate exceeded")
+	}
+
+	if session != "" {
+		if expires.Before(time.Now()) {
+			return ctx.Status(fiber.StatusUnauthorized).SendString("code expired")
+		}
+
+		ctx.Cookie(d.options.Manager().GenerateCookie(session, expires))
+
+		return ctx.SendString("success")
+	}
+
+	return ctx.Status(fiber.StatusForbidden).SendString("code not yet authorized")
+}
diff --git a/pkg/api/v1/docs/api_docs.go b/pkg/api/v1/docs/api_docs.go
new file mode 100644
index 0000000..56eb06b
--- /dev/null
+++ b/pkg/api/v1/docs/api_docs.go
@@ -0,0 +1,511 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+// Package docs GENERATED BY SWAG; DO NOT EDIT
+// This file was generated by swaggo/swag
+package docs
+
+import "github.com/swaggo/swag"
+
+const docTemplateapi = `{
+    "schemes": {{ marshal .Schemes }},
+    "swagger": "2.0",
+    "info": {
+        "description": "{{escape .Description}}",
+        "title": "{{.Title}}",
+        "termsOfService": "https://loopholelabs.io/privacy",
+        "contact": {
+            "name": "API Support",
+            "email": "admin@loopholelabs.io"
+        },
+        "license": {
+            "name": "Apache 2.0",
+            "url": "https://www.apache.org/licenses/LICENSE-2.0.html"
+        },
+        "version": "{{.Version}}"
+    },
+    "host": "{{.Host}}",
+    "basePath": "{{.BasePath}}",
+    "paths": {
+        "/config": {
+            "get": {
+                "description": "Config gets the public configuration of the API",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config"
+                ],
+                "summary": "Config gets the public configuration of the API",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/models.ConfigResponse"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/callback": {
+            "post": {
+                "description": "DeviceCallback validates the device code and returns the flow identifier",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "callback"
+                ],
+                "summary": "DeviceCallback validates the device code and returns the flow identifier",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "device code",
+                        "name": "code",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/models.DeviceCallbackResponse"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/flow": {
+            "post": {
+                "description": "DeviceFlow starts the device code flow",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "login"
+                ],
+                "summary": "DeviceFlow starts the device code flow",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/models.DeviceFlowResponse"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/poll": {
+            "post": {
+                "description": "DevicePoll polls the device code flow using the user code",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "poll"
+                ],
+                "summary": "DevicePoll polls the device code flow using the user code",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "user code",
+                        "name": "code",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/github/callback": {
+            "get": {
+                "description": "GithubCallback logs in a user with Github",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "github",
+                    "callback"
+                ],
+                "summary": "GithubCallback logs in a user with Github",
+                "responses": {
+                    "307": {
+                        "description": "Temporary Redirect",
+                        "headers": {
+                            "Location": {
+                                "type": "string",
+                                "description": "Redirects to Next URL"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "404": {
+                        "description": "Not Found",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/github/login": {
+            "get": {
+                "description": "GithubLogin logs in a user with Github",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "github",
+                    "login"
+                ],
+                "summary": "GithubLogin logs in a user with Github",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Next Redirect URL",
+                        "name": "next",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "description": "Organization",
+                        "name": "organization",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "description": "Device Code Identifier",
+                        "name": "identifier",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "307": {
+                        "description": "Temporary Redirect",
+                        "headers": {
+                            "Location": {
+                                "type": "string",
+                                "description": "Redirects to Github"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/loggedin": {
+            "post": {
+                "description": "IsLoggedIn checks if a user is logged in",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "login"
+                ],
+                "summary": "IsLoggedIn checks if a user is logged in",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/logout": {
+            "post": {
+                "description": "Logout logs out a user",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "logout"
+                ],
+                "summary": "Logout logs out a user",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/servicekey/login": {
+            "post": {
+                "description": "ServiceKeyLogin logs in a user with their Service Key",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "servicekey",
+                    "login"
+                ],
+                "summary": "ServiceKeyLogin logs in a user with their Service Key",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Service Key",
+                        "name": "servicekey",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        }
+    },
+    "definitions": {
+        "models.ConfigResponse": {
+            "type": "object",
+            "properties": {
+                "github_enabled": {
+                    "type": "boolean"
+                }
+            }
+        },
+        "models.DeviceCallbackResponse": {
+            "type": "object",
+            "properties": {
+                "identifier": {
+                    "type": "string"
+                }
+            }
+        },
+        "models.DeviceFlowResponse": {
+            "type": "object",
+            "properties": {
+                "device_code": {
+                    "type": "string"
+                },
+                "polling_rate": {
+                    "type": "integer"
+                },
+                "user_code": {
+                    "type": "string"
+                }
+            }
+        }
+    }
+}`
+
+// SwaggerInfoapi holds exported Swagger Info so clients can modify it
+// (e.g. override Host or Version at startup) before the docs are served.
+var SwaggerInfoapi = &swag.Spec{
+	Version:          "1.0",
+	Host:             "localhost:8080",
+	BasePath:         "/v1",
+	Schemes:          []string{"https"},
+	Title:            "Auth API V1",
+	Description:      "Auth API, V1",
+	InfoInstanceName: "api",
+	SwaggerTemplate:  docTemplateapi,
+}
+
+// init registers the spec with swaggo under its instance name so the
+// swagger handler can look it up and render docTemplateapi.
+func init() {
+	swag.Register(SwaggerInfoapi.InstanceName(), SwaggerInfoapi)
+}
diff --git a/pkg/api/v1/docs/api_swagger.json b/pkg/api/v1/docs/api_swagger.json
new file mode 100644
index 0000000..fb651c8
--- /dev/null
+++ b/pkg/api/v1/docs/api_swagger.json
@@ -0,0 +1,475 @@
+{
+    "schemes": [
+        "https"
+    ],
+    "swagger": "2.0",
+    "info": {
+        "description": "Auth API, V1",
+        "title": "Auth API V1",
+        "termsOfService": "https://loopholelabs.io/privacy",
+        "contact": {
+            "name": "API Support",
+            "email": "admin@loopholelabs.io"
+        },
+        "license": {
+            "name": "Apache 2.0",
+            "url": "https://www.apache.org/licenses/LICENSE-2.0.html"
+        },
+        "version": "1.0"
+    },
+    "host": "localhost:8080",
+    "basePath": "/v1",
+    "paths": {
+        "/config": {
+            "get": {
+                "description": "Config gets the public configuration of the API",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config"
+                ],
+                "summary": "Config gets the public configuration of the API",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/models.ConfigResponse"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/callback": {
+            "post": {
+                "description": "DeviceCallback validates the device code and returns the flow identifier",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "callback"
+                ],
+                "summary": "DeviceCallback validates the device code and returns the flow identifier",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "device code",
+                        "name": "code",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/models.DeviceCallbackResponse"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/flow": {
+            "post": {
+                "description": "DeviceFlow starts the device code flow",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "login"
+                ],
+                "summary": "DeviceFlow starts the device code flow",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/models.DeviceFlowResponse"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/device/poll": {
+            "post": {
+                "description": "DevicePoll polls the device code flow using the user code",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "device",
+                    "poll"
+                ],
+                "summary": "DevicePoll polls the device code flow using the user code",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "user code",
+                        "name": "code",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/github/callback": {
+            "get": {
+                "description": "GithubCallback logs in a user with Github",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "github",
+                    "callback"
+                ],
+                "summary": "GithubCallback logs in a user with Github",
+                "responses": {
+                    "307": {
+                        "description": "Temporary Redirect",
+                        "headers": {
+                            "Location": {
+                                "type": "string",
+                                "description": "Redirects to Next URL"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "404": {
+                        "description": "Not Found",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/github/login": {
+            "get": {
+                "description": "GithubLogin logs in a user with Github",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "github",
+                    "login"
+                ],
+                "summary": "GithubLogin logs in a user with Github",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Next Redirect URL",
+                        "name": "next",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "description": "Organization",
+                        "name": "organization",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "description": "Device Code Identifier",
+                        "name": "identifier",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "307": {
+                        "description": "Temporary Redirect",
+                        "headers": {
+                            "Location": {
+                                "type": "string",
+                                "description": "Redirects to Github"
+                            }
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/loggedin": {
+            "post": {
+                "description": "IsLoggedIn checks if a user is logged in",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "login"
+                ],
+                "summary": "IsLoggedIn checks if a user is logged in",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/logout": {
+            "post": {
+                "description": "Logout logs out a user",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "logout"
+                ],
+                "summary": "Logout logs out a user",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        },
+        "/servicekey/login": {
+            "post": {
+                "description": "ServiceKeyLogin logs in a user with their Service Key",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "servicekey",
+                    "login"
+                ],
+                "summary": "ServiceKeyLogin logs in a user with their Service Key",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Service Key",
+                        "name": "servicekey",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                }
+            }
+        }
+    },
+    "definitions": {
+        "models.ConfigResponse": {
+            "type": "object",
+            "properties": {
+                "github_enabled": {
+                    "type": "boolean"
+                }
+            }
+        },
+        "models.DeviceCallbackResponse": {
+            "type": "object",
+            "properties": {
+                "identifier": {
+                    "type": "string"
+                }
+            }
+        },
+        "models.DeviceFlowResponse": {
+            "type": "object",
+            "properties": {
+                "device_code": {
+                    "type": "string"
+                },
+                "polling_rate": {
+                    "type": "integer"
+                },
+                "user_code": {
+                    "type": "string"
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/pkg/api/v1/docs/api_swagger.yaml b/pkg/api/v1/docs/api_swagger.yaml
new file mode 100644
index 0000000..1f8a1ce
--- /dev/null
+++ b/pkg/api/v1/docs/api_swagger.yaml
@@ -0,0 +1,315 @@
+basePath: /v1
+definitions:
+  models.ConfigResponse:
+    properties:
+      github_enabled:
+        type: boolean
+    type: object
+  models.DeviceCallbackResponse:
+    properties:
+      identifier:
+        type: string
+    type: object
+  models.DeviceFlowResponse:
+    properties:
+      device_code:
+        type: string
+      polling_rate:
+        type: integer
+      user_code:
+        type: string
+    type: object
+host: localhost:8080
+info:
+  contact:
+    email: admin@loopholelabs.io
+    name: API Support
+  description: Auth API, V1
+  license:
+    name: Apache 2.0
+    url: https://www.apache.org/licenses/LICENSE-2.0.html
+  termsOfService: https://loopholelabs.io/privacy
+  title: Auth API V1
+  version: "1.0"
+paths:
+  /config:
+    get:
+      consumes:
+      - application/json
+      description: Config gets the public configuration of the API
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/models.ConfigResponse'
+            type: array
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: Config gets the public configuration of the API
+      tags:
+      - config
+  /device/callback:
+    post:
+      consumes:
+      - application/json
+      description: DeviceCallback validates the device code and returns the flow identifier
+      parameters:
+      - description: device code
+        in: query
+        name: code
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/models.DeviceCallbackResponse'
+        "400":
+          description: Bad Request
+          schema:
+            type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: DeviceCallback validates the device code and returns the flow identifier
+      tags:
+      - device
+      - callback
+  /device/flow:
+    post:
+      consumes:
+      - application/json
+      description: DeviceFlow starts the device code flow
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/models.DeviceFlowResponse'
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: DeviceFlow starts the device code flow
+      tags:
+      - device
+      - login
+  /device/poll:
+    post:
+      consumes:
+      - application/json
+      description: DevicePoll polls the device code flow using the user code
+      parameters:
+      - description: user code
+        in: query
+        name: code
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            type: string
+        "400":
+          description: Bad Request
+          schema:
+            type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "403":
+          description: Forbidden
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: DevicePoll polls the device code flow using the user code
+      tags:
+      - device
+      - poll
+  /github/callback:
+    get:
+      consumes:
+      - application/json
+      description: GithubCallback logs in a user with Github
+      produces:
+      - application/json
+      responses:
+        "307":
+          description: Temporary Redirect
+          headers:
+            Location:
+              description: Redirects to Next URL
+              type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "403":
+          description: Forbidden
+          schema:
+            type: string
+        "404":
+          description: Not Found
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: GithubCallback logs in a user with Github
+      tags:
+      - github
+      - callback
+  /github/login:
+    get:
+      consumes:
+      - application/json
+      description: GithubLogin logs in a user with Github
+      parameters:
+      - description: Next Redirect URL
+        in: query
+        name: next
+        type: string
+      - description: Organization
+        in: query
+        name: organization
+        type: string
+      - description: Device Code Identifier
+        in: query
+        name: identifier
+        type: string
+      produces:
+      - application/json
+      responses:
+        "307":
+          description: Temporary Redirect
+          headers:
+            Location:
+              description: Redirects to Github
+              type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: GithubLogin logs in a user with Github
+      tags:
+      - github
+      - login
+  /loggedin:
+    post:
+      consumes:
+      - application/json
+      description: IsLoggedIn checks if a user is logged in
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            type: string
+        "400":
+          description: Bad Request
+          schema:
+            type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: IsLoggedIn checks if a user is logged in
+      tags:
+      - login
+  /logout:
+    post:
+      consumes:
+      - application/json
+      description: Logout logs out a user
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            type: string
+        "400":
+          description: Bad Request
+          schema:
+            type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: Logout logs out a user
+      tags:
+      - logout
+  /servicekey/login:
+    post:
+      consumes:
+      - application/json
+      description: ServiceKeyLogin logs in a user with their Service Key
+      parameters:
+      - description: Service Key
+        in: query
+        name: servicekey
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            type: string
+        "401":
+          description: Unauthorized
+          schema:
+            type: string
+        "500":
+          description: Internal Server Error
+          schema:
+            type: string
+      summary: ServiceKeyLogin logs in a user with their Service Key
+      tags:
+      - servicekey
+      - login
+schemes:
+- https
+swagger: "2.0"
diff --git a/pkg/api/v1/github/github.go b/pkg/api/v1/github/github.go
new file mode 100644
index 0000000..d83ab47
--- /dev/null
+++ b/pkg/api/v1/github/github.go
@@ -0,0 +1,151 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package github
+
+import (
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth/internal/ent"
+	"github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/kind"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+)
+
// Github is the fiber sub-router that serves the Github authentication
// endpoints (/login and /callback). It is mounted under /github by the
// parent V1 router.
type Github struct {
	logger  *zerolog.Logger // scoped logger tagged with ROUTER=GITHUB
	app     *fiber.App      // sub-app returned by App() for mounting
	options *options.Options
}
+
+func New(options *options.Options, logger *zerolog.Logger) *Github {
+	l := logger.With().Str("ROUTER", "GITHUB").Logger()
+	i := &Github{
+		logger:  &l,
+		app:     utils.DefaultFiberApp(),
+		options: options,
+	}
+
+	i.init()
+
+	return i
+}
+
// init registers the Github authentication routes on the sub-app.
func (a *Github) init() {
	a.logger.Debug().Msg("initializing")
	a.app.Get("/login", a.GithubLogin)
	a.app.Get("/callback", a.GithubCallback)
}
+
// App returns the underlying fiber sub-app so a parent router can mount it.
func (a *Github) App() *fiber.App {
	return a.app
}
+
// GithubLogin godoc
// @Summary      GithubLogin logs in a user with Github
// @Description  GithubLogin logs in a user with Github
// @Tags         github, login
// @Accept       json
// @Produce      json
// @Param        next         query string false "Next Redirect URL"
// @Param        organization query string false "Organization"
// @Param        identifier   query string false "Device Code Identifier"
// @Success      307
// @Header       307 {string} Location "Redirects to Github"
// @Failure      401 {string} string
// @Failure      500 {string} string
// @Router       /github/login [get]
//
// GithubLogin starts the Github OAuth flow and redirects the client to
// Github's authorization page.
func (a *Github) GithubLogin(ctx *fiber.Ctx) error {
	a.logger.Debug().Msgf("received GithubLogin from %s", ctx.IP())
	// The Github provider is optional; reject outright when it is not enabled.
	if a.options.Github() == nil {
		return ctx.Status(fiber.StatusUnauthorized).SendString("github provider is not enabled")
	}

	// "next" falls back to the configured default redirect URL; "organization"
	// and "identifier" (device-code flow) are optional and may be empty.
	redirect, err := a.options.Github().StartFlow(ctx.Context(), ctx.Query("next", a.options.NextURL()), ctx.Query("organization"), ctx.Query("identifier"))
	if err != nil {
		a.logger.Error().Err(err).Msg("failed to get redirect")
		return ctx.Status(fiber.StatusInternalServerError).SendString("failed to get redirect")
	}
	return ctx.Redirect(redirect, fiber.StatusTemporaryRedirect)
}
+
// GithubCallback godoc
// @Summary      GithubCallback logs in a user with Github
// @Description  GithubCallback logs in a user with Github
// @Tags         github, callback
// @Accept       json
// @Produce      json
// @Success      307
// @Header       307 {string} Location "Redirects to Next URL"
// @Failure      401 {string} string
// @Failure      403 {string} string
// @Failure      404 {string} string
// @Failure      500 {string} string
// @Router       /github/callback [get]
//
// GithubCallback completes the Github OAuth flow: it validates the "code"
// and "state" query parameters, exchanges them for the authenticated user,
// creates a session, and redirects to the flow's next URL. When the flow
// was started for a device (identifier present), the session is handed to
// the device provider instead of being set as a browser cookie.
func (a *Github) GithubCallback(ctx *fiber.Ctx) error {
	a.logger.Debug().Msgf("received GithubCallback from %s", ctx.IP())
	if a.options.Github() == nil {
		return ctx.Status(fiber.StatusUnauthorized).SendString("github provider is not enabled")
	}

	// Both "code" and "state" are supplied by Github on the OAuth redirect;
	// either one missing means this request did not come from a valid flow.
	code := ctx.Query("code")
	if code == "" {
		return ctx.Status(fiber.StatusUnauthorized).SendString("code is required")
	}

	state := ctx.Query("state")
	if state == "" {
		return ctx.Status(fiber.StatusUnauthorized).SendString("state is required")
	}

	a.logger.Debug().Msgf("completing flow for state %s", state)
	userID, organization, nextURL, deviceIdentifier, err := a.options.Github().CompleteFlow(ctx.Context(), code, state)
	if err != nil {
		// An ent "not found" means the state does not correspond to a known
		// in-flight flow; treat it as an invalid code rather than a server error.
		if ent.IsNotFound(err) {
			return ctx.Status(fiber.StatusUnauthorized).SendString("code is invalid")
		}
		a.logger.Error().Err(err).Msg("failed to get token")
		return ctx.Status(fiber.StatusInternalServerError).SendString("failed to get token")
	}

	a.logger.Debug().Msgf("creating session for user %s", userID)

	// A non-empty device identifier marks this as a device-code flow, which
	// additionally requires the device provider to be enabled.
	sessionKind := kind.Default
	if deviceIdentifier != "" {
		if a.options.Device() == nil {
			return ctx.Status(fiber.StatusUnauthorized).SendString("device provider is not enabled")
		}
		sessionKind = kind.Device
	}

	// NOTE(review): on failure CreateSession presumably writes its own error
	// response and returns a nil cookie (err may then be nil) — confirm
	// against the manager implementation.
	cookie, err := a.options.Manager().CreateSession(ctx, sessionKind, a.options.Github().Key(), userID, organization)
	if cookie == nil {
		return err
	}

	if deviceIdentifier != "" {
		// Device flow: hand the session cookie value to the device provider
		// so the polling device can retrieve it; no browser cookie is set.
		err = a.options.Device().CompleteFlow(ctx.Context(), deviceIdentifier, cookie.Value, cookie.Expires)
		if err != nil {
			a.logger.Error().Err(err).Msg("failed to complete device flow")
			return ctx.Status(fiber.StatusInternalServerError).SendString("failed to complete device flow")
		}
	} else {
		ctx.Cookie(cookie)
	}

	return ctx.Redirect(nextURL, fiber.StatusTemporaryRedirect)

}
diff --git a/pkg/api/v1/models/models.go b/pkg/api/v1/models/models.go
new file mode 100644
index 0000000..c1999e0
--- /dev/null
+++ b/pkg/api/v1/models/models.go
@@ -0,0 +1,41 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package models
+
// ConfigResponse is returned by the /config endpoint and describes the
// public configuration of the API.
type ConfigResponse struct {
	// GithubEnabled reports whether the Github authentication provider is enabled.
	GithubEnabled bool `json:"github_enabled"`
}

// DeviceFlowResponse is returned when a device-code flow is started.
type DeviceFlowResponse struct {
	// DeviceCode is the code the device uses when polling for its session.
	DeviceCode string `json:"device_code"`
	// UserCode is the short code the user enters to authorize the device.
	UserCode string `json:"user_code"`
	// PollingRate is the rate at which the device should poll
	// (units not visible here — presumably seconds; TODO confirm).
	PollingRate int64 `json:"polling_rate"`
}

// DeviceCallbackResponse is returned by the device callback endpoint and
// carries the flow identifier used to associate the browser login with the
// device flow.
type DeviceCallbackResponse struct {
	Identifier string `json:"identifier"`
}

// ServiceKeyLoginResponse is returned by /servicekey/login and describes the
// service key session created for the caller.
type ServiceKeyLoginResponse struct {
	// ServiceKeySessionID and ServiceKeySessionSecret identify and
	// authenticate the newly created session.
	ServiceKeySessionID     string `json:"service_key_session_id"`
	ServiceKeySessionSecret string `json:"service_key_session_secret"`
	// ServiceKeyID is the service key the session was created from.
	ServiceKeyID string `json:"service_key_id"`
	// UserID is the owner of the service key.
	UserID string `json:"user_id"`
	// Organization is the organization the service key belongs to (may be empty).
	Organization string `json:"organization"`
	// ResourceType and ResourceID scope the session to a resource
	// (semantics defined by the manager — not visible here).
	ResourceType string `json:"resource_type"`
	ResourceID   string `json:"resource_id"`
}
diff --git a/pkg/api/v1/options/options.go b/pkg/api/v1/options/options.go
new file mode 100644
index 0000000..5c1346f
--- /dev/null
+++ b/pkg/api/v1/options/options.go
@@ -0,0 +1,91 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package options
+
+import (
+	"github.com/loopholelabs/auth/pkg/manager"
+	"github.com/loopholelabs/auth/pkg/provider/device"
+	"github.com/loopholelabs/auth/pkg/provider/github"
+)
+
// Github lazily supplies the Github provider; it returns nil when the
// provider is not enabled.
type Github func() *github.Github

// Device lazily supplies the Device provider; it returns nil when the
// provider is not enabled.
type Device func() *device.Device

// NextURL supplies the default post-login redirect URL.
type NextURL func() string

// Modifier mutates an Options during construction (functional option).
type Modifier func(*Options)

// WithGithub sets the Github provider supplier.
func WithGithub(github Github) Modifier {
	return func(options *Options) {
		options.github = github
	}
}

// WithDevice sets the Device provider supplier.
func WithDevice(device Device) Modifier {
	return func(options *Options) {
		options.device = device
	}
}
+
// Options carries the dependencies shared by the v1 API routers. The
// github/device fields are supplier functions rather than values, which
// lets New default them to nil-returning functions when a provider is
// not configured.
type Options struct {
	github  Github
	device  Device
	nextURL NextURL
	manager *manager.Manager
}
+
+func New(manager *manager.Manager, nextURL NextURL, modifiers ...Modifier) *Options {
+	options := &Options{
+		manager: manager,
+		nextURL: nextURL,
+	}
+
+	for _, modifier := range modifiers {
+		modifier(options)
+	}
+
+	if options.github == nil {
+		options.github = func() *github.Github {
+			return nil
+		}
+	}
+
+	if options.device == nil {
+		options.device = func() *device.Device {
+			return nil
+		}
+	}
+
+	return options
+}
+
// Github resolves the Github provider; nil means the provider is not enabled.
func (o *Options) Github() *github.Github {
	return o.github()
}

// Device resolves the Device provider; nil means the provider is not enabled.
func (o *Options) Device() *device.Device {
	return o.device()
}

// Manager returns the session manager shared by all v1 routers.
func (o *Options) Manager() *manager.Manager {
	return o.manager
}

// NextURL returns the default post-login redirect URL.
func (o *Options) NextURL() string {
	return o.nextURL()
}
diff --git a/pkg/api/v1/servicekey/servicekey.go b/pkg/api/v1/servicekey/servicekey.go
new file mode 100644
index 0000000..6f64e8f
--- /dev/null
+++ b/pkg/api/v1/servicekey/servicekey.go
@@ -0,0 +1,108 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package servicekey
+
+import (
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth"
+	"github.com/loopholelabs/auth/pkg/api/v1/models"
+	"github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/manager"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+	"strings"
+)
+
// ServiceKey is the fiber sub-router that serves the service key
// authentication endpoints (/login). It is mounted under /servicekey by
// the parent V1 router.
type ServiceKey struct {
	logger  *zerolog.Logger // scoped logger tagged with ROUTER=SERVICEKEY
	app     *fiber.App      // sub-app returned by App() for mounting
	options *options.Options
}
+
+func New(options *options.Options, logger *zerolog.Logger) *ServiceKey {
+	l := logger.With().Str("ROUTER", "SERVICEKEY").Logger()
+	i := &ServiceKey{
+		logger:  &l,
+		app:     utils.DefaultFiberApp(),
+		options: options,
+	}
+
+	i.init()
+
+	return i
+}
+
// init registers the service key routes on the sub-app.
func (a *ServiceKey) init() {
	a.logger.Debug().Msg("initializing")
	a.app.Post("/login", a.ServiceKeyLogin)
}
+
// App returns the underlying fiber sub-app so a parent router can mount it.
func (a *ServiceKey) App() *fiber.App {
	return a.app
}
+
+// ServiceKeyLogin godoc
+// @Summary      ServiceKeyLogin logs in a user with their Service Key
+// @Description  ServiceKeyLogin logs in a user with their Service Key
+// @Tags         servicekey, login
+// @Accept       json
+// @Produce      json
+// @Param        servicekey query string true "Service Key"
+// @Success      200 {string} string
+// @Failure      401 {string} string
+// @Failure      500 {string} string
+// @Router       /servicekey/login [post]
+func (a *ServiceKey) ServiceKeyLogin(ctx *fiber.Ctx) error {
+	a.logger.Debug().Msgf("received ServiceKeyLogin from %s", ctx.IP())
+
+	servicekey := ctx.Query("servicekey")
+	if servicekey == "" {
+		return ctx.Status(fiber.StatusBadRequest).SendString("service key is required")
+	}
+
+	if !strings.HasPrefix(servicekey, auth.ServiceKeyPrefixString) {
+		return ctx.Status(fiber.StatusBadRequest).SendString("invalid service key")
+	}
+
+	keySplit := strings.Split(servicekey, manager.KeyDelimiterString)
+	if len(keySplit) != 2 {
+		return ctx.Status(fiber.StatusUnauthorized).SendString("invalid service key")
+	}
+
+	keyID := keySplit[0]
+	keySecret := []byte(keySplit[1])
+
+	a.logger.Debug().Msgf("logging in user with service key ID %s", keyID)
+	sess, secret, err := a.options.Manager().CreateServiceKeySession(ctx, keyID, keySecret)
+	if sess == nil || secret == nil {
+		return err
+	}
+
+	return ctx.JSON(&models.ServiceKeyLoginResponse{
+		ServiceKeySessionID:     sess.ID,
+		ServiceKeySessionSecret: string(secret),
+		ServiceKeyID:            sess.ServiceKeyID,
+		UserID:                  sess.UserID,
+		Organization:            sess.Organization,
+		ResourceType:            sess.ResourceType,
+		ResourceID:              sess.ResourceID,
+	})
+}
+
// ServiceKeyLogout is an unimplemented stub and is not registered as a
// route in init. Service key session logout appears to be handled by the
// shared /logout route instead — TODO confirm whether this handler is
// intended to be wired up or removed.
func (a *ServiceKey) ServiceKeyLogout(ctx *fiber.Ctx) error {
	return nil
}
diff --git a/pkg/api/v1/v1.go b/pkg/api/v1/v1.go
new file mode 100644
index 0000000..f763ca6
--- /dev/null
+++ b/pkg/api/v1/v1.go
@@ -0,0 +1,131 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth"
+	"github.com/loopholelabs/auth/pkg/api/v1/config"
+	"github.com/loopholelabs/auth/pkg/api/v1/device"
+	"github.com/loopholelabs/auth/pkg/api/v1/docs"
+	"github.com/loopholelabs/auth/pkg/api/v1/github"
+	"github.com/loopholelabs/auth/pkg/api/v1/options"
+	"github.com/loopholelabs/auth/pkg/api/v1/servicekey"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+)
+
//go:generate swag init -g v1.go -o docs --parseDependency --instanceName api -d ./

// V1 is the v1 API router. It mounts the config, github, device, and
// servicekey sub-routers and serves the shared /logout, /loggedin, and
// /swagger.json routes.
type V1 struct {
	logger  *zerolog.Logger // scoped logger tagged with VERSION=v1
	app     *fiber.App      // sub-app returned by App() for mounting
	options *options.Options
}
+
+func New(options *options.Options, logger *zerolog.Logger) *V1 {
+	l := logger.With().Str("VERSION", "v1").Logger()
+	v := &V1{
+		logger:  &l,
+		app:     utils.DefaultFiberApp(),
+		options: options,
+	}
+
+	v.init()
+
+	return v
+}
+
// @title Auth API V1
// @version 1.0
// @description Auth API, V1
// @termsOfService https://loopholelabs.io/privacy
// @contact.name API Support
// @contact.email admin@loopholelabs.io
// @license.name Apache 2.0
// @license.url https://www.apache.org/licenses/LICENSE-2.0.html
// @host localhost:8080
// @schemes https
// @BasePath /v1
//
// init mounts the sub-routers and registers the shared routes. The
// API-level swagger annotations above are attached to init so that
// `swag init -g v1.go` picks them up.
func (v *V1) init() {
	v.logger.Debug().Msg("initializing")

	v.app.Mount("/config", config.New(v.options, v.logger).App())
	v.app.Mount("/github", github.New(v.options, v.logger).App())
	v.app.Mount("/device", device.New(v.options, v.logger).App())
	v.app.Mount("/servicekey", servicekey.New(v.options, v.logger).App())

	v.app.Post("/logout", v.Logout)
	// /loggedin runs the manager's Validate middleware first, which is what
	// populates the user information read by IsLoggedIn.
	v.app.Post("/loggedin", v.options.Manager().Validate, v.IsLoggedIn)

	// Serve the generated swagger document for this API version.
	v.app.Get("/swagger.json", func(ctx *fiber.Ctx) error {
		ctx.Response().Header.SetContentType("application/json")
		return ctx.SendString(docs.SwaggerInfoapi.ReadDoc())
	})
}
+
// App returns the underlying fiber sub-app so a parent router can mount it.
func (v *V1) App() *fiber.App {
	return v.app
}
+
// Logout godoc
// @Summary      Logout logs out a user
// @Description  Logout logs out a user
// @Tags         logout
// @Accept       json
// @Produce      json
// @Success      200 {string} string
// @Failure      400 {string} string
// @Failure      401 {string} string
// @Failure      500 {string} string
// @Router       /logout [post]
//
// Logout terminates both the regular session and the service key session.
// Each Logout* call is assumed to be a no-op when the corresponding
// session is absent — TODO confirm against the manager implementation.
func (v *V1) Logout(ctx *fiber.Ctx) error {
	v.logger.Debug().Msgf("received Logout from %s", ctx.IP())

	err := v.options.Manager().LogoutSession(ctx)
	if err != nil {
		return err
	}

	err = v.options.Manager().LogoutServiceKeySession(ctx)
	if err != nil {
		return err
	}

	return ctx.SendString("logged out")
}
+
// IsLoggedIn godoc
// @Summary      IsLoggedIn checks if a user is logged in
// @Description  IsLoggedIn checks if a user is logged in
// @Tags         login
// @Accept       json
// @Produce      json
// @Success      200 {string} string
// @Failure      400 {string} string
// @Failure      401 {string} string
// @Failure      500 {string} string
// @Router       /loggedin [post]
//
// IsLoggedIn reads the user ID placed in the request context by the
// manager's Validate middleware (registered ahead of this handler in
// init) and echoes it back.
func (v *V1) IsLoggedIn(ctx *fiber.Ctx) error {
	v.logger.Debug().Msgf("received IsLoggedIn from %s", ctx.IP())
	userID, ok := ctx.Locals(auth.UserContextKey).(string)
	if !ok {
		// Validate should always set this; a missing value indicates a
		// middleware wiring bug rather than a client error.
		v.logger.Error().Msg("failed to get userID from context")
		return ctx.Status(fiber.StatusInternalServerError).SendString("error getting userID from context")
	}
	return ctx.SendString(fmt.Sprintf("logged in user %s", userID))
}
diff --git a/pkg/apikey/apikey.go b/pkg/apikey/apikey.go
new file mode 100644
index 0000000..0132740
--- /dev/null
+++ b/pkg/apikey/apikey.go
@@ -0,0 +1,32 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package apikey
+
// APIKey is a user's API Key. Only the hashed secret is stored, so the
// key's secret can be verified but not recovered.
type APIKey struct {
	// ID is the API Key's unique identifier
	ID string `json:"id"`

	// Hash is the hashed secret of the API Key (hashing scheme is defined
	// by the manager — not visible here)
	Hash []byte `json:"hash"`

	// UserID is the user's unique identifier
	UserID string `json:"user_id"`

	// Organization is the organization that the API Key belongs to (optional)
	Organization string `json:"organization"`
}
diff --git a/pkg/refreshpolicy/refreshpolicy.go b/pkg/claims/claims.go
similarity index 70%
rename from pkg/refreshpolicy/refreshpolicy.go
rename to pkg/claims/claims.go
index c805a44..a344c86 100644
--- a/pkg/refreshpolicy/refreshpolicy.go
+++ b/pkg/claims/claims.go
@@ -14,13 +14,10 @@
 	limitations under the License.
 */
 
-package dex
+package claims
 
-import (
-	"github.com/dexidp/dex/pkg/log"
-	"github.com/dexidp/dex/server"
-)
-
-func DefaultRefreshPolicy(logger log.Logger) (*server.RefreshTokenPolicy, error) {
-	return server.NewRefreshTokenPolicy(logger, true, "24h", "720h", "")
+// Claims contains the claims for a user from an authentication provider
+type Claims struct {
+	// UserID is the unique identifier for the user. This is often an email address.
+	UserID string `json:"user_id"`
 }
diff --git a/pkg/client/client.go b/pkg/client/client.go
deleted file mode 100644
index e975513..0000000
--- a/pkg/client/client.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package client
-
-import (
-	"bytes"
-	"context"
-	"crypto/tls"
-	"fmt"
-	"github.com/go-openapi/runtime/client"
-	"github.com/loopholelabs/auth/pkg/client/discover"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"golang.org/x/oauth2"
-	"io"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-func ContextClient(ctx context.Context, client *http.Client) context.Context {
-	return context.WithValue(ctx, oauth2.HTTPClient, client)
-}
-
-type CompatibleClient struct {
-	transport http.RoundTripper
-}
-
-func NewCompatibleClient(transport http.RoundTripper) *CompatibleClient {
-	return &CompatibleClient{transport: transport}
-}
-
-func (c *CompatibleClient) PostForm(uri string, data url.Values) (*http.Response, error) {
-	if !(strings.HasPrefix(uri, "https://") || strings.HasPrefix(uri, "http://")) {
-		uri = "https://" + uri
-	}
-	req, err := http.NewRequest("POST", uri, nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Form = make(url.Values)
-	for k, v := range data {
-		req.Form.Set(k, v[0])
-	}
-	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-	req.Body = io.NopCloser(bytes.NewBufferString(req.Form.Encode()))
-	return c.transport.RoundTrip(req)
-}
-
-func UnauthenticatedClient(endpoint string, basePath string, schemes []string, tlsConfig *tls.Config) (*client.Runtime, *http.Client) {
-	if strings.HasPrefix(endpoint, "https://") {
-		endpoint = strings.TrimPrefix(endpoint, "https://")
-	}
-	if strings.HasPrefix(endpoint, "http://") {
-		endpoint = strings.TrimPrefix(endpoint, "http://")
-	}
-
-	transport := &http.Transport{
-		TLSClientConfig: tlsConfig,
-	}
-	httpClient := &http.Client{Transport: transport}
-	return client.NewWithClient(endpoint, basePath, schemes, httpClient), httpClient
-}
-
-func AuthenticatedClient(endpoint string, basePath string, schemes []string, tlsConfig *tls.Config, authEndpoint string, clientID string, kind tokenKind.Kind, token *Token) (TokenSource, *client.Runtime, error) {
-	if strings.HasPrefix(endpoint, "https://") {
-		endpoint = strings.TrimPrefix(endpoint, "https://")
-	}
-	if strings.HasPrefix(endpoint, "http://") {
-		endpoint = strings.TrimPrefix(endpoint, "http://")
-	}
-
-	_, hc := UnauthenticatedClient(endpoint, basePath, schemes, tlsConfig)
-
-	var conf *Config
-	switch kind {
-	case tokenKind.OAuthKind:
-		discovery, err := discover.Discover(hc.Transport, fmt.Sprintf("https://%s", authEndpoint))
-		if err != nil {
-			return nil, nil, err
-		}
-		conf = NewConfig(clientID, "", discovery.Auth, discovery.Token)
-	case tokenKind.APITokenKind, tokenKind.ServiceTokenKind:
-		conf = NewConfig(clientID, "", fmt.Sprintf("https://%s/exchange", authEndpoint), fmt.Sprintf("https://%s/refresh", authEndpoint))
-	default:
-		return nil, nil, fmt.Errorf("unknown token kind: %s", kind)
-	}
-
-	clientContext := ContextClient(context.Background(), &http.Client{
-		Transport: hc.Transport,
-	})
-	tokenSource := NewTokenSource(clientContext, conf, token)
-
-	return tokenSource, client.NewWithClient(endpoint, basePath, schemes, NewClient(clientContext, tokenSource)), nil
-}
diff --git a/pkg/client/discover/discover.go b/pkg/client/discover/discover.go
deleted file mode 100644
index e0f30ab..0000000
--- a/pkg/client/discover/discover.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package discover
-
-import (
-	"encoding/json"
-	"fmt"
-	"github.com/cli/oauth"
-	"golang.org/x/oauth2"
-	"io"
-	"mime"
-	"net/http"
-	"strings"
-)
-
-type Discovery struct {
-	Issuer            string   `json:"issuer"`
-	Auth              string   `json:"authorization_endpoint"`
-	Token             string   `json:"token_endpoint"`
-	Keys              string   `json:"jwks_uri"`
-	UserInfo          string   `json:"userinfo_endpoint"`
-	DeviceEndpoint    string   `json:"device_authorization_endpoint"`
-	GrantTypes        []string `json:"grant_types_supported"`
-	ResponseTypes     []string `json:"response_types_supported"`
-	Subjects          []string `json:"subject_types_supported"`
-	IDTokenAlgs       []string `json:"id_token_signing_alg_values_supported"`
-	CodeChallengeAlgs []string `json:"code_challenge_methods_supported"`
-	Scopes            []string `json:"scopes_supported"`
-	AuthMethods       []string `json:"token_endpoint_auth_methods_supported"`
-	Claims            []string `json:"claims_supported"`
-}
-
-func Discover(transport http.RoundTripper, issuer string) (*Discovery, error) {
-	wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
-	req, err := http.NewRequest(http.MethodGet, wellKnown, nil)
-	if err != nil {
-		return nil, err
-	}
-	resp, err := transport.RoundTrip(req)
-	if err != nil {
-		return nil, err
-	}
-
-	body, err := io.ReadAll(resp.Body)
-	if resp.StatusCode != http.StatusOK {
-		if err == nil {
-			return nil, fmt.Errorf("%s: %s", resp.Status, string(body))
-		}
-		return nil, fmt.Errorf("%s: %v", resp.Status, err)
-	}
-
-	d := new(Discovery)
-	err = json.Unmarshal(body, d)
-	if err != nil {
-		ct := resp.Header.Get("Content-Type")
-		mediaType, _, parseErr := mime.ParseMediaType(ct)
-		if parseErr == nil && mediaType == "application/json" {
-			return nil, fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
-		}
-		return nil, fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
-	}
-
-	return d, nil
-}
-
-func (d *Discovery) GetHosts() *oauth.Host {
-	return &oauth.Host{
-		DeviceCodeURL: d.DeviceEndpoint,
-		AuthorizeURL:  d.Auth,
-		TokenURL:      d.Token,
-	}
-}
-
-func (d *Discovery) GetScopes() []string {
-	return d.Scopes
-}
-
-func (d *Discovery) GetEndpoints() *oauth2.Endpoint {
-	return &oauth2.Endpoint{
-		AuthURL:  d.Auth,
-		TokenURL: d.Token,
-	}
-}
diff --git a/pkg/client/oauth.go b/pkg/client/oauth.go
deleted file mode 100644
index 576d38b..0000000
--- a/pkg/client/oauth.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package client
-
-import (
-	"context"
-	"encoding/json"
-	"github.com/cli/oauth"
-	"golang.org/x/oauth2"
-	"net/http"
-	"net/url"
-	"strconv"
-	"time"
-)
-
-type Config oauth2.Config
-
-func NewConfig(clientID string, clientSecret string, authURL string, tokenURL string) *Config {
-	return (*Config)(&oauth2.Config{
-		ClientID:     clientID,
-		ClientSecret: clientSecret,
-		Endpoint: oauth2.Endpoint{
-			AuthURL:   authURL,
-			TokenURL:  tokenURL,
-			AuthStyle: oauth2.AuthStyleInParams,
-		},
-	})
-}
-
-type Flow oauth.Flow
-
-type Token oauth2.Token
-
-type Client interface {
-	PostForm(url string, data url.Values) (*http.Response, error)
-}
-
-func NewToken(accessToken string, tokenType string, refreshToken string, expiry time.Time) *Token {
-	return (*Token)(&oauth2.Token{
-		AccessToken:  accessToken,
-		TokenType:    tokenType,
-		RefreshToken: refreshToken,
-		Expiry:       expiry,
-	})
-}
-
-func UnmarshalToken(data []byte) (*Token, error) {
-	t := new(oauth2.Token)
-	return (*Token)(t), json.Unmarshal(data, t)
-}
-
-type TokenSource oauth2.TokenSource
-
-func OAuthConfig(endpoint *oauth2.Endpoint, scopes []string, clientID string) *Config {
-	return (*Config)(&oauth2.Config{
-		ClientID: clientID,
-		Endpoint: *endpoint,
-		Scopes:   scopes,
-	})
-}
-
-func DeviceFlow(hosts *oauth.Host, client Client, scopes []string, clientID string, displayCode func(string, string) error, browser func(string) error) *Flow {
-	return (*Flow)(&oauth.Flow{
-		Host:        hosts,
-		Scopes:      scopes,
-		ClientID:    clientID,
-		DisplayCode: displayCode,
-		BrowseURL:   browser,
-		HTTPClient:  client,
-	})
-}
-
-func GetToken(flow *Flow) (*Token, error) {
-	flowToken, err := (*oauth.Flow)(flow).DeviceFlow()
-	if err != nil {
-		return nil, err
-	}
-
-	expiry, err := strconv.Atoi(flowToken.ExpiresIn)
-	if err != nil {
-		return nil, err
-	}
-
-	return (*Token)(&oauth2.Token{
-		AccessToken:  flowToken.Token,
-		TokenType:    flowToken.Type,
-		RefreshToken: flowToken.RefreshToken,
-		Expiry:       time.Now().Add(time.Duration(expiry) * time.Second),
-	}), nil
-}
-
-func NewTokenSource(ctx context.Context, config *Config, token *Token) TokenSource {
-	return (*oauth2.Config)(config).TokenSource(ctx, (*oauth2.Token)(token))
-}
-
-func NewClient(ctx context.Context, source TokenSource) *http.Client {
-	return oauth2.NewClient(ctx, source)
-}
diff --git a/pkg/config/config.go b/pkg/config/config.go
deleted file mode 100644
index e1d117a..0000000
--- a/pkg/config/config.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package config
-
-import (
-	"github.com/joho/godotenv"
-	"os"
-	"strconv"
-)
-
-func init() {
-	_ = godotenv.Load()
-}
-
-type Config struct {
-	Debug       bool     `json:"debug"`
-	Listen      string   `json:"listen"`
-	Issuer      string   `json:"issuer"`
-	TLS         bool     `json:"tls"`
-	Database    Database `json:"database"`
-	DexDatabase Database `json:"dex_database"`
-	OAuth       OAuth    `json:"oauth"`
-	Clients     []Client `json:"clients"`
-}
-
-type Database struct {
-	Type string `json:"type"`
-	URL  string `json:"url"`
-}
-
-type OAuth struct {
-	GithubOAuth GithubOAuth `json:"github"`
-}
-
-type GithubOAuth struct {
-	Enabled      bool   `json:"enabled"`
-	RedirectURI  string `json:"redirect_uri"`
-	ClientID     string `json:"client_id,omitempty"`
-	ClientSecret string `json:"client_secret,omitempty"`
-}
-
-type Client struct {
-	ID          string `json:"id"`
-	Secret      string `json:"secret"`
-	RedirectURI string `json:"redirect_uri"`
-	Public      bool   `json:"public"`
-	Logo        string `json:"logo"`
-}
-
-func New() Config {
-	debug, err := strconv.ParseBool(os.Getenv("DEBUG"))
-	if err != nil {
-		debug = false
-	}
-
-	listen, exists := os.LookupEnv("LISTEN")
-	if !exists {
-		listen = ":8080"
-	}
-
-	issuer, exists := os.LookupEnv("ISSUER")
-	if err != nil {
-		issuer = "http://localhost:8080"
-	}
-
-	tls, err := strconv.ParseBool(os.Getenv("TLS"))
-	if err != nil {
-		tls = false
-	}
-
-	databaseType, exists := os.LookupEnv("DATABASE_TYPE")
-	if !exists {
-		databaseType = "sqlite3"
-	}
-
-	databaseURL, exists := os.LookupEnv("DATABASE_URL")
-	if !exists {
-		databaseURL = ":memory:?_fk=1"
-	}
-
-	dexDatabaseType, exists := os.LookupEnv("DEX_DATABASE_TYPE")
-	if !exists {
-		dexDatabaseType = "sqlite3"
-	}
-
-	dexDatabaseURL, exists := os.LookupEnv("DEX_DATABASE_URL")
-	if !exists {
-		dexDatabaseURL = ":memory:?_fk=1"
-	}
-
-	oauthGithubEnabled, err := strconv.ParseBool(os.Getenv("OAUTH_GITHUB_ENABLED"))
-	if err != nil {
-		oauthGithubEnabled = false
-	}
-
-	oauthGithubClientID, exists := os.LookupEnv("OAUTH_GITHUB_CLIENT_ID")
-	if !exists {
-		oauthGithubClientID = ""
-	}
-
-	oauthGithubClientSecret, exists := os.LookupEnv("OAUTH_GITHUB_CLIENT_SECRET")
-	if !exists {
-		oauthGithubClientSecret = ""
-	}
-
-	oauthGithubRedirectURI, exists := os.LookupEnv("OAUTH_GITHUB_REDIRECT_URI")
-	if !exists {
-		oauthGithubRedirectURI = ""
-	}
-
-	numClients, err := strconv.Atoi(os.Getenv("NUM_CLIENTS"))
-	if err != nil {
-		numClients = 0
-	}
-
-	clients := make([]Client, numClients)
-	for i := 0; i < numClients; i++ {
-		clients[i] = Client{
-			ID:          os.Getenv("CLIENT_" + strconv.Itoa(i) + "_ID"),
-			Secret:      os.Getenv("CLIENT_" + strconv.Itoa(i) + "_SECRET"),
-			RedirectURI: os.Getenv("CLIENT_" + strconv.Itoa(i) + "_REDIRECT_URI"),
-			Public:      os.Getenv("CLIENT_"+strconv.Itoa(i)+"_PUBLIC") == "true",
-			Logo:        os.Getenv("CLIENT_" + strconv.Itoa(i) + "_LOGO"),
-		}
-	}
-
-	return Config{
-		Debug:  debug,
-		Listen: listen,
-		Issuer: issuer,
-		TLS:    tls,
-		Database: Database{
-			Type: databaseType,
-			URL:  databaseURL,
-		},
-		DexDatabase: Database{
-			Type: dexDatabaseType,
-			URL:  dexDatabaseURL,
-		},
-		OAuth: OAuth{
-			GithubOAuth: GithubOAuth{
-				Enabled:      oauthGithubEnabled,
-				RedirectURI:  oauthGithubRedirectURI,
-				ClientID:     oauthGithubClientID,
-				ClientSecret: oauthGithubClientSecret,
-			},
-		},
-		Clients: clients,
-	}
-}
diff --git a/pkg/database/database.go b/pkg/database/database.go
new file mode 100644
index 0000000..fe63109
--- /dev/null
+++ b/pkg/database/database.go
@@ -0,0 +1,141 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package database
+
+import (
+	"context"
+	"github.com/loopholelabs/auth/internal/ent"
+	"github.com/loopholelabs/auth/internal/ent/deviceflow"
+	"github.com/loopholelabs/auth/internal/ent/githubflow"
+	"github.com/loopholelabs/auth/pkg/provider/github"
+	"github.com/rs/zerolog"
+	"time"
+
+	_ "github.com/lib/pq"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+var _ github.Database = (*Database)(nil)
+
+type Database struct {
+	logger *zerolog.Logger
+	client *ent.Client
+	ctx    context.Context
+	cancel context.CancelFunc
+}
+
+func New(connector string, url string, logger *zerolog.Logger) (*Database, error) {
+	l := logger.With().Str("AUTH", "DATABASE").Logger()
+
+	l.Debug().Msgf("connecting to %s (%s)", url, connector)
+	client, err := ent.Open(connector, url)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	l.Info().Msg("running database migrations")
+	err = client.Schema.Create(ctx)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+	return &Database{
+		logger: &l,
+		client: client,
+		ctx:    ctx,
+		cancel: cancel,
+	}, nil
+}
+
+func (d *Database) Shutdown() error {
+	if d.cancel != nil {
+		d.cancel()
+	}
+
+	if d.client != nil {
+		err := d.client.Close()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Database) SetGithubFlow(ctx context.Context, state string, verifier string, challenge string, nextURL string, organization string, deviceIdentifier string) error {
+	d.logger.Debug().Msgf("setting github flow for %s", state)
+	_, err := d.client.GithubFlow.Create().SetState(state).SetVerifier(verifier).SetChallenge(challenge).SetNextURL(nextURL).SetOrganization(organization).SetDeviceIdentifier(deviceIdentifier).Save(ctx)
+	return err
+}
+
+func (d *Database) GetGithubFlow(ctx context.Context, state string) (*ent.GithubFlow, error) {
+	d.logger.Debug().Msgf("getting github flow for %s", state)
+	return d.client.GithubFlow.Query().Where(githubflow.State(state)).Only(ctx)
+}
+
+func (d *Database) DeleteGithubFlow(ctx context.Context, state string) error {
+	d.logger.Debug().Msgf("deleting github flow for %s", state)
+	_, err := d.client.GithubFlow.Delete().Where(githubflow.State(state)).Exec(ctx)
+	return err
+}
+
+func (d *Database) GCGithubFlow(ctx context.Context, expiry time.Duration) (int, error) {
+	d.logger.Debug().Msgf("running github flow gc")
+	return d.client.GithubFlow.Delete().Where(githubflow.CreatedAtLT(time.Now().Add(expiry))).Exec(ctx)
+}
+
+func (d *Database) SetDeviceFlow(ctx context.Context, identifier string, deviceCode string, userCode string) error {
+	d.logger.Debug().Msgf("setting device flow for %s (device code %s, user code %s)", identifier, deviceCode, userCode)
+	_, err := d.client.DeviceFlow.Create().SetIdentifier(identifier).SetDeviceCode(deviceCode).SetUserCode(userCode).Save(ctx)
+	return err
+}
+
+func (d *Database) GetDeviceFlow(ctx context.Context, deviceCode string) (*ent.DeviceFlow, error) {
+	d.logger.Debug().Msgf("getting device flow for device code %s", deviceCode)
+	return d.client.DeviceFlow.Query().Where(deviceflow.DeviceCode(deviceCode)).Only(ctx)
+}
+
+func (d *Database) UpdateDeviceFlow(ctx context.Context, identifier string, session string, expiry time.Time) error {
+	d.logger.Debug().Msgf("updating device flow for %s (expiry %s)", identifier, expiry)
+	_, err := d.client.DeviceFlow.Update().Where(deviceflow.Identifier(identifier)).SetSession(session).SetExpiresAt(expiry).Save(ctx)
+	return err
+}
+
+func (d *Database) GetDeviceFlowUserCode(ctx context.Context, userCode string) (*ent.DeviceFlow, error) {
+	d.logger.Debug().Msgf("getting device flow for user code %s", userCode)
+	flow, err := d.client.DeviceFlow.Query().Where(deviceflow.UserCode(userCode)).Only(ctx)
+	if err != nil {
+		return nil, err
+	}
+	_, err = flow.Update().SetLastPoll(time.Now()).Save(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return flow, nil
+}
+
+func (d *Database) DeleteDeviceFlow(ctx context.Context, deviceCode string) error {
+	d.logger.Debug().Msgf("deleting device flow for device code %s", deviceCode)
+	_, err := d.client.DeviceFlow.Delete().Where(deviceflow.DeviceCode(deviceCode)).Exec(ctx)
+	return err
+}
+
+func (d *Database) GCDeviceFlow(ctx context.Context, expiry time.Duration) (int, error) {
+	d.logger.Debug().Msgf("running device flow gc")
+	return d.client.DeviceFlow.Delete().Where(deviceflow.CreatedAtLT(time.Now().Add(expiry))).Exec(ctx)
+}
diff --git a/pkg/keyset/keyset.go b/pkg/keyset/keyset.go
deleted file mode 100644
index 139987c..0000000
--- a/pkg/keyset/keyset.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package keyset
-
-import (
-	"github.com/dexidp/dex/storage"
-	"gopkg.in/square/go-jose.v2"
-	"sync"
-	"time"
-)
-
-type KeySet struct {
-	storage storage.Storage
-	updater *updater
-	mu      sync.RWMutex
-}
-
-type updater struct {
-	done     chan struct{}
-	keys     []jose.JSONWebKey
-	rotation time.Time
-	err      error
-}
-
-type Verifier interface {
-	Verify(jws *jose.JSONWebSignature) ([]byte, error)
-}
-
-func NewPublic(storage storage.Storage) *Public {
-	return &Public{
-		KeySet: KeySet{
-			storage: storage,
-		},
-	}
-}
-
-func NewPrivate(storage storage.Storage) *Private {
-	return &Private{
-		KeySet: KeySet{
-			storage: storage,
-		},
-	}
-}
-
-func NewRemote(jwksURL string) *Remote {
-	return newRemote(jwksURL, time.Now)
-}
diff --git a/pkg/keyset/private.go b/pkg/keyset/private.go
deleted file mode 100644
index d72b749..0000000
--- a/pkg/keyset/private.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package keyset
-
-import (
-	"errors"
-	"fmt"
-	"gopkg.in/square/go-jose.v2"
-	"time"
-)
-
-type Private struct {
-	KeySet
-	CachedKey    jose.JSONWebKey
-	NextRotation time.Time
-}
-
-func (k *Private) Sign(alg jose.SignatureAlgorithm, payload []byte) (string, error) {
-	var signingKey jose.SigningKey
-	k.mu.Lock()
-	if time.Now().Before(k.NextRotation) {
-		signingKey = jose.SigningKey{Key: &k.CachedKey, Algorithm: alg}
-		k.mu.Unlock()
-	} else {
-		k.mu.Unlock()
-		key, err := k.keyFromStorage()
-		if err != nil {
-			return "", fmt.Errorf("fetching keys %v", err)
-		}
-		signingKey = jose.SigningKey{Key: &key, Algorithm: alg}
-	}
-	signer, err := jose.NewSigner(signingKey, &jose.SignerOptions{})
-	if err != nil {
-		return "", err
-	}
-	signature, err := signer.Sign(payload)
-	if err != nil {
-		return "", err
-	}
-	return signature.CompactSerialize()
-}
-
-func (k *Private) keyFromStorage() (jose.JSONWebKey, error) {
-	k.mu.Lock()
-
-	if k.updater == nil {
-		k.updater = &updater{
-			done: make(chan struct{}),
-		}
-
-		go func() {
-			key, rotation, err := k.updateKey()
-			k.updater.keys = []jose.JSONWebKey{key}
-			k.updater.rotation = rotation
-			k.updater.err = err
-			close(k.updater.done)
-
-			k.mu.Lock()
-			defer k.mu.Unlock()
-
-			if err == nil {
-				k.CachedKey = key
-				k.NextRotation = rotation
-			}
-
-			k.updater = nil
-		}()
-	}
-	updater := k.updater
-	k.mu.Unlock()
-
-	t := time.NewTimer(time.Second * 30)
-
-	select {
-	case <-t.C:
-		t.Stop()
-		return jose.JSONWebKey{}, errors.New("updating keys timed out")
-	case <-updater.done:
-		t.Stop()
-		return updater.keys[0], updater.err
-	}
-}
-
-func (k *Private) updateKey() (jose.JSONWebKey, time.Time, error) {
-	keys, err := k.storage.GetKeys()
-	if err != nil {
-		return jose.JSONWebKey{}, time.Time{}, err
-	}
-
-	if keys.SigningKey == nil {
-		return jose.JSONWebKey{}, time.Time{}, errors.New("no signing key found")
-	}
-
-	return *keys.SigningKey, keys.NextRotation, nil
-}
diff --git a/pkg/keyset/public.go b/pkg/keyset/public.go
deleted file mode 100644
index 6cd5ffa..0000000
--- a/pkg/keyset/public.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package keyset
-
-import (
-	"errors"
-	"fmt"
-	"gopkg.in/square/go-jose.v2"
-	"time"
-)
-
-type Public struct {
-	KeySet
-	CachedKeys []jose.JSONWebKey
-}
-
-func (k *Public) Verify(jws *jose.JSONWebSignature) ([]byte, error) {
-	// We don't support JWTs signed with multiple signatures.
-	keyID := ""
-	for _, sig := range jws.Signatures {
-		keyID = sig.Header.KeyID
-		break
-	}
-
-	keys := k.keysFromCache()
-	for _, key := range keys {
-		if keyID == "" || key.KeyID == keyID {
-			if payload, err := jws.Verify(&key); err == nil {
-				return payload, nil
-			}
-		}
-	}
-
-	// If the kid doesn't match, check for new keys from the remote. This is the
-	// strategy recommended by the spec.
-	//
-	// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
-	keys, err := k.keysFromStorage()
-	if err != nil {
-		return nil, fmt.Errorf("fetching keys %v", err)
-	}
-
-	for _, key := range keys {
-		if keyID == "" || key.KeyID == keyID {
-			if payload, err := jws.Verify(&key); err == nil {
-				return payload, nil
-			}
-		}
-	}
-	return nil, errors.New("failed to verify token signature")
-}
-
-func (k *Public) keysFromCache() (keys []jose.JSONWebKey) {
-	k.mu.RLock()
-	defer k.mu.RUnlock()
-	return k.CachedKeys
-}
-
-func (k *Public) keysFromStorage() ([]jose.JSONWebKey, error) {
-	k.mu.Lock()
-
-	if k.updater == nil {
-		k.updater = &updater{
-			done: make(chan struct{}),
-		}
-
-		go func() {
-			keys, err := k.updateKeys()
-			k.updater.keys = keys
-			k.updater.err = err
-			close(k.updater.done)
-
-			k.mu.Lock()
-			defer k.mu.Unlock()
-
-			if err == nil {
-				k.CachedKeys = keys
-			}
-
-			k.updater = nil
-		}()
-	}
-	updater := k.updater
-	k.mu.Unlock()
-
-	t := time.NewTimer(time.Second * 30)
-
-	select {
-	case <-t.C:
-		t.Stop()
-		return nil, errors.New("updating keys timed out")
-	case <-updater.done:
-		t.Stop()
-		return updater.keys, updater.err
-	}
-}
-
-func (k *Public) updateKeys() ([]jose.JSONWebKey, error) {
-	keys, err := k.storage.GetKeys()
-	if err != nil {
-		return nil, err
-	}
-
-	if keys.SigningKeyPub == nil {
-		return nil, errors.New("no public key found")
-	}
-
-	jwks := jose.JSONWebKeySet{
-		Keys: make([]jose.JSONWebKey, len(keys.VerificationKeys)+1),
-	}
-	jwks.Keys[0] = *keys.SigningKeyPub
-	for i, verificationKey := range keys.VerificationKeys {
-		jwks.Keys[i+1] = *verificationKey.PublicKey
-	}
-	return jwks.Keys, nil
-}
diff --git a/pkg/keyset/remote.go b/pkg/keyset/remote.go
deleted file mode 100644
index 792d16a..0000000
--- a/pkg/keyset/remote.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package keyset
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"golang.org/x/oauth2"
-	"gopkg.in/square/go-jose.v2"
-	"io"
-	"mime"
-	"net/http"
-	"sync"
-	"time"
-)
-
-func newRemote(jwksURL string, now func() time.Time) *Remote {
-	if now == nil {
-		now = time.Now
-	}
-	return &Remote{jwksURL: jwksURL, ctx: context.Background(), now: now}
-}
-
-// Remote is a KeySet implementation that validates JSON web tokens against
-// a jwks_uri endpoint.
-type Remote struct {
-	jwksURL    string
-	ctx        context.Context
-	now        func() time.Time
-	mu         sync.RWMutex
-	inflight   *inflight
-	cachedKeys []jose.JSONWebKey
-}
-
-type inflight struct {
-	doneCh chan struct{}
-	keys   []jose.JSONWebKey
-	err    error
-}
-
-func newInflight() *inflight {
-	return &inflight{doneCh: make(chan struct{})}
-}
-
-// wait returns a channel that multiple goroutines can receive on. Once it returns
-// a value, the inflight request is done and result() can be inspected.
-func (i *inflight) wait() <-chan struct{} {
-	return i.doneCh
-}
-
-// done can only be called by a single goroutine. It records the result of the
-// inflight request and signals other goroutines that the result is safe to
-// inspect.
-func (i *inflight) done(keys []jose.JSONWebKey, err error) {
-	i.keys = keys
-	i.err = err
-	close(i.doneCh)
-}
-
-// result cannot be called until the wait() channel has returned a value.
-func (i *inflight) result() ([]jose.JSONWebKey, error) {
-	return i.keys, i.err
-}
-
-func (r *Remote) Verify(jws *jose.JSONWebSignature) ([]byte, error) {
-	// We don't support JWTs signed with multiple signatures.
-	keyID := ""
-	for _, sig := range jws.Signatures {
-		keyID = sig.Header.KeyID
-		break
-	}
-
-	keys := r.keysFromCache()
-	for _, key := range keys {
-		if keyID == "" || key.KeyID == keyID {
-			if payload, err := jws.Verify(&key); err == nil {
-				return payload, nil
-			}
-		}
-	}
-
-	// If the kid doesn't match, check for new keys from the remote. This is the
-	// strategy recommended by the spec.
-	//
-	// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
-	keys, err := r.keysFromRemote(r.ctx)
-	if err != nil {
-		return nil, fmt.Errorf("fetching keys %v", err)
-	}
-
-	for _, key := range keys {
-		if keyID == "" || key.KeyID == keyID {
-			if payload, err := jws.Verify(&key); err == nil {
-				return payload, nil
-			}
-		}
-	}
-	return nil, errors.New("failed to verify id token signature")
-}
-
-func (r *Remote) keysFromCache() (keys []jose.JSONWebKey) {
-	r.mu.RLock()
-	defer r.mu.RUnlock()
-	return r.cachedKeys
-}
-
-// keysFromRemote syncs the key set from the remote set, records the values in the
-// cache, and returns the key set.
-func (r *Remote) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
-	// Need to lock to inspect the inflight request field.
-	r.mu.Lock()
-	// If there's not a current inflight request, create one.
-	if r.inflight == nil {
-		r.inflight = newInflight()
-
-		// This goroutine has exclusive ownership over the current inflight
-		// request. It releases the resource by nil'ing the inflight field
-		// once the goroutine is done.
-		go func() {
-			// Sync keys and finish inflight when that's done.
-			keys, err := r.updateKeys()
-
-			r.inflight.done(keys, err)
-
-			// Lock to update the keys and indicate that there is no longer an
-			// inflight request.
-			r.mu.Lock()
-			defer r.mu.Unlock()
-
-			if err == nil {
-				r.cachedKeys = keys
-			}
-
-			// Free inflight so a different request can run.
-			r.inflight = nil
-		}()
-	}
-	inflight := r.inflight
-	r.mu.Unlock()
-
-	select {
-	case <-ctx.Done():
-		return nil, ctx.Err()
-	case <-inflight.wait():
-		return inflight.result()
-	}
-}
-
-func (r *Remote) updateKeys() ([]jose.JSONWebKey, error) {
-	req, err := http.NewRequest("GET", r.jwksURL, nil)
-	if err != nil {
-		return nil, fmt.Errorf("oidc: can't create request: %v", err)
-	}
-
-	resp, err := doRequest(r.ctx, req)
-	if err != nil {
-		return nil, fmt.Errorf("oidc: get keys failed %v", err)
-	}
-	defer func() {
-		_ = resp.Body.Close()
-	}()
-
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("unable to read response body: %v", err)
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
-	}
-
-	var keySet jose.JSONWebKeySet
-	err = unmarshalResp(resp, body, &keySet)
-	if err != nil {
-		return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
-	}
-	return keySet.Keys, nil
-}
-
-func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
-	client := http.DefaultClient
-	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
-		client = c
-	}
-	return client.Do(req.WithContext(ctx))
-}
-
-func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
-	err := json.Unmarshal(body, &v)
-	if err == nil {
-		return nil
-	}
-	ct := r.Header.Get("Content-Type")
-	mediaType, _, parseErr := mime.ParseMediaType(ct)
-	if parseErr == nil && mediaType == "application/json" {
-		return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
-	}
-	return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
-}
diff --git a/pkg/token/tokenKind/tokenKind.go b/pkg/kind/kind.go
similarity index 69%
rename from pkg/token/tokenKind/tokenKind.go
rename to pkg/kind/kind.go
index 170cc13..759435d 100644
--- a/pkg/token/tokenKind/tokenKind.go
+++ b/pkg/kind/kind.go
@@ -1,5 +1,5 @@
 /*
-	Copyright 2022 Loophole Labs
+	Copyright 2023 Loophole Labs
 
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
@@ -14,18 +14,11 @@
 	limitations under the License.
 */
 
-package tokenKind
+package kind
 
 type Kind string
 
-var LUT = map[string]Kind{
-	"A": APITokenKind,
-	"S": ServiceTokenKind,
-}
-
 const (
-	OAuthKind        Kind = "oauth"
-	APITokenKind     Kind = "api"
-	ServiceTokenKind Kind = "service"
-	RefreshTokenKind Kind = "refresh"
+	Default Kind = "default"
+	Device  Kind = "device"
 )
diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go
new file mode 100644
index 0000000..b6bc50c
--- /dev/null
+++ b/pkg/manager/manager.go
@@ -0,0 +1,699 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package manager
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/gofiber/fiber/v2"
+	"github.com/loopholelabs/auth"
+	"github.com/loopholelabs/auth/internal/aes"
+	"github.com/loopholelabs/auth/pkg/apikey"
+	"github.com/loopholelabs/auth/pkg/claims"
+	"github.com/loopholelabs/auth/pkg/kind"
+	"github.com/loopholelabs/auth/pkg/provider"
+	"github.com/loopholelabs/auth/pkg/servicekey"
+	"github.com/loopholelabs/auth/pkg/session"
+	"github.com/loopholelabs/auth/pkg/storage"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+	"golang.org/x/crypto/bcrypt"
+	"sync"
+	"time"
+)
+
+// Names used when reading and writing auth material on HTTP requests.
+const (
+	CookieKeyString           = "auth-session"
+	AuthorizationHeaderString = "Authorization"
+	BearerHeaderString        = "Bearer "
+	KeyDelimiterString        = "."
+)
+
+// Byte-slice forms of the constants above, pre-computed for the
+// []byte-based fiber/fasthttp header APIs and for use as AES additional
+// data in aes.Encrypt/Decrypt (see CreateSession/GetSession).
+var (
+	CookieKey           = []byte(CookieKeyString)
+	AuthorizationHeader = []byte(AuthorizationHeaderString)
+	BearerHeader        = []byte(BearerHeaderString)
+	KeyDelimiter        = []byte(KeyDelimiterString)
+)
+
+// Manager caches authentication state (session IDs, service key sessions,
+// API keys) in memory, keeps the caches synchronized with the storage
+// backend via event subscriptions, and authenticates incoming fiber
+// requests (see Validate).
+type Manager struct {
+	logger  *zerolog.Logger
+	storage storage.Storage
+	domain  string // cookie domain for generated session cookies
+	tls     bool   // marks generated cookies Secure when true
+	wg      sync.WaitGroup
+	ctx     context.Context
+	cancel  context.CancelFunc
+
+	// Current and previous session-encryption keys; the old key lets
+	// cookies sealed before a key rotation still be decrypted once
+	// (see GetSession, subscribeToSecretKeyEvents).
+	secretKey    []byte
+	oldSecretKey []byte
+	secretKeyMu  sync.RWMutex
+
+	// Whether unknown users may be auto-created in CreateSession.
+	registration   bool
+	registrationMu sync.RWMutex
+
+	// Known session IDs, mirrored from storage.
+	sessions   map[string]struct{}
+	sessionsMu sync.RWMutex
+
+	// Service key sessions by ID, mirrored from storage.
+	servicekeySessions   map[string]*servicekey.Session
+	servicekeySessionsMu sync.RWMutex
+
+	// API keys by ID, mirrored from storage.
+	apikeys   map[string]*apikey.APIKey
+	apikeysMu sync.RWMutex
+}
+
+// New constructs a Manager bound to the given cookie domain, TLS setting,
+// and storage backend, logging through a child logger tagged
+// SESSION-MANAGER. Call Start before use and Stop to shut it down.
+func New(domain string, tls bool, storage storage.Storage, logger *zerolog.Logger) *Manager {
+	log := logger.With().Str("AUTH", "SESSION-MANAGER").Logger()
+	ctx, cancel := context.WithCancel(context.Background())
+	m := &Manager{
+		logger:             &log,
+		storage:            storage,
+		domain:             domain,
+		tls:                tls,
+		ctx:                ctx,
+		cancel:             cancel,
+		sessions:           make(map[string]struct{}),
+		servicekeySessions: make(map[string]*servicekey.Session),
+		apikeys:            make(map[string]*apikey.APIKey),
+	}
+	return m
+}
+
+// Start subscribes to storage events for every cached category (secret
+// key, registration flag, session IDs, service key sessions, API keys)
+// and primes the corresponding in-memory state with the current values.
+// For each category the matching mutex is held across both the
+// subscription and the initial load, so an event delivered during
+// bootstrap cannot interleave with the initial read. Every subscription
+// goroutine is tracked in m.wg and exits when Stop cancels m.ctx.
+func (m *Manager) Start() error {
+	m.logger.Info().Msg("starting manager")
+
+	// Secret key: load the existing key or, on first start, generate and
+	// persist a fresh 32-byte key.
+	m.secretKeyMu.Lock()
+	secretKeyEvents, err := m.storage.SubscribeToSecretKey(m.ctx)
+	if err != nil {
+		m.secretKeyMu.Unlock()
+		return fmt.Errorf("failed to subscribe to secret key events: %w", err)
+	}
+	m.wg.Add(1)
+	go m.subscribeToSecretKeyEvents(secretKeyEvents)
+	m.logger.Info().Msg("subscribed to secret key events")
+	m.secretKey, err = m.storage.GetSecretKey(m.ctx)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			m.logger.Info().Msg("no secret key found, generating new one")
+			m.secretKey = utils.RandomBytes(32)
+			err = m.storage.SetSecretKey(m.ctx, m.secretKey)
+			m.secretKeyMu.Unlock()
+			if err != nil {
+				return fmt.Errorf("failed to set secret key: %w", err)
+			}
+		} else {
+			m.secretKeyMu.Unlock()
+			return fmt.Errorf("failed to get secret key: %w", err)
+		}
+	} else {
+		m.secretKeyMu.Unlock()
+	}
+	m.logger.Info().Msg("retrieved secret key")
+
+	// Registration flag.
+	m.registrationMu.Lock()
+	registrationEvents, err := m.storage.SubscribeToRegistration(m.ctx)
+	if err != nil {
+		m.registrationMu.Unlock()
+		return fmt.Errorf("failed to subscribe to registration events: %w", err)
+	}
+	m.wg.Add(1)
+	go m.subscribeToRegistrationEvents(registrationEvents)
+	m.logger.Info().Msg("subscribed to registration events")
+	m.registration, err = m.storage.GetRegistration(m.ctx)
+	m.registrationMu.Unlock()
+	if err != nil {
+		return fmt.Errorf("failed to get registration: %w", err)
+	}
+	m.logger.Info().Msg("retrieved registration")
+
+	// Session ID cache.
+	m.sessionsMu.Lock()
+	sessionEvents, err := m.storage.SubscribeToSessionIDs(m.ctx)
+	if err != nil {
+		m.sessionsMu.Unlock()
+		return fmt.Errorf("failed to subscribe to session events: %w", err)
+	}
+	m.wg.Add(1)
+	go m.subscribeToSessionIDEvents(sessionEvents)
+	m.logger.Info().Msg("subscribed to session ID events")
+	sessions, err := m.storage.ListSessionIDs(m.ctx)
+	if err != nil {
+		m.sessionsMu.Unlock()
+		return fmt.Errorf("failed to list session IDs: %w", err)
+	}
+	for _, sess := range sessions {
+		m.sessions[sess] = struct{}{}
+	}
+	m.sessionsMu.Unlock()
+	m.logger.Info().Msg("retrieved session IDs")
+
+	// Service key session cache.
+	m.servicekeySessionsMu.Lock()
+	servicekeySessionEvents, err := m.storage.SubscribeToServiceKeySessions(m.ctx)
+	if err != nil {
+		m.servicekeySessionsMu.Unlock()
+		return fmt.Errorf("failed to subscribe to service key session events: %w", err)
+	}
+	m.wg.Add(1)
+	go m.subscribeToServiceKeySessionEvents(servicekeySessionEvents)
+	m.logger.Info().Msg("subscribed to service key session events")
+	servicekeySessionIDs, err := m.storage.ListServiceKeySessions(m.ctx)
+	if err != nil {
+		m.servicekeySessionsMu.Unlock()
+		return fmt.Errorf("failed to list service key session IDs: %w", err)
+	}
+	for _, sess := range servicekeySessionIDs {
+		m.servicekeySessions[sess.ID] = sess
+	}
+	m.servicekeySessionsMu.Unlock()
+	m.logger.Info().Msg("retrieved service key sessions")
+
+	// API key cache.
+	m.apikeysMu.Lock()
+	apikeyEvents, err := m.storage.SubscribeToAPIKeys(m.ctx)
+	if err != nil {
+		m.apikeysMu.Unlock()
+		return fmt.Errorf("failed to subscribe to api key events: %w", err)
+	}
+	m.wg.Add(1)
+	go m.subscribeToAPIKeyEvents(apikeyEvents)
+	m.logger.Info().Msg("subscribed to api key events")
+	apikeys, err := m.storage.ListAPIKeys(m.ctx)
+	if err != nil {
+		m.apikeysMu.Unlock()
+		return fmt.Errorf("failed to list api keys: %w", err)
+	}
+	for _, key := range apikeys {
+		m.apikeys[key.ID] = key
+	}
+	m.apikeysMu.Unlock()
+	m.logger.Info().Msg("retrieved api keys")
+
+	return nil
+}
+
+// Stop cancels the manager context and waits for all event-subscription
+// goroutines started by Start to exit. It always returns nil.
+func (m *Manager) Stop() error {
+	m.logger.Info().Msg("stopping manager")
+	m.cancel()
+	m.wg.Wait()
+	return nil
+}
+
+// GenerateCookie wraps an encrypted session value in the auth cookie,
+// scoped to the manager's domain, marked HTTP-only and SameSite=Lax, and
+// Secure when the manager was configured with TLS.
+func (m *Manager) GenerateCookie(session string, expiry time.Time) *fiber.Cookie {
+	m.logger.Debug().Msgf("generating cookie with expiry %s", expiry)
+	cookie := new(fiber.Cookie)
+	cookie.Name = CookieKeyString
+	cookie.Value = session
+	cookie.Domain = m.domain
+	cookie.Expires = expiry
+	cookie.Secure = m.tls
+	cookie.HTTPOnly = true
+	cookie.SameSite = fiber.CookieSameSiteLaxMode
+	return cookie
+}
+
+// CreateSession authenticates userID (optionally scoped to organization)
+// and returns an encrypted session cookie. Unknown users are created on
+// the fly, but only when no organization was requested and registration
+// is enabled. On any failure the HTTP error response has already been
+// written to ctx, and the result of that write is returned as the error.
+func (m *Manager) CreateSession(ctx *fiber.Ctx, kind kind.Kind, provider provider.Key, userID string, organization string) (*fiber.Cookie, error) {
+	m.logger.Debug().Msgf("creating session for user %s (org '%s')", userID, organization)
+	exists, err := m.storage.UserExists(ctx.Context(), userID)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to check if user exists")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if user exists")
+	}
+
+	if !exists {
+		// Auto-registration is only allowed for org-less sessions and
+		// only while the registration flag is enabled.
+		if organization != "" {
+			return nil, ctx.Status(fiber.StatusNotFound).SendString("user does not exist")
+		}
+
+		m.logger.Debug().Msgf("user %s does not exist", userID)
+		m.registrationMu.RLock()
+		registration := m.registration
+		m.registrationMu.RUnlock()
+
+		if !registration {
+			return nil, ctx.Status(fiber.StatusNotFound).SendString("user does not exist")
+		}
+
+		m.logger.Debug().Msgf("creating user %s", userID)
+
+		c := &claims.Claims{
+			UserID: userID,
+		}
+
+		err = m.storage.NewUser(ctx.Context(), c)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to create user")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to create user")
+		}
+	}
+
+	if organization != "" {
+		// The user must already be a member of the requested organization.
+		m.logger.Debug().Msgf("checking if user %s is member of organization %s", userID, organization)
+		exists, err = m.storage.UserOrganizationExists(ctx.Context(), userID, organization)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to check if organization exists")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if organization exists")
+		}
+
+		if !exists {
+			return nil, ctx.Status(fiber.StatusForbidden).SendString("invalid organization")
+		}
+	}
+
+	sess := session.New(kind, provider, userID, organization)
+	data, err := json.Marshal(sess)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to marshal session")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to marshal session")
+	}
+
+	// Seal the session JSON with the current secret key; CookieKey is
+	// used as the AES additional data.
+	m.secretKeyMu.RLock()
+	secretKey := m.secretKey
+	m.secretKeyMu.RUnlock()
+
+	encrypted, err := aes.Encrypt(secretKey, CookieKey, data)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to encrypt session")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to encrypt session")
+	}
+
+	err = m.storage.SetSession(ctx.Context(), sess)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to set session")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to set session")
+	}
+
+	m.logger.Debug().Msgf("created session %s for user %s (org '%s') with expiry %s", sess.ID, sess.UserID, sess.Organization, sess.Expiry)
+
+	return m.GenerateCookie(encrypted, sess.Expiry), nil
+}
+
+// GetSession decrypts and validates a session cookie. If decryption with
+// the current secret key fails it falls back to the previous key (key
+// rotation). Expired or unknown sessions are rejected with 401. When the
+// old key was used, or the session is close to expiry, the session is
+// refreshed, re-sealed with the current key, persisted, and a replacement
+// cookie is set on ctx. On failure the HTTP error response has already
+// been written to ctx and is returned as the error.
+func (m *Manager) GetSession(ctx *fiber.Ctx, cookie string) (*session.Session, error) {
+	m.secretKeyMu.RLock()
+	secretKey := m.secretKey
+	oldSecretKey := m.oldSecretKey
+	m.secretKeyMu.RUnlock()
+
+	oldSecretKeyUsed := false
+	decrypted, err := aes.Decrypt(secretKey, CookieKey, cookie)
+	if err != nil {
+		if errors.Is(err, aes.ErrInvalidContent) {
+			// Current key failed cleanly; try the pre-rotation key if one
+			// exists.
+			if oldSecretKey != nil {
+				decrypted, err = aes.Decrypt(oldSecretKey, CookieKey, cookie)
+				if err != nil {
+					if errors.Is(err, aes.ErrInvalidContent) {
+						return nil, ctx.Status(fiber.StatusUnauthorized).SendString("invalid session cookie")
+					}
+					m.logger.Error().Err(err).Msg("failed to decrypt session with old secret key")
+					return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to decrypt session")
+				}
+				oldSecretKeyUsed = true
+			} else {
+				return nil, ctx.Status(fiber.StatusUnauthorized).SendString("invalid session cookie")
+			}
+		} else {
+			m.logger.Error().Err(err).Msg("failed to decrypt session")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to decrypt session")
+		}
+	}
+
+	sess := new(session.Session)
+	err = json.Unmarshal(decrypted, sess)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to unmarshal session")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to unmarshal session")
+	}
+
+	if sess.Expired() {
+		return nil, ctx.Status(fiber.StatusUnauthorized).SendString("session expired")
+	}
+
+	// Check the local cache first; on a miss fall back to storage (the
+	// cache is eventually consistent with storage events).
+	m.sessionsMu.RLock()
+	_, exists := m.sessions[sess.ID]
+	m.sessionsMu.RUnlock()
+	if !exists {
+		exists, err = m.storage.SessionIDExists(ctx.Context(), sess.ID)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to check if session exists")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if session exists")
+		}
+		if !exists {
+			return nil, ctx.Status(fiber.StatusUnauthorized).SendString("session does not exist")
+		}
+	}
+
+	// Re-issue the cookie under the current key when needed.
+	if oldSecretKeyUsed || sess.CloseToExpiry() {
+		sess.Refresh()
+		data, err := json.Marshal(sess)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to marshal refreshed session")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to marshal session")
+		}
+
+		encrypted, err := aes.Encrypt(secretKey, CookieKey, data)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to encrypt refreshed session")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to encrypt session")
+		}
+
+		err = m.storage.SetSession(ctx.Context(), sess)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to set refreshed session")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to set session")
+		}
+
+		ctx.Cookie(m.GenerateCookie(encrypted, sess.Expiry))
+	}
+
+	return sess, nil
+}
+
+// GetAPIKey looks up an API key by ID (local cache first, then storage)
+// and verifies the caller-supplied plaintext secret against the stored
+// bcrypt hash. It returns the key on success; on failure the HTTP error
+// response has already been written to ctx and is returned as the error.
+func (m *Manager) GetAPIKey(ctx *fiber.Ctx, keyID string, keySecret []byte) (*apikey.APIKey, error) {
+	m.apikeysMu.RLock()
+	key, ok := m.apikeys[keyID]
+	m.apikeysMu.RUnlock()
+	if !ok {
+		// Cache miss: fall back to persistent storage.
+		var err error
+		key, err = m.storage.GetAPIKey(ctx.Context(), keyID)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) {
+				return nil, ctx.Status(fiber.StatusUnauthorized).SendString("api key does not exist")
+			}
+			m.logger.Error().Err(err).Msg("failed to check if api key exists")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if api key exists")
+		}
+	}
+
+	// bcrypt.CompareHashAndPassword takes the stored hash first and the
+	// plaintext second; the arguments were previously reversed, which
+	// rejected every valid key.
+	if bcrypt.CompareHashAndPassword(key.Hash, keySecret) != nil {
+		return nil, ctx.Status(fiber.StatusUnauthorized).SendString("invalid api key")
+	}
+
+	return key, nil
+}
+
+// GetServiceKey loads a service key by ID from storage and verifies the
+// caller-supplied plaintext secret against the stored bcrypt hash. It
+// returns the key on success; on failure the HTTP error response has
+// already been written to ctx and is returned as the error.
+func (m *Manager) GetServiceKey(ctx *fiber.Ctx, keyID string, keySecret []byte) (*servicekey.ServiceKey, error) {
+	key, err := m.storage.GetServiceKey(ctx.Context(), keyID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, ctx.Status(fiber.StatusUnauthorized).SendString("service key does not exist")
+		}
+		m.logger.Error().Err(err).Msg("failed to check if service key exists")
+		return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if service key exists")
+	}
+
+	// bcrypt.CompareHashAndPassword takes the stored hash first and the
+	// plaintext second; the arguments were previously reversed, which
+	// rejected every valid key.
+	if bcrypt.CompareHashAndPassword(key.Hash, keySecret) != nil {
+		return nil, ctx.Status(fiber.StatusUnauthorized).SendString("invalid service key")
+	}
+
+	return key, nil
+}
+
+// CreateServiceKeySession exchanges a service key (ID + plaintext secret)
+// for a new service key session, enforcing the key's expiry and max-use
+// limits. It returns the session and its plaintext session secret. On
+// failure the HTTP error response has already been written to ctx.
+func (m *Manager) CreateServiceKeySession(ctx *fiber.Ctx, keyID string, keySecret []byte) (*servicekey.Session, []byte, error) {
+	serviceKey, err := m.GetServiceKey(ctx, keyID, keySecret)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// A zero Expires means the key never expires.
+	if !serviceKey.Expires.IsZero() && time.Now().After(serviceKey.Expires) {
+		return nil, nil, ctx.Status(fiber.StatusUnauthorized).SendString("service key expired")
+	}
+
+	// MaxUses == 0 means unlimited uses.
+	if serviceKey.MaxUses != 0 && serviceKey.NumUsed >= serviceKey.MaxUses {
+		return nil, nil, ctx.Status(fiber.StatusUnauthorized).SendString("service key has reached its maximum uses")
+	}
+
+	// NOTE(review): the usage counter is incremented before the session is
+	// created, so a failure below still consumes one use — confirm this is
+	// the intended accounting.
+	err = m.storage.IncrementServiceKeyNumUsed(ctx.Context(), serviceKey.ID)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to increment service key num used")
+		return nil, nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to increment service key num used")
+	}
+
+	sess, secret, err := servicekey.NewSession(serviceKey)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to create service key session")
+		return nil, nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to create service key session")
+	}
+
+	err = m.storage.SetServiceKeySession(ctx.Context(), sess)
+	if err != nil {
+		m.logger.Error().Err(err).Msg("failed to set service key session")
+		return nil, nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to set service key session")
+	}
+
+	m.logger.Debug().Msgf("created service key session %s for user %s (org '%s')", sess.ID, sess.UserID, sess.Organization)
+
+	return sess, secret, nil
+}
+
+// GetServiceKeySession looks up a service key session by ID (local cache
+// first, then storage) and verifies the caller-supplied plaintext secret
+// against the stored bcrypt hash. It returns the session on success; on
+// failure the HTTP error response has already been written to ctx and is
+// returned as the error.
+func (m *Manager) GetServiceKeySession(ctx *fiber.Ctx, sessionID string, sessionSecret []byte) (*servicekey.Session, error) {
+	m.servicekeySessionsMu.RLock()
+	sess, ok := m.servicekeySessions[sessionID]
+	m.servicekeySessionsMu.RUnlock()
+	if !ok {
+		// Cache miss: fall back to persistent storage.
+		var err error
+		sess, err = m.storage.GetServiceKeySession(ctx.Context(), sessionID)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) {
+				return nil, ctx.Status(fiber.StatusUnauthorized).SendString("service key session does not exist")
+			}
+			m.logger.Error().Err(err).Msg("failed to check if service key session exists")
+			return nil, ctx.Status(fiber.StatusInternalServerError).SendString("failed to check if service key session exists")
+		}
+	}
+
+	// bcrypt.CompareHashAndPassword takes the stored hash first and the
+	// plaintext second; the arguments were previously reversed, which
+	// rejected every valid session secret.
+	if bcrypt.CompareHashAndPassword(sess.Hash, sessionSecret) != nil {
+		return nil, ctx.Status(fiber.StatusUnauthorized).SendString("invalid service key session")
+	}
+
+	return sess, nil
+}
+
+// Validate is a fiber middleware that authenticates a request first by
+// session cookie, then by a "Bearer <id>.<secret>" Authorization header
+// for API keys ("AK-" prefix) and service key sessions ("SS-" prefix).
+// On success it stores the auth kind, principal, user ID, and
+// organization in ctx.Locals and passes control to ctx.Next(); otherwise
+// it responds 401.
+func (m *Manager) Validate(ctx *fiber.Ctx) error {
+	cookie := ctx.Cookies(CookieKeyString)
+	if cookie != "" {
+		sess, err := m.GetSession(ctx, cookie)
+		// GetSession writes the error response itself; a nil session means
+		// err already carries the result of that write.
+		if sess == nil {
+			return err
+		}
+
+		ctx.Locals(auth.KindContextKey, auth.KindSession)
+		ctx.Locals(auth.SessionContextKey, sess)
+		ctx.Locals(auth.UserContextKey, sess.UserID)
+		ctx.Locals(auth.OrganizationContextKey, sess.Organization)
+		return ctx.Next()
+	}
+
+	authHeader := ctx.Request().Header.PeekBytes(AuthorizationHeader)
+	if len(authHeader) > len(BearerHeader) {
+		if !bytes.Equal(authHeader[:len(BearerHeader)], BearerHeader) {
+			return ctx.Status(fiber.StatusUnauthorized).SendString("invalid authorization header")
+		}
+		// Strip "Bearer " and split the credential into <id> and <secret>.
+		authHeader = authHeader[len(BearerHeader):]
+		keySplit := bytes.Split(authHeader, KeyDelimiter)
+		if len(keySplit) != 2 {
+			return ctx.Status(fiber.StatusUnauthorized).SendString("invalid authorization header")
+		}
+
+		// NOTE(review): keyID still carries its "AK-"/"SS-" prefix here —
+		// presumably stored key IDs include the prefix; confirm against
+		// the code that issues the keys.
+		keyID := string(keySplit[0])
+		keySecret := keySplit[1]
+
+		if bytes.HasPrefix(authHeader, auth.APIKeyPrefix) {
+			key, err := m.GetAPIKey(ctx, keyID, keySecret)
+			// GetAPIKey writes the error response itself.
+			if key == nil {
+				return err
+			}
+
+			ctx.Locals(auth.KindContextKey, auth.KindAPIKey)
+			ctx.Locals(auth.APIKeyContextKey, key)
+			ctx.Locals(auth.UserContextKey, key.UserID)
+			ctx.Locals(auth.OrganizationContextKey, key.Organization)
+			return ctx.Next()
+		}
+
+		if bytes.HasPrefix(authHeader, auth.ServiceKeySessionPrefix) {
+			key, err := m.GetServiceKeySession(ctx, keyID, keySecret)
+			// GetServiceKeySession writes the error response itself.
+			if key == nil {
+				return err
+			}
+
+			ctx.Locals(auth.KindContextKey, auth.KindServiceKey)
+			ctx.Locals(auth.ServiceKeySessionContextKey, key)
+			ctx.Locals(auth.UserContextKey, key.UserID)
+			ctx.Locals(auth.OrganizationContextKey, key.Organization)
+			return ctx.Next()
+		}
+	}
+
+	return ctx.Status(fiber.StatusUnauthorized).SendString("no valid session cookie or authorization header")
+}
+
+// LogoutSession deletes the caller's session (when a session cookie is
+// present) and clears the cookie on the response.
+func (m *Manager) LogoutSession(ctx *fiber.Ctx) error {
+	cookie := ctx.Cookies(CookieKeyString)
+	if cookie != "" {
+		// NOTE(review): the raw encrypted cookie value is passed to
+		// DeleteSession, whereas GetSession identifies sessions by the
+		// decrypted sess.ID — confirm the storage layer really keys
+		// deletion on the encrypted form.
+		err := m.storage.DeleteSession(ctx.Context(), cookie)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to delete session")
+			return ctx.Status(fiber.StatusInternalServerError).SendString("failed to delete session")
+		}
+	}
+
+	ctx.ClearCookie(CookieKeyString)
+	return nil
+}
+
+// LogoutServiceKeySession deletes the service key session identified by
+// the request's "Bearer SS-<id>.<secret>" Authorization header. Missing,
+// malformed, or non-service-key headers are silently ignored (nil is
+// returned) rather than treated as errors.
+func (m *Manager) LogoutServiceKeySession(ctx *fiber.Ctx) error {
+	authHeader := ctx.Request().Header.PeekBytes(AuthorizationHeader)
+	if len(authHeader) > len(BearerHeader) {
+		if !bytes.Equal(authHeader[:len(BearerHeader)], BearerHeader) {
+			return nil
+		}
+
+		authHeader = authHeader[len(BearerHeader):]
+		if !bytes.HasPrefix(authHeader, auth.ServiceKeySessionPrefix) {
+			return nil
+		}
+
+		keySplit := bytes.Split(authHeader, KeyDelimiter)
+		if len(keySplit) != 2 {
+			return nil
+		}
+
+		keyID := string(keySplit[0])
+		keySecret := keySplit[1]
+
+		// Verify the secret before deleting; GetServiceKeySession writes
+		// the error response itself when the session is invalid.
+		sess, err := m.GetServiceKeySession(ctx, keyID, keySecret)
+		if sess == nil {
+			return err
+		}
+
+		err = m.storage.DeleteServiceKeySession(ctx.Context(), sess.ID)
+		if err != nil {
+			m.logger.Error().Err(err).Msg("failed to delete service key session")
+			return ctx.Status(fiber.StatusInternalServerError).SendString("failed to delete service key session")
+		}
+	}
+	return nil
+}
+
+// subscribeToSecretKeyEvents applies secret key rotation events from
+// storage: the current key is demoted to oldSecretKey (so existing
+// cookies remain readable) and the new key becomes current. Runs until
+// the manager context is cancelled; tracked by m.wg.
+func (m *Manager) subscribeToSecretKeyEvents(events <-chan *storage.SecretKeyEvent) {
+	defer m.wg.Done()
+	for {
+		select {
+		case <-m.ctx.Done():
+			m.logger.Info().Msg("secret key event subscription stopped")
+			return
+		case event := <-events:
+			m.logger.Info().Msg("secret key updated")
+			m.secretKeyMu.Lock()
+			m.oldSecretKey = m.secretKey
+			m.secretKey = event.SecretKey
+			m.secretKeyMu.Unlock()
+		}
+	}
+}
+
+// subscribeToRegistrationEvents mirrors registration-toggle events from
+// storage into m.registration until the manager context is cancelled.
+// Tracked by m.wg.
+func (m *Manager) subscribeToRegistrationEvents(events <-chan *storage.RegistrationEvent) {
+	defer m.wg.Done()
+	for {
+		select {
+		case <-m.ctx.Done():
+			m.logger.Info().Msg("registration event subscription stopped")
+			return
+		case ev := <-events:
+			m.logger.Info().Msg("registration updated")
+			m.registrationMu.Lock()
+			m.registration = ev.Enabled
+			m.registrationMu.Unlock()
+		}
+	}
+}
+
+// subscribeToSessionIDEvents keeps the m.sessions cache in sync with
+// storage: deletions remove the ID, creations add it. Runs until the
+// manager context is cancelled; tracked by m.wg.
+func (m *Manager) subscribeToSessionIDEvents(events <-chan *storage.SessionEvent) {
+	defer m.wg.Done()
+	for {
+		select {
+		case <-m.ctx.Done():
+			m.logger.Info().Msg("session event subscription stopped")
+			return
+		case event := <-events:
+			if event.Deleted {
+				m.logger.Debug().Msgf("session %s deleted", event.SessionID)
+				m.sessionsMu.Lock()
+				delete(m.sessions, event.SessionID)
+				m.sessionsMu.Unlock()
+			} else {
+				m.logger.Debug().Msgf("session %s created", event.SessionID)
+				m.sessionsMu.Lock()
+				m.sessions[event.SessionID] = struct{}{}
+				m.sessionsMu.Unlock()
+			}
+		}
+	}
+}
+
+// subscribeToAPIKeyEvents keeps the m.apikeys cache in sync with storage.
+// Create/update events missing their payload are logged and skipped
+// rather than clobbering the cache with nil. Runs until the manager
+// context is cancelled; tracked by m.wg.
+func (m *Manager) subscribeToAPIKeyEvents(events <-chan *storage.APIKeyEvent) {
+	defer m.wg.Done()
+	for {
+		select {
+		case <-m.ctx.Done():
+			m.logger.Info().Msg("api key event subscription stopped")
+			return
+		case event := <-events:
+			if event.Deleted {
+				m.logger.Debug().Msgf("api key %s deleted", event.APIKeyID)
+				m.apikeysMu.Lock()
+				delete(m.apikeys, event.APIKeyID)
+				m.apikeysMu.Unlock()
+			} else {
+				m.logger.Debug().Msgf("api key %s created or updated", event.APIKeyID)
+				if event.APIKey == nil {
+					m.logger.Error().Msgf("api key in create or update event for api key ID %s is nil", event.APIKeyID)
+				} else {
+					m.apikeysMu.Lock()
+					m.apikeys[event.APIKeyID] = event.APIKey
+					m.apikeysMu.Unlock()
+				}
+			}
+		}
+	}
+}
+
+// subscribeToServiceKeySessionEvents keeps the m.servicekeySessions cache
+// in sync with storage. Create/update events missing their payload are
+// logged and skipped rather than clobbering the cache with nil. Runs
+// until the manager context is cancelled; tracked by m.wg.
+func (m *Manager) subscribeToServiceKeySessionEvents(events <-chan *storage.ServiceKeySessionEvent) {
+	defer m.wg.Done()
+	for {
+		select {
+		case <-m.ctx.Done():
+			m.logger.Info().Msg("service key session event subscription stopped")
+			return
+		case event := <-events:
+			if event.Deleted {
+				m.logger.Debug().Msgf("service key session %s deleted", event.ServiceKeySessionID)
+				m.servicekeySessionsMu.Lock()
+				delete(m.servicekeySessions, event.ServiceKeySessionID)
+				m.servicekeySessionsMu.Unlock()
+			} else {
+				m.logger.Debug().Msgf("service key session %s created or updated", event.ServiceKeySessionID)
+				if event.ServiceKeySession == nil {
+					m.logger.Error().Msgf("service key session in create or update event for service key session ID %s is nil", event.ServiceKeySessionID)
+				} else {
+					m.servicekeySessionsMu.Lock()
+					m.servicekeySessions[event.ServiceKeySessionID] = event.ServiceKeySession
+					m.servicekeySessionsMu.Unlock()
+				}
+			}
+		}
+	}
+}
diff --git a/pkg/options/options.go b/pkg/options/options.go
deleted file mode 100644
index c9a24c4..0000000
--- a/pkg/options/options.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package options
-
-import (
-	"errors"
-	"github.com/dexidp/dex/pkg/log"
-	"github.com/dexidp/dex/server"
-	"github.com/dexidp/dex/web"
-	"github.com/loopholelabs/auth/pkg/storage"
-	"github.com/loopholelabs/auth/pkg/token/identity"
-)
-
-var (
-	InvalidIssuerError        = errors.New("invalid issuer")
-	InvalidAllowsOriginsError = errors.New("invalid allowed origins")
-	InvalidStorageError       = errors.New("invalid storage")
-	InvalidLoggerError        = errors.New("invalid logger")
-	InvalidNewUserError       = errors.New("invalid new user callback")
-)
-
-type Options struct {
-	Issuer         string
-	AllowedOrigins []string
-
-	Storage   storage.Storage
-	Logger    log.Logger
-	WebConfig *server.WebConfig
-
-	Enabled      func() bool
-	Registration func() bool
-	NewUser      func(claims *identity.IDToken) error
-}
-
-func (o *Options) Validate() error {
-	if o.Issuer == "" {
-		return InvalidIssuerError
-	}
-
-	if len(o.AllowedOrigins) == 0 {
-		return InvalidAllowsOriginsError
-	}
-
-	if o.Storage == nil {
-		return InvalidStorageError
-	}
-
-	if o.Logger == nil {
-		return InvalidLoggerError
-	}
-
-	if o.WebConfig == nil {
-		o.WebConfig = &server.WebConfig{
-			WebFS: web.FS(),
-		}
-	}
-
-	if o.Enabled == nil {
-		o.Enabled = func() bool { return true }
-	}
-
-	if o.Registration == nil {
-		o.Registration = func() bool { return true }
-	}
-
-	if o.NewUser == nil {
-		return InvalidNewUserError
-	}
-
-	return nil
-}
diff --git a/pkg/provider/device/database.go b/pkg/provider/device/database.go
new file mode 100644
index 0000000..69cc972
--- /dev/null
+++ b/pkg/provider/device/database.go
@@ -0,0 +1,32 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package device
+
+import (
+	"context"
+	"github.com/loopholelabs/auth/internal/ent"
+	"time"
+)
+
+// Database is the persistence interface the device-flow provider needs:
+// create a flow, look it up by device code or user code, attach a
+// completed session, delete, and garbage-collect flows older than expiry.
+type Database interface {
+	SetDeviceFlow(ctx context.Context, identifier string, deviceCode string, userCode string) error
+	GetDeviceFlow(ctx context.Context, deviceCode string) (*ent.DeviceFlow, error)
+	UpdateDeviceFlow(ctx context.Context, identifier string, session string, expiry time.Time) error
+	GetDeviceFlowUserCode(ctx context.Context, userCode string) (*ent.DeviceFlow, error)
+	DeleteDeviceFlow(ctx context.Context, deviceCode string) error
+	// GCDeviceFlow deletes flows older than expiry and reports how many
+	// were removed.
+	GCDeviceFlow(ctx context.Context, expiry time.Duration) (int, error)
+}
diff --git a/pkg/provider/device/device.go b/pkg/provider/device/device.go
new file mode 100644
index 0000000..395d5c7
--- /dev/null
+++ b/pkg/provider/device/device.go
@@ -0,0 +1,137 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package device
+
+import (
+	"context"
+	"github.com/google/uuid"
+	"github.com/loopholelabs/auth/pkg/provider"
+	"github.com/loopholelabs/auth/pkg/utils"
+	"github.com/rs/zerolog"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Compile-time assertion that *Device satisfies provider.Provider.
+var _ provider.Provider = (*Device)(nil)
+
+// Key names this provider; GCInterval and Expiry control the garbage
+// collection cadence and the lifetime of pending device flows (see gc).
+const (
+	Key        = "device"
+	GCInterval = time.Minute
+	Expiry     = time.Minute * 5
+)
+
+// Device implements the device-authorization flow on top of a Database.
+// The ctx/cancel pair and WaitGroup bound the lifetime of the gc
+// goroutine started by Start and stopped by Stop.
+type Device struct {
+	logger   *zerolog.Logger
+	database Database
+	wg       sync.WaitGroup
+	ctx      context.Context
+	cancel   context.CancelFunc
+}
+
+// New constructs a Device provider backed by database, logging through a
+// child logger tagged DEVICE-FLOW. Start launches the GC loop; Stop
+// halts it.
+func New(database Database, logger *zerolog.Logger) *Device {
+	log := logger.With().Str("AUTH", "DEVICE-FLOW").Logger()
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &Device{
+		logger:   &log,
+		database: database,
+		ctx:      ctx,
+		cancel:   cancel,
+	}
+	return d
+}
+
+// Key returns this provider's identifier ("device").
+func (g *Device) Key() provider.Key {
+	return Key
+}
+
+// Start launches the garbage-collection goroutine. It always returns nil.
+func (g *Device) Start() error {
+	g.wg.Add(1)
+	go g.gc()
+	return nil
+}
+
+// Stop cancels the provider context and waits for the gc goroutine to
+// exit. It always returns nil.
+func (g *Device) Stop() error {
+	g.cancel()
+	g.wg.Wait()
+	return nil
+}
+
+// StartFlow creates a new device flow and returns (deviceCode, userCode):
+// here the device code is a short uppercase 8-character string and the
+// user code is a UUID. NOTE(review): in the standard OAuth device flow
+// (RFC 8628) the short human-typed code is usually the *user* code —
+// confirm this naming matches what the HTTP endpoints show the user.
+func (g *Device) StartFlow(ctx context.Context) (string, string, error) {
+	deviceCode := strings.ToUpper(utils.RandomString(8))
+	userCode := uuid.New().String()
+	identifier := uuid.New().String() // internal handle consumed by CompleteFlow
+
+	err := g.database.SetDeviceFlow(ctx, identifier, deviceCode, userCode)
+	if err != nil {
+		return "", "", err
+	}
+
+	return deviceCode, userCode, nil
+}
+
+// ValidateFlow resolves a device code to the flow's internal identifier;
+// the database lookup error is returned unchanged when the flow is
+// unknown.
+func (g *Device) ValidateFlow(ctx context.Context, deviceCode string) (string, error) {
+	flow, err := g.database.GetDeviceFlow(ctx, deviceCode)
+	if err != nil {
+		return "", err
+	}
+
+	return flow.Identifier, nil
+}
+
+// PollFlow looks up a flow by user code and reports its session, expiry,
+// and last-poll time. Once a session has been attached (the flow is
+// complete) the flow record is deleted, so the session is handed out at
+// most once.
+func (g *Device) PollFlow(ctx context.Context, userCode string) (string, time.Time, time.Time, error) {
+	flow, err := g.database.GetDeviceFlowUserCode(ctx, userCode)
+	if err != nil {
+		return "", time.Time{}, time.Time{}, err
+	}
+
+	if flow.Session != "" {
+		// Flow completed: consume it so the session cannot be fetched again.
+		err = g.database.DeleteDeviceFlow(ctx, flow.DeviceCode)
+		if err != nil {
+			return "", time.Time{}, time.Time{}, err
+		}
+	}
+
+	return flow.Session, flow.ExpiresAt, flow.LastPoll, nil
+}
+
+// CompleteFlow marks the device flow identified by identifier as complete
+// by attaching the session value and its expiry time; the database error,
+// if any, is returned unchanged.
+func (g *Device) CompleteFlow(ctx context.Context, identifier string, session string, expiry time.Time) error {
+	// Idiom: return the result directly instead of routing it through a
+	// redundant err variable and nil-check.
+	return g.database.UpdateDeviceFlow(ctx, identifier, session, expiry)
+}
+
+// gc deletes device flows older than Expiry every GCInterval until the
+// provider context is cancelled. Runs in its own goroutine (see Start);
+// tracked by g.wg. GC failures are logged, not fatal.
+func (g *Device) gc() {
+	defer g.wg.Done()
+	for {
+		select {
+		case <-g.ctx.Done():
+			g.logger.Info().Msg("GC Stopped")
+			return
+		// time.After allocates a fresh timer each cycle; acceptable at a
+		// one-minute cadence.
+		case <-time.After(GCInterval):
+			deleted, err := g.database.GCDeviceFlow(g.ctx, Expiry)
+			if err != nil {
+				g.logger.Error().Err(err).Msg("failed to garbage collect expired device flows")
+			} else {
+				g.logger.Debug().Msgf("garbage collected %d expired device flows", deleted)
+			}
+		}
+	}
+}
diff --git a/pkg/healthcheck/healthcheck.go b/pkg/provider/github/database.go
similarity index 53%
rename from pkg/healthcheck/healthcheck.go
rename to pkg/provider/github/database.go
index ab15319..a22f189 100644
--- a/pkg/healthcheck/healthcheck.go
+++ b/pkg/provider/github/database.go
@@ -1,5 +1,5 @@
 /*
-	Copyright 2022 Loophole Labs
+	Copyright 2023 Loophole Labs
 
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
@@ -14,24 +14,17 @@
 	limitations under the License.
 */
 
-package healthcheck
+package github
 
 import (
-	"github.com/AppsFlyer/go-sundheit"
+	"context"
+	"github.com/loopholelabs/auth/internal/ent"
+	"time"
 )
 
-type Noop struct{}
-
-func NewNoop() *Noop {
-	return new(Noop)
-}
-
-func (h *Noop) RegisterCheck(_ gosundheit.Check, _ ...gosundheit.CheckOption) error {
-	return nil
-}
-func (h *Noop) Deregister(_ string) {}
-func (h *Noop) Results() (map[string]gosundheit.Result, bool) {
-	return nil, true
+type Database interface {
+	SetGithubFlow(ctx context.Context, state string, verifier string, challenge string, nextURL string, organization string, deviceIdentifier string) error
+	GetGithubFlow(ctx context.Context, state string) (*ent.GithubFlow, error)
+	DeleteGithubFlow(ctx context.Context, state string) error
+	GCGithubFlow(ctx context.Context, expiry time.Duration) (int, error)
 }
-func (h *Noop) IsHealthy() bool { return true }
-func (h *Noop) DeregisterAll()  {}
diff --git a/pkg/provider/github/github.go b/pkg/provider/github/github.go
new file mode 100644
index 0000000..3fda7b7
--- /dev/null
+++ b/pkg/provider/github/github.go
@@ -0,0 +1,201 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"github.com/google/uuid"
+	"github.com/grokify/go-pkce"
+	"github.com/loopholelabs/auth/pkg/provider"
+	"github.com/rs/zerolog"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/github"
+	"io"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+)
+
// Compile-time assertion that *Github satisfies provider.Provider.
var _ provider.Provider = (*Github)(nil)

var (
	// ErrInvalidResponse is returned when Github's API replies with a
	// non-200 status or without a primary, verified email address.
	ErrInvalidResponse = errors.New("invalid response")
)

const (
	// Key uniquely identifies this authentication provider.
	Key        = "github"
	// GCInterval is how often expired flows are garbage collected.
	GCInterval = time.Minute
	// Expiry is the age at which a stored flow is considered expired.
	Expiry     = time.Minute * 5
)

var (
	// defaultScopes requests read access to the user's email addresses.
	defaultScopes = []string{"user:email"}
	// defaultURL is Github's "list email addresses for the
	// authenticated user" API endpoint.
	defaultURL    = &url.URL{
		Scheme: "https",
		Host:   "api.github.com",
		Path:   "/user/emails",
	}
)

// email mirrors a single entry of Github's /user/emails response.
type email struct {
	Email      string `json:"email"`
	Primary    bool   `json:"primary"`
	Verified   bool   `json:"verified"`
	Visibility string `json:"visibility"`
}

// Github implements provider.Provider using Github's OAuth2
// authorization-code flow with PKCE.
type Github struct {
	logger   *zerolog.Logger
	conf     *oauth2.Config     // OAuth2 client configuration for Github
	database Database           // persistence for in-flight flows
	wg       sync.WaitGroup     // tracks the gc goroutine
	ctx      context.Context    // canceled by Stop to halt gc
	cancel   context.CancelFunc
}
+
+func New(clientID string, clientSecret string, database Database, logger *zerolog.Logger) *Github {
+	l := logger.With().Str("AUTH", "GITHUB-OAUTH-PROVIDER").Logger()
+	ctx, cancel := context.WithCancel(context.Background())
+
+	return &Github{
+		logger: &l,
+		conf: &oauth2.Config{
+			ClientID:     clientID,
+			ClientSecret: clientSecret,
+			Scopes:       defaultScopes,
+			Endpoint:     github.Endpoint,
+		},
+		database: database,
+		ctx:      ctx,
+		cancel:   cancel,
+	}
+}
+
// Key returns the authentication provider's unique key ("github").
func (g *Github) Key() provider.Key {
	return Key
}

// Start launches the background garbage-collection goroutine.
// It always returns nil.
func (g *Github) Start() error {
	g.wg.Add(1)
	go g.gc()
	return nil
}

// Stop cancels the provider's context and blocks until the
// garbage-collection goroutine has exited. It always returns nil.
func (g *Github) Stop() error {
	g.cancel()
	g.wg.Wait()
	return nil
}
+
+func (g *Github) StartFlow(ctx context.Context, nextURL string, organization string, deviceIdentifier string) (string, error) {
+	verifier := pkce.NewCodeVerifier()
+	challenge := pkce.CodeChallengeS256(verifier)
+	state := uuid.New().String()
+
+	g.logger.Debug().Msgf("starting flow for state %s", state)
+	err := g.database.SetGithubFlow(ctx, state, verifier, challenge, nextURL, organization, deviceIdentifier)
+	if err != nil {
+		return "", err
+	}
+
+	return g.conf.AuthCodeURL(state, oauth2.AccessTypeOnline, oauth2.SetAuthURLParam(pkce.ParamCodeChallenge, challenge), oauth2.SetAuthURLParam(pkce.ParamCodeChallengeMethod, pkce.MethodS256)), nil
+}
+
+func (g *Github) CompleteFlow(ctx context.Context, code string, state string) (string, string, string, string, error) {
+	g.logger.Debug().Msgf("completing flow for state %s", state)
+	flow, err := g.database.GetGithubFlow(ctx, state)
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	g.logger.Debug().Msgf("found flow for state %s, deleting", state)
+	err = g.database.DeleteGithubFlow(ctx, state)
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	g.logger.Debug().Msgf("exchanging code for token for state %s", state)
+	token, err := g.conf.Exchange(ctx, code, oauth2.SetAuthURLParam(pkce.ParamCodeVerifier, flow.Verifier))
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	req := &http.Request{
+		Method: http.MethodGet,
+		URL:    defaultURL,
+		Header: http.Header{
+			"Authorization": []string{"token " + token.AccessToken},
+		},
+	}
+	req = req.WithContext(ctx)
+
+	g.logger.Debug().Msgf("fetching emails for state %s", state)
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	if res.StatusCode != http.StatusOK {
+		return "", "", "", "", ErrInvalidResponse
+	}
+
+	g.logger.Debug().Msgf("parsing emails for state %s", state)
+	body, err := io.ReadAll(res.Body)
+	_ = res.Body.Close()
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	var emails []email
+	err = json.Unmarshal(body, &emails)
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	g.logger.Debug().Msgf("found %d emails for state %s", len(emails), state)
+
+	for _, e := range emails {
+		if e.Primary && e.Verified {
+			g.logger.Debug().Msgf("found primary and verified email %s for state %s", e.Email, state)
+			return e.Email, flow.Organization, flow.NextURL, flow.DeviceIdentifier, nil
+		}
+	}
+
+	g.logger.Debug().Msgf("no primary and verified email found for state %s", state)
+	return "", "", "", "", ErrInvalidResponse
+}
+
+func (g *Github) gc() {
+	defer g.wg.Done()
+	for {
+		select {
+		case <-g.ctx.Done():
+			g.logger.Info().Msg("GC Stopped")
+			return
+		case <-time.After(GCInterval):
+			deleted, err := g.database.GCGithubFlow(g.ctx, Expiry)
+			if err != nil {
+				g.logger.Error().Err(err).Msg("failed to garbage collect expired github flows")
+			} else {
+				g.logger.Debug().Msgf("garbage collected %d expired github flows", deleted)
+			}
+		}
+	}
+}
diff --git a/pkg/server/models.go b/pkg/provider/provider.go
similarity index 61%
rename from pkg/server/models.go
rename to pkg/provider/provider.go
index 2f816ff..301b7d7 100644
--- a/pkg/server/models.go
+++ b/pkg/provider/provider.go
@@ -14,17 +14,20 @@
 	limitations under the License.
 */
 
-package server
+package provider
 
-import "github.com/loopholelabs/auth/pkg/token/identity"
+// Key uniquely identifies authentication providers
+type Key string
 
-type TokenError struct {
-	Error            string `json:"error"`
-	ErrorDescription string `json:"error_description"`
-}
+// Provider is an authentication provider that authorizes
+// a user and returns a set of claims
+type Provider interface {
+	// Key returns the authentication provider's unique key
+	Key() Key
 
-type ExchangeResponse identity.TokenResponse
-type RefreshResponse identity.TokenResponse
+	// Start starts the Provider
+	Start() error
 
-type ExchangeError TokenError
-type RefreshError TokenError
+	// Stop stops the Provider
+	Stop() error
+}
diff --git a/pkg/providers/github.go b/pkg/providers/github.go
deleted file mode 100644
index 35aad6c..0000000
--- a/pkg/providers/github.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package providers
-
-import (
-	"github.com/dexidp/dex/connector/github"
-)
-
-type GithubProvider struct {
-	ID           string `json:"id"`
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-	RedirectURI  string `json:"redirect_uri"`
-}
-
-func (g *GithubProvider) Validate() bool {
-	return g.ClientID != "" && g.ClientSecret != "" && g.RedirectURI != ""
-}
-
-func (g *GithubProvider) Populate(conf *github.Config) {
-	g.ClientID = conf.ClientID
-	g.ClientSecret = conf.ClientSecret
-	g.RedirectURI = conf.RedirectURI
-}
-
-func (g *GithubProvider) Convert() *github.Config {
-	return &github.Config{
-		ClientID:     g.ClientID,
-		ClientSecret: g.ClientSecret,
-		RedirectURI:  g.RedirectURI,
-	}
-}
diff --git a/pkg/server/middleware.go b/pkg/server/middleware.go
deleted file mode 100644
index df3f435..0000000
--- a/pkg/server/middleware.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package server
-
-import (
-	"encoding/json"
-	"errors"
-	"github.com/gofiber/fiber/v2"
-	"github.com/loopholelabs/auth/pkg/token/identity"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"github.com/valyala/fasthttp"
-	"gopkg.in/square/go-jose.v2"
-	"gopkg.in/square/go-jose.v2/jwt"
-)
-
-var (
-	IDPError     = errors.New("error while communicating with Dex IdP")
-	NewUserError = errors.New("error while creating new user")
-)
-
-func passthrough(handler fasthttp.RequestHandler) fiber.Handler {
-	return func(ctx *fiber.Ctx) error {
-		handler(ctx.Context())
-		return nil
-	}
-}
-
-func (s *Server) customClaims(handler fiber.Handler) fiber.Handler {
-	return func(ctx *fiber.Ctx) error {
-		err := handler(ctx)
-		if err != nil {
-			return err
-		}
-		response := ctx.Response()
-		if response.StatusCode() == 200 {
-			tokenResponse := new(identity.TokenResponse)
-			err = json.Unmarshal(response.Body(), tokenResponse)
-			if err != nil {
-				s.logger.Errorf("error while unmarshalling tokenResponse from response body: %s", err)
-				return IDPError
-			}
-
-			if tokenResponse.AccessToken != "" {
-				accessToken, err := jwt.ParseSigned(tokenResponse.AccessToken)
-				if err != nil {
-					s.logger.Errorf("error parsing access tokenResponse from Dex: %s", err)
-					return IDPError
-				}
-
-				tokenResponse.AccessToken, err = s.parseAndModify(accessToken)
-				if err != nil {
-					s.logger.Errorf("error modifying access tokenResponse from Dex: %s", err)
-					return IDPError
-				}
-				s.logger.Debugf("modified access tokenResponse: %s", tokenResponse.AccessToken)
-			}
-
-			if tokenResponse.IDToken != "" {
-				idToken, err := jwt.ParseSigned(tokenResponse.IDToken)
-				if err != nil {
-					s.logger.Errorf("error parsing id tokenResponse from Dex: %s", err)
-					return IDPError
-				}
-
-				tokenResponse.IDToken, err = s.parseAndModify(idToken)
-				if err != nil {
-					s.logger.Errorf("error modifying id tokenResponse from Dex: %s", err)
-					return IDPError
-				}
-				s.logger.Debugf("modified id tokenResponse: %s", tokenResponse.AccessToken)
-			}
-
-			payload, err := json.Marshal(tokenResponse)
-			if err != nil {
-				s.logger.Errorf("error marshalling modified tokenResponse: %s", err)
-				return IDPError
-			}
-
-			response.SetBody(payload)
-		}
-		return nil
-	}
-}
-
-func (s *Server) parseAndModify(jwt *jwt.JSONWebToken) (string, error) {
-	claims := new(identity.IDToken)
-	err := jwt.UnsafeClaimsWithoutVerification(&claims)
-	if err != nil {
-		s.logger.Errorf("error while retrieving claims from JWT: %s", err)
-		return "", IDPError
-	}
-
-	if len(claims.Email) == 0 {
-		s.logger.Errorf("email is empty in tokenResponse")
-		return "", IDPError
-	}
-
-	exists, err := s.storage.UserExists(claims.Email)
-	if err != nil {
-		s.logger.Errorf("error while retrieving user from DB: %s", err)
-		return "", IDPError
-	}
-
-	if !exists {
-		if s.options.Registration() {
-			err = s.options.NewUser(claims)
-			if err != nil {
-				s.logger.Errorf("error while creating new user: %s", err)
-				return "", NewUserError
-			}
-			s.logger.Infof("created new user %s", claims.Email)
-		} else {
-
-		}
-	}
-
-	claims.Subject = claims.Email
-	claims.Kind = tokenKind.OAuthKind
-
-	payload, err := json.Marshal(claims)
-	if err != nil {
-		s.logger.Errorf("error marshalling modified claims: %s", err)
-		return "", IDPError
-	}
-
-	signedToken, err := s.privateKeys.Sign(jose.RS256, payload)
-	if err != nil {
-		s.logger.Errorf("error signing payload with modified claims: %s", err)
-		return "", IDPError
-	}
-
-	return signedToken, nil
-}
diff --git a/pkg/server/server.go b/pkg/server/server.go
deleted file mode 100644
index 5569a98..0000000
--- a/pkg/server/server.go
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package server
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/dexidp/dex/connector/github"
-	"github.com/dexidp/dex/pkg/log"
-	"github.com/dexidp/dex/server"
-	dexStorage "github.com/dexidp/dex/storage"
-	"github.com/gofiber/fiber/v2"
-	"github.com/loopholelabs/auth/pkg/healthcheck"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/options"
-	"github.com/loopholelabs/auth/pkg/providers"
-	"github.com/loopholelabs/auth/pkg/refreshpolicy"
-	"github.com/loopholelabs/auth/pkg/storage"
-	"github.com/loopholelabs/auth/pkg/token"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"github.com/loopholelabs/auth/pkg/utils"
-	"github.com/valyala/fasthttp/fasthttpadaptor"
-	"gopkg.in/square/go-jose.v2"
-	"net/http"
-	"time"
-)
-
-const (
-	githubID = "github"
-)
-
-const (
-	DefaultConnectorName = "Basic-Authentication"
-)
-
-var (
-	InvalidConnectorError = errors.New("invalid connector configuration")
-)
-
-type Server struct {
-	logger  log.Logger
-	storage storage.Storage
-
-	app         *fiber.App
-	server      *server.Server
-	options     *options.Options
-	publicKeys  *keyset.Public
-	privateKeys *keyset.Private
-}
-
-func New(options *options.Options) (*Server, error) {
-	err := options.Validate()
-	if err != nil {
-		return nil, fmt.Errorf("invalid options for auth server: %w", err)
-	}
-
-	refreshPolicy, err := dex.DefaultRefreshPolicy(options.Logger)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create default refresh policy: %w", err)
-	}
-
-	s := &Server{
-		app: fiber.New(fiber.Config{
-			DisableStartupMessage: true,
-			ReadTimeout:           time.Second * 5,
-			WriteTimeout:          time.Second * 5,
-			IdleTimeout:           time.Second * 5,
-			JSONEncoder:           json.Marshal,
-			JSONDecoder:           json.Unmarshal,
-		}),
-		logger:      options.Logger,
-		storage:     options.Storage,
-		options:     options,
-		publicKeys:  keyset.NewPublic(options.Storage),
-		privateKeys: keyset.NewPrivate(options.Storage),
-	}
-
-	return s, s.setupDex(refreshPolicy)
-}
-
-func (s *Server) App() *fiber.App {
-	return s.app
-}
-
-func (s *Server) createDex(refreshPolicy *server.RefreshTokenPolicy) (*server.Server, error) {
-	return server.NewServer(context.Background(), server.Config{
-		Issuer:                 s.options.Issuer,
-		Storage:                s.storage,
-		SupportedResponseTypes: []string{"code"},
-		RefreshTokenPolicy:     refreshPolicy,
-		AllowedOrigins:         s.options.AllowedOrigins,
-		AlwaysShowLoginScreen:  false,
-		SkipApprovalScreen:     true,
-		Web:                    *s.options.WebConfig,
-		Logger:                 s.options.Logger,
-		HealthChecker:          healthcheck.NewNoop(),
-	})
-}
-
-func (s *Server) setupDex(refreshPolicy *server.RefreshTokenPolicy) error {
-	d, err := s.createDex(refreshPolicy)
-	if err != nil {
-		return fmt.Errorf("failed to create dex server: %w", err)
-	}
-
-	s.server = d
-
-	passThrough := passthrough(fasthttpadaptor.NewFastHTTPHandler(http.HandlerFunc(s.server.ServeHTTP)))
-	customClaims := s.customClaims(passThrough)
-
-	enabled := func(ctx *fiber.Ctx) error {
-		if !s.options.Enabled() {
-			return fiber.NewError(fiber.StatusForbidden, "identity service is disabled")
-		}
-		return ctx.Next()
-	}
-
-	s.app.Post("/exchange", s.exchange)
-	s.app.Post("/refresh", s.refresh)
-	s.app.All("/token", enabled, customClaims)
-	s.app.All("/*", enabled, passThrough)
-
-	return nil
-}
-
-func (s *Server) githubProviderID(name string) string {
-	return fmt.Sprintf("github-%s", name)
-}
-
-func (s *Server) exchange(ctx *fiber.Ctx) error {
-	if string(ctx.Request().Header.ContentType()) == fiber.MIMEApplicationForm && ctx.FormValue("grant_type") == "authorization_code" {
-		if key := ctx.FormValue("code"); len(key) != 73 {
-			clientID := ctx.FormValue("client_id")
-			if clientID == "" {
-				return ctx.Status(fiber.StatusUnauthorized).JSON(ExchangeError{
-					Error:            "invalid_client",
-					ErrorDescription: "invalid client",
-				})
-			}
-			client, err := s.storage.GetClient(clientID)
-			if err != nil || (!client.Public && (client.Secret != ctx.FormValue("client_secret"))) {
-				return ctx.Status(fiber.StatusUnauthorized).JSON(ExchangeError{
-					Error:            "invalid_client",
-					ErrorDescription: "invalid client",
-				})
-			}
-
-			kind, tokenIdentifier, tokenSecret, err := token.Decode(key)
-			if err != nil {
-				return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-					Error:            "invalid_grant",
-					ErrorDescription: "invalid or malformed key",
-				})
-			}
-
-			switch tokenKind.LUT[kind] {
-			case tokenKind.APITokenKind:
-				apiKey, err := s.storage.GetAPIKey(tokenIdentifier)
-				if err != nil {
-					return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-						Error:            "invalid_grant",
-						ErrorDescription: "invalid or malformed api key",
-					})
-				}
-				if !token.Verify(tokenSecret, apiKey.Secret) {
-					return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-						Error:            "invalid_grant",
-						ErrorDescription: "invalid or malformed api key",
-					})
-				}
-				apiToken := token.NewAPIToken(s.options.Issuer, apiKey, token.Audience{clientID})
-				refreshToken := token.NewRefreshTokenForAPIKey(s.options.Issuer, apiKey, token.Audience{clientID})
-
-				signedAPIToken, err := apiToken.Sign(s.privateKeys, jose.RS256)
-				if err != nil {
-					return ctx.Status(fiber.StatusInternalServerError).JSON(ExchangeError{
-						Error:            "server_error",
-						ErrorDescription: "internal server error",
-					})
-				}
-
-				signedRefreshToken, err := refreshToken.Sign(s.privateKeys, jose.RS256)
-				if err != nil {
-					return ctx.Status(fiber.StatusInternalServerError).JSON(ExchangeError{
-						Error:            "server_error",
-						ErrorDescription: "internal server error",
-					})
-				}
-
-				return ctx.JSON(ExchangeResponse{
-					AccessToken:  signedAPIToken,
-					TokenType:    "Bearer",
-					ExpiresIn:    int((time.Minute * 5).Seconds()),
-					RefreshToken: signedRefreshToken,
-				})
-			case tokenKind.ServiceTokenKind:
-				valid := func(key *token.ServiceKey) error {
-					if key.Expires > 0 && utils.Int64ToTime(key.Expires).Before(time.Now()) {
-						return errors.New("service key has expired")
-					}
-					if key.MaxUses > 0 && key.NumUsed >= key.MaxUses {
-						return errors.New("service key has reached max uses")
-					}
-					if !token.Verify(tokenSecret, key.Secret) {
-						return errors.New("invalid service key")
-					}
-
-					return nil
-				}
-
-				update := func(key *token.ServiceKey) {
-					key.NumUsed++
-				}
-
-				serviceKey, err := s.storage.GetServiceKey(tokenIdentifier, valid, update)
-				if err != nil {
-					return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-						Error:            "invalid_grant",
-						ErrorDescription: "invalid or malformed service key",
-					})
-				}
-
-				serviceToken := token.NewServiceToken(s.options.Issuer, serviceKey, token.Audience{clientID})
-				refreshToken := token.NewRefreshTokenForServiceKey(s.options.Issuer, serviceKey, token.Audience{clientID})
-
-				signedServiceToken, err := serviceToken.Sign(s.privateKeys, jose.RS256)
-				if err != nil {
-					return ctx.Status(fiber.StatusInternalServerError).JSON(ExchangeError{
-						Error:            "server_error",
-						ErrorDescription: "internal server error",
-					})
-				}
-
-				signedRefreshToken, err := refreshToken.Sign(s.privateKeys, jose.RS256)
-				if err != nil {
-					return ctx.Status(fiber.StatusInternalServerError).JSON(ExchangeError{
-						Error:            "server_error",
-						ErrorDescription: "internal server error",
-					})
-				}
-
-				return ctx.JSON(ExchangeResponse{
-					AccessToken:  signedServiceToken,
-					TokenType:    "Bearer",
-					ExpiresIn:    int((time.Minute * 5).Seconds()),
-					RefreshToken: signedRefreshToken,
-				})
-			}
-			return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-				Error:            "invalid_grant",
-				ErrorDescription: "invalid or malformed key",
-			})
-		}
-		return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-			Error:            "invalid_request",
-			ErrorDescription: "invalid request",
-		})
-	}
-	return ctx.Status(fiber.StatusBadRequest).JSON(ExchangeError{
-		Error:            "unsupported_grant_type",
-		ErrorDescription: fmt.Sprintf("unsupported grant type: %s", ctx.FormValue("grant_type")),
-	})
-}
-
-func (s *Server) refresh(ctx *fiber.Ctx) error {
-	if string(ctx.Request().Header.ContentType()) == fiber.MIMEApplicationForm && ctx.FormValue("grant_type") == "refresh_token" {
-		if refreshToken := ctx.FormValue("refresh_token"); refreshToken != "" {
-			clientID := ctx.FormValue("client_id")
-			if clientID == "" {
-				return ctx.Status(fiber.StatusUnauthorized).JSON(ExchangeError{
-					Error:            "invalid_client",
-					ErrorDescription: "invalid client",
-				})
-			}
-			client, err := s.storage.GetClient(clientID)
-			if err != nil || (!client.Public && (client.Secret != ctx.FormValue("client_secret"))) {
-				return ctx.Status(fiber.StatusUnauthorized).JSON(ExchangeError{
-					Error:            "invalid_client",
-					ErrorDescription: "invalid client",
-				})
-			}
-			var r token.RefreshToken
-			err = r.Populate(refreshToken, s.publicKeys)
-			if err != nil {
-				return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-					Error:            "invalid_grant",
-					ErrorDescription: "invalid or malformed refresh token",
-				})
-			}
-
-			if r.Kind != tokenKind.RefreshTokenKind {
-				return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-					Error:            "invalid_grant",
-					ErrorDescription: "invalid or malformed refresh token",
-				})
-			}
-
-			if time.Time(r.Expiry).Before(time.Now()) {
-				return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-					Error:            "invalid_grant",
-					ErrorDescription: "refresh token has expired",
-				})
-			}
-
-			switch r.For {
-			case tokenKind.APITokenKind:
-				if apiKey, err := s.storage.GetAPIKey(r.ID); err == nil {
-					apiToken := token.NewAPIToken(s.options.Issuer, apiKey, token.Audience{clientID})
-					r.Expiry = token.Time(time.Now().Add(time.Hour * 24 * 7))
-					r.IssuedAt = token.Time(time.Now())
-
-					signedAPIToken, err := apiToken.Sign(s.privateKeys, jose.RS256)
-					if err != nil {
-						return ctx.Status(fiber.StatusInternalServerError).JSON(RefreshError{
-							Error:            "server_error",
-							ErrorDescription: "internal server error",
-						})
-					}
-
-					signedRefreshToken, err := r.Sign(s.privateKeys, jose.RS256)
-					if err != nil {
-						return ctx.Status(fiber.StatusInternalServerError).JSON(RefreshError{
-							Error:            "server_error",
-							ErrorDescription: "internal server error",
-						})
-					}
-
-					return ctx.JSON(RefreshResponse{
-						AccessToken:  signedAPIToken,
-						TokenType:    "Bearer",
-						ExpiresIn:    int((time.Minute * 5).Seconds()),
-						RefreshToken: signedRefreshToken,
-					})
-				} else {
-					return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-						Error:            "invalid_grant",
-						ErrorDescription: "invalid or malformed refresh token",
-					})
-				}
-			case tokenKind.ServiceTokenKind:
-				if serviceKey, err := s.storage.GetServiceKey(r.ID, nil, nil); err == nil {
-					serviceToken := token.NewServiceToken(s.options.Issuer, serviceKey, token.Audience{clientID})
-					r.Expiry = token.Time(time.Now().Add(time.Hour * 24 * 7))
-					r.IssuedAt = token.Time(time.Now())
-
-					signedServiceToken, err := serviceToken.Sign(s.privateKeys, jose.RS256)
-					if err != nil {
-						return ctx.Status(fiber.StatusInternalServerError).JSON(RefreshError{
-							Error:            "server_error",
-							ErrorDescription: "internal server error",
-						})
-					}
-
-					signedRefreshToken, err := r.Sign(s.privateKeys, jose.RS256)
-					if err != nil {
-						return ctx.Status(fiber.StatusInternalServerError).JSON(RefreshError{
-							Error:            "server_error",
-							ErrorDescription: "internal server error",
-						})
-					}
-
-					return ctx.JSON(RefreshResponse{
-						AccessToken:  signedServiceToken,
-						TokenType:    "Bearer",
-						ExpiresIn:    int((time.Minute * 5).Seconds()),
-						RefreshToken: signedRefreshToken,
-					})
-				} else {
-					return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-						Error:            "invalid_grant",
-						ErrorDescription: "invalid or malformed refresh token",
-					})
-				}
-			}
-			return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-				Error:            "invalid_grant",
-				ErrorDescription: "invalid or malformed refresh token",
-			})
-		}
-		return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-			Error:            "invalid_request",
-			ErrorDescription: "invalid request",
-		})
-	}
-	return ctx.Status(fiber.StatusBadRequest).JSON(RefreshError{
-		Error:            "unsupported_grant_type",
-		ErrorDescription: fmt.Sprintf("unsupported grant type: %s", ctx.FormValue("grant_type")),
-	})
-}
-
-func (s *Server) GetPasswordProvider() (dexStorage.Connector, error) {
-	return s.storage.GetConnector(server.LocalConnector)
-}
-
-func (s *Server) CreatePasswordProvider() error {
-	return s.storage.CreateConnector(dexStorage.Connector{
-		ID:   server.LocalConnector,
-		Type: server.LocalConnector,
-		Name: DefaultConnectorName,
-	})
-}
-
-func (s *Server) DeletePasswordProvider() error {
-	connectors, err := s.storage.ListConnectors()
-	if err != nil {
-		return err
-	}
-
-	if len(connectors) == 1 {
-		return InvalidConnectorError
-	}
-
-	return s.storage.DeleteConnector(server.LocalConnector)
-}
-
-func (s *Server) CreateGithubProvider(name string, provider *providers.GithubProvider) error {
-	configBytes, err := json.Marshal(provider.Convert())
-	if err != nil {
-		return err
-	}
-
-	connector := dexStorage.Connector{
-		ID:     s.githubProviderID(name),
-		Type:   githubID,
-		Name:   name,
-		Config: configBytes,
-	}
-
-	return s.storage.CreateConnector(connector)
-}
-
-func (s *Server) GetGithubProvider(name string) (*providers.GithubProvider, error) {
-	connector, err := s.storage.GetConnector(s.githubProviderID(name))
-	if err != nil {
-		return nil, err
-	}
-	config := new(github.Config)
-	err = json.Unmarshal(connector.Config, config)
-	if err != nil {
-		return nil, err
-	}
-	provider := new(providers.GithubProvider)
-	provider.Populate(config)
-	provider.ID = connector.Name
-	return provider, nil
-}
-
-func (s *Server) DeleteGithubProvider(name string) error {
-	connectors, err := s.storage.ListConnectors()
-	if err != nil {
-		return err
-	}
-
-	if len(connectors) == 1 {
-		return InvalidConnectorError
-	}
-	return s.storage.DeleteConnector(s.githubProviderID(name))
-}
-
-func (s *Server) UpdateGithubProvider(name string, provider *providers.GithubProvider) error {
-	_, err := s.GetGithubProvider(name)
-	if err != nil {
-		return err
-	}
-	err = s.storage.DeleteConnector(s.githubProviderID(name))
-	if err != nil {
-		return err
-	}
-	return s.CreateGithubProvider(name, provider)
-}
-
-func (s *Server) ListGithubProvider() ([]*providers.GithubProvider, error) {
-	connectors, err := s.storage.ListConnectors()
-	if err != nil {
-		return nil, err
-	}
-	var configs []*providers.GithubProvider
-	for _, connector := range connectors {
-		if connector.Type == githubID {
-			config := new(github.Config)
-			err = json.Unmarshal(connector.Config, config)
-			if err != nil {
-				break
-			}
-			provider := new(providers.GithubProvider)
-			provider.Populate(config)
-			provider.ID = connector.Name
-			configs = append(configs, provider)
-		}
-	}
-
-	return configs, err
-}
-
-func (s *Server) PublicKeySet() *keyset.Public {
-	return s.publicKeys
-}
-
-func (s *Server) PrivateKeySet() *keyset.Private {
-	return s.privateKeys
-}
-
-func CreateClient(st storage.Storage, id string, secret string, redirect []string, public bool, name string, logo string) error {
-	return st.CreateClient(dexStorage.Client{
-		ID:           id,
-		Secret:       secret,
-		RedirectURIs: redirect,
-		TrustedPeers: nil,
-		Public:       public,
-		Name:         name,
-		LogoURL:      logo,
-	})
-}
-
-func GetClient(st storage.Storage, id string) (dexStorage.Client, error) {
-	return st.GetClient(id)
-}
-
-func UpdateClient(st storage.Storage, id string, secret string, redirect []string, public bool, name string, logo string) error {
-	return st.UpdateClient(id, func(old dexStorage.Client) (dexStorage.Client, error) {
-		old.Secret = secret
-		old.RedirectURIs = redirect
-		old.Public = public
-		old.Name = name
-		old.LogoURL = logo
-
-		return old, nil
-	})
-}
-
-func BootstrapConnectors(storage storage.Storage, github *providers.GithubProvider) error {
-	connectors, err := storage.ListConnectors()
-	if err != nil {
-		return err
-	}
-	if github == nil {
-		if len(connectors) == 0 {
-			return storage.CreateConnector(dexStorage.Connector{
-				ID:     server.LocalConnector,
-				Type:   server.LocalConnector,
-				Name:   DefaultConnectorName,
-				Config: []byte("{}"),
-			})
-		}
-		return nil
-	}
-
-	if len(connectors) > 0 {
-		_ = storage.DeleteConnector(server.LocalConnector)
-	}
-
-	if github != nil {
-		configBytes, err := json.Marshal(github.Convert())
-		if err != nil {
-			return err
-		}
-
-		connector := dexStorage.Connector{
-			ID:     github.ID,
-			Type:   githubID,
-			Name:   "Github",
-			Config: configBytes,
-		}
-
-		err = storage.CreateConnector(connector)
-		if err != nil && !errors.Is(err, dexStorage.ErrAlreadyExists) {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/pkg/server/validate.go b/pkg/server/validate.go
deleted file mode 100644
index ed89967..0000000
--- a/pkg/server/validate.go
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package server
-
-import (
-	"encoding/json"
-	"errors"
-	"github.com/gofiber/fiber/v2"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/token"
-	"github.com/loopholelabs/auth/pkg/token/identity"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-)
-
-var (
-	BearerPrefix = []byte("Bearer ")
-)
-
-const (
-	KindKey       = "kind"
-	ClaimsKey     = "claims"
-	IdentifierKey = "identifier"
-	APIKey        = "api"
-	ServiceKey    = "service"
-)
-
-type Identity struct {
-	Kind       tokenKind.Kind
-	Claims     interface{}
-	Identifier string
-	Key        string
-}
-
-func ValidateHandler(clientIDs []string, issuer string, keySet keyset.Verifier) fiber.Handler {
-	validate := Validate(clientIDs, issuer, keySet)
-	return func(ctx *fiber.Ctx) error {
-		authorizationHeader := ctx.Request().Header.Peek("Authorization")
-		if authorizationHeader == nil || len(authorizationHeader) <= len(BearerPrefix) {
-			return fiber.NewError(fiber.StatusUnauthorized, "invalid authorization header")
-		}
-
-		i, err := validate(string(authorizationHeader[len(BearerPrefix):]))
-		if err != nil {
-			return fiber.NewError(fiber.StatusUnauthorized, err.Error())
-		}
-
-		ctx.Locals(KindKey, i.Kind)
-		ctx.Locals(ClaimsKey, i.Claims)
-		ctx.Locals(IdentifierKey, i.Identifier)
-		switch i.Kind {
-		case tokenKind.APITokenKind:
-			ctx.Locals(APIKey, i.Key)
-		case tokenKind.ServiceTokenKind:
-			ctx.Locals(ServiceKey, i.Key)
-		}
-
-		return ctx.Next()
-	}
-}
-
-func Validate(clientIDs []string, issuer string, keySet keyset.Verifier) func(rawToken string) (*Identity, error) {
-	return func(rawToken string) (*Identity, error) {
-		partialToken, payload, err := token.PartialPopulate(keySet, rawToken)
-		if err != nil {
-			return nil, err
-		}
-
-		if !partialToken.ValidExpiry() {
-			return nil, errors.New("token expired")
-		}
-
-		if !partialToken.ValidIssuer(issuer) {
-			return nil, errors.New("invalid issuer")
-		}
-
-		validAudience := false
-		for _, clientID := range clientIDs {
-			if partialToken.ValidAudience(clientID) {
-				validAudience = true
-				break
-			}
-		}
-		if !validAudience {
-			return nil, errors.New("invalid audience")
-		}
-
-		switch partialToken.Kind {
-		case tokenKind.APITokenKind:
-			apiClaims := new(token.APIClaims)
-			err = json.Unmarshal(payload, apiClaims)
-			if err != nil {
-				return nil, err
-			}
-			if !apiClaims.Valid() {
-				return nil, errors.New("invalid claims")
-			}
-
-			return &Identity{
-				Kind:       tokenKind.APITokenKind,
-				Claims:     apiClaims,
-				Identifier: partialToken.Subject,
-				Key:        apiClaims.ID,
-			}, nil
-		case tokenKind.ServiceTokenKind:
-			serviceClaims := new(token.ServiceClaims)
-			err = json.Unmarshal(payload, serviceClaims)
-			if err != nil {
-				return nil, err
-			}
-			if !serviceClaims.Valid() {
-				return nil, errors.New("invalid claims")
-			}
-
-			return &Identity{
-				Kind:       tokenKind.ServiceTokenKind,
-				Claims:     serviceClaims,
-				Identifier: partialToken.Subject,
-				Key:        serviceClaims.ID,
-			}, nil
-		case tokenKind.OAuthKind:
-			idClaims := new(identity.IDClaims)
-			err = json.Unmarshal(payload, idClaims)
-			if err != nil {
-				return nil, err
-			}
-
-			return &Identity{
-				Kind:       tokenKind.OAuthKind,
-				Claims:     idClaims,
-				Identifier: partialToken.Subject,
-			}, nil
-		case tokenKind.RefreshTokenKind:
-			fallthrough
-		default:
-			return nil, errors.New("invalid token kind")
-		}
-	}
-}
diff --git a/pkg/servicekey/servicekey.go b/pkg/servicekey/servicekey.go
new file mode 100644
index 0000000..c1cf03a
--- /dev/null
+++ b/pkg/servicekey/servicekey.go
@@ -0,0 +1,49 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package servicekey
+
+import "time"
+
+// ServiceKey represents a Service Key
+type ServiceKey struct {
+	// ID is the Service Key's unique identifier
+	ID string `json:"id"`
+
+	// Hash is the hashed secret of the Service Key session
+	Hash []byte `json:"hash"`
+
+	// UserID is the user's unique identifier
+	UserID string `json:"user_id"`
+
+	// Organization is the organization that the Service Key belongs to (optional)
+	Organization string `json:"organization"`
+
+	// ResourceType is the resource type that the Service Key is authorized to access (optional)
+	ResourceType string `json:"resource_type"`
+
+	// Resource is the resource that the Service Key is authorized to access (optional unless ResourceType is set)
+	ResourceID string `json:"resource_id"`
+
+	// MaxUses is the maximum number of times the Service Key can be used (optional)
+	MaxUses int64 `json:"max_uses"`
+
+	// NumUsed is the number of times the Service Key has been used (optional unless MaxUses is set)
+	NumUsed int64 `json:"num_used"`
+
+	// Expires is the time at which the Service Key expires (optional)
+	Expires time.Time `json:"expires"`
+}
diff --git a/pkg/servicekey/session.go b/pkg/servicekey/session.go
new file mode 100644
index 0000000..752b5b6
--- /dev/null
+++ b/pkg/servicekey/session.go
@@ -0,0 +1,66 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package servicekey
+
+import (
+	"github.com/google/uuid"
+	"github.com/loopholelabs/auth"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// Session represents a user's authenticated service key session
+type Session struct {
+	// ID is the Service Key's unique identifier
+	ID string `json:"id"`
+
+	// Hash is the hashed secret of the Service Key session
+	Hash []byte `json:"hash"`
+
+	// ServiceKeyID is the ID of the Service Key that the session is associated with
+	ServiceKeyID string `json:"service_key_id"`
+
+	// UserID is the user's unique identifier
+	UserID string `json:"user_id"`
+
+	// Organization is the organization that the Service Key belongs to (optional)
+	Organization string `json:"organization"`
+
+	// ResourceType is the resource type that the Service Key is authorized to access (optional)
+	ResourceType string `json:"resource_type"`
+
+	// ResourceID is the resource that the Service Key is authorized to access (optional unless ResourceType is set)
+	ResourceID string `json:"resource_id"`
+}
+
+// NewSession returns a new session for a user with the given service key
+func NewSession(servicekey *ServiceKey) (*Session, []byte, error) {
+	id := uuid.New().String()
+	secret := []byte(auth.ServiceKeySessionPrefixString + uuid.New().String())
+	hash, err := bcrypt.GenerateFromPassword(secret, bcrypt.DefaultCost)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &Session{
+		ID:           id,
+		Hash:         hash,
+		ServiceKeyID: servicekey.ID,
+		UserID:       servicekey.UserID,
+		Organization: servicekey.Organization,
+		ResourceType: servicekey.ResourceType,
+		ResourceID:   servicekey.ResourceID,
+	}, secret, nil
+}
diff --git a/pkg/session/session.go b/pkg/session/session.go
new file mode 100644
index 0000000..fa93287
--- /dev/null
+++ b/pkg/session/session.go
@@ -0,0 +1,70 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package session
+
+import (
+	"github.com/google/uuid"
+	"github.com/loopholelabs/auth/pkg/kind"
+	"github.com/loopholelabs/auth/pkg/provider"
+	"time"
+)
+
+const (
+	// Expiry is the session expiry time for garbage collection purposes
+	Expiry = time.Hour * 24 * 7 // 7 days
+
+	// Skew is the amount of time before a session expires that we will consider it close to expiring
+	Skew = time.Hour * 24 // 1 day
+)
+
+// Session represents a user's authenticated session
+type Session struct {
+	Creation     time.Time    `json:"creation"`
+	Expiry       time.Time    `json:"expiry"`
+	Kind         kind.Kind    `json:"kind"`
+	ID           string       `json:"id"`
+	Provider     provider.Key `json:"provider"`
+	UserID       string       `json:"user_id"`
+	Organization string       `json:"organization"`
+}
+
+// New returns a new session for a user with the given kind key, provider key, user ID, and organization
+func New(kind kind.Kind, provider provider.Key, userID string, organization string) *Session {
+	return &Session{
+		Creation:     time.Now(),
+		Expiry:       time.Now().Add(Expiry),
+		Kind:         kind,
+		ID:           uuid.New().String(),
+		Provider:     provider,
+		UserID:       userID,
+		Organization: organization,
+	}
+}
+
+// Expired returns true if the session has expired
+func (s *Session) Expired() bool {
+	return time.Now().After(s.Expiry)
+}
+
+// CloseToExpiry returns true if the session is close to expiring
+func (s *Session) CloseToExpiry() bool {
+	return time.Now().After(s.Expiry.Add(-Skew))
+}
+
+func (s *Session) Refresh() {
+	s.Expiry = time.Now().Add(Expiry)
+}
diff --git a/pkg/session/session_test.go b/pkg/session/session_test.go
new file mode 100644
index 0000000..b01e083
--- /dev/null
+++ b/pkg/session/session_test.go
@@ -0,0 +1,36 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+package session
+
+import (
+	"github.com/loopholelabs/auth/pkg/kind"
+	"github.com/loopholelabs/auth/pkg/provider"
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func TestSession(t *testing.T) {
+	pkey := provider.Key("test-provider")
+	sess := New(kind.Default, pkey, "test-userid", "test-organization")
+	require.Equal(t, kind.Default, sess.Kind)
+	require.Equal(t, pkey, sess.Provider)
+	require.Equal(t, "test-userid", sess.UserID)
+	require.Equal(t, "test-organization", sess.Organization)
+
+	require.False(t, sess.Expired())
+	require.False(t, sess.CloseToExpiry())
+}
diff --git a/pkg/storage/default/default.go b/pkg/storage/default/default.go
deleted file mode 100644
index 68fde0a..0000000
--- a/pkg/storage/default/default.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package database
-
-import (
-	"context"
-	"entgo.io/ent/dialect"
-	"fmt"
-	"github.com/dexidp/dex/pkg/log"
-	dexStorage "github.com/dexidp/dex/storage"
-	dexSQL "github.com/dexidp/dex/storage/sql"
-	"github.com/loopholelabs/auth/pkg/storage"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-	"github.com/loopholelabs/auth/pkg/token"
-	"github.com/loopholelabs/auth/pkg/token/identity"
-	"github.com/sirupsen/logrus"
-	"net"
-	nurl "net/url"
-	"strconv"
-	"strings"
-
-	_ "github.com/lib/pq"
-	_ "github.com/mattn/go-sqlite3"
-)
-
-var _ storage.Storage = (*Default)(nil)
-
-type Default struct {
-	client *ent.Client
-	dexStorage.Storage
-}
-
-func New(connector string, url string, dexConnector string, dexURL string, logger log.Logger) (*Default, error) {
-	logger.Infof("connecting to %s database at %s", connector, url)
-	client, err := ent.Open(connector, url)
-	if err != nil {
-		return nil, err
-	}
-
-	logger.Infof("running database migrations")
-	err = client.Schema.Create(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	logger.Infof("creating database for dex")
-	var st dexStorage.Storage
-	switch dexConnector {
-	case dialect.Postgres:
-		logger.Infof("using postgres dialect")
-		parsed, err := parsePG(dexURL)
-		if err != nil {
-			return nil, err
-		}
-
-		logrus.Infof("parsed postgres url: %+v", parsed)
-
-		port, err := strconv.Atoi(parsed["port"])
-		if err != nil {
-			return nil, err
-		}
-		pg := dexSQL.Postgres{
-			NetworkDB: dexSQL.NetworkDB{
-				Database: parsed["dbname"],
-				User:     parsed["user"],
-				Password: parsed["password"],
-				Host:     parsed["host"],
-				Port:     uint16(port),
-			},
-			SSL: dexSQL.SSL{
-				Mode:   parsed["sslmode"],
-				CAFile: parsed["sslrootcert"],
-			},
-			Options: parsed["options"],
-		}
-
-		st, err = pg.Open(logger)
-		if err != nil {
-			return nil, err
-		}
-	case dialect.SQLite:
-		logger.Infof("using sqlite dialect")
-		s := dexSQL.SQLite3{
-			File: url,
-		}
-
-		st, err = s.Open(logger)
-		if err != nil {
-			return nil, err
-		}
-	default:
-		return nil, fmt.Errorf("unsupported connector %s", connector)
-	}
-
-	logrus.Info("created database for dex")
-
-	return &Default{
-		client:  client,
-		Storage: st,
-	}, nil
-}
-
-func (d *Default) UserExists(id string) (bool, error) {
-	return d.client.User.Query().Where(user.Username(id)).Exist(context.Background())
-}
-
-func (d *Default) GetAPIKey(id string) (*token.APIKey, error) {
-	a, err := d.client.APIKey.Query().Where(apikey.Value(id)).Only(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	u, err := a.QueryOwner().Only(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	return &token.APIKey{
-		Created: a.CreatedAt,
-		ID:      a.Value,
-		Secret:  a.Secret,
-		User:    u.Username,
-	}, nil
-}
-
-func (d *Default) CreateAPIKey(key *token.APIKey) error {
-	u, err := d.client.User.Query().Where(user.Username(key.User)).Only(context.Background())
-	if err != nil {
-		return err
-	}
-	_, err = d.client.APIKey.Create().SetOwner(u).SetValue(key.ID).SetSecret(key.Secret).Save(context.Background())
-	return err
-}
-
-func (d *Default) GetServiceKey(id string, valid storage.ServiceKeyValid, update storage.ServiceKeyUpdate) (*token.ServiceKey, error) {
-	s, err := d.client.ServiceKey.Query().Where(servicekey.Value(id)).Only(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	u, err := s.QueryOwner().Only(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	sk := &token.ServiceKey{
-		Created:  s.CreatedAt,
-		ID:       s.Value,
-		Secret:   s.Secret,
-		User:     u.Username,
-		Resource: s.Resource,
-		NumUsed:  s.NumUsed,
-		MaxUses:  s.MaxUses,
-		Expires:  s.Expires,
-	}
-
-	if valid != nil {
-		err = valid(sk)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if update != nil {
-		update(sk)
-
-		_, err = d.client.ServiceKey.UpdateOne(s).SetResource(sk.Resource).SetNumUsed(sk.NumUsed).SetMaxUses(sk.MaxUses).SetExpires(sk.Expires).Save(context.Background())
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return sk, nil
-}
-
-func (d *Default) CreateServiceKey(key *token.ServiceKey) error {
-	u, err := d.client.User.Query().Where(user.Username(key.User)).Only(context.Background())
-	if err != nil {
-		return err
-	}
-	_, err = d.client.ServiceKey.Create().SetOwner(u).SetValue(key.ID).SetSecret(key.Secret).SetResource(key.Resource).SetNumUsed(key.NumUsed).SetMaxUses(key.MaxUses).SetExpires(key.Expires).Save(context.Background())
-	return err
-}
-
-func (d *Default) NewUser(claims *identity.IDToken) error {
-	_, err := d.client.User.Create().SetUsername(claims.Email).Save(context.Background())
-	return err
-}
-
-func (d *Default) Shutdown() error {
-	err := d.Storage.Close()
-	if err != nil {
-		return err
-	}
-	return d.client.Close()
-}
-
-func parsePG(url string) (map[string]string, error) {
-	values := make(map[string]string)
-	u, err := nurl.Parse(url)
-	if err != nil {
-		return nil, err
-	}
-
-	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
-		return nil, fmt.Errorf("invalid connection protocol: %s", u.Scheme)
-	}
-
-	escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`)
-	append := func(k, v string) {
-		if v != "" {
-			values[k] = escaper.Replace(v)
-		}
-	}
-
-	if u.User != nil {
-		v := u.User.Username()
-		append("user", v)
-
-		v, _ = u.User.Password()
-		append("password", v)
-	}
-
-	if host, port, err := net.SplitHostPort(u.Host); err != nil {
-		append("host", u.Host)
-	} else {
-		append("host", host)
-		append("port", port)
-	}
-
-	if u.Path != "" {
-		append("dbname", u.Path[1:])
-	}
-
-	q := u.Query()
-	for k := range q {
-		append(k, q.Get(k))
-	}
-
-	return values, nil
-}
diff --git a/pkg/storage/default/ent/apikey.go b/pkg/storage/default/ent/apikey.go
deleted file mode 100644
index c2769a0..0000000
--- a/pkg/storage/default/ent/apikey.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"fmt"
-	"strings"
-
-	"entgo.io/ent/dialect/sql"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// APIKey is the model entity for the APIKey schema.
-type APIKey struct {
-	config `json:"-"`
-	// ID of the ent.
-	ID int `json:"id,omitempty"`
-	// CreatedAt holds the value of the "created_at" field.
-	CreatedAt int64 `json:"created_at,omitempty"`
-	// Name holds the value of the "name" field.
-	Name string `json:"name,omitempty"`
-	// Value holds the value of the "value" field.
-	Value string `json:"value,omitempty"`
-	// Secret holds the value of the "secret" field.
-	Secret []byte `json:"secret,omitempty"`
-	// Edges holds the relations/edges for other nodes in the graph.
-	// The values are being populated by the APIKeyQuery when eager-loading is set.
-	Edges        APIKeyEdges `json:"edges"`
-	user_apikeys *int
-}
-
-// APIKeyEdges holds the relations/edges for other nodes in the graph.
-type APIKeyEdges struct {
-	// Owner holds the value of the owner edge.
-	Owner *User `json:"owner,omitempty"`
-	// loadedTypes holds the information for reporting if a
-	// type was loaded (or requested) in eager-loading or not.
-	loadedTypes [1]bool
-}
-
-// OwnerOrErr returns the Owner value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e APIKeyEdges) OwnerOrErr() (*User, error) {
-	if e.loadedTypes[0] {
-		if e.Owner == nil {
-			// Edge was loaded but was not found.
-			return nil, &NotFoundError{label: user.Label}
-		}
-		return e.Owner, nil
-	}
-	return nil, &NotLoadedError{edge: "owner"}
-}
-
-// scanValues returns the types for scanning values from sql.Rows.
-func (*APIKey) scanValues(columns []string) ([]any, error) {
-	values := make([]any, len(columns))
-	for i := range columns {
-		switch columns[i] {
-		case apikey.FieldSecret:
-			values[i] = new([]byte)
-		case apikey.FieldID, apikey.FieldCreatedAt:
-			values[i] = new(sql.NullInt64)
-		case apikey.FieldName, apikey.FieldValue:
-			values[i] = new(sql.NullString)
-		case apikey.ForeignKeys[0]: // user_apikeys
-			values[i] = new(sql.NullInt64)
-		default:
-			return nil, fmt.Errorf("unexpected column %q for type APIKey", columns[i])
-		}
-	}
-	return values, nil
-}
-
-// assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the APIKey fields.
-func (ak *APIKey) assignValues(columns []string, values []any) error {
-	if m, n := len(values), len(columns); m < n {
-		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
-	}
-	for i := range columns {
-		switch columns[i] {
-		case apikey.FieldID:
-			value, ok := values[i].(*sql.NullInt64)
-			if !ok {
-				return fmt.Errorf("unexpected type %T for field id", value)
-			}
-			ak.ID = int(value.Int64)
-		case apikey.FieldCreatedAt:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field created_at", values[i])
-			} else if value.Valid {
-				ak.CreatedAt = value.Int64
-			}
-		case apikey.FieldName:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field name", values[i])
-			} else if value.Valid {
-				ak.Name = value.String
-			}
-		case apikey.FieldValue:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field value", values[i])
-			} else if value.Valid {
-				ak.Value = value.String
-			}
-		case apikey.FieldSecret:
-			if value, ok := values[i].(*[]byte); !ok {
-				return fmt.Errorf("unexpected type %T for field secret", values[i])
-			} else if value != nil {
-				ak.Secret = *value
-			}
-		case apikey.ForeignKeys[0]:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for edge-field user_apikeys", value)
-			} else if value.Valid {
-				ak.user_apikeys = new(int)
-				*ak.user_apikeys = int(value.Int64)
-			}
-		}
-	}
-	return nil
-}
-
-// QueryOwner queries the "owner" edge of the APIKey entity.
-func (ak *APIKey) QueryOwner() *UserQuery {
-	return (&APIKeyClient{config: ak.config}).QueryOwner(ak)
-}
-
-// Update returns a builder for updating this APIKey.
-// Note that you need to call APIKey.Unwrap() before calling this method if this APIKey
-// was returned from a transaction, and the transaction was committed or rolled back.
-func (ak *APIKey) Update() *APIKeyUpdateOne {
-	return (&APIKeyClient{config: ak.config}).UpdateOne(ak)
-}
-
-// Unwrap unwraps the APIKey entity that was returned from a transaction after it was closed,
-// so that all future queries will be executed through the driver which created the transaction.
-func (ak *APIKey) Unwrap() *APIKey {
-	_tx, ok := ak.config.driver.(*txDriver)
-	if !ok {
-		panic("ent: APIKey is not a transactional entity")
-	}
-	ak.config.driver = _tx.drv
-	return ak
-}
-
-// String implements the fmt.Stringer.
-func (ak *APIKey) String() string {
-	var builder strings.Builder
-	builder.WriteString("APIKey(")
-	builder.WriteString(fmt.Sprintf("id=%v, ", ak.ID))
-	builder.WriteString("created_at=")
-	builder.WriteString(fmt.Sprintf("%v", ak.CreatedAt))
-	builder.WriteString(", ")
-	builder.WriteString("name=")
-	builder.WriteString(ak.Name)
-	builder.WriteString(", ")
-	builder.WriteString("value=")
-	builder.WriteString(ak.Value)
-	builder.WriteString(", ")
-	builder.WriteString("secret=")
-	builder.WriteString(fmt.Sprintf("%v", ak.Secret))
-	builder.WriteByte(')')
-	return builder.String()
-}
-
-// APIKeys is a parsable slice of APIKey.
-type APIKeys []*APIKey
-
-func (ak APIKeys) config(cfg config) {
-	for _i := range ak {
-		ak[_i].config = cfg
-	}
-}
diff --git a/pkg/storage/default/ent/apikey/apikey.go b/pkg/storage/default/ent/apikey/apikey.go
deleted file mode 100644
index 85e867a..0000000
--- a/pkg/storage/default/ent/apikey/apikey.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package apikey
-
-const (
-	// Label holds the string label denoting the apikey type in the database.
-	Label = "api_key"
-	// FieldID holds the string denoting the id field in the database.
-	FieldID = "id"
-	// FieldCreatedAt holds the string denoting the created_at field in the database.
-	FieldCreatedAt = "created_at"
-	// FieldName holds the string denoting the name field in the database.
-	FieldName = "name"
-	// FieldValue holds the string denoting the value field in the database.
-	FieldValue = "value"
-	// FieldSecret holds the string denoting the secret field in the database.
-	FieldSecret = "secret"
-	// EdgeOwner holds the string denoting the owner edge name in mutations.
-	EdgeOwner = "owner"
-	// Table holds the table name of the apikey in the database.
-	Table = "api_keys"
-	// OwnerTable is the table that holds the owner relation/edge.
-	OwnerTable = "api_keys"
-	// OwnerInverseTable is the table name for the User entity.
-	// It exists in this package in order to avoid circular dependency with the "user" package.
-	OwnerInverseTable = "users"
-	// OwnerColumn is the table column denoting the owner relation/edge.
-	OwnerColumn = "user_apikeys"
-)
-
-// Columns holds all SQL columns for apikey fields.
-var Columns = []string{
-	FieldID,
-	FieldCreatedAt,
-	FieldName,
-	FieldValue,
-	FieldSecret,
-}
-
-// ForeignKeys holds the SQL foreign-keys that are owned by the "api_keys"
-// table and are not defined as standalone fields in the schema.
-var ForeignKeys = []string{
-	"user_apikeys",
-}
-
-// ValidColumn reports if the column name is valid (part of the table columns).
-func ValidColumn(column string) bool {
-	for i := range Columns {
-		if column == Columns[i] {
-			return true
-		}
-	}
-	for i := range ForeignKeys {
-		if column == ForeignKeys[i] {
-			return true
-		}
-	}
-	return false
-}
-
-var (
-	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
-	DefaultCreatedAt func() int64
-	// NameValidator is a validator for the "name" field. It is called by the builders before save.
-	NameValidator func(string) error
-	// ValueValidator is a validator for the "value" field. It is called by the builders before save.
-	ValueValidator func(string) error
-)
diff --git a/pkg/storage/default/ent/apikey/where.go b/pkg/storage/default/ent/apikey/where.go
deleted file mode 100644
index e5ed0c3..0000000
--- a/pkg/storage/default/ent/apikey/where.go
+++ /dev/null
@@ -1,494 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package apikey
-
-import (
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-)
-
-// ID filters vertices based on their ID field.
-func ID(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDEQ applies the EQ predicate on the ID field.
-func IDEQ(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDNEQ applies the NEQ predicate on the ID field.
-func IDNEQ(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldID), id))
-	})
-}
-
-// IDIn applies the In predicate on the ID field.
-func IDIn(ids ...int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.In(s.C(FieldID), v...))
-	})
-}
-
-// IDNotIn applies the NotIn predicate on the ID field.
-func IDNotIn(ids ...int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.NotIn(s.C(FieldID), v...))
-	})
-}
-
-// IDGT applies the GT predicate on the ID field.
-func IDGT(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldID), id))
-	})
-}
-
-// IDGTE applies the GTE predicate on the ID field.
-func IDGTE(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldID), id))
-	})
-}
-
-// IDLT applies the LT predicate on the ID field.
-func IDLT(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldID), id))
-	})
-}
-
-// IDLTE applies the LTE predicate on the ID field.
-func IDLTE(id int) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldID), id))
-	})
-}
-
-// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
-func CreatedAt(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
-func Name(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldName), v))
-	})
-}
-
-// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
-func Value(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldValue), v))
-	})
-}
-
-// Secret applies equality check predicate on the "secret" field. It's identical to SecretEQ.
-func Secret(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldSecret), v))
-	})
-}
-
-// CreatedAtEQ applies the EQ predicate on the "created_at" field.
-func CreatedAtEQ(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
-func CreatedAtNEQ(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtIn applies the In predicate on the "created_at" field.
-func CreatedAtIn(vs ...int64) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
-func CreatedAtNotIn(vs ...int64) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtGT applies the GT predicate on the "created_at" field.
-func CreatedAtGT(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtGTE applies the GTE predicate on the "created_at" field.
-func CreatedAtGTE(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLT applies the LT predicate on the "created_at" field.
-func CreatedAtLT(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLTE applies the LTE predicate on the "created_at" field.
-func CreatedAtLTE(v int64) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// NameEQ applies the EQ predicate on the "name" field.
-func NameEQ(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldName), v))
-	})
-}
-
-// NameNEQ applies the NEQ predicate on the "name" field.
-func NameNEQ(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldName), v))
-	})
-}
-
-// NameIn applies the In predicate on the "name" field.
-func NameIn(vs ...string) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldName), v...))
-	})
-}
-
-// NameNotIn applies the NotIn predicate on the "name" field.
-func NameNotIn(vs ...string) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldName), v...))
-	})
-}
-
-// NameGT applies the GT predicate on the "name" field.
-func NameGT(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldName), v))
-	})
-}
-
-// NameGTE applies the GTE predicate on the "name" field.
-func NameGTE(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldName), v))
-	})
-}
-
-// NameLT applies the LT predicate on the "name" field.
-func NameLT(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldName), v))
-	})
-}
-
-// NameLTE applies the LTE predicate on the "name" field.
-func NameLTE(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldName), v))
-	})
-}
-
-// NameContains applies the Contains predicate on the "name" field.
-func NameContains(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldName), v))
-	})
-}
-
-// NameHasPrefix applies the HasPrefix predicate on the "name" field.
-func NameHasPrefix(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldName), v))
-	})
-}
-
-// NameHasSuffix applies the HasSuffix predicate on the "name" field.
-func NameHasSuffix(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldName), v))
-	})
-}
-
-// NameEqualFold applies the EqualFold predicate on the "name" field.
-func NameEqualFold(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldName), v))
-	})
-}
-
-// NameContainsFold applies the ContainsFold predicate on the "name" field.
-func NameContainsFold(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldName), v))
-	})
-}
-
-// ValueEQ applies the EQ predicate on the "value" field.
-func ValueEQ(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldValue), v))
-	})
-}
-
-// ValueNEQ applies the NEQ predicate on the "value" field.
-func ValueNEQ(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldValue), v))
-	})
-}
-
-// ValueIn applies the In predicate on the "value" field.
-func ValueIn(vs ...string) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldValue), v...))
-	})
-}
-
-// ValueNotIn applies the NotIn predicate on the "value" field.
-func ValueNotIn(vs ...string) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldValue), v...))
-	})
-}
-
-// ValueGT applies the GT predicate on the "value" field.
-func ValueGT(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldValue), v))
-	})
-}
-
-// ValueGTE applies the GTE predicate on the "value" field.
-func ValueGTE(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldValue), v))
-	})
-}
-
-// ValueLT applies the LT predicate on the "value" field.
-func ValueLT(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldValue), v))
-	})
-}
-
-// ValueLTE applies the LTE predicate on the "value" field.
-func ValueLTE(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldValue), v))
-	})
-}
-
-// ValueContains applies the Contains predicate on the "value" field.
-func ValueContains(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldValue), v))
-	})
-}
-
-// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
-func ValueHasPrefix(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldValue), v))
-	})
-}
-
-// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
-func ValueHasSuffix(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldValue), v))
-	})
-}
-
-// ValueEqualFold applies the EqualFold predicate on the "value" field.
-func ValueEqualFold(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldValue), v))
-	})
-}
-
-// ValueContainsFold applies the ContainsFold predicate on the "value" field.
-func ValueContainsFold(v string) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldValue), v))
-	})
-}
-
-// SecretEQ applies the EQ predicate on the "secret" field.
-func SecretEQ(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldSecret), v))
-	})
-}
-
-// SecretNEQ applies the NEQ predicate on the "secret" field.
-func SecretNEQ(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldSecret), v))
-	})
-}
-
-// SecretIn applies the In predicate on the "secret" field.
-func SecretIn(vs ...[]byte) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldSecret), v...))
-	})
-}
-
-// SecretNotIn applies the NotIn predicate on the "secret" field.
-func SecretNotIn(vs ...[]byte) predicate.APIKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldSecret), v...))
-	})
-}
-
-// SecretGT applies the GT predicate on the "secret" field.
-func SecretGT(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldSecret), v))
-	})
-}
-
-// SecretGTE applies the GTE predicate on the "secret" field.
-func SecretGTE(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldSecret), v))
-	})
-}
-
-// SecretLT applies the LT predicate on the "secret" field.
-func SecretLT(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldSecret), v))
-	})
-}
-
-// SecretLTE applies the LTE predicate on the "secret" field.
-func SecretLTE(v []byte) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldSecret), v))
-	})
-}
-
-// HasOwner applies the HasEdge predicate on the "owner" edge.
-func HasOwner() predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(OwnerTable, FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
-		)
-		sqlgraph.HasNeighbors(s, step)
-	})
-}
-
-// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
-func HasOwnerWith(preds ...predicate.User) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(OwnerInverseTable, FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
-		)
-		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
-			for _, p := range preds {
-				p(s)
-			}
-		})
-	})
-}
-
-// And groups predicates with the AND operator between them.
-func And(predicates ...predicate.APIKey) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for _, p := range predicates {
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Or groups predicates with the OR operator between them.
-func Or(predicates ...predicate.APIKey) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for i, p := range predicates {
-			if i > 0 {
-				s1.Or()
-			}
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Not applies the not operator on the given predicate.
-func Not(p predicate.APIKey) predicate.APIKey {
-	return predicate.APIKey(func(s *sql.Selector) {
-		p(s.Not())
-	})
-}
diff --git a/pkg/storage/default/ent/apikey_create.go b/pkg/storage/default/ent/apikey_create.go
deleted file mode 100644
index b64f0ad..0000000
--- a/pkg/storage/default/ent/apikey_create.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// APIKeyCreate is the builder for creating a APIKey entity.
-type APIKeyCreate struct {
-	config
-	mutation *APIKeyMutation
-	hooks    []Hook
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (akc *APIKeyCreate) SetCreatedAt(i int64) *APIKeyCreate {
-	akc.mutation.SetCreatedAt(i)
-	return akc
-}
-
-// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
-func (akc *APIKeyCreate) SetNillableCreatedAt(i *int64) *APIKeyCreate {
-	if i != nil {
-		akc.SetCreatedAt(*i)
-	}
-	return akc
-}
-
-// SetName sets the "name" field.
-func (akc *APIKeyCreate) SetName(s string) *APIKeyCreate {
-	akc.mutation.SetName(s)
-	return akc
-}
-
-// SetValue sets the "value" field.
-func (akc *APIKeyCreate) SetValue(s string) *APIKeyCreate {
-	akc.mutation.SetValue(s)
-	return akc
-}
-
-// SetSecret sets the "secret" field.
-func (akc *APIKeyCreate) SetSecret(b []byte) *APIKeyCreate {
-	akc.mutation.SetSecret(b)
-	return akc
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (akc *APIKeyCreate) SetOwnerID(id int) *APIKeyCreate {
-	akc.mutation.SetOwnerID(id)
-	return akc
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (akc *APIKeyCreate) SetNillableOwnerID(id *int) *APIKeyCreate {
-	if id != nil {
-		akc = akc.SetOwnerID(*id)
-	}
-	return akc
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (akc *APIKeyCreate) SetOwner(u *User) *APIKeyCreate {
-	return akc.SetOwnerID(u.ID)
-}
-
-// Mutation returns the APIKeyMutation object of the builder.
-func (akc *APIKeyCreate) Mutation() *APIKeyMutation {
-	return akc.mutation
-}
-
-// Save creates the APIKey in the database.
-func (akc *APIKeyCreate) Save(ctx context.Context) (*APIKey, error) {
-	var (
-		err  error
-		node *APIKey
-	)
-	akc.defaults()
-	if len(akc.hooks) == 0 {
-		if err = akc.check(); err != nil {
-			return nil, err
-		}
-		node, err = akc.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*APIKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			if err = akc.check(); err != nil {
-				return nil, err
-			}
-			akc.mutation = mutation
-			if node, err = akc.sqlSave(ctx); err != nil {
-				return nil, err
-			}
-			mutation.id = &node.ID
-			mutation.done = true
-			return node, err
-		})
-		for i := len(akc.hooks) - 1; i >= 0; i-- {
-			if akc.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = akc.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, akc.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*APIKey)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from APIKeyMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX calls Save and panics if Save returns an error.
-func (akc *APIKeyCreate) SaveX(ctx context.Context) *APIKey {
-	v, err := akc.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (akc *APIKeyCreate) Exec(ctx context.Context) error {
-	_, err := akc.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (akc *APIKeyCreate) ExecX(ctx context.Context) {
-	if err := akc.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-// defaults sets the default values of the builder before save.
-func (akc *APIKeyCreate) defaults() {
-	if _, ok := akc.mutation.CreatedAt(); !ok {
-		v := apikey.DefaultCreatedAt()
-		akc.mutation.SetCreatedAt(v)
-	}
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (akc *APIKeyCreate) check() error {
-	if _, ok := akc.mutation.CreatedAt(); !ok {
-		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "APIKey.created_at"`)}
-	}
-	if _, ok := akc.mutation.Name(); !ok {
-		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "APIKey.name"`)}
-	}
-	if v, ok := akc.mutation.Name(); ok {
-		if err := apikey.NameValidator(v); err != nil {
-			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)}
-		}
-	}
-	if _, ok := akc.mutation.Value(); !ok {
-		return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "APIKey.value"`)}
-	}
-	if v, ok := akc.mutation.Value(); ok {
-		if err := apikey.ValueValidator(v); err != nil {
-			return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "APIKey.value": %w`, err)}
-		}
-	}
-	if _, ok := akc.mutation.Secret(); !ok {
-		return &ValidationError{Name: "secret", err: errors.New(`ent: missing required field "APIKey.secret"`)}
-	}
-	return nil
-}
-
-func (akc *APIKeyCreate) sqlSave(ctx context.Context) (*APIKey, error) {
-	_node, _spec := akc.createSpec()
-	if err := sqlgraph.CreateNode(ctx, akc.driver, _spec); err != nil {
-		if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	id := _spec.ID.Value.(int64)
-	_node.ID = int(id)
-	return _node, nil
-}
-
-func (akc *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) {
-	var (
-		_node = &APIKey{config: akc.config}
-		_spec = &sqlgraph.CreateSpec{
-			Table: apikey.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: apikey.FieldID,
-			},
-		}
-	)
-	if value, ok := akc.mutation.CreatedAt(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: apikey.FieldCreatedAt,
-		})
-		_node.CreatedAt = value
-	}
-	if value, ok := akc.mutation.Name(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: apikey.FieldName,
-		})
-		_node.Name = value
-	}
-	if value, ok := akc.mutation.Value(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: apikey.FieldValue,
-		})
-		_node.Value = value
-	}
-	if value, ok := akc.mutation.Secret(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeBytes,
-			Value:  value,
-			Column: apikey.FieldSecret,
-		})
-		_node.Secret = value
-	}
-	if nodes := akc.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   apikey.OwnerTable,
-			Columns: []string{apikey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_node.user_apikeys = &nodes[0]
-		_spec.Edges = append(_spec.Edges, edge)
-	}
-	return _node, _spec
-}
-
-// APIKeyCreateBulk is the builder for creating many APIKey entities in bulk.
-type APIKeyCreateBulk struct {
-	config
-	builders []*APIKeyCreate
-}
-
-// Save creates the APIKey entities in the database.
-func (akcb *APIKeyCreateBulk) Save(ctx context.Context) ([]*APIKey, error) {
-	specs := make([]*sqlgraph.CreateSpec, len(akcb.builders))
-	nodes := make([]*APIKey, len(akcb.builders))
-	mutators := make([]Mutator, len(akcb.builders))
-	for i := range akcb.builders {
-		func(i int, root context.Context) {
-			builder := akcb.builders[i]
-			builder.defaults()
-			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-				mutation, ok := m.(*APIKeyMutation)
-				if !ok {
-					return nil, fmt.Errorf("unexpected mutation type %T", m)
-				}
-				if err := builder.check(); err != nil {
-					return nil, err
-				}
-				builder.mutation = mutation
-				nodes[i], specs[i] = builder.createSpec()
-				var err error
-				if i < len(mutators)-1 {
-					_, err = mutators[i+1].Mutate(root, akcb.builders[i+1].mutation)
-				} else {
-					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
-					// Invoke the actual operation on the latest mutation in the chain.
-					if err = sqlgraph.BatchCreate(ctx, akcb.driver, spec); err != nil {
-						if sqlgraph.IsConstraintError(err) {
-							err = &ConstraintError{msg: err.Error(), wrap: err}
-						}
-					}
-				}
-				if err != nil {
-					return nil, err
-				}
-				mutation.id = &nodes[i].ID
-				if specs[i].ID.Value != nil {
-					id := specs[i].ID.Value.(int64)
-					nodes[i].ID = int(id)
-				}
-				mutation.done = true
-				return nodes[i], nil
-			})
-			for i := len(builder.hooks) - 1; i >= 0; i-- {
-				mut = builder.hooks[i](mut)
-			}
-			mutators[i] = mut
-		}(i, ctx)
-	}
-	if len(mutators) > 0 {
-		if _, err := mutators[0].Mutate(ctx, akcb.builders[0].mutation); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (akcb *APIKeyCreateBulk) SaveX(ctx context.Context) []*APIKey {
-	v, err := akcb.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (akcb *APIKeyCreateBulk) Exec(ctx context.Context) error {
-	_, err := akcb.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (akcb *APIKeyCreateBulk) ExecX(ctx context.Context) {
-	if err := akcb.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
diff --git a/pkg/storage/default/ent/apikey_delete.go b/pkg/storage/default/ent/apikey_delete.go
deleted file mode 100644
index a157b8f..0000000
--- a/pkg/storage/default/ent/apikey_delete.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-)
-
-// APIKeyDelete is the builder for deleting a APIKey entity.
-type APIKeyDelete struct {
-	config
-	hooks    []Hook
-	mutation *APIKeyMutation
-}
-
-// Where appends a list predicates to the APIKeyDelete builder.
-func (akd *APIKeyDelete) Where(ps ...predicate.APIKey) *APIKeyDelete {
-	akd.mutation.Where(ps...)
-	return akd
-}
-
-// Exec executes the deletion query and returns how many vertices were deleted.
-func (akd *APIKeyDelete) Exec(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(akd.hooks) == 0 {
-		affected, err = akd.sqlExec(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*APIKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			akd.mutation = mutation
-			affected, err = akd.sqlExec(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(akd.hooks) - 1; i >= 0; i-- {
-			if akd.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = akd.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, akd.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (akd *APIKeyDelete) ExecX(ctx context.Context) int {
-	n, err := akd.Exec(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return n
-}
-
-func (akd *APIKeyDelete) sqlExec(ctx context.Context) (int, error) {
-	_spec := &sqlgraph.DeleteSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table: apikey.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: apikey.FieldID,
-			},
-		},
-	}
-	if ps := akd.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	affected, err := sqlgraph.DeleteNodes(ctx, akd.driver, _spec)
-	if err != nil && sqlgraph.IsConstraintError(err) {
-		err = &ConstraintError{msg: err.Error(), wrap: err}
-	}
-	return affected, err
-}
-
-// APIKeyDeleteOne is the builder for deleting a single APIKey entity.
-type APIKeyDeleteOne struct {
-	akd *APIKeyDelete
-}
-
-// Exec executes the deletion query.
-func (akdo *APIKeyDeleteOne) Exec(ctx context.Context) error {
-	n, err := akdo.akd.Exec(ctx)
-	switch {
-	case err != nil:
-		return err
-	case n == 0:
-		return &NotFoundError{apikey.Label}
-	default:
-		return nil
-	}
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (akdo *APIKeyDeleteOne) ExecX(ctx context.Context) {
-	akdo.akd.ExecX(ctx)
-}
diff --git a/pkg/storage/default/ent/apikey_query.go b/pkg/storage/default/ent/apikey_query.go
deleted file mode 100644
index b20817e..0000000
--- a/pkg/storage/default/ent/apikey_query.go
+++ /dev/null
@@ -1,613 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"fmt"
-	"math"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// APIKeyQuery is the builder for querying APIKey entities.
-type APIKeyQuery struct {
-	config
-	limit      *int
-	offset     *int
-	unique     *bool
-	order      []OrderFunc
-	fields     []string
-	predicates []predicate.APIKey
-	withOwner  *UserQuery
-	withFKs    bool
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Where adds a new predicate for the APIKeyQuery builder.
-func (akq *APIKeyQuery) Where(ps ...predicate.APIKey) *APIKeyQuery {
-	akq.predicates = append(akq.predicates, ps...)
-	return akq
-}
-
-// Limit adds a limit step to the query.
-func (akq *APIKeyQuery) Limit(limit int) *APIKeyQuery {
-	akq.limit = &limit
-	return akq
-}
-
-// Offset adds an offset step to the query.
-func (akq *APIKeyQuery) Offset(offset int) *APIKeyQuery {
-	akq.offset = &offset
-	return akq
-}
-
-// Unique configures the query builder to filter duplicate records on query.
-// By default, unique is set to true, and can be disabled using this method.
-func (akq *APIKeyQuery) Unique(unique bool) *APIKeyQuery {
-	akq.unique = &unique
-	return akq
-}
-
-// Order adds an order step to the query.
-func (akq *APIKeyQuery) Order(o ...OrderFunc) *APIKeyQuery {
-	akq.order = append(akq.order, o...)
-	return akq
-}
-
-// QueryOwner chains the current query on the "owner" edge.
-func (akq *APIKeyQuery) QueryOwner() *UserQuery {
-	query := &UserQuery{config: akq.config}
-	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
-		if err := akq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		selector := akq.sqlQuery(ctx)
-		if err := selector.Err(); err != nil {
-			return nil, err
-		}
-		step := sqlgraph.NewStep(
-			sqlgraph.From(apikey.Table, apikey.FieldID, selector),
-			sqlgraph.To(user.Table, user.FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, apikey.OwnerTable, apikey.OwnerColumn),
-		)
-		fromU = sqlgraph.SetNeighbors(akq.driver.Dialect(), step)
-		return fromU, nil
-	}
-	return query
-}
-
-// First returns the first APIKey entity from the query.
-// Returns a *NotFoundError when no APIKey was found.
-func (akq *APIKeyQuery) First(ctx context.Context) (*APIKey, error) {
-	nodes, err := akq.Limit(1).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nil, &NotFoundError{apikey.Label}
-	}
-	return nodes[0], nil
-}
-
-// FirstX is like First, but panics if an error occurs.
-func (akq *APIKeyQuery) FirstX(ctx context.Context) *APIKey {
-	node, err := akq.First(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return node
-}
-
-// FirstID returns the first APIKey ID from the query.
-// Returns a *NotFoundError when no APIKey ID was found.
-func (akq *APIKeyQuery) FirstID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = akq.Limit(1).IDs(ctx); err != nil {
-		return
-	}
-	if len(ids) == 0 {
-		err = &NotFoundError{apikey.Label}
-		return
-	}
-	return ids[0], nil
-}
-
-// FirstIDX is like FirstID, but panics if an error occurs.
-func (akq *APIKeyQuery) FirstIDX(ctx context.Context) int {
-	id, err := akq.FirstID(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return id
-}
-
-// Only returns a single APIKey entity found by the query, ensuring it only returns one.
-// Returns a *NotSingularError when more than one APIKey entity is found.
-// Returns a *NotFoundError when no APIKey entities are found.
-func (akq *APIKeyQuery) Only(ctx context.Context) (*APIKey, error) {
-	nodes, err := akq.Limit(2).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	switch len(nodes) {
-	case 1:
-		return nodes[0], nil
-	case 0:
-		return nil, &NotFoundError{apikey.Label}
-	default:
-		return nil, &NotSingularError{apikey.Label}
-	}
-}
-
-// OnlyX is like Only, but panics if an error occurs.
-func (akq *APIKeyQuery) OnlyX(ctx context.Context) *APIKey {
-	node, err := akq.Only(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// OnlyID is like Only, but returns the only APIKey ID in the query.
-// Returns a *NotSingularError when more than one APIKey ID is found.
-// Returns a *NotFoundError when no entities are found.
-func (akq *APIKeyQuery) OnlyID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = akq.Limit(2).IDs(ctx); err != nil {
-		return
-	}
-	switch len(ids) {
-	case 1:
-		id = ids[0]
-	case 0:
-		err = &NotFoundError{apikey.Label}
-	default:
-		err = &NotSingularError{apikey.Label}
-	}
-	return
-}
-
-// OnlyIDX is like OnlyID, but panics if an error occurs.
-func (akq *APIKeyQuery) OnlyIDX(ctx context.Context) int {
-	id, err := akq.OnlyID(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return id
-}
-
-// All executes the query and returns a list of APIKeys.
-func (akq *APIKeyQuery) All(ctx context.Context) ([]*APIKey, error) {
-	if err := akq.prepareQuery(ctx); err != nil {
-		return nil, err
-	}
-	return akq.sqlAll(ctx)
-}
-
-// AllX is like All, but panics if an error occurs.
-func (akq *APIKeyQuery) AllX(ctx context.Context) []*APIKey {
-	nodes, err := akq.All(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return nodes
-}
-
-// IDs executes the query and returns a list of APIKey IDs.
-func (akq *APIKeyQuery) IDs(ctx context.Context) ([]int, error) {
-	var ids []int
-	if err := akq.Select(apikey.FieldID).Scan(ctx, &ids); err != nil {
-		return nil, err
-	}
-	return ids, nil
-}
-
-// IDsX is like IDs, but panics if an error occurs.
-func (akq *APIKeyQuery) IDsX(ctx context.Context) []int {
-	ids, err := akq.IDs(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return ids
-}
-
-// Count returns the count of the given query.
-func (akq *APIKeyQuery) Count(ctx context.Context) (int, error) {
-	if err := akq.prepareQuery(ctx); err != nil {
-		return 0, err
-	}
-	return akq.sqlCount(ctx)
-}
-
-// CountX is like Count, but panics if an error occurs.
-func (akq *APIKeyQuery) CountX(ctx context.Context) int {
-	count, err := akq.Count(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return count
-}
-
-// Exist returns true if the query has elements in the graph.
-func (akq *APIKeyQuery) Exist(ctx context.Context) (bool, error) {
-	if err := akq.prepareQuery(ctx); err != nil {
-		return false, err
-	}
-	return akq.sqlExist(ctx)
-}
-
-// ExistX is like Exist, but panics if an error occurs.
-func (akq *APIKeyQuery) ExistX(ctx context.Context) bool {
-	exist, err := akq.Exist(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return exist
-}
-
-// Clone returns a duplicate of the APIKeyQuery builder, including all associated steps. It can be
-// used to prepare common query builders and use them differently after the clone is made.
-func (akq *APIKeyQuery) Clone() *APIKeyQuery {
-	if akq == nil {
-		return nil
-	}
-	return &APIKeyQuery{
-		config:     akq.config,
-		limit:      akq.limit,
-		offset:     akq.offset,
-		order:      append([]OrderFunc{}, akq.order...),
-		predicates: append([]predicate.APIKey{}, akq.predicates...),
-		withOwner:  akq.withOwner.Clone(),
-		// clone intermediate query.
-		sql:    akq.sql.Clone(),
-		path:   akq.path,
-		unique: akq.unique,
-	}
-}
-
-// WithOwner tells the query-builder to eager-load the nodes that are connected to
-// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
-func (akq *APIKeyQuery) WithOwner(opts ...func(*UserQuery)) *APIKeyQuery {
-	query := &UserQuery{config: akq.config}
-	for _, opt := range opts {
-		opt(query)
-	}
-	akq.withOwner = query
-	return akq
-}
-
-// GroupBy is used to group vertices by one or more fields/columns.
-// It is often used with aggregate functions, like: count, max, mean, min, sum.
-//
-// Example:
-//
-//	var v []struct {
-//		CreatedAt int64 `json:"created_at,omitempty"`
-//		Count int `json:"count,omitempty"`
-//	}
-//
-//	client.APIKey.Query().
-//		GroupBy(apikey.FieldCreatedAt).
-//		Aggregate(ent.Count()).
-//		Scan(ctx, &v)
-func (akq *APIKeyQuery) GroupBy(field string, fields ...string) *APIKeyGroupBy {
-	grbuild := &APIKeyGroupBy{config: akq.config}
-	grbuild.fields = append([]string{field}, fields...)
-	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
-		if err := akq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		return akq.sqlQuery(ctx), nil
-	}
-	grbuild.label = apikey.Label
-	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
-	return grbuild
-}
-
-// Select allows the selection one or more fields/columns for the given query,
-// instead of selecting all fields in the entity.
-//
-// Example:
-//
-//	var v []struct {
-//		CreatedAt int64 `json:"created_at,omitempty"`
-//	}
-//
-//	client.APIKey.Query().
-//		Select(apikey.FieldCreatedAt).
-//		Scan(ctx, &v)
-func (akq *APIKeyQuery) Select(fields ...string) *APIKeySelect {
-	akq.fields = append(akq.fields, fields...)
-	selbuild := &APIKeySelect{APIKeyQuery: akq}
-	selbuild.label = apikey.Label
-	selbuild.flds, selbuild.scan = &akq.fields, selbuild.Scan
-	return selbuild
-}
-
-func (akq *APIKeyQuery) prepareQuery(ctx context.Context) error {
-	for _, f := range akq.fields {
-		if !apikey.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-		}
-	}
-	if akq.path != nil {
-		prev, err := akq.path(ctx)
-		if err != nil {
-			return err
-		}
-		akq.sql = prev
-	}
-	return nil
-}
-
-func (akq *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKey, error) {
-	var (
-		nodes       = []*APIKey{}
-		withFKs     = akq.withFKs
-		_spec       = akq.querySpec()
-		loadedTypes = [1]bool{
-			akq.withOwner != nil,
-		}
-	)
-	if akq.withOwner != nil {
-		withFKs = true
-	}
-	if withFKs {
-		_spec.Node.Columns = append(_spec.Node.Columns, apikey.ForeignKeys...)
-	}
-	_spec.ScanValues = func(columns []string) ([]any, error) {
-		return (*APIKey).scanValues(nil, columns)
-	}
-	_spec.Assign = func(columns []string, values []any) error {
-		node := &APIKey{config: akq.config}
-		nodes = append(nodes, node)
-		node.Edges.loadedTypes = loadedTypes
-		return node.assignValues(columns, values)
-	}
-	for i := range hooks {
-		hooks[i](ctx, _spec)
-	}
-	if err := sqlgraph.QueryNodes(ctx, akq.driver, _spec); err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nodes, nil
-	}
-	if query := akq.withOwner; query != nil {
-		if err := akq.loadOwner(ctx, query, nodes, nil,
-			func(n *APIKey, e *User) { n.Edges.Owner = e }); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-func (akq *APIKeyQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *User)) error {
-	ids := make([]int, 0, len(nodes))
-	nodeids := make(map[int][]*APIKey)
-	for i := range nodes {
-		if nodes[i].user_apikeys == nil {
-			continue
-		}
-		fk := *nodes[i].user_apikeys
-		if _, ok := nodeids[fk]; !ok {
-			ids = append(ids, fk)
-		}
-		nodeids[fk] = append(nodeids[fk], nodes[i])
-	}
-	query.Where(user.IDIn(ids...))
-	neighbors, err := query.All(ctx)
-	if err != nil {
-		return err
-	}
-	for _, n := range neighbors {
-		nodes, ok := nodeids[n.ID]
-		if !ok {
-			return fmt.Errorf(`unexpected foreign-key "user_apikeys" returned %v`, n.ID)
-		}
-		for i := range nodes {
-			assign(nodes[i], n)
-		}
-	}
-	return nil
-}
-
-func (akq *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
-	_spec := akq.querySpec()
-	_spec.Node.Columns = akq.fields
-	if len(akq.fields) > 0 {
-		_spec.Unique = akq.unique != nil && *akq.unique
-	}
-	return sqlgraph.CountNodes(ctx, akq.driver, _spec)
-}
-
-func (akq *APIKeyQuery) sqlExist(ctx context.Context) (bool, error) {
-	switch _, err := akq.FirstID(ctx); {
-	case IsNotFound(err):
-		return false, nil
-	case err != nil:
-		return false, fmt.Errorf("ent: check existence: %w", err)
-	default:
-		return true, nil
-	}
-}
-
-func (akq *APIKeyQuery) querySpec() *sqlgraph.QuerySpec {
-	_spec := &sqlgraph.QuerySpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   apikey.Table,
-			Columns: apikey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: apikey.FieldID,
-			},
-		},
-		From:   akq.sql,
-		Unique: true,
-	}
-	if unique := akq.unique; unique != nil {
-		_spec.Unique = *unique
-	}
-	if fields := akq.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
-		for i := range fields {
-			if fields[i] != apikey.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
-			}
-		}
-	}
-	if ps := akq.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if limit := akq.limit; limit != nil {
-		_spec.Limit = *limit
-	}
-	if offset := akq.offset; offset != nil {
-		_spec.Offset = *offset
-	}
-	if ps := akq.order; len(ps) > 0 {
-		_spec.Order = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	return _spec
-}
-
-func (akq *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
-	builder := sql.Dialect(akq.driver.Dialect())
-	t1 := builder.Table(apikey.Table)
-	columns := akq.fields
-	if len(columns) == 0 {
-		columns = apikey.Columns
-	}
-	selector := builder.Select(t1.Columns(columns...)...).From(t1)
-	if akq.sql != nil {
-		selector = akq.sql
-		selector.Select(selector.Columns(columns...)...)
-	}
-	if akq.unique != nil && *akq.unique {
-		selector.Distinct()
-	}
-	for _, p := range akq.predicates {
-		p(selector)
-	}
-	for _, p := range akq.order {
-		p(selector)
-	}
-	if offset := akq.offset; offset != nil {
-		// limit is mandatory for offset clause. We start
-		// with default value, and override it below if needed.
-		selector.Offset(*offset).Limit(math.MaxInt32)
-	}
-	if limit := akq.limit; limit != nil {
-		selector.Limit(*limit)
-	}
-	return selector
-}
-
-// APIKeyGroupBy is the group-by builder for APIKey entities.
-type APIKeyGroupBy struct {
-	config
-	selector
-	fields []string
-	fns    []AggregateFunc
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Aggregate adds the given aggregation functions to the group-by query.
-func (akgb *APIKeyGroupBy) Aggregate(fns ...AggregateFunc) *APIKeyGroupBy {
-	akgb.fns = append(akgb.fns, fns...)
-	return akgb
-}
-
-// Scan applies the group-by query and scans the result into the given value.
-func (akgb *APIKeyGroupBy) Scan(ctx context.Context, v any) error {
-	query, err := akgb.path(ctx)
-	if err != nil {
-		return err
-	}
-	akgb.sql = query
-	return akgb.sqlScan(ctx, v)
-}
-
-func (akgb *APIKeyGroupBy) sqlScan(ctx context.Context, v any) error {
-	for _, f := range akgb.fields {
-		if !apikey.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
-		}
-	}
-	selector := akgb.sqlQuery()
-	if err := selector.Err(); err != nil {
-		return err
-	}
-	rows := &sql.Rows{}
-	query, args := selector.Query()
-	if err := akgb.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
-
-func (akgb *APIKeyGroupBy) sqlQuery() *sql.Selector {
-	selector := akgb.sql.Select()
-	aggregation := make([]string, 0, len(akgb.fns))
-	for _, fn := range akgb.fns {
-		aggregation = append(aggregation, fn(selector))
-	}
-	// If no columns were selected in a custom aggregation function, the default
-	// selection is the fields used for "group-by", and the aggregation functions.
-	if len(selector.SelectedColumns()) == 0 {
-		columns := make([]string, 0, len(akgb.fields)+len(akgb.fns))
-		for _, f := range akgb.fields {
-			columns = append(columns, selector.C(f))
-		}
-		columns = append(columns, aggregation...)
-		selector.Select(columns...)
-	}
-	return selector.GroupBy(selector.Columns(akgb.fields...)...)
-}
-
-// APIKeySelect is the builder for selecting fields of APIKey entities.
-type APIKeySelect struct {
-	*APIKeyQuery
-	selector
-	// intermediate query (i.e. traversal path).
-	sql *sql.Selector
-}
-
-// Scan applies the selector query and scans the result into the given value.
-func (aks *APIKeySelect) Scan(ctx context.Context, v any) error {
-	if err := aks.prepareQuery(ctx); err != nil {
-		return err
-	}
-	aks.sql = aks.APIKeyQuery.sqlQuery(ctx)
-	return aks.sqlScan(ctx, v)
-}
-
-func (aks *APIKeySelect) sqlScan(ctx context.Context, v any) error {
-	rows := &sql.Rows{}
-	query, args := aks.sql.Query()
-	if err := aks.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
diff --git a/pkg/storage/default/ent/apikey_update.go b/pkg/storage/default/ent/apikey_update.go
deleted file mode 100644
index d0c8170..0000000
--- a/pkg/storage/default/ent/apikey_update.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// APIKeyUpdate is the builder for updating APIKey entities.
-type APIKeyUpdate struct {
-	config
-	hooks    []Hook
-	mutation *APIKeyMutation
-}
-
-// Where appends a list predicates to the APIKeyUpdate builder.
-func (aku *APIKeyUpdate) Where(ps ...predicate.APIKey) *APIKeyUpdate {
-	aku.mutation.Where(ps...)
-	return aku
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (aku *APIKeyUpdate) SetOwnerID(id int) *APIKeyUpdate {
-	aku.mutation.SetOwnerID(id)
-	return aku
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (aku *APIKeyUpdate) SetNillableOwnerID(id *int) *APIKeyUpdate {
-	if id != nil {
-		aku = aku.SetOwnerID(*id)
-	}
-	return aku
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (aku *APIKeyUpdate) SetOwner(u *User) *APIKeyUpdate {
-	return aku.SetOwnerID(u.ID)
-}
-
-// Mutation returns the APIKeyMutation object of the builder.
-func (aku *APIKeyUpdate) Mutation() *APIKeyMutation {
-	return aku.mutation
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (aku *APIKeyUpdate) ClearOwner() *APIKeyUpdate {
-	aku.mutation.ClearOwner()
-	return aku
-}
-
-// Save executes the query and returns the number of nodes affected by the update operation.
-func (aku *APIKeyUpdate) Save(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(aku.hooks) == 0 {
-		affected, err = aku.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*APIKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			aku.mutation = mutation
-			affected, err = aku.sqlSave(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(aku.hooks) - 1; i >= 0; i-- {
-			if aku.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = aku.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, aku.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (aku *APIKeyUpdate) SaveX(ctx context.Context) int {
-	affected, err := aku.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return affected
-}
-
-// Exec executes the query.
-func (aku *APIKeyUpdate) Exec(ctx context.Context) error {
-	_, err := aku.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (aku *APIKeyUpdate) ExecX(ctx context.Context) {
-	if err := aku.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (aku *APIKeyUpdate) sqlSave(ctx context.Context) (n int, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   apikey.Table,
-			Columns: apikey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: apikey.FieldID,
-			},
-		},
-	}
-	if ps := aku.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if aku.mutation.OwnerCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   apikey.OwnerTable,
-			Columns: []string{apikey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := aku.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   apikey.OwnerTable,
-			Columns: []string{apikey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	if n, err = sqlgraph.UpdateNodes(ctx, aku.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{apikey.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return 0, err
-	}
-	return n, nil
-}
-
-// APIKeyUpdateOne is the builder for updating a single APIKey entity.
-type APIKeyUpdateOne struct {
-	config
-	fields   []string
-	hooks    []Hook
-	mutation *APIKeyMutation
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (akuo *APIKeyUpdateOne) SetOwnerID(id int) *APIKeyUpdateOne {
-	akuo.mutation.SetOwnerID(id)
-	return akuo
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (akuo *APIKeyUpdateOne) SetNillableOwnerID(id *int) *APIKeyUpdateOne {
-	if id != nil {
-		akuo = akuo.SetOwnerID(*id)
-	}
-	return akuo
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (akuo *APIKeyUpdateOne) SetOwner(u *User) *APIKeyUpdateOne {
-	return akuo.SetOwnerID(u.ID)
-}
-
-// Mutation returns the APIKeyMutation object of the builder.
-func (akuo *APIKeyUpdateOne) Mutation() *APIKeyMutation {
-	return akuo.mutation
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (akuo *APIKeyUpdateOne) ClearOwner() *APIKeyUpdateOne {
-	akuo.mutation.ClearOwner()
-	return akuo
-}
-
-// Select allows selecting one or more fields (columns) of the returned entity.
-// The default is selecting all fields defined in the entity schema.
-func (akuo *APIKeyUpdateOne) Select(field string, fields ...string) *APIKeyUpdateOne {
-	akuo.fields = append([]string{field}, fields...)
-	return akuo
-}
-
-// Save executes the query and returns the updated APIKey entity.
-func (akuo *APIKeyUpdateOne) Save(ctx context.Context) (*APIKey, error) {
-	var (
-		err  error
-		node *APIKey
-	)
-	if len(akuo.hooks) == 0 {
-		node, err = akuo.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*APIKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			akuo.mutation = mutation
-			node, err = akuo.sqlSave(ctx)
-			mutation.done = true
-			return node, err
-		})
-		for i := len(akuo.hooks) - 1; i >= 0; i-- {
-			if akuo.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = akuo.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, akuo.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*APIKey)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from APIKeyMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (akuo *APIKeyUpdateOne) SaveX(ctx context.Context) *APIKey {
-	node, err := akuo.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// Exec executes the query on the entity.
-func (akuo *APIKeyUpdateOne) Exec(ctx context.Context) error {
-	_, err := akuo.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (akuo *APIKeyUpdateOne) ExecX(ctx context.Context) {
-	if err := akuo.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (akuo *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   apikey.Table,
-			Columns: apikey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: apikey.FieldID,
-			},
-		},
-	}
-	id, ok := akuo.mutation.ID()
-	if !ok {
-		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "APIKey.id" for update`)}
-	}
-	_spec.Node.ID.Value = id
-	if fields := akuo.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
-		for _, f := range fields {
-			if !apikey.ValidColumn(f) {
-				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-			}
-			if f != apikey.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, f)
-			}
-		}
-	}
-	if ps := akuo.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if akuo.mutation.OwnerCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   apikey.OwnerTable,
-			Columns: []string{apikey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := akuo.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   apikey.OwnerTable,
-			Columns: []string{apikey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	_node = &APIKey{config: akuo.config}
-	_spec.Assign = _node.assignValues
-	_spec.ScanValues = _node.scanValues
-	if err = sqlgraph.UpdateNode(ctx, akuo.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{apikey.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	return _node, nil
-}
diff --git a/pkg/storage/default/ent/client.go b/pkg/storage/default/ent/client.go
deleted file mode 100644
index 6fdcec2..0000000
--- a/pkg/storage/default/ent/client.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"log"
-
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/migrate"
-
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-
-	"entgo.io/ent/dialect"
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-)
-
-// Client is the client that holds all ent builders.
-type Client struct {
-	config
-	// Schema is the client for creating, migrating and dropping schema.
-	Schema *migrate.Schema
-	// APIKey is the client for interacting with the APIKey builders.
-	APIKey *APIKeyClient
-	// ServiceKey is the client for interacting with the ServiceKey builders.
-	ServiceKey *ServiceKeyClient
-	// User is the client for interacting with the User builders.
-	User *UserClient
-}
-
-// NewClient creates a new client configured with the given options.
-func NewClient(opts ...Option) *Client {
-	cfg := config{log: log.Println, hooks: &hooks{}}
-	cfg.options(opts...)
-	client := &Client{config: cfg}
-	client.init()
-	return client
-}
-
-func (c *Client) init() {
-	c.Schema = migrate.NewSchema(c.driver)
-	c.APIKey = NewAPIKeyClient(c.config)
-	c.ServiceKey = NewServiceKeyClient(c.config)
-	c.User = NewUserClient(c.config)
-}
-
-// Open opens a database/sql.DB specified by the driver name and
-// the data source name, and returns a new client attached to it.
-// Optional parameters can be added for configuring the client.
-func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
-	switch driverName {
-	case dialect.MySQL, dialect.Postgres, dialect.SQLite:
-		drv, err := sql.Open(driverName, dataSourceName)
-		if err != nil {
-			return nil, err
-		}
-		return NewClient(append(options, Driver(drv))...), nil
-	default:
-		return nil, fmt.Errorf("unsupported driver: %q", driverName)
-	}
-}
-
-// Tx returns a new transactional client. The provided context
-// is used until the transaction is committed or rolled back.
-func (c *Client) Tx(ctx context.Context) (*Tx, error) {
-	if _, ok := c.driver.(*txDriver); ok {
-		return nil, errors.New("ent: cannot start a transaction within a transaction")
-	}
-	tx, err := newTx(ctx, c.driver)
-	if err != nil {
-		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
-	}
-	cfg := c.config
-	cfg.driver = tx
-	return &Tx{
-		ctx:        ctx,
-		config:     cfg,
-		APIKey:     NewAPIKeyClient(cfg),
-		ServiceKey: NewServiceKeyClient(cfg),
-		User:       NewUserClient(cfg),
-	}, nil
-}
-
-// BeginTx returns a transactional client with specified options.
-func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
-	if _, ok := c.driver.(*txDriver); ok {
-		return nil, errors.New("ent: cannot start a transaction within a transaction")
-	}
-	tx, err := c.driver.(interface {
-		BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
-	}).BeginTx(ctx, opts)
-	if err != nil {
-		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
-	}
-	cfg := c.config
-	cfg.driver = &txDriver{tx: tx, drv: c.driver}
-	return &Tx{
-		ctx:        ctx,
-		config:     cfg,
-		APIKey:     NewAPIKeyClient(cfg),
-		ServiceKey: NewServiceKeyClient(cfg),
-		User:       NewUserClient(cfg),
-	}, nil
-}
-
-// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
-//
-//	client.Debug().
-//		APIKey.
-//		Query().
-//		Count(ctx)
-func (c *Client) Debug() *Client {
-	if c.debug {
-		return c
-	}
-	cfg := c.config
-	cfg.driver = dialect.Debug(c.driver, c.log)
-	client := &Client{config: cfg}
-	client.init()
-	return client
-}
-
-// Close closes the database connection and prevents new queries from starting.
-func (c *Client) Close() error {
-	return c.driver.Close()
-}
-
-// Use adds the mutation hooks to all the entity clients.
-// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
-func (c *Client) Use(hooks ...Hook) {
-	c.APIKey.Use(hooks...)
-	c.ServiceKey.Use(hooks...)
-	c.User.Use(hooks...)
-}
-
-// APIKeyClient is a client for the APIKey schema.
-type APIKeyClient struct {
-	config
-}
-
-// NewAPIKeyClient returns a client for the APIKey from the given config.
-func NewAPIKeyClient(c config) *APIKeyClient {
-	return &APIKeyClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
-// A call to `Use(f, g, h)` equals to `apikey.Hooks(f(g(h())))`.
-func (c *APIKeyClient) Use(hooks ...Hook) {
-	c.hooks.APIKey = append(c.hooks.APIKey, hooks...)
-}
-
-// Create returns a builder for creating a APIKey entity.
-func (c *APIKeyClient) Create() *APIKeyCreate {
-	mutation := newAPIKeyMutation(c.config, OpCreate)
-	return &APIKeyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// CreateBulk returns a builder for creating a bulk of APIKey entities.
-func (c *APIKeyClient) CreateBulk(builders ...*APIKeyCreate) *APIKeyCreateBulk {
-	return &APIKeyCreateBulk{config: c.config, builders: builders}
-}
-
-// Update returns an update builder for APIKey.
-func (c *APIKeyClient) Update() *APIKeyUpdate {
-	mutation := newAPIKeyMutation(c.config, OpUpdate)
-	return &APIKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOne returns an update builder for the given entity.
-func (c *APIKeyClient) UpdateOne(ak *APIKey) *APIKeyUpdateOne {
-	mutation := newAPIKeyMutation(c.config, OpUpdateOne, withAPIKey(ak))
-	return &APIKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOneID returns an update builder for the given id.
-func (c *APIKeyClient) UpdateOneID(id int) *APIKeyUpdateOne {
-	mutation := newAPIKeyMutation(c.config, OpUpdateOne, withAPIKeyID(id))
-	return &APIKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// Delete returns a delete builder for APIKey.
-func (c *APIKeyClient) Delete() *APIKeyDelete {
-	mutation := newAPIKeyMutation(c.config, OpDelete)
-	return &APIKeyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// DeleteOne returns a builder for deleting the given entity.
-func (c *APIKeyClient) DeleteOne(ak *APIKey) *APIKeyDeleteOne {
-	return c.DeleteOneID(ak.ID)
-}
-
-// DeleteOne returns a builder for deleting the given entity by its id.
-func (c *APIKeyClient) DeleteOneID(id int) *APIKeyDeleteOne {
-	builder := c.Delete().Where(apikey.ID(id))
-	builder.mutation.id = &id
-	builder.mutation.op = OpDeleteOne
-	return &APIKeyDeleteOne{builder}
-}
-
-// Query returns a query builder for APIKey.
-func (c *APIKeyClient) Query() *APIKeyQuery {
-	return &APIKeyQuery{
-		config: c.config,
-	}
-}
-
-// Get returns a APIKey entity by its id.
-func (c *APIKeyClient) Get(ctx context.Context, id int) (*APIKey, error) {
-	return c.Query().Where(apikey.ID(id)).Only(ctx)
-}
-
-// GetX is like Get, but panics if an error occurs.
-func (c *APIKeyClient) GetX(ctx context.Context, id int) *APIKey {
-	obj, err := c.Get(ctx, id)
-	if err != nil {
-		panic(err)
-	}
-	return obj
-}
-
-// QueryOwner queries the owner edge of a APIKey.
-func (c *APIKeyClient) QueryOwner(ak *APIKey) *UserQuery {
-	query := &UserQuery{config: c.config}
-	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
-		id := ak.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(apikey.Table, apikey.FieldID, id),
-			sqlgraph.To(user.Table, user.FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, apikey.OwnerTable, apikey.OwnerColumn),
-		)
-		fromV = sqlgraph.Neighbors(ak.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
-// Hooks returns the client hooks.
-func (c *APIKeyClient) Hooks() []Hook {
-	return c.hooks.APIKey
-}
-
-// ServiceKeyClient is a client for the ServiceKey schema.
-type ServiceKeyClient struct {
-	config
-}
-
-// NewServiceKeyClient returns a client for the ServiceKey from the given config.
-func NewServiceKeyClient(c config) *ServiceKeyClient {
-	return &ServiceKeyClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
-// A call to `Use(f, g, h)` equals to `servicekey.Hooks(f(g(h())))`.
-func (c *ServiceKeyClient) Use(hooks ...Hook) {
-	c.hooks.ServiceKey = append(c.hooks.ServiceKey, hooks...)
-}
-
-// Create returns a builder for creating a ServiceKey entity.
-func (c *ServiceKeyClient) Create() *ServiceKeyCreate {
-	mutation := newServiceKeyMutation(c.config, OpCreate)
-	return &ServiceKeyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// CreateBulk returns a builder for creating a bulk of ServiceKey entities.
-func (c *ServiceKeyClient) CreateBulk(builders ...*ServiceKeyCreate) *ServiceKeyCreateBulk {
-	return &ServiceKeyCreateBulk{config: c.config, builders: builders}
-}
-
-// Update returns an update builder for ServiceKey.
-func (c *ServiceKeyClient) Update() *ServiceKeyUpdate {
-	mutation := newServiceKeyMutation(c.config, OpUpdate)
-	return &ServiceKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOne returns an update builder for the given entity.
-func (c *ServiceKeyClient) UpdateOne(sk *ServiceKey) *ServiceKeyUpdateOne {
-	mutation := newServiceKeyMutation(c.config, OpUpdateOne, withServiceKey(sk))
-	return &ServiceKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOneID returns an update builder for the given id.
-func (c *ServiceKeyClient) UpdateOneID(id int) *ServiceKeyUpdateOne {
-	mutation := newServiceKeyMutation(c.config, OpUpdateOne, withServiceKeyID(id))
-	return &ServiceKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// Delete returns a delete builder for ServiceKey.
-func (c *ServiceKeyClient) Delete() *ServiceKeyDelete {
-	mutation := newServiceKeyMutation(c.config, OpDelete)
-	return &ServiceKeyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// DeleteOne returns a builder for deleting the given entity.
-func (c *ServiceKeyClient) DeleteOne(sk *ServiceKey) *ServiceKeyDeleteOne {
-	return c.DeleteOneID(sk.ID)
-}
-
-// DeleteOne returns a builder for deleting the given entity by its id.
-func (c *ServiceKeyClient) DeleteOneID(id int) *ServiceKeyDeleteOne {
-	builder := c.Delete().Where(servicekey.ID(id))
-	builder.mutation.id = &id
-	builder.mutation.op = OpDeleteOne
-	return &ServiceKeyDeleteOne{builder}
-}
-
-// Query returns a query builder for ServiceKey.
-func (c *ServiceKeyClient) Query() *ServiceKeyQuery {
-	return &ServiceKeyQuery{
-		config: c.config,
-	}
-}
-
-// Get returns a ServiceKey entity by its id.
-func (c *ServiceKeyClient) Get(ctx context.Context, id int) (*ServiceKey, error) {
-	return c.Query().Where(servicekey.ID(id)).Only(ctx)
-}
-
-// GetX is like Get, but panics if an error occurs.
-func (c *ServiceKeyClient) GetX(ctx context.Context, id int) *ServiceKey {
-	obj, err := c.Get(ctx, id)
-	if err != nil {
-		panic(err)
-	}
-	return obj
-}
-
-// QueryOwner queries the owner edge of a ServiceKey.
-func (c *ServiceKeyClient) QueryOwner(sk *ServiceKey) *UserQuery {
-	query := &UserQuery{config: c.config}
-	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
-		id := sk.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(servicekey.Table, servicekey.FieldID, id),
-			sqlgraph.To(user.Table, user.FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, servicekey.OwnerTable, servicekey.OwnerColumn),
-		)
-		fromV = sqlgraph.Neighbors(sk.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
-// Hooks returns the client hooks.
-func (c *ServiceKeyClient) Hooks() []Hook {
-	return c.hooks.ServiceKey
-}
-
-// UserClient is a client for the User schema.
-type UserClient struct {
-	config
-}
-
-// NewUserClient returns a client for the User from the given config.
-func NewUserClient(c config) *UserClient {
-	return &UserClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
-// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`.
-func (c *UserClient) Use(hooks ...Hook) {
-	c.hooks.User = append(c.hooks.User, hooks...)
-}
-
-// Create returns a builder for creating a User entity.
-func (c *UserClient) Create() *UserCreate {
-	mutation := newUserMutation(c.config, OpCreate)
-	return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// CreateBulk returns a builder for creating a bulk of User entities.
-func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
-	return &UserCreateBulk{config: c.config, builders: builders}
-}
-
-// Update returns an update builder for User.
-func (c *UserClient) Update() *UserUpdate {
-	mutation := newUserMutation(c.config, OpUpdate)
-	return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOne returns an update builder for the given entity.
-func (c *UserClient) UpdateOne(u *User) *UserUpdateOne {
-	mutation := newUserMutation(c.config, OpUpdateOne, withUser(u))
-	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// UpdateOneID returns an update builder for the given id.
-func (c *UserClient) UpdateOneID(id int) *UserUpdateOne {
-	mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id))
-	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// Delete returns a delete builder for User.
-func (c *UserClient) Delete() *UserDelete {
-	mutation := newUserMutation(c.config, OpDelete)
-	return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
-}
-
-// DeleteOne returns a builder for deleting the given entity.
-func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
-	return c.DeleteOneID(u.ID)
-}
-
-// DeleteOne returns a builder for deleting the given entity by its id.
-func (c *UserClient) DeleteOneID(id int) *UserDeleteOne {
-	builder := c.Delete().Where(user.ID(id))
-	builder.mutation.id = &id
-	builder.mutation.op = OpDeleteOne
-	return &UserDeleteOne{builder}
-}
-
-// Query returns a query builder for User.
-func (c *UserClient) Query() *UserQuery {
-	return &UserQuery{
-		config: c.config,
-	}
-}
-
-// Get returns a User entity by its id.
-func (c *UserClient) Get(ctx context.Context, id int) (*User, error) {
-	return c.Query().Where(user.ID(id)).Only(ctx)
-}
-
-// GetX is like Get, but panics if an error occurs.
-func (c *UserClient) GetX(ctx context.Context, id int) *User {
-	obj, err := c.Get(ctx, id)
-	if err != nil {
-		panic(err)
-	}
-	return obj
-}
-
-// QueryServicekeys queries the servicekeys edge of a User.
-func (c *UserClient) QueryServicekeys(u *User) *ServiceKeyQuery {
-	query := &ServiceKeyQuery{config: c.config}
-	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
-		id := u.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(user.Table, user.FieldID, id),
-			sqlgraph.To(servicekey.Table, servicekey.FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, user.ServicekeysTable, user.ServicekeysColumn),
-		)
-		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
-// QueryApikeys queries the apikeys edge of a User.
-func (c *UserClient) QueryApikeys(u *User) *APIKeyQuery {
-	query := &APIKeyQuery{config: c.config}
-	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
-		id := u.ID
-		step := sqlgraph.NewStep(
-			sqlgraph.From(user.Table, user.FieldID, id),
-			sqlgraph.To(apikey.Table, apikey.FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, user.ApikeysTable, user.ApikeysColumn),
-		)
-		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
-		return fromV, nil
-	}
-	return query
-}
-
-// Hooks returns the client hooks.
-func (c *UserClient) Hooks() []Hook {
-	return c.hooks.User
-}
diff --git a/pkg/storage/default/ent/generate.go b/pkg/storage/default/ent/generate.go
deleted file mode 100644
index 8d3fdfd..0000000
--- a/pkg/storage/default/ent/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package ent
-
-//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema
diff --git a/pkg/storage/default/ent/migrate/schema.go b/pkg/storage/default/ent/migrate/schema.go
deleted file mode 100644
index e8578bc..0000000
--- a/pkg/storage/default/ent/migrate/schema.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package migrate
-
-import (
-	"entgo.io/ent/dialect/sql/schema"
-	"entgo.io/ent/schema/field"
-)
-
-var (
-	// APIKeysColumns holds the columns for the "api_keys" table.
-	APIKeysColumns = []*schema.Column{
-		{Name: "id", Type: field.TypeInt, Increment: true},
-		{Name: "created_at", Type: field.TypeInt64},
-		{Name: "name", Type: field.TypeString},
-		{Name: "value", Type: field.TypeString, Unique: true},
-		{Name: "secret", Type: field.TypeBytes},
-		{Name: "user_apikeys", Type: field.TypeInt, Nullable: true},
-	}
-	// APIKeysTable holds the schema information for the "api_keys" table.
-	APIKeysTable = &schema.Table{
-		Name:       "api_keys",
-		Columns:    APIKeysColumns,
-		PrimaryKey: []*schema.Column{APIKeysColumns[0]},
-		ForeignKeys: []*schema.ForeignKey{
-			{
-				Symbol:     "api_keys_users_apikeys",
-				Columns:    []*schema.Column{APIKeysColumns[5]},
-				RefColumns: []*schema.Column{UsersColumns[0]},
-				OnDelete:   schema.SetNull,
-			},
-		},
-	}
-	// ServiceKeysColumns holds the columns for the "service_keys" table.
-	ServiceKeysColumns = []*schema.Column{
-		{Name: "id", Type: field.TypeInt, Increment: true},
-		{Name: "created_at", Type: field.TypeInt64},
-		{Name: "name", Type: field.TypeString},
-		{Name: "value", Type: field.TypeString, Unique: true},
-		{Name: "secret", Type: field.TypeBytes},
-		{Name: "resource", Type: field.TypeString, Default: ""},
-		{Name: "num_used", Type: field.TypeInt64, Default: 0},
-		{Name: "max_uses", Type: field.TypeInt64, Default: 0},
-		{Name: "expires", Type: field.TypeInt64, Default: 0},
-		{Name: "user_servicekeys", Type: field.TypeInt, Nullable: true},
-	}
-	// ServiceKeysTable holds the schema information for the "service_keys" table.
-	ServiceKeysTable = &schema.Table{
-		Name:       "service_keys",
-		Columns:    ServiceKeysColumns,
-		PrimaryKey: []*schema.Column{ServiceKeysColumns[0]},
-		ForeignKeys: []*schema.ForeignKey{
-			{
-				Symbol:     "service_keys_users_servicekeys",
-				Columns:    []*schema.Column{ServiceKeysColumns[9]},
-				RefColumns: []*schema.Column{UsersColumns[0]},
-				OnDelete:   schema.SetNull,
-			},
-		},
-	}
-	// UsersColumns holds the columns for the "users" table.
-	UsersColumns = []*schema.Column{
-		{Name: "id", Type: field.TypeInt, Increment: true},
-		{Name: "username", Type: field.TypeString, Default: "unknown"},
-		{Name: "created_at", Type: field.TypeTime},
-	}
-	// UsersTable holds the schema information for the "users" table.
-	UsersTable = &schema.Table{
-		Name:       "users",
-		Columns:    UsersColumns,
-		PrimaryKey: []*schema.Column{UsersColumns[0]},
-	}
-	// Tables holds all the tables in the schema.
-	Tables = []*schema.Table{
-		APIKeysTable,
-		ServiceKeysTable,
-		UsersTable,
-	}
-)
-
-func init() {
-	APIKeysTable.ForeignKeys[0].RefTable = UsersTable
-	ServiceKeysTable.ForeignKeys[0].RefTable = UsersTable
-}
diff --git a/pkg/storage/default/ent/mutation.go b/pkg/storage/default/ent/mutation.go
deleted file mode 100644
index ab9de45..0000000
--- a/pkg/storage/default/ent/mutation.go
+++ /dev/null
@@ -1,2040 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-
-	"entgo.io/ent"
-)
-
-const (
-	// Operation types.
-	OpCreate    = ent.OpCreate
-	OpDelete    = ent.OpDelete
-	OpDeleteOne = ent.OpDeleteOne
-	OpUpdate    = ent.OpUpdate
-	OpUpdateOne = ent.OpUpdateOne
-
-	// Node types.
-	TypeAPIKey     = "APIKey"
-	TypeServiceKey = "ServiceKey"
-	TypeUser       = "User"
-)
-
-// APIKeyMutation represents an operation that mutates the APIKey nodes in the graph.
-type APIKeyMutation struct {
-	config
-	op            Op
-	typ           string
-	id            *int
-	created_at    *int64
-	addcreated_at *int64
-	name          *string
-	value         *string
-	secret        *[]byte
-	clearedFields map[string]struct{}
-	owner         *int
-	clearedowner  bool
-	done          bool
-	oldValue      func(context.Context) (*APIKey, error)
-	predicates    []predicate.APIKey
-}
-
-var _ ent.Mutation = (*APIKeyMutation)(nil)
-
-// apikeyOption allows management of the mutation configuration using functional options.
-type apikeyOption func(*APIKeyMutation)
-
-// newAPIKeyMutation creates new mutation for the APIKey entity.
-func newAPIKeyMutation(c config, op Op, opts ...apikeyOption) *APIKeyMutation {
-	m := &APIKeyMutation{
-		config:        c,
-		op:            op,
-		typ:           TypeAPIKey,
-		clearedFields: make(map[string]struct{}),
-	}
-	for _, opt := range opts {
-		opt(m)
-	}
-	return m
-}
-
-// withAPIKeyID sets the ID field of the mutation.
-func withAPIKeyID(id int) apikeyOption {
-	return func(m *APIKeyMutation) {
-		var (
-			err   error
-			once  sync.Once
-			value *APIKey
-		)
-		m.oldValue = func(ctx context.Context) (*APIKey, error) {
-			once.Do(func() {
-				if m.done {
-					err = errors.New("querying old values post mutation is not allowed")
-				} else {
-					value, err = m.Client().APIKey.Get(ctx, id)
-				}
-			})
-			return value, err
-		}
-		m.id = &id
-	}
-}
-
-// withAPIKey sets the old APIKey of the mutation.
-func withAPIKey(node *APIKey) apikeyOption {
-	return func(m *APIKeyMutation) {
-		m.oldValue = func(context.Context) (*APIKey, error) {
-			return node, nil
-		}
-		m.id = &node.ID
-	}
-}
-
-// Client returns a new `ent.Client` from the mutation. If the mutation was
-// executed in a transaction (ent.Tx), a transactional client is returned.
-func (m APIKeyMutation) Client() *Client {
-	client := &Client{config: m.config}
-	client.init()
-	return client
-}
-
-// Tx returns an `ent.Tx` for mutations that were executed in transactions;
-// it returns an error otherwise.
-func (m APIKeyMutation) Tx() (*Tx, error) {
-	if _, ok := m.driver.(*txDriver); !ok {
-		return nil, errors.New("ent: mutation is not running in a transaction")
-	}
-	tx := &Tx{config: m.config}
-	tx.init()
-	return tx, nil
-}
-
-// ID returns the ID value in the mutation. Note that the ID is only available
-// if it was provided to the builder or after it was returned from the database.
-func (m *APIKeyMutation) ID() (id int, exists bool) {
-	if m.id == nil {
-		return
-	}
-	return *m.id, true
-}
-
-// IDs queries the database and returns the entity ids that match the mutation's predicate.
-// That means, if the mutation is applied within a transaction with an isolation level such
-// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
-// or updated by the mutation.
-func (m *APIKeyMutation) IDs(ctx context.Context) ([]int, error) {
-	switch {
-	case m.op.Is(OpUpdateOne | OpDeleteOne):
-		id, exists := m.ID()
-		if exists {
-			return []int{id}, nil
-		}
-		fallthrough
-	case m.op.Is(OpUpdate | OpDelete):
-		return m.Client().APIKey.Query().Where(m.predicates...).IDs(ctx)
-	default:
-		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
-	}
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (m *APIKeyMutation) SetCreatedAt(i int64) {
-	m.created_at = &i
-	m.addcreated_at = nil
-}
-
-// CreatedAt returns the value of the "created_at" field in the mutation.
-func (m *APIKeyMutation) CreatedAt() (r int64, exists bool) {
-	v := m.created_at
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldCreatedAt returns the old "created_at" field's value of the APIKey entity.
-// If the APIKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *APIKeyMutation) OldCreatedAt(ctx context.Context) (v int64, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
-	}
-	return oldValue.CreatedAt, nil
-}
-
-// AddCreatedAt adds i to the "created_at" field.
-func (m *APIKeyMutation) AddCreatedAt(i int64) {
-	if m.addcreated_at != nil {
-		*m.addcreated_at += i
-	} else {
-		m.addcreated_at = &i
-	}
-}
-
-// AddedCreatedAt returns the value that was added to the "created_at" field in this mutation.
-func (m *APIKeyMutation) AddedCreatedAt() (r int64, exists bool) {
-	v := m.addcreated_at
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// ResetCreatedAt resets all changes to the "created_at" field.
-func (m *APIKeyMutation) ResetCreatedAt() {
-	m.created_at = nil
-	m.addcreated_at = nil
-}
-
-// SetName sets the "name" field.
-func (m *APIKeyMutation) SetName(s string) {
-	m.name = &s
-}
-
-// Name returns the value of the "name" field in the mutation.
-func (m *APIKeyMutation) Name() (r string, exists bool) {
-	v := m.name
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldName returns the old "name" field's value of the APIKey entity.
-// If the APIKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *APIKeyMutation) OldName(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldName is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldName requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldName: %w", err)
-	}
-	return oldValue.Name, nil
-}
-
-// ResetName resets all changes to the "name" field.
-func (m *APIKeyMutation) ResetName() {
-	m.name = nil
-}
-
-// SetValue sets the "value" field.
-func (m *APIKeyMutation) SetValue(s string) {
-	m.value = &s
-}
-
-// Value returns the value of the "value" field in the mutation.
-func (m *APIKeyMutation) Value() (r string, exists bool) {
-	v := m.value
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldValue returns the old "value" field's value of the APIKey entity.
-// If the APIKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *APIKeyMutation) OldValue(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldValue is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldValue requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldValue: %w", err)
-	}
-	return oldValue.Value, nil
-}
-
-// ResetValue resets all changes to the "value" field.
-func (m *APIKeyMutation) ResetValue() {
-	m.value = nil
-}
-
-// SetSecret sets the "secret" field.
-func (m *APIKeyMutation) SetSecret(b []byte) {
-	m.secret = &b
-}
-
-// Secret returns the value of the "secret" field in the mutation.
-func (m *APIKeyMutation) Secret() (r []byte, exists bool) {
-	v := m.secret
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldSecret returns the old "secret" field's value of the APIKey entity.
-// If the APIKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *APIKeyMutation) OldSecret(ctx context.Context) (v []byte, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldSecret is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldSecret requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldSecret: %w", err)
-	}
-	return oldValue.Secret, nil
-}
-
-// ResetSecret resets all changes to the "secret" field.
-func (m *APIKeyMutation) ResetSecret() {
-	m.secret = nil
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by id.
-func (m *APIKeyMutation) SetOwnerID(id int) {
-	m.owner = &id
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (m *APIKeyMutation) ClearOwner() {
-	m.clearedowner = true
-}
-
-// OwnerCleared reports if the "owner" edge to the User entity was cleared.
-func (m *APIKeyMutation) OwnerCleared() bool {
-	return m.clearedowner
-}
-
-// OwnerID returns the "owner" edge ID in the mutation.
-func (m *APIKeyMutation) OwnerID() (id int, exists bool) {
-	if m.owner != nil {
-		return *m.owner, true
-	}
-	return
-}
-
-// OwnerIDs returns the "owner" edge IDs in the mutation.
-// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
-// OwnerID instead. It exists only for internal usage by the builders.
-func (m *APIKeyMutation) OwnerIDs() (ids []int) {
-	if id := m.owner; id != nil {
-		ids = append(ids, *id)
-	}
-	return
-}
-
-// ResetOwner resets all changes to the "owner" edge.
-func (m *APIKeyMutation) ResetOwner() {
-	m.owner = nil
-	m.clearedowner = false
-}
-
-// Where appends a list predicates to the APIKeyMutation builder.
-func (m *APIKeyMutation) Where(ps ...predicate.APIKey) {
-	m.predicates = append(m.predicates, ps...)
-}
-
-// Op returns the operation name.
-func (m *APIKeyMutation) Op() Op {
-	return m.op
-}
-
-// Type returns the node type of this mutation (APIKey).
-func (m *APIKeyMutation) Type() string {
-	return m.typ
-}
-
-// Fields returns all fields that were changed during this mutation. Note that in
-// order to get all numeric fields that were incremented/decremented, call
-// AddedFields().
-func (m *APIKeyMutation) Fields() []string {
-	fields := make([]string, 0, 4)
-	if m.created_at != nil {
-		fields = append(fields, apikey.FieldCreatedAt)
-	}
-	if m.name != nil {
-		fields = append(fields, apikey.FieldName)
-	}
-	if m.value != nil {
-		fields = append(fields, apikey.FieldValue)
-	}
-	if m.secret != nil {
-		fields = append(fields, apikey.FieldSecret)
-	}
-	return fields
-}
-
-// Field returns the value of a field with the given name. The second boolean
-// return value indicates that this field was not set, or was not defined in the
-// schema.
-func (m *APIKeyMutation) Field(name string) (ent.Value, bool) {
-	switch name {
-	case apikey.FieldCreatedAt:
-		return m.CreatedAt()
-	case apikey.FieldName:
-		return m.Name()
-	case apikey.FieldValue:
-		return m.Value()
-	case apikey.FieldSecret:
-		return m.Secret()
-	}
-	return nil, false
-}
-
-// OldField returns the old value of the field from the database. An error is
-// returned if the mutation operation is not UpdateOne, or the query to the
-// database failed.
-func (m *APIKeyMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
-	switch name {
-	case apikey.FieldCreatedAt:
-		return m.OldCreatedAt(ctx)
-	case apikey.FieldName:
-		return m.OldName(ctx)
-	case apikey.FieldValue:
-		return m.OldValue(ctx)
-	case apikey.FieldSecret:
-		return m.OldSecret(ctx)
-	}
-	return nil, fmt.Errorf("unknown APIKey field %s", name)
-}
-
-// SetField sets the value of a field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *APIKeyMutation) SetField(name string, value ent.Value) error {
-	switch name {
-	case apikey.FieldCreatedAt:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetCreatedAt(v)
-		return nil
-	case apikey.FieldName:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetName(v)
-		return nil
-	case apikey.FieldValue:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetValue(v)
-		return nil
-	case apikey.FieldSecret:
-		v, ok := value.([]byte)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetSecret(v)
-		return nil
-	}
-	return fmt.Errorf("unknown APIKey field %s", name)
-}
-
-// AddedFields returns all numeric fields that were incremented/decremented during
-// this mutation.
-func (m *APIKeyMutation) AddedFields() []string {
-	var fields []string
-	if m.addcreated_at != nil {
-		fields = append(fields, apikey.FieldCreatedAt)
-	}
-	return fields
-}
-
-// AddedField returns the numeric value that was incremented/decremented on a field
-// with the given name. The second boolean return value indicates that this field
-// was not set, or was not defined in the schema.
-func (m *APIKeyMutation) AddedField(name string) (ent.Value, bool) {
-	switch name {
-	case apikey.FieldCreatedAt:
-		return m.AddedCreatedAt()
-	}
-	return nil, false
-}
-
-// AddField adds the value to the field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *APIKeyMutation) AddField(name string, value ent.Value) error {
-	switch name {
-	case apikey.FieldCreatedAt:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.AddCreatedAt(v)
-		return nil
-	}
-	return fmt.Errorf("unknown APIKey numeric field %s", name)
-}
-
-// ClearedFields returns all nullable fields that were cleared during this
-// mutation.
-func (m *APIKeyMutation) ClearedFields() []string {
-	return nil
-}
-
-// FieldCleared returns a boolean indicating if a field with the given name was
-// cleared in this mutation.
-func (m *APIKeyMutation) FieldCleared(name string) bool {
-	_, ok := m.clearedFields[name]
-	return ok
-}
-
-// ClearField clears the value of the field with the given name. It returns an
-// error if the field is not defined in the schema.
-func (m *APIKeyMutation) ClearField(name string) error {
-	return fmt.Errorf("unknown APIKey nullable field %s", name)
-}
-
-// ResetField resets all changes in the mutation for the field with the given name.
-// It returns an error if the field is not defined in the schema.
-func (m *APIKeyMutation) ResetField(name string) error {
-	switch name {
-	case apikey.FieldCreatedAt:
-		m.ResetCreatedAt()
-		return nil
-	case apikey.FieldName:
-		m.ResetName()
-		return nil
-	case apikey.FieldValue:
-		m.ResetValue()
-		return nil
-	case apikey.FieldSecret:
-		m.ResetSecret()
-		return nil
-	}
-	return fmt.Errorf("unknown APIKey field %s", name)
-}
-
-// AddedEdges returns all edge names that were set/added in this mutation.
-func (m *APIKeyMutation) AddedEdges() []string {
-	edges := make([]string, 0, 1)
-	if m.owner != nil {
-		edges = append(edges, apikey.EdgeOwner)
-	}
-	return edges
-}
-
-// AddedIDs returns all IDs (to other nodes) that were added for the given edge
-// name in this mutation.
-func (m *APIKeyMutation) AddedIDs(name string) []ent.Value {
-	switch name {
-	case apikey.EdgeOwner:
-		if id := m.owner; id != nil {
-			return []ent.Value{*id}
-		}
-	}
-	return nil
-}
-
-// RemovedEdges returns all edge names that were removed in this mutation.
-func (m *APIKeyMutation) RemovedEdges() []string {
-	edges := make([]string, 0, 1)
-	return edges
-}
-
-// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
-// the given name in this mutation.
-func (m *APIKeyMutation) RemovedIDs(name string) []ent.Value {
-	return nil
-}
-
-// ClearedEdges returns all edge names that were cleared in this mutation.
-func (m *APIKeyMutation) ClearedEdges() []string {
-	edges := make([]string, 0, 1)
-	if m.clearedowner {
-		edges = append(edges, apikey.EdgeOwner)
-	}
-	return edges
-}
-
-// EdgeCleared returns a boolean which indicates if the edge with the given name
-// was cleared in this mutation.
-func (m *APIKeyMutation) EdgeCleared(name string) bool {
-	switch name {
-	case apikey.EdgeOwner:
-		return m.clearedowner
-	}
-	return false
-}
-
-// ClearEdge clears the value of the edge with the given name. It returns an error
-// if that edge is not defined in the schema.
-func (m *APIKeyMutation) ClearEdge(name string) error {
-	switch name {
-	case apikey.EdgeOwner:
-		m.ClearOwner()
-		return nil
-	}
-	return fmt.Errorf("unknown APIKey unique edge %s", name)
-}
-
-// ResetEdge resets all changes to the edge with the given name in this mutation.
-// It returns an error if the edge is not defined in the schema.
-func (m *APIKeyMutation) ResetEdge(name string) error {
-	switch name {
-	case apikey.EdgeOwner:
-		m.ResetOwner()
-		return nil
-	}
-	return fmt.Errorf("unknown APIKey edge %s", name)
-}
-
-// ServiceKeyMutation represents an operation that mutates the ServiceKey nodes in the graph.
-type ServiceKeyMutation struct {
-	config
-	op            Op
-	typ           string
-	id            *int
-	created_at    *int64
-	addcreated_at *int64
-	name          *string
-	value         *string
-	secret        *[]byte
-	resource      *string
-	num_used      *int64
-	addnum_used   *int64
-	max_uses      *int64
-	addmax_uses   *int64
-	expires       *int64
-	addexpires    *int64
-	clearedFields map[string]struct{}
-	owner         *int
-	clearedowner  bool
-	done          bool
-	oldValue      func(context.Context) (*ServiceKey, error)
-	predicates    []predicate.ServiceKey
-}
-
-var _ ent.Mutation = (*ServiceKeyMutation)(nil)
-
-// servicekeyOption allows management of the mutation configuration using functional options.
-type servicekeyOption func(*ServiceKeyMutation)
-
-// newServiceKeyMutation creates new mutation for the ServiceKey entity.
-func newServiceKeyMutation(c config, op Op, opts ...servicekeyOption) *ServiceKeyMutation {
-	m := &ServiceKeyMutation{
-		config:        c,
-		op:            op,
-		typ:           TypeServiceKey,
-		clearedFields: make(map[string]struct{}),
-	}
-	for _, opt := range opts {
-		opt(m)
-	}
-	return m
-}
-
-// withServiceKeyID sets the ID field of the mutation.
-func withServiceKeyID(id int) servicekeyOption {
-	return func(m *ServiceKeyMutation) {
-		var (
-			err   error
-			once  sync.Once
-			value *ServiceKey
-		)
-		m.oldValue = func(ctx context.Context) (*ServiceKey, error) {
-			once.Do(func() {
-				if m.done {
-					err = errors.New("querying old values post mutation is not allowed")
-				} else {
-					value, err = m.Client().ServiceKey.Get(ctx, id)
-				}
-			})
-			return value, err
-		}
-		m.id = &id
-	}
-}
-
-// withServiceKey sets the old ServiceKey of the mutation.
-func withServiceKey(node *ServiceKey) servicekeyOption {
-	return func(m *ServiceKeyMutation) {
-		m.oldValue = func(context.Context) (*ServiceKey, error) {
-			return node, nil
-		}
-		m.id = &node.ID
-	}
-}
-
-// Client returns a new `ent.Client` from the mutation. If the mutation was
-// executed in a transaction (ent.Tx), a transactional client is returned.
-func (m ServiceKeyMutation) Client() *Client {
-	client := &Client{config: m.config}
-	client.init()
-	return client
-}
-
-// Tx returns an `ent.Tx` for mutations that were executed in transactions;
-// it returns an error otherwise.
-func (m ServiceKeyMutation) Tx() (*Tx, error) {
-	if _, ok := m.driver.(*txDriver); !ok {
-		return nil, errors.New("ent: mutation is not running in a transaction")
-	}
-	tx := &Tx{config: m.config}
-	tx.init()
-	return tx, nil
-}
-
-// ID returns the ID value in the mutation. Note that the ID is only available
-// if it was provided to the builder or after it was returned from the database.
-func (m *ServiceKeyMutation) ID() (id int, exists bool) {
-	if m.id == nil {
-		return
-	}
-	return *m.id, true
-}
-
-// IDs queries the database and returns the entity ids that match the mutation's predicate.
-// That means, if the mutation is applied within a transaction with an isolation level such
-// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
-// or updated by the mutation.
-func (m *ServiceKeyMutation) IDs(ctx context.Context) ([]int, error) {
-	switch {
-	case m.op.Is(OpUpdateOne | OpDeleteOne):
-		id, exists := m.ID()
-		if exists {
-			return []int{id}, nil
-		}
-		fallthrough
-	case m.op.Is(OpUpdate | OpDelete):
-		return m.Client().ServiceKey.Query().Where(m.predicates...).IDs(ctx)
-	default:
-		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
-	}
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (m *ServiceKeyMutation) SetCreatedAt(i int64) {
-	m.created_at = &i
-	m.addcreated_at = nil
-}
-
-// CreatedAt returns the value of the "created_at" field in the mutation.
-func (m *ServiceKeyMutation) CreatedAt() (r int64, exists bool) {
-	v := m.created_at
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldCreatedAt returns the old "created_at" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldCreatedAt(ctx context.Context) (v int64, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
-	}
-	return oldValue.CreatedAt, nil
-}
-
-// AddCreatedAt adds i to the "created_at" field.
-func (m *ServiceKeyMutation) AddCreatedAt(i int64) {
-	if m.addcreated_at != nil {
-		*m.addcreated_at += i
-	} else {
-		m.addcreated_at = &i
-	}
-}
-
-// AddedCreatedAt returns the value that was added to the "created_at" field in this mutation.
-func (m *ServiceKeyMutation) AddedCreatedAt() (r int64, exists bool) {
-	v := m.addcreated_at
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// ResetCreatedAt resets all changes to the "created_at" field.
-func (m *ServiceKeyMutation) ResetCreatedAt() {
-	m.created_at = nil
-	m.addcreated_at = nil
-}
-
-// SetName sets the "name" field.
-func (m *ServiceKeyMutation) SetName(s string) {
-	m.name = &s
-}
-
-// Name returns the value of the "name" field in the mutation.
-func (m *ServiceKeyMutation) Name() (r string, exists bool) {
-	v := m.name
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldName returns the old "name" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldName(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldName is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldName requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldName: %w", err)
-	}
-	return oldValue.Name, nil
-}
-
-// ResetName resets all changes to the "name" field.
-func (m *ServiceKeyMutation) ResetName() {
-	m.name = nil
-}
-
-// SetValue sets the "value" field.
-func (m *ServiceKeyMutation) SetValue(s string) {
-	m.value = &s
-}
-
-// Value returns the value of the "value" field in the mutation.
-func (m *ServiceKeyMutation) Value() (r string, exists bool) {
-	v := m.value
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldValue returns the old "value" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldValue(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldValue is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldValue requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldValue: %w", err)
-	}
-	return oldValue.Value, nil
-}
-
-// ResetValue resets all changes to the "value" field.
-func (m *ServiceKeyMutation) ResetValue() {
-	m.value = nil
-}
-
-// SetSecret sets the "secret" field.
-func (m *ServiceKeyMutation) SetSecret(b []byte) {
-	m.secret = &b
-}
-
-// Secret returns the value of the "secret" field in the mutation.
-func (m *ServiceKeyMutation) Secret() (r []byte, exists bool) {
-	v := m.secret
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldSecret returns the old "secret" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldSecret(ctx context.Context) (v []byte, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldSecret is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldSecret requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldSecret: %w", err)
-	}
-	return oldValue.Secret, nil
-}
-
-// ResetSecret resets all changes to the "secret" field.
-func (m *ServiceKeyMutation) ResetSecret() {
-	m.secret = nil
-}
-
-// SetResource sets the "resource" field.
-func (m *ServiceKeyMutation) SetResource(s string) {
-	m.resource = &s
-}
-
-// Resource returns the value of the "resource" field in the mutation.
-func (m *ServiceKeyMutation) Resource() (r string, exists bool) {
-	v := m.resource
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldResource returns the old "resource" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldResource(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldResource is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldResource requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldResource: %w", err)
-	}
-	return oldValue.Resource, nil
-}
-
-// ResetResource resets all changes to the "resource" field.
-func (m *ServiceKeyMutation) ResetResource() {
-	m.resource = nil
-}
-
-// SetNumUsed sets the "num_used" field.
-func (m *ServiceKeyMutation) SetNumUsed(i int64) {
-	m.num_used = &i
-	m.addnum_used = nil
-}
-
-// NumUsed returns the value of the "num_used" field in the mutation.
-func (m *ServiceKeyMutation) NumUsed() (r int64, exists bool) {
-	v := m.num_used
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldNumUsed returns the old "num_used" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldNumUsed(ctx context.Context) (v int64, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldNumUsed is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldNumUsed requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldNumUsed: %w", err)
-	}
-	return oldValue.NumUsed, nil
-}
-
-// AddNumUsed adds i to the "num_used" field.
-func (m *ServiceKeyMutation) AddNumUsed(i int64) {
-	if m.addnum_used != nil {
-		*m.addnum_used += i
-	} else {
-		m.addnum_used = &i
-	}
-}
-
-// AddedNumUsed returns the value that was added to the "num_used" field in this mutation.
-func (m *ServiceKeyMutation) AddedNumUsed() (r int64, exists bool) {
-	v := m.addnum_used
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// ResetNumUsed resets all changes to the "num_used" field.
-func (m *ServiceKeyMutation) ResetNumUsed() {
-	m.num_used = nil
-	m.addnum_used = nil
-}
-
-// SetMaxUses sets the "max_uses" field.
-func (m *ServiceKeyMutation) SetMaxUses(i int64) {
-	m.max_uses = &i
-	m.addmax_uses = nil
-}
-
-// MaxUses returns the value of the "max_uses" field in the mutation.
-func (m *ServiceKeyMutation) MaxUses() (r int64, exists bool) {
-	v := m.max_uses
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldMaxUses returns the old "max_uses" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldMaxUses(ctx context.Context) (v int64, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldMaxUses is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldMaxUses requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldMaxUses: %w", err)
-	}
-	return oldValue.MaxUses, nil
-}
-
-// AddMaxUses adds i to the "max_uses" field.
-func (m *ServiceKeyMutation) AddMaxUses(i int64) {
-	if m.addmax_uses != nil {
-		*m.addmax_uses += i
-	} else {
-		m.addmax_uses = &i
-	}
-}
-
-// AddedMaxUses returns the value that was added to the "max_uses" field in this mutation.
-func (m *ServiceKeyMutation) AddedMaxUses() (r int64, exists bool) {
-	v := m.addmax_uses
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// ResetMaxUses resets all changes to the "max_uses" field.
-func (m *ServiceKeyMutation) ResetMaxUses() {
-	m.max_uses = nil
-	m.addmax_uses = nil
-}
-
-// SetExpires sets the "expires" field.
-func (m *ServiceKeyMutation) SetExpires(i int64) {
-	m.expires = &i
-	m.addexpires = nil
-}
-
-// Expires returns the value of the "expires" field in the mutation.
-func (m *ServiceKeyMutation) Expires() (r int64, exists bool) {
-	v := m.expires
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldExpires returns the old "expires" field's value of the ServiceKey entity.
-// If the ServiceKey object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *ServiceKeyMutation) OldExpires(ctx context.Context) (v int64, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldExpires is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldExpires requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldExpires: %w", err)
-	}
-	return oldValue.Expires, nil
-}
-
-// AddExpires adds i to the "expires" field.
-func (m *ServiceKeyMutation) AddExpires(i int64) {
-	if m.addexpires != nil {
-		*m.addexpires += i
-	} else {
-		m.addexpires = &i
-	}
-}
-
-// AddedExpires returns the value that was added to the "expires" field in this mutation.
-func (m *ServiceKeyMutation) AddedExpires() (r int64, exists bool) {
-	v := m.addexpires
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// ResetExpires resets all changes to the "expires" field.
-func (m *ServiceKeyMutation) ResetExpires() {
-	m.expires = nil
-	m.addexpires = nil
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by id.
-func (m *ServiceKeyMutation) SetOwnerID(id int) {
-	m.owner = &id
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (m *ServiceKeyMutation) ClearOwner() {
-	m.clearedowner = true
-}
-
-// OwnerCleared reports if the "owner" edge to the User entity was cleared.
-func (m *ServiceKeyMutation) OwnerCleared() bool {
-	return m.clearedowner
-}
-
-// OwnerID returns the "owner" edge ID in the mutation.
-func (m *ServiceKeyMutation) OwnerID() (id int, exists bool) {
-	if m.owner != nil {
-		return *m.owner, true
-	}
-	return
-}
-
-// OwnerIDs returns the "owner" edge IDs in the mutation.
-// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
-// OwnerID instead. It exists only for internal usage by the builders.
-func (m *ServiceKeyMutation) OwnerIDs() (ids []int) {
-	if id := m.owner; id != nil {
-		ids = append(ids, *id)
-	}
-	return
-}
-
-// ResetOwner resets all changes to the "owner" edge.
-func (m *ServiceKeyMutation) ResetOwner() {
-	m.owner = nil
-	m.clearedowner = false
-}
-
-// Where appends a list predicates to the ServiceKeyMutation builder.
-func (m *ServiceKeyMutation) Where(ps ...predicate.ServiceKey) {
-	m.predicates = append(m.predicates, ps...)
-}
-
-// Op returns the operation name.
-func (m *ServiceKeyMutation) Op() Op {
-	return m.op
-}
-
-// Type returns the node type of this mutation (ServiceKey).
-func (m *ServiceKeyMutation) Type() string {
-	return m.typ
-}
-
-// Fields returns all fields that were changed during this mutation. Note that in
-// order to get all numeric fields that were incremented/decremented, call
-// AddedFields().
-func (m *ServiceKeyMutation) Fields() []string {
-	fields := make([]string, 0, 8)
-	if m.created_at != nil {
-		fields = append(fields, servicekey.FieldCreatedAt)
-	}
-	if m.name != nil {
-		fields = append(fields, servicekey.FieldName)
-	}
-	if m.value != nil {
-		fields = append(fields, servicekey.FieldValue)
-	}
-	if m.secret != nil {
-		fields = append(fields, servicekey.FieldSecret)
-	}
-	if m.resource != nil {
-		fields = append(fields, servicekey.FieldResource)
-	}
-	if m.num_used != nil {
-		fields = append(fields, servicekey.FieldNumUsed)
-	}
-	if m.max_uses != nil {
-		fields = append(fields, servicekey.FieldMaxUses)
-	}
-	if m.expires != nil {
-		fields = append(fields, servicekey.FieldExpires)
-	}
-	return fields
-}
-
-// Field returns the value of a field with the given name. The second boolean
-// return value indicates that this field was not set, or was not defined in the
-// schema.
-func (m *ServiceKeyMutation) Field(name string) (ent.Value, bool) {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		return m.CreatedAt()
-	case servicekey.FieldName:
-		return m.Name()
-	case servicekey.FieldValue:
-		return m.Value()
-	case servicekey.FieldSecret:
-		return m.Secret()
-	case servicekey.FieldResource:
-		return m.Resource()
-	case servicekey.FieldNumUsed:
-		return m.NumUsed()
-	case servicekey.FieldMaxUses:
-		return m.MaxUses()
-	case servicekey.FieldExpires:
-		return m.Expires()
-	}
-	return nil, false
-}
-
-// OldField returns the old value of the field from the database. An error is
-// returned if the mutation operation is not UpdateOne, or the query to the
-// database failed.
-func (m *ServiceKeyMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		return m.OldCreatedAt(ctx)
-	case servicekey.FieldName:
-		return m.OldName(ctx)
-	case servicekey.FieldValue:
-		return m.OldValue(ctx)
-	case servicekey.FieldSecret:
-		return m.OldSecret(ctx)
-	case servicekey.FieldResource:
-		return m.OldResource(ctx)
-	case servicekey.FieldNumUsed:
-		return m.OldNumUsed(ctx)
-	case servicekey.FieldMaxUses:
-		return m.OldMaxUses(ctx)
-	case servicekey.FieldExpires:
-		return m.OldExpires(ctx)
-	}
-	return nil, fmt.Errorf("unknown ServiceKey field %s", name)
-}
-
-// SetField sets the value of a field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *ServiceKeyMutation) SetField(name string, value ent.Value) error {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetCreatedAt(v)
-		return nil
-	case servicekey.FieldName:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetName(v)
-		return nil
-	case servicekey.FieldValue:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetValue(v)
-		return nil
-	case servicekey.FieldSecret:
-		v, ok := value.([]byte)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetSecret(v)
-		return nil
-	case servicekey.FieldResource:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetResource(v)
-		return nil
-	case servicekey.FieldNumUsed:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetNumUsed(v)
-		return nil
-	case servicekey.FieldMaxUses:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetMaxUses(v)
-		return nil
-	case servicekey.FieldExpires:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetExpires(v)
-		return nil
-	}
-	return fmt.Errorf("unknown ServiceKey field %s", name)
-}
-
-// AddedFields returns all numeric fields that were incremented/decremented during
-// this mutation.
-func (m *ServiceKeyMutation) AddedFields() []string {
-	var fields []string
-	if m.addcreated_at != nil {
-		fields = append(fields, servicekey.FieldCreatedAt)
-	}
-	if m.addnum_used != nil {
-		fields = append(fields, servicekey.FieldNumUsed)
-	}
-	if m.addmax_uses != nil {
-		fields = append(fields, servicekey.FieldMaxUses)
-	}
-	if m.addexpires != nil {
-		fields = append(fields, servicekey.FieldExpires)
-	}
-	return fields
-}
-
-// AddedField returns the numeric value that was incremented/decremented on a field
-// with the given name. The second boolean return value indicates that this field
-// was not set, or was not defined in the schema.
-func (m *ServiceKeyMutation) AddedField(name string) (ent.Value, bool) {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		return m.AddedCreatedAt()
-	case servicekey.FieldNumUsed:
-		return m.AddedNumUsed()
-	case servicekey.FieldMaxUses:
-		return m.AddedMaxUses()
-	case servicekey.FieldExpires:
-		return m.AddedExpires()
-	}
-	return nil, false
-}
-
-// AddField adds the value to the field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *ServiceKeyMutation) AddField(name string, value ent.Value) error {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.AddCreatedAt(v)
-		return nil
-	case servicekey.FieldNumUsed:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.AddNumUsed(v)
-		return nil
-	case servicekey.FieldMaxUses:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.AddMaxUses(v)
-		return nil
-	case servicekey.FieldExpires:
-		v, ok := value.(int64)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.AddExpires(v)
-		return nil
-	}
-	return fmt.Errorf("unknown ServiceKey numeric field %s", name)
-}
-
-// ClearedFields returns all nullable fields that were cleared during this
-// mutation.
-func (m *ServiceKeyMutation) ClearedFields() []string {
-	return nil
-}
-
-// FieldCleared returns a boolean indicating if a field with the given name was
-// cleared in this mutation.
-func (m *ServiceKeyMutation) FieldCleared(name string) bool {
-	_, ok := m.clearedFields[name]
-	return ok
-}
-
-// ClearField clears the value of the field with the given name. It returns an
-// error if the field is not defined in the schema.
-func (m *ServiceKeyMutation) ClearField(name string) error {
-	return fmt.Errorf("unknown ServiceKey nullable field %s", name)
-}
-
-// ResetField resets all changes in the mutation for the field with the given name.
-// It returns an error if the field is not defined in the schema.
-func (m *ServiceKeyMutation) ResetField(name string) error {
-	switch name {
-	case servicekey.FieldCreatedAt:
-		m.ResetCreatedAt()
-		return nil
-	case servicekey.FieldName:
-		m.ResetName()
-		return nil
-	case servicekey.FieldValue:
-		m.ResetValue()
-		return nil
-	case servicekey.FieldSecret:
-		m.ResetSecret()
-		return nil
-	case servicekey.FieldResource:
-		m.ResetResource()
-		return nil
-	case servicekey.FieldNumUsed:
-		m.ResetNumUsed()
-		return nil
-	case servicekey.FieldMaxUses:
-		m.ResetMaxUses()
-		return nil
-	case servicekey.FieldExpires:
-		m.ResetExpires()
-		return nil
-	}
-	return fmt.Errorf("unknown ServiceKey field %s", name)
-}
-
-// AddedEdges returns all edge names that were set/added in this mutation.
-func (m *ServiceKeyMutation) AddedEdges() []string {
-	edges := make([]string, 0, 1)
-	if m.owner != nil {
-		edges = append(edges, servicekey.EdgeOwner)
-	}
-	return edges
-}
-
-// AddedIDs returns all IDs (to other nodes) that were added for the given edge
-// name in this mutation.
-func (m *ServiceKeyMutation) AddedIDs(name string) []ent.Value {
-	switch name {
-	case servicekey.EdgeOwner:
-		if id := m.owner; id != nil {
-			return []ent.Value{*id}
-		}
-	}
-	return nil
-}
-
-// RemovedEdges returns all edge names that were removed in this mutation.
-func (m *ServiceKeyMutation) RemovedEdges() []string {
-	edges := make([]string, 0, 1)
-	return edges
-}
-
-// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
-// the given name in this mutation.
-func (m *ServiceKeyMutation) RemovedIDs(name string) []ent.Value {
-	return nil
-}
-
-// ClearedEdges returns all edge names that were cleared in this mutation.
-func (m *ServiceKeyMutation) ClearedEdges() []string {
-	edges := make([]string, 0, 1)
-	if m.clearedowner {
-		edges = append(edges, servicekey.EdgeOwner)
-	}
-	return edges
-}
-
-// EdgeCleared returns a boolean which indicates if the edge with the given name
-// was cleared in this mutation.
-func (m *ServiceKeyMutation) EdgeCleared(name string) bool {
-	switch name {
-	case servicekey.EdgeOwner:
-		return m.clearedowner
-	}
-	return false
-}
-
-// ClearEdge clears the value of the edge with the given name. It returns an error
-// if that edge is not defined in the schema.
-func (m *ServiceKeyMutation) ClearEdge(name string) error {
-	switch name {
-	case servicekey.EdgeOwner:
-		m.ClearOwner()
-		return nil
-	}
-	return fmt.Errorf("unknown ServiceKey unique edge %s", name)
-}
-
-// ResetEdge resets all changes to the edge with the given name in this mutation.
-// It returns an error if the edge is not defined in the schema.
-func (m *ServiceKeyMutation) ResetEdge(name string) error {
-	switch name {
-	case servicekey.EdgeOwner:
-		m.ResetOwner()
-		return nil
-	}
-	return fmt.Errorf("unknown ServiceKey edge %s", name)
-}
-
-// UserMutation represents an operation that mutates the User nodes in the graph.
-type UserMutation struct {
-	config
-	op                 Op
-	typ                string
-	id                 *int
-	username           *string
-	created_at         *time.Time
-	clearedFields      map[string]struct{}
-	servicekeys        map[int]struct{}
-	removedservicekeys map[int]struct{}
-	clearedservicekeys bool
-	apikeys            map[int]struct{}
-	removedapikeys     map[int]struct{}
-	clearedapikeys     bool
-	done               bool
-	oldValue           func(context.Context) (*User, error)
-	predicates         []predicate.User
-}
-
-var _ ent.Mutation = (*UserMutation)(nil)
-
-// userOption allows management of the mutation configuration using functional options.
-type userOption func(*UserMutation)
-
-// newUserMutation creates new mutation for the User entity.
-func newUserMutation(c config, op Op, opts ...userOption) *UserMutation {
-	m := &UserMutation{
-		config:        c,
-		op:            op,
-		typ:           TypeUser,
-		clearedFields: make(map[string]struct{}),
-	}
-	for _, opt := range opts {
-		opt(m)
-	}
-	return m
-}
-
-// withUserID sets the ID field of the mutation.
-func withUserID(id int) userOption {
-	return func(m *UserMutation) {
-		var (
-			err   error
-			once  sync.Once
-			value *User
-		)
-		m.oldValue = func(ctx context.Context) (*User, error) {
-			once.Do(func() {
-				if m.done {
-					err = errors.New("querying old values post mutation is not allowed")
-				} else {
-					value, err = m.Client().User.Get(ctx, id)
-				}
-			})
-			return value, err
-		}
-		m.id = &id
-	}
-}
-
-// withUser sets the old User of the mutation.
-func withUser(node *User) userOption {
-	return func(m *UserMutation) {
-		m.oldValue = func(context.Context) (*User, error) {
-			return node, nil
-		}
-		m.id = &node.ID
-	}
-}
-
-// Client returns a new `ent.Client` from the mutation. If the mutation was
-// executed in a transaction (ent.Tx), a transactional client is returned.
-func (m UserMutation) Client() *Client {
-	client := &Client{config: m.config}
-	client.init()
-	return client
-}
-
-// Tx returns an `ent.Tx` for mutations that were executed in transactions;
-// it returns an error otherwise.
-func (m UserMutation) Tx() (*Tx, error) {
-	if _, ok := m.driver.(*txDriver); !ok {
-		return nil, errors.New("ent: mutation is not running in a transaction")
-	}
-	tx := &Tx{config: m.config}
-	tx.init()
-	return tx, nil
-}
-
-// ID returns the ID value in the mutation. Note that the ID is only available
-// if it was provided to the builder or after it was returned from the database.
-func (m *UserMutation) ID() (id int, exists bool) {
-	if m.id == nil {
-		return
-	}
-	return *m.id, true
-}
-
-// IDs queries the database and returns the entity ids that match the mutation's predicate.
-// That means, if the mutation is applied within a transaction with an isolation level such
-// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
-// or updated by the mutation.
-func (m *UserMutation) IDs(ctx context.Context) ([]int, error) {
-	switch {
-	case m.op.Is(OpUpdateOne | OpDeleteOne):
-		id, exists := m.ID()
-		if exists {
-			return []int{id}, nil
-		}
-		fallthrough
-	case m.op.Is(OpUpdate | OpDelete):
-		return m.Client().User.Query().Where(m.predicates...).IDs(ctx)
-	default:
-		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
-	}
-}
-
-// SetUsername sets the "username" field.
-func (m *UserMutation) SetUsername(s string) {
-	m.username = &s
-}
-
-// Username returns the value of the "username" field in the mutation.
-func (m *UserMutation) Username() (r string, exists bool) {
-	v := m.username
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldUsername returns the old "username" field's value of the User entity.
-// If the User object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *UserMutation) OldUsername(ctx context.Context) (v string, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldUsername is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldUsername requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldUsername: %w", err)
-	}
-	return oldValue.Username, nil
-}
-
-// ResetUsername resets all changes to the "username" field.
-func (m *UserMutation) ResetUsername() {
-	m.username = nil
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (m *UserMutation) SetCreatedAt(t time.Time) {
-	m.created_at = &t
-}
-
-// CreatedAt returns the value of the "created_at" field in the mutation.
-func (m *UserMutation) CreatedAt() (r time.Time, exists bool) {
-	v := m.created_at
-	if v == nil {
-		return
-	}
-	return *v, true
-}
-
-// OldCreatedAt returns the old "created_at" field's value of the User entity.
-// If the User object wasn't provided to the builder, the object is fetched from the database.
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
-	if !m.op.Is(OpUpdateOne) {
-		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
-	}
-	if m.id == nil || m.oldValue == nil {
-		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
-	}
-	oldValue, err := m.oldValue(ctx)
-	if err != nil {
-		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
-	}
-	return oldValue.CreatedAt, nil
-}
-
-// ResetCreatedAt resets all changes to the "created_at" field.
-func (m *UserMutation) ResetCreatedAt() {
-	m.created_at = nil
-}
-
-// AddServicekeyIDs adds the "servicekeys" edge to the ServiceKey entity by ids.
-func (m *UserMutation) AddServicekeyIDs(ids ...int) {
-	if m.servicekeys == nil {
-		m.servicekeys = make(map[int]struct{})
-	}
-	for i := range ids {
-		m.servicekeys[ids[i]] = struct{}{}
-	}
-}
-
-// ClearServicekeys clears the "servicekeys" edge to the ServiceKey entity.
-func (m *UserMutation) ClearServicekeys() {
-	m.clearedservicekeys = true
-}
-
-// ServicekeysCleared reports if the "servicekeys" edge to the ServiceKey entity was cleared.
-func (m *UserMutation) ServicekeysCleared() bool {
-	return m.clearedservicekeys
-}
-
-// RemoveServicekeyIDs removes the "servicekeys" edge to the ServiceKey entity by IDs.
-func (m *UserMutation) RemoveServicekeyIDs(ids ...int) {
-	if m.removedservicekeys == nil {
-		m.removedservicekeys = make(map[int]struct{})
-	}
-	for i := range ids {
-		delete(m.servicekeys, ids[i])
-		m.removedservicekeys[ids[i]] = struct{}{}
-	}
-}
-
-// RemovedServicekeys returns the removed IDs of the "servicekeys" edge to the ServiceKey entity.
-func (m *UserMutation) RemovedServicekeysIDs() (ids []int) {
-	for id := range m.removedservicekeys {
-		ids = append(ids, id)
-	}
-	return
-}
-
-// ServicekeysIDs returns the "servicekeys" edge IDs in the mutation.
-func (m *UserMutation) ServicekeysIDs() (ids []int) {
-	for id := range m.servicekeys {
-		ids = append(ids, id)
-	}
-	return
-}
-
-// ResetServicekeys resets all changes to the "servicekeys" edge.
-func (m *UserMutation) ResetServicekeys() {
-	m.servicekeys = nil
-	m.clearedservicekeys = false
-	m.removedservicekeys = nil
-}
-
-// AddApikeyIDs adds the "apikeys" edge to the APIKey entity by ids.
-func (m *UserMutation) AddApikeyIDs(ids ...int) {
-	if m.apikeys == nil {
-		m.apikeys = make(map[int]struct{})
-	}
-	for i := range ids {
-		m.apikeys[ids[i]] = struct{}{}
-	}
-}
-
-// ClearApikeys clears the "apikeys" edge to the APIKey entity.
-func (m *UserMutation) ClearApikeys() {
-	m.clearedapikeys = true
-}
-
-// ApikeysCleared reports if the "apikeys" edge to the APIKey entity was cleared.
-func (m *UserMutation) ApikeysCleared() bool {
-	return m.clearedapikeys
-}
-
-// RemoveApikeyIDs removes the "apikeys" edge to the APIKey entity by IDs.
-func (m *UserMutation) RemoveApikeyIDs(ids ...int) {
-	if m.removedapikeys == nil {
-		m.removedapikeys = make(map[int]struct{})
-	}
-	for i := range ids {
-		delete(m.apikeys, ids[i])
-		m.removedapikeys[ids[i]] = struct{}{}
-	}
-}
-
-// RemovedApikeys returns the removed IDs of the "apikeys" edge to the APIKey entity.
-func (m *UserMutation) RemovedApikeysIDs() (ids []int) {
-	for id := range m.removedapikeys {
-		ids = append(ids, id)
-	}
-	return
-}
-
-// ApikeysIDs returns the "apikeys" edge IDs in the mutation.
-func (m *UserMutation) ApikeysIDs() (ids []int) {
-	for id := range m.apikeys {
-		ids = append(ids, id)
-	}
-	return
-}
-
-// ResetApikeys resets all changes to the "apikeys" edge.
-func (m *UserMutation) ResetApikeys() {
-	m.apikeys = nil
-	m.clearedapikeys = false
-	m.removedapikeys = nil
-}
-
-// Where appends a list predicates to the UserMutation builder.
-func (m *UserMutation) Where(ps ...predicate.User) {
-	m.predicates = append(m.predicates, ps...)
-}
-
-// Op returns the operation name.
-func (m *UserMutation) Op() Op {
-	return m.op
-}
-
-// Type returns the node type of this mutation (User).
-func (m *UserMutation) Type() string {
-	return m.typ
-}
-
-// Fields returns all fields that were changed during this mutation. Note that in
-// order to get all numeric fields that were incremented/decremented, call
-// AddedFields().
-func (m *UserMutation) Fields() []string {
-	fields := make([]string, 0, 2)
-	if m.username != nil {
-		fields = append(fields, user.FieldUsername)
-	}
-	if m.created_at != nil {
-		fields = append(fields, user.FieldCreatedAt)
-	}
-	return fields
-}
-
-// Field returns the value of a field with the given name. The second boolean
-// return value indicates that this field was not set, or was not defined in the
-// schema.
-func (m *UserMutation) Field(name string) (ent.Value, bool) {
-	switch name {
-	case user.FieldUsername:
-		return m.Username()
-	case user.FieldCreatedAt:
-		return m.CreatedAt()
-	}
-	return nil, false
-}
-
-// OldField returns the old value of the field from the database. An error is
-// returned if the mutation operation is not UpdateOne, or the query to the
-// database failed.
-func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
-	switch name {
-	case user.FieldUsername:
-		return m.OldUsername(ctx)
-	case user.FieldCreatedAt:
-		return m.OldCreatedAt(ctx)
-	}
-	return nil, fmt.Errorf("unknown User field %s", name)
-}
-
-// SetField sets the value of a field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *UserMutation) SetField(name string, value ent.Value) error {
-	switch name {
-	case user.FieldUsername:
-		v, ok := value.(string)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetUsername(v)
-		return nil
-	case user.FieldCreatedAt:
-		v, ok := value.(time.Time)
-		if !ok {
-			return fmt.Errorf("unexpected type %T for field %s", value, name)
-		}
-		m.SetCreatedAt(v)
-		return nil
-	}
-	return fmt.Errorf("unknown User field %s", name)
-}
-
-// AddedFields returns all numeric fields that were incremented/decremented during
-// this mutation.
-func (m *UserMutation) AddedFields() []string {
-	return nil
-}
-
-// AddedField returns the numeric value that was incremented/decremented on a field
-// with the given name. The second boolean return value indicates that this field
-// was not set, or was not defined in the schema.
-func (m *UserMutation) AddedField(name string) (ent.Value, bool) {
-	return nil, false
-}
-
-// AddField adds the value to the field with the given name. It returns an error if
-// the field is not defined in the schema, or if the type mismatched the field
-// type.
-func (m *UserMutation) AddField(name string, value ent.Value) error {
-	switch name {
-	}
-	return fmt.Errorf("unknown User numeric field %s", name)
-}
-
-// ClearedFields returns all nullable fields that were cleared during this
-// mutation.
-func (m *UserMutation) ClearedFields() []string {
-	return nil
-}
-
-// FieldCleared returns a boolean indicating if a field with the given name was
-// cleared in this mutation.
-func (m *UserMutation) FieldCleared(name string) bool {
-	_, ok := m.clearedFields[name]
-	return ok
-}
-
-// ClearField clears the value of the field with the given name. It returns an
-// error if the field is not defined in the schema.
-func (m *UserMutation) ClearField(name string) error {
-	return fmt.Errorf("unknown User nullable field %s", name)
-}
-
-// ResetField resets all changes in the mutation for the field with the given name.
-// It returns an error if the field is not defined in the schema.
-func (m *UserMutation) ResetField(name string) error {
-	switch name {
-	case user.FieldUsername:
-		m.ResetUsername()
-		return nil
-	case user.FieldCreatedAt:
-		m.ResetCreatedAt()
-		return nil
-	}
-	return fmt.Errorf("unknown User field %s", name)
-}
-
-// AddedEdges returns all edge names that were set/added in this mutation.
-func (m *UserMutation) AddedEdges() []string {
-	edges := make([]string, 0, 2)
-	if m.servicekeys != nil {
-		edges = append(edges, user.EdgeServicekeys)
-	}
-	if m.apikeys != nil {
-		edges = append(edges, user.EdgeApikeys)
-	}
-	return edges
-}
-
-// AddedIDs returns all IDs (to other nodes) that were added for the given edge
-// name in this mutation.
-func (m *UserMutation) AddedIDs(name string) []ent.Value {
-	switch name {
-	case user.EdgeServicekeys:
-		ids := make([]ent.Value, 0, len(m.servicekeys))
-		for id := range m.servicekeys {
-			ids = append(ids, id)
-		}
-		return ids
-	case user.EdgeApikeys:
-		ids := make([]ent.Value, 0, len(m.apikeys))
-		for id := range m.apikeys {
-			ids = append(ids, id)
-		}
-		return ids
-	}
-	return nil
-}
-
-// RemovedEdges returns all edge names that were removed in this mutation.
-func (m *UserMutation) RemovedEdges() []string {
-	edges := make([]string, 0, 2)
-	if m.removedservicekeys != nil {
-		edges = append(edges, user.EdgeServicekeys)
-	}
-	if m.removedapikeys != nil {
-		edges = append(edges, user.EdgeApikeys)
-	}
-	return edges
-}
-
-// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
-// the given name in this mutation.
-func (m *UserMutation) RemovedIDs(name string) []ent.Value {
-	switch name {
-	case user.EdgeServicekeys:
-		ids := make([]ent.Value, 0, len(m.removedservicekeys))
-		for id := range m.removedservicekeys {
-			ids = append(ids, id)
-		}
-		return ids
-	case user.EdgeApikeys:
-		ids := make([]ent.Value, 0, len(m.removedapikeys))
-		for id := range m.removedapikeys {
-			ids = append(ids, id)
-		}
-		return ids
-	}
-	return nil
-}
-
-// ClearedEdges returns all edge names that were cleared in this mutation.
-func (m *UserMutation) ClearedEdges() []string {
-	edges := make([]string, 0, 2)
-	if m.clearedservicekeys {
-		edges = append(edges, user.EdgeServicekeys)
-	}
-	if m.clearedapikeys {
-		edges = append(edges, user.EdgeApikeys)
-	}
-	return edges
-}
-
-// EdgeCleared returns a boolean which indicates if the edge with the given name
-// was cleared in this mutation.
-func (m *UserMutation) EdgeCleared(name string) bool {
-	switch name {
-	case user.EdgeServicekeys:
-		return m.clearedservicekeys
-	case user.EdgeApikeys:
-		return m.clearedapikeys
-	}
-	return false
-}
-
-// ClearEdge clears the value of the edge with the given name. It returns an error
-// if that edge is not defined in the schema.
-func (m *UserMutation) ClearEdge(name string) error {
-	switch name {
-	}
-	return fmt.Errorf("unknown User unique edge %s", name)
-}
-
-// ResetEdge resets all changes to the edge with the given name in this mutation.
-// It returns an error if the edge is not defined in the schema.
-func (m *UserMutation) ResetEdge(name string) error {
-	switch name {
-	case user.EdgeServicekeys:
-		m.ResetServicekeys()
-		return nil
-	case user.EdgeApikeys:
-		m.ResetApikeys()
-		return nil
-	}
-	return fmt.Errorf("unknown User edge %s", name)
-}
diff --git a/pkg/storage/default/ent/predicate/predicate.go b/pkg/storage/default/ent/predicate/predicate.go
deleted file mode 100644
index dea6f2d..0000000
--- a/pkg/storage/default/ent/predicate/predicate.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package predicate
-
-import (
-	"entgo.io/ent/dialect/sql"
-)
-
-// APIKey is the predicate function for apikey builders.
-type APIKey func(*sql.Selector)
-
-// ServiceKey is the predicate function for servicekey builders.
-type ServiceKey func(*sql.Selector)
-
-// User is the predicate function for user builders.
-type User func(*sql.Selector)
diff --git a/pkg/storage/default/ent/runtime.go b/pkg/storage/default/ent/runtime.go
deleted file mode 100644
index b01321c..0000000
--- a/pkg/storage/default/ent/runtime.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"time"
-
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/schema"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// The init function reads all schema descriptors with runtime code
-// (default values, validators, hooks and policies) and stitches it
-// to their package variables.
-func init() {
-	apikeyFields := schema.APIKey{}.Fields()
-	_ = apikeyFields
-	// apikeyDescCreatedAt is the schema descriptor for created_at field.
-	apikeyDescCreatedAt := apikeyFields[0].Descriptor()
-	// apikey.DefaultCreatedAt holds the default value on creation for the created_at field.
-	apikey.DefaultCreatedAt = apikeyDescCreatedAt.Default.(func() int64)
-	// apikeyDescName is the schema descriptor for name field.
-	apikeyDescName := apikeyFields[1].Descriptor()
-	// apikey.NameValidator is a validator for the "name" field. It is called by the builders before save.
-	apikey.NameValidator = apikeyDescName.Validators[0].(func(string) error)
-	// apikeyDescValue is the schema descriptor for value field.
-	apikeyDescValue := apikeyFields[2].Descriptor()
-	// apikey.ValueValidator is a validator for the "value" field. It is called by the builders before save.
-	apikey.ValueValidator = apikeyDescValue.Validators[0].(func(string) error)
-	servicekeyFields := schema.ServiceKey{}.Fields()
-	_ = servicekeyFields
-	// servicekeyDescCreatedAt is the schema descriptor for created_at field.
-	servicekeyDescCreatedAt := servicekeyFields[0].Descriptor()
-	// servicekey.DefaultCreatedAt holds the default value on creation for the created_at field.
-	servicekey.DefaultCreatedAt = servicekeyDescCreatedAt.Default.(func() int64)
-	// servicekeyDescName is the schema descriptor for name field.
-	servicekeyDescName := servicekeyFields[1].Descriptor()
-	// servicekey.NameValidator is a validator for the "name" field. It is called by the builders before save.
-	servicekey.NameValidator = servicekeyDescName.Validators[0].(func(string) error)
-	// servicekeyDescValue is the schema descriptor for value field.
-	servicekeyDescValue := servicekeyFields[2].Descriptor()
-	// servicekey.ValueValidator is a validator for the "value" field. It is called by the builders before save.
-	servicekey.ValueValidator = servicekeyDescValue.Validators[0].(func(string) error)
-	// servicekeyDescSecret is the schema descriptor for secret field.
-	servicekeyDescSecret := servicekeyFields[3].Descriptor()
-	// servicekey.SecretValidator is a validator for the "secret" field. It is called by the builders before save.
-	servicekey.SecretValidator = servicekeyDescSecret.Validators[0].(func([]byte) error)
-	// servicekeyDescResource is the schema descriptor for resource field.
-	servicekeyDescResource := servicekeyFields[4].Descriptor()
-	// servicekey.DefaultResource holds the default value on creation for the resource field.
-	servicekey.DefaultResource = servicekeyDescResource.Default.(string)
-	// servicekeyDescNumUsed is the schema descriptor for num_used field.
-	servicekeyDescNumUsed := servicekeyFields[5].Descriptor()
-	// servicekey.DefaultNumUsed holds the default value on creation for the num_used field.
-	servicekey.DefaultNumUsed = servicekeyDescNumUsed.Default.(int64)
-	// servicekeyDescMaxUses is the schema descriptor for max_uses field.
-	servicekeyDescMaxUses := servicekeyFields[6].Descriptor()
-	// servicekey.DefaultMaxUses holds the default value on creation for the max_uses field.
-	servicekey.DefaultMaxUses = servicekeyDescMaxUses.Default.(int64)
-	// servicekeyDescExpires is the schema descriptor for expires field.
-	servicekeyDescExpires := servicekeyFields[7].Descriptor()
-	// servicekey.DefaultExpires holds the default value on creation for the expires field.
-	servicekey.DefaultExpires = servicekeyDescExpires.Default.(int64)
-	userFields := schema.User{}.Fields()
-	_ = userFields
-	// userDescUsername is the schema descriptor for username field.
-	userDescUsername := userFields[0].Descriptor()
-	// user.DefaultUsername holds the default value on creation for the username field.
-	user.DefaultUsername = userDescUsername.Default.(string)
-	// userDescCreatedAt is the schema descriptor for created_at field.
-	userDescCreatedAt := userFields[1].Descriptor()
-	// user.DefaultCreatedAt holds the default value on creation for the created_at field.
-	user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)
-}
diff --git a/pkg/storage/default/ent/runtime/runtime.go b/pkg/storage/default/ent/runtime/runtime.go
deleted file mode 100644
index 989cc21..0000000
--- a/pkg/storage/default/ent/runtime/runtime.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package runtime
-
-// The schema-stitching logic is generated in github.com/loopholelabs/auth/pkg/storage/default/ent/runtime.go
-
-const (
-	Version = "v0.11.3"                                         // Version of ent codegen.
-	Sum     = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen.
-)
diff --git a/pkg/storage/default/ent/schema/apiKey.go b/pkg/storage/default/ent/schema/apiKey.go
deleted file mode 100644
index 38ab07b..0000000
--- a/pkg/storage/default/ent/schema/apiKey.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package schema
-
-import (
-	"entgo.io/ent"
-	"entgo.io/ent/schema/edge"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/utils"
-)
-
-// APIKey holds the schema definition for the APIKey entity.
-type APIKey struct {
-	ent.Schema
-}
-
-// Fields of the APIKey.
-func (APIKey) Fields() []ent.Field {
-	return []ent.Field{
-		field.Int64("created_at").Immutable().DefaultFunc(utils.TimeInt64Now),
-		field.String("name").NotEmpty().Immutable(),
-		field.String("value").Unique().NotEmpty().Immutable(),
-		field.Bytes("secret").Immutable(),
-	}
-}
-
-// Edges of the APIKey.
-func (APIKey) Edges() []ent.Edge {
-	return []ent.Edge{
-		edge.From("owner", User.Type).Ref("apikeys").Unique(),
-	}
-}
diff --git a/pkg/storage/default/ent/schema/serviceKey.go b/pkg/storage/default/ent/schema/serviceKey.go
deleted file mode 100644
index 58b4bc0..0000000
--- a/pkg/storage/default/ent/schema/serviceKey.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package schema
-
-import (
-	"entgo.io/ent"
-	"entgo.io/ent/schema/edge"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/utils"
-)
-
-// ServiceKey holds the schema definition for the ServiceKey entity.
-type ServiceKey struct {
-	ent.Schema
-}
-
-// Fields of the RefreshToken.
-func (ServiceKey) Fields() []ent.Field {
-	return []ent.Field{
-		field.Int64("created_at").Immutable().DefaultFunc(utils.TimeInt64Now),
-		field.String("name").NotEmpty().Immutable(),
-		field.String("value").Unique().NotEmpty().Immutable(),
-		field.Bytes("secret").NotEmpty().Immutable(),
-		field.String("resource").Default(""),
-		field.Int64("num_used").Default(0),
-		field.Int64("max_uses").Default(0),
-		field.Int64("expires").Default(0),
-	}
-}
-
-// Edges of the RefreshToken.
-func (ServiceKey) Edges() []ent.Edge {
-	return []ent.Edge{
-		edge.From("owner", User.Type).Ref("servicekeys").Unique(),
-	}
-}
diff --git a/pkg/storage/default/ent/schema/user.go b/pkg/storage/default/ent/schema/user.go
deleted file mode 100644
index e379561..0000000
--- a/pkg/storage/default/ent/schema/user.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package schema
-
-import (
-	"entgo.io/ent"
-	"entgo.io/ent/schema/edge"
-	"entgo.io/ent/schema/field"
-	"time"
-)
-
-// User holds the schema definition for the User entity.
-type User struct {
-	ent.Schema
-}
-
-// Fields of the User.
-func (User) Fields() []ent.Field {
-	return []ent.Field{
-		field.String("username").Default("unknown"),
-		field.Time("created_at").Immutable().Default(time.Now),
-	}
-}
-
-// Edges of the User.
-func (User) Edges() []ent.Edge {
-	return []ent.Edge{
-		edge.To("servicekeys", ServiceKey.Type),
-		edge.To("apikeys", APIKey.Type),
-	}
-}
diff --git a/pkg/storage/default/ent/servicekey.go b/pkg/storage/default/ent/servicekey.go
deleted file mode 100644
index 6d5e700..0000000
--- a/pkg/storage/default/ent/servicekey.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"fmt"
-	"strings"
-
-	"entgo.io/ent/dialect/sql"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// ServiceKey is the model entity for the ServiceKey schema.
-type ServiceKey struct {
-	config `json:"-"`
-	// ID of the ent.
-	ID int `json:"id,omitempty"`
-	// CreatedAt holds the value of the "created_at" field.
-	CreatedAt int64 `json:"created_at,omitempty"`
-	// Name holds the value of the "name" field.
-	Name string `json:"name,omitempty"`
-	// Value holds the value of the "value" field.
-	Value string `json:"value,omitempty"`
-	// Secret holds the value of the "secret" field.
-	Secret []byte `json:"secret,omitempty"`
-	// Resource holds the value of the "resource" field.
-	Resource string `json:"resource,omitempty"`
-	// NumUsed holds the value of the "num_used" field.
-	NumUsed int64 `json:"num_used,omitempty"`
-	// MaxUses holds the value of the "max_uses" field.
-	MaxUses int64 `json:"max_uses,omitempty"`
-	// Expires holds the value of the "expires" field.
-	Expires int64 `json:"expires,omitempty"`
-	// Edges holds the relations/edges for other nodes in the graph.
-	// The values are being populated by the ServiceKeyQuery when eager-loading is set.
-	Edges            ServiceKeyEdges `json:"edges"`
-	user_servicekeys *int
-}
-
-// ServiceKeyEdges holds the relations/edges for other nodes in the graph.
-type ServiceKeyEdges struct {
-	// Owner holds the value of the owner edge.
-	Owner *User `json:"owner,omitempty"`
-	// loadedTypes holds the information for reporting if a
-	// type was loaded (or requested) in eager-loading or not.
-	loadedTypes [1]bool
-}
-
-// OwnerOrErr returns the Owner value or an error if the edge
-// was not loaded in eager-loading, or loaded but was not found.
-func (e ServiceKeyEdges) OwnerOrErr() (*User, error) {
-	if e.loadedTypes[0] {
-		if e.Owner == nil {
-			// Edge was loaded but was not found.
-			return nil, &NotFoundError{label: user.Label}
-		}
-		return e.Owner, nil
-	}
-	return nil, &NotLoadedError{edge: "owner"}
-}
-
-// scanValues returns the types for scanning values from sql.Rows.
-func (*ServiceKey) scanValues(columns []string) ([]any, error) {
-	values := make([]any, len(columns))
-	for i := range columns {
-		switch columns[i] {
-		case servicekey.FieldSecret:
-			values[i] = new([]byte)
-		case servicekey.FieldID, servicekey.FieldCreatedAt, servicekey.FieldNumUsed, servicekey.FieldMaxUses, servicekey.FieldExpires:
-			values[i] = new(sql.NullInt64)
-		case servicekey.FieldName, servicekey.FieldValue, servicekey.FieldResource:
-			values[i] = new(sql.NullString)
-		case servicekey.ForeignKeys[0]: // user_servicekeys
-			values[i] = new(sql.NullInt64)
-		default:
-			return nil, fmt.Errorf("unexpected column %q for type ServiceKey", columns[i])
-		}
-	}
-	return values, nil
-}
-
-// assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the ServiceKey fields.
-func (sk *ServiceKey) assignValues(columns []string, values []any) error {
-	if m, n := len(values), len(columns); m < n {
-		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
-	}
-	for i := range columns {
-		switch columns[i] {
-		case servicekey.FieldID:
-			value, ok := values[i].(*sql.NullInt64)
-			if !ok {
-				return fmt.Errorf("unexpected type %T for field id", value)
-			}
-			sk.ID = int(value.Int64)
-		case servicekey.FieldCreatedAt:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field created_at", values[i])
-			} else if value.Valid {
-				sk.CreatedAt = value.Int64
-			}
-		case servicekey.FieldName:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field name", values[i])
-			} else if value.Valid {
-				sk.Name = value.String
-			}
-		case servicekey.FieldValue:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field value", values[i])
-			} else if value.Valid {
-				sk.Value = value.String
-			}
-		case servicekey.FieldSecret:
-			if value, ok := values[i].(*[]byte); !ok {
-				return fmt.Errorf("unexpected type %T for field secret", values[i])
-			} else if value != nil {
-				sk.Secret = *value
-			}
-		case servicekey.FieldResource:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field resource", values[i])
-			} else if value.Valid {
-				sk.Resource = value.String
-			}
-		case servicekey.FieldNumUsed:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field num_used", values[i])
-			} else if value.Valid {
-				sk.NumUsed = value.Int64
-			}
-		case servicekey.FieldMaxUses:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field max_uses", values[i])
-			} else if value.Valid {
-				sk.MaxUses = value.Int64
-			}
-		case servicekey.FieldExpires:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for field expires", values[i])
-			} else if value.Valid {
-				sk.Expires = value.Int64
-			}
-		case servicekey.ForeignKeys[0]:
-			if value, ok := values[i].(*sql.NullInt64); !ok {
-				return fmt.Errorf("unexpected type %T for edge-field user_servicekeys", value)
-			} else if value.Valid {
-				sk.user_servicekeys = new(int)
-				*sk.user_servicekeys = int(value.Int64)
-			}
-		}
-	}
-	return nil
-}
-
-// QueryOwner queries the "owner" edge of the ServiceKey entity.
-func (sk *ServiceKey) QueryOwner() *UserQuery {
-	return (&ServiceKeyClient{config: sk.config}).QueryOwner(sk)
-}
-
-// Update returns a builder for updating this ServiceKey.
-// Note that you need to call ServiceKey.Unwrap() before calling this method if this ServiceKey
-// was returned from a transaction, and the transaction was committed or rolled back.
-func (sk *ServiceKey) Update() *ServiceKeyUpdateOne {
-	return (&ServiceKeyClient{config: sk.config}).UpdateOne(sk)
-}
-
-// Unwrap unwraps the ServiceKey entity that was returned from a transaction after it was closed,
-// so that all future queries will be executed through the driver which created the transaction.
-func (sk *ServiceKey) Unwrap() *ServiceKey {
-	_tx, ok := sk.config.driver.(*txDriver)
-	if !ok {
-		panic("ent: ServiceKey is not a transactional entity")
-	}
-	sk.config.driver = _tx.drv
-	return sk
-}
-
-// String implements the fmt.Stringer.
-func (sk *ServiceKey) String() string {
-	var builder strings.Builder
-	builder.WriteString("ServiceKey(")
-	builder.WriteString(fmt.Sprintf("id=%v, ", sk.ID))
-	builder.WriteString("created_at=")
-	builder.WriteString(fmt.Sprintf("%v", sk.CreatedAt))
-	builder.WriteString(", ")
-	builder.WriteString("name=")
-	builder.WriteString(sk.Name)
-	builder.WriteString(", ")
-	builder.WriteString("value=")
-	builder.WriteString(sk.Value)
-	builder.WriteString(", ")
-	builder.WriteString("secret=")
-	builder.WriteString(fmt.Sprintf("%v", sk.Secret))
-	builder.WriteString(", ")
-	builder.WriteString("resource=")
-	builder.WriteString(sk.Resource)
-	builder.WriteString(", ")
-	builder.WriteString("num_used=")
-	builder.WriteString(fmt.Sprintf("%v", sk.NumUsed))
-	builder.WriteString(", ")
-	builder.WriteString("max_uses=")
-	builder.WriteString(fmt.Sprintf("%v", sk.MaxUses))
-	builder.WriteString(", ")
-	builder.WriteString("expires=")
-	builder.WriteString(fmt.Sprintf("%v", sk.Expires))
-	builder.WriteByte(')')
-	return builder.String()
-}
-
-// ServiceKeys is a parsable slice of ServiceKey.
-type ServiceKeys []*ServiceKey
-
-func (sk ServiceKeys) config(cfg config) {
-	for _i := range sk {
-		sk[_i].config = cfg
-	}
-}
diff --git a/pkg/storage/default/ent/servicekey/servicekey.go b/pkg/storage/default/ent/servicekey/servicekey.go
deleted file mode 100644
index c01d5f1..0000000
--- a/pkg/storage/default/ent/servicekey/servicekey.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package servicekey
-
-const (
-	// Label holds the string label denoting the servicekey type in the database.
-	Label = "service_key"
-	// FieldID holds the string denoting the id field in the database.
-	FieldID = "id"
-	// FieldCreatedAt holds the string denoting the created_at field in the database.
-	FieldCreatedAt = "created_at"
-	// FieldName holds the string denoting the name field in the database.
-	FieldName = "name"
-	// FieldValue holds the string denoting the value field in the database.
-	FieldValue = "value"
-	// FieldSecret holds the string denoting the secret field in the database.
-	FieldSecret = "secret"
-	// FieldResource holds the string denoting the resource field in the database.
-	FieldResource = "resource"
-	// FieldNumUsed holds the string denoting the num_used field in the database.
-	FieldNumUsed = "num_used"
-	// FieldMaxUses holds the string denoting the max_uses field in the database.
-	FieldMaxUses = "max_uses"
-	// FieldExpires holds the string denoting the expires field in the database.
-	FieldExpires = "expires"
-	// EdgeOwner holds the string denoting the owner edge name in mutations.
-	EdgeOwner = "owner"
-	// Table holds the table name of the servicekey in the database.
-	Table = "service_keys"
-	// OwnerTable is the table that holds the owner relation/edge.
-	OwnerTable = "service_keys"
-	// OwnerInverseTable is the table name for the User entity.
-	// It exists in this package in order to avoid circular dependency with the "user" package.
-	OwnerInverseTable = "users"
-	// OwnerColumn is the table column denoting the owner relation/edge.
-	OwnerColumn = "user_servicekeys"
-)
-
-// Columns holds all SQL columns for servicekey fields.
-var Columns = []string{
-	FieldID,
-	FieldCreatedAt,
-	FieldName,
-	FieldValue,
-	FieldSecret,
-	FieldResource,
-	FieldNumUsed,
-	FieldMaxUses,
-	FieldExpires,
-}
-
-// ForeignKeys holds the SQL foreign-keys that are owned by the "service_keys"
-// table and are not defined as standalone fields in the schema.
-var ForeignKeys = []string{
-	"user_servicekeys",
-}
-
-// ValidColumn reports if the column name is valid (part of the table columns).
-func ValidColumn(column string) bool {
-	for i := range Columns {
-		if column == Columns[i] {
-			return true
-		}
-	}
-	for i := range ForeignKeys {
-		if column == ForeignKeys[i] {
-			return true
-		}
-	}
-	return false
-}
-
-var (
-	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
-	DefaultCreatedAt func() int64
-	// NameValidator is a validator for the "name" field. It is called by the builders before save.
-	NameValidator func(string) error
-	// ValueValidator is a validator for the "value" field. It is called by the builders before save.
-	ValueValidator func(string) error
-	// SecretValidator is a validator for the "secret" field. It is called by the builders before save.
-	SecretValidator func([]byte) error
-	// DefaultResource holds the default value on creation for the "resource" field.
-	DefaultResource string
-	// DefaultNumUsed holds the default value on creation for the "num_used" field.
-	DefaultNumUsed int64
-	// DefaultMaxUses holds the default value on creation for the "max_uses" field.
-	DefaultMaxUses int64
-	// DefaultExpires holds the default value on creation for the "expires" field.
-	DefaultExpires int64
-)
diff --git a/pkg/storage/default/ent/servicekey/where.go b/pkg/storage/default/ent/servicekey/where.go
deleted file mode 100644
index 66e9e48..0000000
--- a/pkg/storage/default/ent/servicekey/where.go
+++ /dev/null
@@ -1,813 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package servicekey
-
-import (
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-)
-
-// ID filters vertices based on their ID field.
-func ID(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDEQ applies the EQ predicate on the ID field.
-func IDEQ(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDNEQ applies the NEQ predicate on the ID field.
-func IDNEQ(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldID), id))
-	})
-}
-
-// IDIn applies the In predicate on the ID field.
-func IDIn(ids ...int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.In(s.C(FieldID), v...))
-	})
-}
-
-// IDNotIn applies the NotIn predicate on the ID field.
-func IDNotIn(ids ...int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.NotIn(s.C(FieldID), v...))
-	})
-}
-
-// IDGT applies the GT predicate on the ID field.
-func IDGT(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldID), id))
-	})
-}
-
-// IDGTE applies the GTE predicate on the ID field.
-func IDGTE(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldID), id))
-	})
-}
-
-// IDLT applies the LT predicate on the ID field.
-func IDLT(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldID), id))
-	})
-}
-
-// IDLTE applies the LTE predicate on the ID field.
-func IDLTE(id int) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldID), id))
-	})
-}
-
-// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
-func CreatedAt(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
-func Name(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldName), v))
-	})
-}
-
-// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
-func Value(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldValue), v))
-	})
-}
-
-// Secret applies equality check predicate on the "secret" field. It's identical to SecretEQ.
-func Secret(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldSecret), v))
-	})
-}
-
-// Resource applies equality check predicate on the "resource" field. It's identical to ResourceEQ.
-func Resource(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldResource), v))
-	})
-}
-
-// NumUsed applies equality check predicate on the "num_used" field. It's identical to NumUsedEQ.
-func NumUsed(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldNumUsed), v))
-	})
-}
-
-// MaxUses applies equality check predicate on the "max_uses" field. It's identical to MaxUsesEQ.
-func MaxUses(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldMaxUses), v))
-	})
-}
-
-// Expires applies equality check predicate on the "expires" field. It's identical to ExpiresEQ.
-func Expires(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldExpires), v))
-	})
-}
-
-// CreatedAtEQ applies the EQ predicate on the "created_at" field.
-func CreatedAtEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
-func CreatedAtNEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtIn applies the In predicate on the "created_at" field.
-func CreatedAtIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
-func CreatedAtNotIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtGT applies the GT predicate on the "created_at" field.
-func CreatedAtGT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtGTE applies the GTE predicate on the "created_at" field.
-func CreatedAtGTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLT applies the LT predicate on the "created_at" field.
-func CreatedAtLT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLTE applies the LTE predicate on the "created_at" field.
-func CreatedAtLTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// NameEQ applies the EQ predicate on the "name" field.
-func NameEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldName), v))
-	})
-}
-
-// NameNEQ applies the NEQ predicate on the "name" field.
-func NameNEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldName), v))
-	})
-}
-
-// NameIn applies the In predicate on the "name" field.
-func NameIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldName), v...))
-	})
-}
-
-// NameNotIn applies the NotIn predicate on the "name" field.
-func NameNotIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldName), v...))
-	})
-}
-
-// NameGT applies the GT predicate on the "name" field.
-func NameGT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldName), v))
-	})
-}
-
-// NameGTE applies the GTE predicate on the "name" field.
-func NameGTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldName), v))
-	})
-}
-
-// NameLT applies the LT predicate on the "name" field.
-func NameLT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldName), v))
-	})
-}
-
-// NameLTE applies the LTE predicate on the "name" field.
-func NameLTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldName), v))
-	})
-}
-
-// NameContains applies the Contains predicate on the "name" field.
-func NameContains(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldName), v))
-	})
-}
-
-// NameHasPrefix applies the HasPrefix predicate on the "name" field.
-func NameHasPrefix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldName), v))
-	})
-}
-
-// NameHasSuffix applies the HasSuffix predicate on the "name" field.
-func NameHasSuffix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldName), v))
-	})
-}
-
-// NameEqualFold applies the EqualFold predicate on the "name" field.
-func NameEqualFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldName), v))
-	})
-}
-
-// NameContainsFold applies the ContainsFold predicate on the "name" field.
-func NameContainsFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldName), v))
-	})
-}
-
-// ValueEQ applies the EQ predicate on the "value" field.
-func ValueEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldValue), v))
-	})
-}
-
-// ValueNEQ applies the NEQ predicate on the "value" field.
-func ValueNEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldValue), v))
-	})
-}
-
-// ValueIn applies the In predicate on the "value" field.
-func ValueIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldValue), v...))
-	})
-}
-
-// ValueNotIn applies the NotIn predicate on the "value" field.
-func ValueNotIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldValue), v...))
-	})
-}
-
-// ValueGT applies the GT predicate on the "value" field.
-func ValueGT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldValue), v))
-	})
-}
-
-// ValueGTE applies the GTE predicate on the "value" field.
-func ValueGTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldValue), v))
-	})
-}
-
-// ValueLT applies the LT predicate on the "value" field.
-func ValueLT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldValue), v))
-	})
-}
-
-// ValueLTE applies the LTE predicate on the "value" field.
-func ValueLTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldValue), v))
-	})
-}
-
-// ValueContains applies the Contains predicate on the "value" field.
-func ValueContains(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldValue), v))
-	})
-}
-
-// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
-func ValueHasPrefix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldValue), v))
-	})
-}
-
-// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
-func ValueHasSuffix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldValue), v))
-	})
-}
-
-// ValueEqualFold applies the EqualFold predicate on the "value" field.
-func ValueEqualFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldValue), v))
-	})
-}
-
-// ValueContainsFold applies the ContainsFold predicate on the "value" field.
-func ValueContainsFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldValue), v))
-	})
-}
-
-// SecretEQ applies the EQ predicate on the "secret" field.
-func SecretEQ(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldSecret), v))
-	})
-}
-
-// SecretNEQ applies the NEQ predicate on the "secret" field.
-func SecretNEQ(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldSecret), v))
-	})
-}
-
-// SecretIn applies the In predicate on the "secret" field.
-func SecretIn(vs ...[]byte) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldSecret), v...))
-	})
-}
-
-// SecretNotIn applies the NotIn predicate on the "secret" field.
-func SecretNotIn(vs ...[]byte) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldSecret), v...))
-	})
-}
-
-// SecretGT applies the GT predicate on the "secret" field.
-func SecretGT(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldSecret), v))
-	})
-}
-
-// SecretGTE applies the GTE predicate on the "secret" field.
-func SecretGTE(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldSecret), v))
-	})
-}
-
-// SecretLT applies the LT predicate on the "secret" field.
-func SecretLT(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldSecret), v))
-	})
-}
-
-// SecretLTE applies the LTE predicate on the "secret" field.
-func SecretLTE(v []byte) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldSecret), v))
-	})
-}
-
-// ResourceEQ applies the EQ predicate on the "resource" field.
-func ResourceEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldResource), v))
-	})
-}
-
-// ResourceNEQ applies the NEQ predicate on the "resource" field.
-func ResourceNEQ(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldResource), v))
-	})
-}
-
-// ResourceIn applies the In predicate on the "resource" field.
-func ResourceIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldResource), v...))
-	})
-}
-
-// ResourceNotIn applies the NotIn predicate on the "resource" field.
-func ResourceNotIn(vs ...string) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldResource), v...))
-	})
-}
-
-// ResourceGT applies the GT predicate on the "resource" field.
-func ResourceGT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldResource), v))
-	})
-}
-
-// ResourceGTE applies the GTE predicate on the "resource" field.
-func ResourceGTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldResource), v))
-	})
-}
-
-// ResourceLT applies the LT predicate on the "resource" field.
-func ResourceLT(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldResource), v))
-	})
-}
-
-// ResourceLTE applies the LTE predicate on the "resource" field.
-func ResourceLTE(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldResource), v))
-	})
-}
-
-// ResourceContains applies the Contains predicate on the "resource" field.
-func ResourceContains(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldResource), v))
-	})
-}
-
-// ResourceHasPrefix applies the HasPrefix predicate on the "resource" field.
-func ResourceHasPrefix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldResource), v))
-	})
-}
-
-// ResourceHasSuffix applies the HasSuffix predicate on the "resource" field.
-func ResourceHasSuffix(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldResource), v))
-	})
-}
-
-// ResourceEqualFold applies the EqualFold predicate on the "resource" field.
-func ResourceEqualFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldResource), v))
-	})
-}
-
-// ResourceContainsFold applies the ContainsFold predicate on the "resource" field.
-func ResourceContainsFold(v string) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldResource), v))
-	})
-}
-
-// NumUsedEQ applies the EQ predicate on the "num_used" field.
-func NumUsedEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldNumUsed), v))
-	})
-}
-
-// NumUsedNEQ applies the NEQ predicate on the "num_used" field.
-func NumUsedNEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldNumUsed), v))
-	})
-}
-
-// NumUsedIn applies the In predicate on the "num_used" field.
-func NumUsedIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldNumUsed), v...))
-	})
-}
-
-// NumUsedNotIn applies the NotIn predicate on the "num_used" field.
-func NumUsedNotIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldNumUsed), v...))
-	})
-}
-
-// NumUsedGT applies the GT predicate on the "num_used" field.
-func NumUsedGT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldNumUsed), v))
-	})
-}
-
-// NumUsedGTE applies the GTE predicate on the "num_used" field.
-func NumUsedGTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldNumUsed), v))
-	})
-}
-
-// NumUsedLT applies the LT predicate on the "num_used" field.
-func NumUsedLT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldNumUsed), v))
-	})
-}
-
-// NumUsedLTE applies the LTE predicate on the "num_used" field.
-func NumUsedLTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldNumUsed), v))
-	})
-}
-
-// MaxUsesEQ applies the EQ predicate on the "max_uses" field.
-func MaxUsesEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldMaxUses), v))
-	})
-}
-
-// MaxUsesNEQ applies the NEQ predicate on the "max_uses" field.
-func MaxUsesNEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldMaxUses), v))
-	})
-}
-
-// MaxUsesIn applies the In predicate on the "max_uses" field.
-func MaxUsesIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldMaxUses), v...))
-	})
-}
-
-// MaxUsesNotIn applies the NotIn predicate on the "max_uses" field.
-func MaxUsesNotIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldMaxUses), v...))
-	})
-}
-
-// MaxUsesGT applies the GT predicate on the "max_uses" field.
-func MaxUsesGT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldMaxUses), v))
-	})
-}
-
-// MaxUsesGTE applies the GTE predicate on the "max_uses" field.
-func MaxUsesGTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldMaxUses), v))
-	})
-}
-
-// MaxUsesLT applies the LT predicate on the "max_uses" field.
-func MaxUsesLT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldMaxUses), v))
-	})
-}
-
-// MaxUsesLTE applies the LTE predicate on the "max_uses" field.
-func MaxUsesLTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldMaxUses), v))
-	})
-}
-
-// ExpiresEQ applies the EQ predicate on the "expires" field.
-func ExpiresEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldExpires), v))
-	})
-}
-
-// ExpiresNEQ applies the NEQ predicate on the "expires" field.
-func ExpiresNEQ(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldExpires), v))
-	})
-}
-
-// ExpiresIn applies the In predicate on the "expires" field.
-func ExpiresIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldExpires), v...))
-	})
-}
-
-// ExpiresNotIn applies the NotIn predicate on the "expires" field.
-func ExpiresNotIn(vs ...int64) predicate.ServiceKey {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldExpires), v...))
-	})
-}
-
-// ExpiresGT applies the GT predicate on the "expires" field.
-func ExpiresGT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldExpires), v))
-	})
-}
-
-// ExpiresGTE applies the GTE predicate on the "expires" field.
-func ExpiresGTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldExpires), v))
-	})
-}
-
-// ExpiresLT applies the LT predicate on the "expires" field.
-func ExpiresLT(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldExpires), v))
-	})
-}
-
-// ExpiresLTE applies the LTE predicate on the "expires" field.
-func ExpiresLTE(v int64) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldExpires), v))
-	})
-}
-
-// HasOwner applies the HasEdge predicate on the "owner" edge.
-func HasOwner() predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(OwnerTable, FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
-		)
-		sqlgraph.HasNeighbors(s, step)
-	})
-}
-
-// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
-func HasOwnerWith(preds ...predicate.User) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(OwnerInverseTable, FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
-		)
-		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
-			for _, p := range preds {
-				p(s)
-			}
-		})
-	})
-}
-
-// And groups predicates with the AND operator between them.
-func And(predicates ...predicate.ServiceKey) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for _, p := range predicates {
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Or groups predicates with the OR operator between them.
-func Or(predicates ...predicate.ServiceKey) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for i, p := range predicates {
-			if i > 0 {
-				s1.Or()
-			}
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Not applies the not operator on the given predicate.
-func Not(p predicate.ServiceKey) predicate.ServiceKey {
-	return predicate.ServiceKey(func(s *sql.Selector) {
-		p(s.Not())
-	})
-}
diff --git a/pkg/storage/default/ent/servicekey_create.go b/pkg/storage/default/ent/servicekey_create.go
deleted file mode 100644
index 076c90c..0000000
--- a/pkg/storage/default/ent/servicekey_create.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// ServiceKeyCreate is the builder for creating a ServiceKey entity.
-type ServiceKeyCreate struct {
-	config
-	mutation *ServiceKeyMutation
-	hooks    []Hook
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (skc *ServiceKeyCreate) SetCreatedAt(i int64) *ServiceKeyCreate {
-	skc.mutation.SetCreatedAt(i)
-	return skc
-}
-
-// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableCreatedAt(i *int64) *ServiceKeyCreate {
-	if i != nil {
-		skc.SetCreatedAt(*i)
-	}
-	return skc
-}
-
-// SetName sets the "name" field.
-func (skc *ServiceKeyCreate) SetName(s string) *ServiceKeyCreate {
-	skc.mutation.SetName(s)
-	return skc
-}
-
-// SetValue sets the "value" field.
-func (skc *ServiceKeyCreate) SetValue(s string) *ServiceKeyCreate {
-	skc.mutation.SetValue(s)
-	return skc
-}
-
-// SetSecret sets the "secret" field.
-func (skc *ServiceKeyCreate) SetSecret(b []byte) *ServiceKeyCreate {
-	skc.mutation.SetSecret(b)
-	return skc
-}
-
-// SetResource sets the "resource" field.
-func (skc *ServiceKeyCreate) SetResource(s string) *ServiceKeyCreate {
-	skc.mutation.SetResource(s)
-	return skc
-}
-
-// SetNillableResource sets the "resource" field if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableResource(s *string) *ServiceKeyCreate {
-	if s != nil {
-		skc.SetResource(*s)
-	}
-	return skc
-}
-
-// SetNumUsed sets the "num_used" field.
-func (skc *ServiceKeyCreate) SetNumUsed(i int64) *ServiceKeyCreate {
-	skc.mutation.SetNumUsed(i)
-	return skc
-}
-
-// SetNillableNumUsed sets the "num_used" field if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableNumUsed(i *int64) *ServiceKeyCreate {
-	if i != nil {
-		skc.SetNumUsed(*i)
-	}
-	return skc
-}
-
-// SetMaxUses sets the "max_uses" field.
-func (skc *ServiceKeyCreate) SetMaxUses(i int64) *ServiceKeyCreate {
-	skc.mutation.SetMaxUses(i)
-	return skc
-}
-
-// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableMaxUses(i *int64) *ServiceKeyCreate {
-	if i != nil {
-		skc.SetMaxUses(*i)
-	}
-	return skc
-}
-
-// SetExpires sets the "expires" field.
-func (skc *ServiceKeyCreate) SetExpires(i int64) *ServiceKeyCreate {
-	skc.mutation.SetExpires(i)
-	return skc
-}
-
-// SetNillableExpires sets the "expires" field if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableExpires(i *int64) *ServiceKeyCreate {
-	if i != nil {
-		skc.SetExpires(*i)
-	}
-	return skc
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (skc *ServiceKeyCreate) SetOwnerID(id int) *ServiceKeyCreate {
-	skc.mutation.SetOwnerID(id)
-	return skc
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (skc *ServiceKeyCreate) SetNillableOwnerID(id *int) *ServiceKeyCreate {
-	if id != nil {
-		skc = skc.SetOwnerID(*id)
-	}
-	return skc
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (skc *ServiceKeyCreate) SetOwner(u *User) *ServiceKeyCreate {
-	return skc.SetOwnerID(u.ID)
-}
-
-// Mutation returns the ServiceKeyMutation object of the builder.
-func (skc *ServiceKeyCreate) Mutation() *ServiceKeyMutation {
-	return skc.mutation
-}
-
-// Save creates the ServiceKey in the database.
-func (skc *ServiceKeyCreate) Save(ctx context.Context) (*ServiceKey, error) {
-	var (
-		err  error
-		node *ServiceKey
-	)
-	skc.defaults()
-	if len(skc.hooks) == 0 {
-		if err = skc.check(); err != nil {
-			return nil, err
-		}
-		node, err = skc.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*ServiceKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			if err = skc.check(); err != nil {
-				return nil, err
-			}
-			skc.mutation = mutation
-			if node, err = skc.sqlSave(ctx); err != nil {
-				return nil, err
-			}
-			mutation.id = &node.ID
-			mutation.done = true
-			return node, err
-		})
-		for i := len(skc.hooks) - 1; i >= 0; i-- {
-			if skc.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = skc.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, skc.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*ServiceKey)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from ServiceKeyMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX calls Save and panics if Save returns an error.
-func (skc *ServiceKeyCreate) SaveX(ctx context.Context) *ServiceKey {
-	v, err := skc.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (skc *ServiceKeyCreate) Exec(ctx context.Context) error {
-	_, err := skc.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (skc *ServiceKeyCreate) ExecX(ctx context.Context) {
-	if err := skc.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-// defaults sets the default values of the builder before save.
-func (skc *ServiceKeyCreate) defaults() {
-	if _, ok := skc.mutation.CreatedAt(); !ok {
-		v := servicekey.DefaultCreatedAt()
-		skc.mutation.SetCreatedAt(v)
-	}
-	if _, ok := skc.mutation.Resource(); !ok {
-		v := servicekey.DefaultResource
-		skc.mutation.SetResource(v)
-	}
-	if _, ok := skc.mutation.NumUsed(); !ok {
-		v := servicekey.DefaultNumUsed
-		skc.mutation.SetNumUsed(v)
-	}
-	if _, ok := skc.mutation.MaxUses(); !ok {
-		v := servicekey.DefaultMaxUses
-		skc.mutation.SetMaxUses(v)
-	}
-	if _, ok := skc.mutation.Expires(); !ok {
-		v := servicekey.DefaultExpires
-		skc.mutation.SetExpires(v)
-	}
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (skc *ServiceKeyCreate) check() error {
-	if _, ok := skc.mutation.CreatedAt(); !ok {
-		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ServiceKey.created_at"`)}
-	}
-	if _, ok := skc.mutation.Name(); !ok {
-		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ServiceKey.name"`)}
-	}
-	if v, ok := skc.mutation.Name(); ok {
-		if err := servicekey.NameValidator(v); err != nil {
-			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ServiceKey.name": %w`, err)}
-		}
-	}
-	if _, ok := skc.mutation.Value(); !ok {
-		return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "ServiceKey.value"`)}
-	}
-	if v, ok := skc.mutation.Value(); ok {
-		if err := servicekey.ValueValidator(v); err != nil {
-			return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "ServiceKey.value": %w`, err)}
-		}
-	}
-	if _, ok := skc.mutation.Secret(); !ok {
-		return &ValidationError{Name: "secret", err: errors.New(`ent: missing required field "ServiceKey.secret"`)}
-	}
-	if v, ok := skc.mutation.Secret(); ok {
-		if err := servicekey.SecretValidator(v); err != nil {
-			return &ValidationError{Name: "secret", err: fmt.Errorf(`ent: validator failed for field "ServiceKey.secret": %w`, err)}
-		}
-	}
-	if _, ok := skc.mutation.Resource(); !ok {
-		return &ValidationError{Name: "resource", err: errors.New(`ent: missing required field "ServiceKey.resource"`)}
-	}
-	if _, ok := skc.mutation.NumUsed(); !ok {
-		return &ValidationError{Name: "num_used", err: errors.New(`ent: missing required field "ServiceKey.num_used"`)}
-	}
-	if _, ok := skc.mutation.MaxUses(); !ok {
-		return &ValidationError{Name: "max_uses", err: errors.New(`ent: missing required field "ServiceKey.max_uses"`)}
-	}
-	if _, ok := skc.mutation.Expires(); !ok {
-		return &ValidationError{Name: "expires", err: errors.New(`ent: missing required field "ServiceKey.expires"`)}
-	}
-	return nil
-}
-
-func (skc *ServiceKeyCreate) sqlSave(ctx context.Context) (*ServiceKey, error) {
-	_node, _spec := skc.createSpec()
-	if err := sqlgraph.CreateNode(ctx, skc.driver, _spec); err != nil {
-		if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	id := _spec.ID.Value.(int64)
-	_node.ID = int(id)
-	return _node, nil
-}
-
-func (skc *ServiceKeyCreate) createSpec() (*ServiceKey, *sqlgraph.CreateSpec) {
-	var (
-		_node = &ServiceKey{config: skc.config}
-		_spec = &sqlgraph.CreateSpec{
-			Table: servicekey.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: servicekey.FieldID,
-			},
-		}
-	)
-	if value, ok := skc.mutation.CreatedAt(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldCreatedAt,
-		})
-		_node.CreatedAt = value
-	}
-	if value, ok := skc.mutation.Name(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: servicekey.FieldName,
-		})
-		_node.Name = value
-	}
-	if value, ok := skc.mutation.Value(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: servicekey.FieldValue,
-		})
-		_node.Value = value
-	}
-	if value, ok := skc.mutation.Secret(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeBytes,
-			Value:  value,
-			Column: servicekey.FieldSecret,
-		})
-		_node.Secret = value
-	}
-	if value, ok := skc.mutation.Resource(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: servicekey.FieldResource,
-		})
-		_node.Resource = value
-	}
-	if value, ok := skc.mutation.NumUsed(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldNumUsed,
-		})
-		_node.NumUsed = value
-	}
-	if value, ok := skc.mutation.MaxUses(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldMaxUses,
-		})
-		_node.MaxUses = value
-	}
-	if value, ok := skc.mutation.Expires(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldExpires,
-		})
-		_node.Expires = value
-	}
-	if nodes := skc.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   servicekey.OwnerTable,
-			Columns: []string{servicekey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_node.user_servicekeys = &nodes[0]
-		_spec.Edges = append(_spec.Edges, edge)
-	}
-	return _node, _spec
-}
-
-// ServiceKeyCreateBulk is the builder for creating many ServiceKey entities in bulk.
-type ServiceKeyCreateBulk struct {
-	config
-	builders []*ServiceKeyCreate
-}
-
-// Save creates the ServiceKey entities in the database.
-func (skcb *ServiceKeyCreateBulk) Save(ctx context.Context) ([]*ServiceKey, error) {
-	specs := make([]*sqlgraph.CreateSpec, len(skcb.builders))
-	nodes := make([]*ServiceKey, len(skcb.builders))
-	mutators := make([]Mutator, len(skcb.builders))
-	for i := range skcb.builders {
-		func(i int, root context.Context) {
-			builder := skcb.builders[i]
-			builder.defaults()
-			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-				mutation, ok := m.(*ServiceKeyMutation)
-				if !ok {
-					return nil, fmt.Errorf("unexpected mutation type %T", m)
-				}
-				if err := builder.check(); err != nil {
-					return nil, err
-				}
-				builder.mutation = mutation
-				nodes[i], specs[i] = builder.createSpec()
-				var err error
-				if i < len(mutators)-1 {
-					_, err = mutators[i+1].Mutate(root, skcb.builders[i+1].mutation)
-				} else {
-					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
-					// Invoke the actual operation on the latest mutation in the chain.
-					if err = sqlgraph.BatchCreate(ctx, skcb.driver, spec); err != nil {
-						if sqlgraph.IsConstraintError(err) {
-							err = &ConstraintError{msg: err.Error(), wrap: err}
-						}
-					}
-				}
-				if err != nil {
-					return nil, err
-				}
-				mutation.id = &nodes[i].ID
-				if specs[i].ID.Value != nil {
-					id := specs[i].ID.Value.(int64)
-					nodes[i].ID = int(id)
-				}
-				mutation.done = true
-				return nodes[i], nil
-			})
-			for i := len(builder.hooks) - 1; i >= 0; i-- {
-				mut = builder.hooks[i](mut)
-			}
-			mutators[i] = mut
-		}(i, ctx)
-	}
-	if len(mutators) > 0 {
-		if _, err := mutators[0].Mutate(ctx, skcb.builders[0].mutation); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (skcb *ServiceKeyCreateBulk) SaveX(ctx context.Context) []*ServiceKey {
-	v, err := skcb.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (skcb *ServiceKeyCreateBulk) Exec(ctx context.Context) error {
-	_, err := skcb.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (skcb *ServiceKeyCreateBulk) ExecX(ctx context.Context) {
-	if err := skcb.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
diff --git a/pkg/storage/default/ent/servicekey_delete.go b/pkg/storage/default/ent/servicekey_delete.go
deleted file mode 100644
index 6a506a7..0000000
--- a/pkg/storage/default/ent/servicekey_delete.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-)
-
-// ServiceKeyDelete is the builder for deleting a ServiceKey entity.
-type ServiceKeyDelete struct {
-	config
-	hooks    []Hook
-	mutation *ServiceKeyMutation
-}
-
-// Where appends a list predicates to the ServiceKeyDelete builder.
-func (skd *ServiceKeyDelete) Where(ps ...predicate.ServiceKey) *ServiceKeyDelete {
-	skd.mutation.Where(ps...)
-	return skd
-}
-
-// Exec executes the deletion query and returns how many vertices were deleted.
-func (skd *ServiceKeyDelete) Exec(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(skd.hooks) == 0 {
-		affected, err = skd.sqlExec(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*ServiceKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			skd.mutation = mutation
-			affected, err = skd.sqlExec(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(skd.hooks) - 1; i >= 0; i-- {
-			if skd.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = skd.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, skd.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (skd *ServiceKeyDelete) ExecX(ctx context.Context) int {
-	n, err := skd.Exec(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return n
-}
-
-func (skd *ServiceKeyDelete) sqlExec(ctx context.Context) (int, error) {
-	_spec := &sqlgraph.DeleteSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table: servicekey.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: servicekey.FieldID,
-			},
-		},
-	}
-	if ps := skd.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	affected, err := sqlgraph.DeleteNodes(ctx, skd.driver, _spec)
-	if err != nil && sqlgraph.IsConstraintError(err) {
-		err = &ConstraintError{msg: err.Error(), wrap: err}
-	}
-	return affected, err
-}
-
-// ServiceKeyDeleteOne is the builder for deleting a single ServiceKey entity.
-type ServiceKeyDeleteOne struct {
-	skd *ServiceKeyDelete
-}
-
-// Exec executes the deletion query.
-func (skdo *ServiceKeyDeleteOne) Exec(ctx context.Context) error {
-	n, err := skdo.skd.Exec(ctx)
-	switch {
-	case err != nil:
-		return err
-	case n == 0:
-		return &NotFoundError{servicekey.Label}
-	default:
-		return nil
-	}
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (skdo *ServiceKeyDeleteOne) ExecX(ctx context.Context) {
-	skdo.skd.ExecX(ctx)
-}
diff --git a/pkg/storage/default/ent/servicekey_query.go b/pkg/storage/default/ent/servicekey_query.go
deleted file mode 100644
index e137211..0000000
--- a/pkg/storage/default/ent/servicekey_query.go
+++ /dev/null
@@ -1,613 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"fmt"
-	"math"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// ServiceKeyQuery is the builder for querying ServiceKey entities.
-type ServiceKeyQuery struct {
-	config
-	limit      *int
-	offset     *int
-	unique     *bool
-	order      []OrderFunc
-	fields     []string
-	predicates []predicate.ServiceKey
-	withOwner  *UserQuery
-	withFKs    bool
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Where adds a new predicate for the ServiceKeyQuery builder.
-func (skq *ServiceKeyQuery) Where(ps ...predicate.ServiceKey) *ServiceKeyQuery {
-	skq.predicates = append(skq.predicates, ps...)
-	return skq
-}
-
-// Limit adds a limit step to the query.
-func (skq *ServiceKeyQuery) Limit(limit int) *ServiceKeyQuery {
-	skq.limit = &limit
-	return skq
-}
-
-// Offset adds an offset step to the query.
-func (skq *ServiceKeyQuery) Offset(offset int) *ServiceKeyQuery {
-	skq.offset = &offset
-	return skq
-}
-
-// Unique configures the query builder to filter duplicate records on query.
-// By default, unique is set to true, and can be disabled using this method.
-func (skq *ServiceKeyQuery) Unique(unique bool) *ServiceKeyQuery {
-	skq.unique = &unique
-	return skq
-}
-
-// Order adds an order step to the query.
-func (skq *ServiceKeyQuery) Order(o ...OrderFunc) *ServiceKeyQuery {
-	skq.order = append(skq.order, o...)
-	return skq
-}
-
-// QueryOwner chains the current query on the "owner" edge.
-func (skq *ServiceKeyQuery) QueryOwner() *UserQuery {
-	query := &UserQuery{config: skq.config}
-	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
-		if err := skq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		selector := skq.sqlQuery(ctx)
-		if err := selector.Err(); err != nil {
-			return nil, err
-		}
-		step := sqlgraph.NewStep(
-			sqlgraph.From(servicekey.Table, servicekey.FieldID, selector),
-			sqlgraph.To(user.Table, user.FieldID),
-			sqlgraph.Edge(sqlgraph.M2O, true, servicekey.OwnerTable, servicekey.OwnerColumn),
-		)
-		fromU = sqlgraph.SetNeighbors(skq.driver.Dialect(), step)
-		return fromU, nil
-	}
-	return query
-}
-
-// First returns the first ServiceKey entity from the query.
-// Returns a *NotFoundError when no ServiceKey was found.
-func (skq *ServiceKeyQuery) First(ctx context.Context) (*ServiceKey, error) {
-	nodes, err := skq.Limit(1).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nil, &NotFoundError{servicekey.Label}
-	}
-	return nodes[0], nil
-}
-
-// FirstX is like First, but panics if an error occurs.
-func (skq *ServiceKeyQuery) FirstX(ctx context.Context) *ServiceKey {
-	node, err := skq.First(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return node
-}
-
-// FirstID returns the first ServiceKey ID from the query.
-// Returns a *NotFoundError when no ServiceKey ID was found.
-func (skq *ServiceKeyQuery) FirstID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = skq.Limit(1).IDs(ctx); err != nil {
-		return
-	}
-	if len(ids) == 0 {
-		err = &NotFoundError{servicekey.Label}
-		return
-	}
-	return ids[0], nil
-}
-
-// FirstIDX is like FirstID, but panics if an error occurs.
-func (skq *ServiceKeyQuery) FirstIDX(ctx context.Context) int {
-	id, err := skq.FirstID(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return id
-}
-
-// Only returns a single ServiceKey entity found by the query, ensuring it only returns one.
-// Returns a *NotSingularError when more than one ServiceKey entity is found.
-// Returns a *NotFoundError when no ServiceKey entities are found.
-func (skq *ServiceKeyQuery) Only(ctx context.Context) (*ServiceKey, error) {
-	nodes, err := skq.Limit(2).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	switch len(nodes) {
-	case 1:
-		return nodes[0], nil
-	case 0:
-		return nil, &NotFoundError{servicekey.Label}
-	default:
-		return nil, &NotSingularError{servicekey.Label}
-	}
-}
-
-// OnlyX is like Only, but panics if an error occurs.
-func (skq *ServiceKeyQuery) OnlyX(ctx context.Context) *ServiceKey {
-	node, err := skq.Only(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// OnlyID is like Only, but returns the only ServiceKey ID in the query.
-// Returns a *NotSingularError when more than one ServiceKey ID is found.
-// Returns a *NotFoundError when no entities are found.
-func (skq *ServiceKeyQuery) OnlyID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = skq.Limit(2).IDs(ctx); err != nil {
-		return
-	}
-	switch len(ids) {
-	case 1:
-		id = ids[0]
-	case 0:
-		err = &NotFoundError{servicekey.Label}
-	default:
-		err = &NotSingularError{servicekey.Label}
-	}
-	return
-}
-
-// OnlyIDX is like OnlyID, but panics if an error occurs.
-func (skq *ServiceKeyQuery) OnlyIDX(ctx context.Context) int {
-	id, err := skq.OnlyID(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return id
-}
-
-// All executes the query and returns a list of ServiceKeys.
-func (skq *ServiceKeyQuery) All(ctx context.Context) ([]*ServiceKey, error) {
-	if err := skq.prepareQuery(ctx); err != nil {
-		return nil, err
-	}
-	return skq.sqlAll(ctx)
-}
-
-// AllX is like All, but panics if an error occurs.
-func (skq *ServiceKeyQuery) AllX(ctx context.Context) []*ServiceKey {
-	nodes, err := skq.All(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return nodes
-}
-
-// IDs executes the query and returns a list of ServiceKey IDs.
-func (skq *ServiceKeyQuery) IDs(ctx context.Context) ([]int, error) {
-	var ids []int
-	if err := skq.Select(servicekey.FieldID).Scan(ctx, &ids); err != nil {
-		return nil, err
-	}
-	return ids, nil
-}
-
-// IDsX is like IDs, but panics if an error occurs.
-func (skq *ServiceKeyQuery) IDsX(ctx context.Context) []int {
-	ids, err := skq.IDs(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return ids
-}
-
-// Count returns the count of the given query.
-func (skq *ServiceKeyQuery) Count(ctx context.Context) (int, error) {
-	if err := skq.prepareQuery(ctx); err != nil {
-		return 0, err
-	}
-	return skq.sqlCount(ctx)
-}
-
-// CountX is like Count, but panics if an error occurs.
-func (skq *ServiceKeyQuery) CountX(ctx context.Context) int {
-	count, err := skq.Count(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return count
-}
-
-// Exist returns true if the query has elements in the graph.
-func (skq *ServiceKeyQuery) Exist(ctx context.Context) (bool, error) {
-	if err := skq.prepareQuery(ctx); err != nil {
-		return false, err
-	}
-	return skq.sqlExist(ctx)
-}
-
-// ExistX is like Exist, but panics if an error occurs.
-func (skq *ServiceKeyQuery) ExistX(ctx context.Context) bool {
-	exist, err := skq.Exist(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return exist
-}
-
-// Clone returns a duplicate of the ServiceKeyQuery builder, including all associated steps. It can be
-// used to prepare common query builders and use them differently after the clone is made.
-func (skq *ServiceKeyQuery) Clone() *ServiceKeyQuery {
-	if skq == nil {
-		return nil
-	}
-	return &ServiceKeyQuery{
-		config:     skq.config,
-		limit:      skq.limit,
-		offset:     skq.offset,
-		order:      append([]OrderFunc{}, skq.order...),
-		predicates: append([]predicate.ServiceKey{}, skq.predicates...),
-		withOwner:  skq.withOwner.Clone(),
-		// clone intermediate query.
-		sql:    skq.sql.Clone(),
-		path:   skq.path,
-		unique: skq.unique,
-	}
-}
-
-// WithOwner tells the query-builder to eager-load the nodes that are connected to
-// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
-func (skq *ServiceKeyQuery) WithOwner(opts ...func(*UserQuery)) *ServiceKeyQuery {
-	query := &UserQuery{config: skq.config}
-	for _, opt := range opts {
-		opt(query)
-	}
-	skq.withOwner = query
-	return skq
-}
-
-// GroupBy is used to group vertices by one or more fields/columns.
-// It is often used with aggregate functions, like: count, max, mean, min, sum.
-//
-// Example:
-//
-//	var v []struct {
-//		CreatedAt int64 `json:"created_at,omitempty"`
-//		Count int `json:"count,omitempty"`
-//	}
-//
-//	client.ServiceKey.Query().
-//		GroupBy(servicekey.FieldCreatedAt).
-//		Aggregate(ent.Count()).
-//		Scan(ctx, &v)
-func (skq *ServiceKeyQuery) GroupBy(field string, fields ...string) *ServiceKeyGroupBy {
-	grbuild := &ServiceKeyGroupBy{config: skq.config}
-	grbuild.fields = append([]string{field}, fields...)
-	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
-		if err := skq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		return skq.sqlQuery(ctx), nil
-	}
-	grbuild.label = servicekey.Label
-	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
-	return grbuild
-}
-
-// Select allows the selection one or more fields/columns for the given query,
-// instead of selecting all fields in the entity.
-//
-// Example:
-//
-//	var v []struct {
-//		CreatedAt int64 `json:"created_at,omitempty"`
-//	}
-//
-//	client.ServiceKey.Query().
-//		Select(servicekey.FieldCreatedAt).
-//		Scan(ctx, &v)
-func (skq *ServiceKeyQuery) Select(fields ...string) *ServiceKeySelect {
-	skq.fields = append(skq.fields, fields...)
-	selbuild := &ServiceKeySelect{ServiceKeyQuery: skq}
-	selbuild.label = servicekey.Label
-	selbuild.flds, selbuild.scan = &skq.fields, selbuild.Scan
-	return selbuild
-}
-
-func (skq *ServiceKeyQuery) prepareQuery(ctx context.Context) error {
-	for _, f := range skq.fields {
-		if !servicekey.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-		}
-	}
-	if skq.path != nil {
-		prev, err := skq.path(ctx)
-		if err != nil {
-			return err
-		}
-		skq.sql = prev
-	}
-	return nil
-}
-
-func (skq *ServiceKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ServiceKey, error) {
-	var (
-		nodes       = []*ServiceKey{}
-		withFKs     = skq.withFKs
-		_spec       = skq.querySpec()
-		loadedTypes = [1]bool{
-			skq.withOwner != nil,
-		}
-	)
-	if skq.withOwner != nil {
-		withFKs = true
-	}
-	if withFKs {
-		_spec.Node.Columns = append(_spec.Node.Columns, servicekey.ForeignKeys...)
-	}
-	_spec.ScanValues = func(columns []string) ([]any, error) {
-		return (*ServiceKey).scanValues(nil, columns)
-	}
-	_spec.Assign = func(columns []string, values []any) error {
-		node := &ServiceKey{config: skq.config}
-		nodes = append(nodes, node)
-		node.Edges.loadedTypes = loadedTypes
-		return node.assignValues(columns, values)
-	}
-	for i := range hooks {
-		hooks[i](ctx, _spec)
-	}
-	if err := sqlgraph.QueryNodes(ctx, skq.driver, _spec); err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nodes, nil
-	}
-	if query := skq.withOwner; query != nil {
-		if err := skq.loadOwner(ctx, query, nodes, nil,
-			func(n *ServiceKey, e *User) { n.Edges.Owner = e }); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-func (skq *ServiceKeyQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*ServiceKey, init func(*ServiceKey), assign func(*ServiceKey, *User)) error {
-	ids := make([]int, 0, len(nodes))
-	nodeids := make(map[int][]*ServiceKey)
-	for i := range nodes {
-		if nodes[i].user_servicekeys == nil {
-			continue
-		}
-		fk := *nodes[i].user_servicekeys
-		if _, ok := nodeids[fk]; !ok {
-			ids = append(ids, fk)
-		}
-		nodeids[fk] = append(nodeids[fk], nodes[i])
-	}
-	query.Where(user.IDIn(ids...))
-	neighbors, err := query.All(ctx)
-	if err != nil {
-		return err
-	}
-	for _, n := range neighbors {
-		nodes, ok := nodeids[n.ID]
-		if !ok {
-			return fmt.Errorf(`unexpected foreign-key "user_servicekeys" returned %v`, n.ID)
-		}
-		for i := range nodes {
-			assign(nodes[i], n)
-		}
-	}
-	return nil
-}
-
-func (skq *ServiceKeyQuery) sqlCount(ctx context.Context) (int, error) {
-	_spec := skq.querySpec()
-	_spec.Node.Columns = skq.fields
-	if len(skq.fields) > 0 {
-		_spec.Unique = skq.unique != nil && *skq.unique
-	}
-	return sqlgraph.CountNodes(ctx, skq.driver, _spec)
-}
-
-func (skq *ServiceKeyQuery) sqlExist(ctx context.Context) (bool, error) {
-	switch _, err := skq.FirstID(ctx); {
-	case IsNotFound(err):
-		return false, nil
-	case err != nil:
-		return false, fmt.Errorf("ent: check existence: %w", err)
-	default:
-		return true, nil
-	}
-}
-
-func (skq *ServiceKeyQuery) querySpec() *sqlgraph.QuerySpec {
-	_spec := &sqlgraph.QuerySpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   servicekey.Table,
-			Columns: servicekey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: servicekey.FieldID,
-			},
-		},
-		From:   skq.sql,
-		Unique: true,
-	}
-	if unique := skq.unique; unique != nil {
-		_spec.Unique = *unique
-	}
-	if fields := skq.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, servicekey.FieldID)
-		for i := range fields {
-			if fields[i] != servicekey.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
-			}
-		}
-	}
-	if ps := skq.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if limit := skq.limit; limit != nil {
-		_spec.Limit = *limit
-	}
-	if offset := skq.offset; offset != nil {
-		_spec.Offset = *offset
-	}
-	if ps := skq.order; len(ps) > 0 {
-		_spec.Order = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	return _spec
-}
-
-func (skq *ServiceKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
-	builder := sql.Dialect(skq.driver.Dialect())
-	t1 := builder.Table(servicekey.Table)
-	columns := skq.fields
-	if len(columns) == 0 {
-		columns = servicekey.Columns
-	}
-	selector := builder.Select(t1.Columns(columns...)...).From(t1)
-	if skq.sql != nil {
-		selector = skq.sql
-		selector.Select(selector.Columns(columns...)...)
-	}
-	if skq.unique != nil && *skq.unique {
-		selector.Distinct()
-	}
-	for _, p := range skq.predicates {
-		p(selector)
-	}
-	for _, p := range skq.order {
-		p(selector)
-	}
-	if offset := skq.offset; offset != nil {
-		// limit is mandatory for offset clause. We start
-		// with default value, and override it below if needed.
-		selector.Offset(*offset).Limit(math.MaxInt32)
-	}
-	if limit := skq.limit; limit != nil {
-		selector.Limit(*limit)
-	}
-	return selector
-}
-
-// ServiceKeyGroupBy is the group-by builder for ServiceKey entities.
-type ServiceKeyGroupBy struct {
-	config
-	selector
-	fields []string
-	fns    []AggregateFunc
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Aggregate adds the given aggregation functions to the group-by query.
-func (skgb *ServiceKeyGroupBy) Aggregate(fns ...AggregateFunc) *ServiceKeyGroupBy {
-	skgb.fns = append(skgb.fns, fns...)
-	return skgb
-}
-
-// Scan applies the group-by query and scans the result into the given value.
-func (skgb *ServiceKeyGroupBy) Scan(ctx context.Context, v any) error {
-	query, err := skgb.path(ctx)
-	if err != nil {
-		return err
-	}
-	skgb.sql = query
-	return skgb.sqlScan(ctx, v)
-}
-
-func (skgb *ServiceKeyGroupBy) sqlScan(ctx context.Context, v any) error {
-	for _, f := range skgb.fields {
-		if !servicekey.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
-		}
-	}
-	selector := skgb.sqlQuery()
-	if err := selector.Err(); err != nil {
-		return err
-	}
-	rows := &sql.Rows{}
-	query, args := selector.Query()
-	if err := skgb.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
-
-func (skgb *ServiceKeyGroupBy) sqlQuery() *sql.Selector {
-	selector := skgb.sql.Select()
-	aggregation := make([]string, 0, len(skgb.fns))
-	for _, fn := range skgb.fns {
-		aggregation = append(aggregation, fn(selector))
-	}
-	// If no columns were selected in a custom aggregation function, the default
-	// selection is the fields used for "group-by", and the aggregation functions.
-	if len(selector.SelectedColumns()) == 0 {
-		columns := make([]string, 0, len(skgb.fields)+len(skgb.fns))
-		for _, f := range skgb.fields {
-			columns = append(columns, selector.C(f))
-		}
-		columns = append(columns, aggregation...)
-		selector.Select(columns...)
-	}
-	return selector.GroupBy(selector.Columns(skgb.fields...)...)
-}
-
-// ServiceKeySelect is the builder for selecting fields of ServiceKey entities.
-type ServiceKeySelect struct {
-	*ServiceKeyQuery
-	selector
-	// intermediate query (i.e. traversal path).
-	sql *sql.Selector
-}
-
-// Scan applies the selector query and scans the result into the given value.
-func (sks *ServiceKeySelect) Scan(ctx context.Context, v any) error {
-	if err := sks.prepareQuery(ctx); err != nil {
-		return err
-	}
-	sks.sql = sks.ServiceKeyQuery.sqlQuery(ctx)
-	return sks.sqlScan(ctx, v)
-}
-
-func (sks *ServiceKeySelect) sqlScan(ctx context.Context, v any) error {
-	rows := &sql.Rows{}
-	query, args := sks.sql.Query()
-	if err := sks.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
diff --git a/pkg/storage/default/ent/servicekey_update.go b/pkg/storage/default/ent/servicekey_update.go
deleted file mode 100644
index 2e51b9c..0000000
--- a/pkg/storage/default/ent/servicekey_update.go
+++ /dev/null
@@ -1,618 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// ServiceKeyUpdate is the builder for updating ServiceKey entities.
-type ServiceKeyUpdate struct {
-	config
-	hooks    []Hook
-	mutation *ServiceKeyMutation
-}
-
-// Where appends a list predicates to the ServiceKeyUpdate builder.
-func (sku *ServiceKeyUpdate) Where(ps ...predicate.ServiceKey) *ServiceKeyUpdate {
-	sku.mutation.Where(ps...)
-	return sku
-}
-
-// SetResource sets the "resource" field.
-func (sku *ServiceKeyUpdate) SetResource(s string) *ServiceKeyUpdate {
-	sku.mutation.SetResource(s)
-	return sku
-}
-
-// SetNillableResource sets the "resource" field if the given value is not nil.
-func (sku *ServiceKeyUpdate) SetNillableResource(s *string) *ServiceKeyUpdate {
-	if s != nil {
-		sku.SetResource(*s)
-	}
-	return sku
-}
-
-// SetNumUsed sets the "num_used" field.
-func (sku *ServiceKeyUpdate) SetNumUsed(i int64) *ServiceKeyUpdate {
-	sku.mutation.ResetNumUsed()
-	sku.mutation.SetNumUsed(i)
-	return sku
-}
-
-// SetNillableNumUsed sets the "num_used" field if the given value is not nil.
-func (sku *ServiceKeyUpdate) SetNillableNumUsed(i *int64) *ServiceKeyUpdate {
-	if i != nil {
-		sku.SetNumUsed(*i)
-	}
-	return sku
-}
-
-// AddNumUsed adds i to the "num_used" field.
-func (sku *ServiceKeyUpdate) AddNumUsed(i int64) *ServiceKeyUpdate {
-	sku.mutation.AddNumUsed(i)
-	return sku
-}
-
-// SetMaxUses sets the "max_uses" field.
-func (sku *ServiceKeyUpdate) SetMaxUses(i int64) *ServiceKeyUpdate {
-	sku.mutation.ResetMaxUses()
-	sku.mutation.SetMaxUses(i)
-	return sku
-}
-
-// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
-func (sku *ServiceKeyUpdate) SetNillableMaxUses(i *int64) *ServiceKeyUpdate {
-	if i != nil {
-		sku.SetMaxUses(*i)
-	}
-	return sku
-}
-
-// AddMaxUses adds i to the "max_uses" field.
-func (sku *ServiceKeyUpdate) AddMaxUses(i int64) *ServiceKeyUpdate {
-	sku.mutation.AddMaxUses(i)
-	return sku
-}
-
-// SetExpires sets the "expires" field.
-func (sku *ServiceKeyUpdate) SetExpires(i int64) *ServiceKeyUpdate {
-	sku.mutation.ResetExpires()
-	sku.mutation.SetExpires(i)
-	return sku
-}
-
-// SetNillableExpires sets the "expires" field if the given value is not nil.
-func (sku *ServiceKeyUpdate) SetNillableExpires(i *int64) *ServiceKeyUpdate {
-	if i != nil {
-		sku.SetExpires(*i)
-	}
-	return sku
-}
-
-// AddExpires adds i to the "expires" field.
-func (sku *ServiceKeyUpdate) AddExpires(i int64) *ServiceKeyUpdate {
-	sku.mutation.AddExpires(i)
-	return sku
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (sku *ServiceKeyUpdate) SetOwnerID(id int) *ServiceKeyUpdate {
-	sku.mutation.SetOwnerID(id)
-	return sku
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (sku *ServiceKeyUpdate) SetNillableOwnerID(id *int) *ServiceKeyUpdate {
-	if id != nil {
-		sku = sku.SetOwnerID(*id)
-	}
-	return sku
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (sku *ServiceKeyUpdate) SetOwner(u *User) *ServiceKeyUpdate {
-	return sku.SetOwnerID(u.ID)
-}
-
-// Mutation returns the ServiceKeyMutation object of the builder.
-func (sku *ServiceKeyUpdate) Mutation() *ServiceKeyMutation {
-	return sku.mutation
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (sku *ServiceKeyUpdate) ClearOwner() *ServiceKeyUpdate {
-	sku.mutation.ClearOwner()
-	return sku
-}
-
-// Save executes the query and returns the number of nodes affected by the update operation.
-func (sku *ServiceKeyUpdate) Save(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(sku.hooks) == 0 {
-		affected, err = sku.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*ServiceKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			sku.mutation = mutation
-			affected, err = sku.sqlSave(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(sku.hooks) - 1; i >= 0; i-- {
-			if sku.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = sku.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, sku.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (sku *ServiceKeyUpdate) SaveX(ctx context.Context) int {
-	affected, err := sku.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return affected
-}
-
-// Exec executes the query.
-func (sku *ServiceKeyUpdate) Exec(ctx context.Context) error {
-	_, err := sku.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (sku *ServiceKeyUpdate) ExecX(ctx context.Context) {
-	if err := sku.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (sku *ServiceKeyUpdate) sqlSave(ctx context.Context) (n int, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   servicekey.Table,
-			Columns: servicekey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: servicekey.FieldID,
-			},
-		},
-	}
-	if ps := sku.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if value, ok := sku.mutation.Resource(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: servicekey.FieldResource,
-		})
-	}
-	if value, ok := sku.mutation.NumUsed(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldNumUsed,
-		})
-	}
-	if value, ok := sku.mutation.AddedNumUsed(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldNumUsed,
-		})
-	}
-	if value, ok := sku.mutation.MaxUses(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldMaxUses,
-		})
-	}
-	if value, ok := sku.mutation.AddedMaxUses(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldMaxUses,
-		})
-	}
-	if value, ok := sku.mutation.Expires(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldExpires,
-		})
-	}
-	if value, ok := sku.mutation.AddedExpires(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldExpires,
-		})
-	}
-	if sku.mutation.OwnerCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   servicekey.OwnerTable,
-			Columns: []string{servicekey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := sku.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   servicekey.OwnerTable,
-			Columns: []string{servicekey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	if n, err = sqlgraph.UpdateNodes(ctx, sku.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{servicekey.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return 0, err
-	}
-	return n, nil
-}
-
-// ServiceKeyUpdateOne is the builder for updating a single ServiceKey entity.
-type ServiceKeyUpdateOne struct {
-	config
-	fields   []string
-	hooks    []Hook
-	mutation *ServiceKeyMutation
-}
-
-// SetResource sets the "resource" field.
-func (skuo *ServiceKeyUpdateOne) SetResource(s string) *ServiceKeyUpdateOne {
-	skuo.mutation.SetResource(s)
-	return skuo
-}
-
-// SetNillableResource sets the "resource" field if the given value is not nil.
-func (skuo *ServiceKeyUpdateOne) SetNillableResource(s *string) *ServiceKeyUpdateOne {
-	if s != nil {
-		skuo.SetResource(*s)
-	}
-	return skuo
-}
-
-// SetNumUsed sets the "num_used" field.
-func (skuo *ServiceKeyUpdateOne) SetNumUsed(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.ResetNumUsed()
-	skuo.mutation.SetNumUsed(i)
-	return skuo
-}
-
-// SetNillableNumUsed sets the "num_used" field if the given value is not nil.
-func (skuo *ServiceKeyUpdateOne) SetNillableNumUsed(i *int64) *ServiceKeyUpdateOne {
-	if i != nil {
-		skuo.SetNumUsed(*i)
-	}
-	return skuo
-}
-
-// AddNumUsed adds i to the "num_used" field.
-func (skuo *ServiceKeyUpdateOne) AddNumUsed(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.AddNumUsed(i)
-	return skuo
-}
-
-// SetMaxUses sets the "max_uses" field.
-func (skuo *ServiceKeyUpdateOne) SetMaxUses(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.ResetMaxUses()
-	skuo.mutation.SetMaxUses(i)
-	return skuo
-}
-
-// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
-func (skuo *ServiceKeyUpdateOne) SetNillableMaxUses(i *int64) *ServiceKeyUpdateOne {
-	if i != nil {
-		skuo.SetMaxUses(*i)
-	}
-	return skuo
-}
-
-// AddMaxUses adds i to the "max_uses" field.
-func (skuo *ServiceKeyUpdateOne) AddMaxUses(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.AddMaxUses(i)
-	return skuo
-}
-
-// SetExpires sets the "expires" field.
-func (skuo *ServiceKeyUpdateOne) SetExpires(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.ResetExpires()
-	skuo.mutation.SetExpires(i)
-	return skuo
-}
-
-// SetNillableExpires sets the "expires" field if the given value is not nil.
-func (skuo *ServiceKeyUpdateOne) SetNillableExpires(i *int64) *ServiceKeyUpdateOne {
-	if i != nil {
-		skuo.SetExpires(*i)
-	}
-	return skuo
-}
-
-// AddExpires adds i to the "expires" field.
-func (skuo *ServiceKeyUpdateOne) AddExpires(i int64) *ServiceKeyUpdateOne {
-	skuo.mutation.AddExpires(i)
-	return skuo
-}
-
-// SetOwnerID sets the "owner" edge to the User entity by ID.
-func (skuo *ServiceKeyUpdateOne) SetOwnerID(id int) *ServiceKeyUpdateOne {
-	skuo.mutation.SetOwnerID(id)
-	return skuo
-}
-
-// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
-func (skuo *ServiceKeyUpdateOne) SetNillableOwnerID(id *int) *ServiceKeyUpdateOne {
-	if id != nil {
-		skuo = skuo.SetOwnerID(*id)
-	}
-	return skuo
-}
-
-// SetOwner sets the "owner" edge to the User entity.
-func (skuo *ServiceKeyUpdateOne) SetOwner(u *User) *ServiceKeyUpdateOne {
-	return skuo.SetOwnerID(u.ID)
-}
-
-// Mutation returns the ServiceKeyMutation object of the builder.
-func (skuo *ServiceKeyUpdateOne) Mutation() *ServiceKeyMutation {
-	return skuo.mutation
-}
-
-// ClearOwner clears the "owner" edge to the User entity.
-func (skuo *ServiceKeyUpdateOne) ClearOwner() *ServiceKeyUpdateOne {
-	skuo.mutation.ClearOwner()
-	return skuo
-}
-
-// Select allows selecting one or more fields (columns) of the returned entity.
-// The default is selecting all fields defined in the entity schema.
-func (skuo *ServiceKeyUpdateOne) Select(field string, fields ...string) *ServiceKeyUpdateOne {
-	skuo.fields = append([]string{field}, fields...)
-	return skuo
-}
-
-// Save executes the query and returns the updated ServiceKey entity.
-func (skuo *ServiceKeyUpdateOne) Save(ctx context.Context) (*ServiceKey, error) {
-	var (
-		err  error
-		node *ServiceKey
-	)
-	if len(skuo.hooks) == 0 {
-		node, err = skuo.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*ServiceKeyMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			skuo.mutation = mutation
-			node, err = skuo.sqlSave(ctx)
-			mutation.done = true
-			return node, err
-		})
-		for i := len(skuo.hooks) - 1; i >= 0; i-- {
-			if skuo.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = skuo.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, skuo.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*ServiceKey)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from ServiceKeyMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (skuo *ServiceKeyUpdateOne) SaveX(ctx context.Context) *ServiceKey {
-	node, err := skuo.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// Exec executes the query on the entity.
-func (skuo *ServiceKeyUpdateOne) Exec(ctx context.Context) error {
-	_, err := skuo.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (skuo *ServiceKeyUpdateOne) ExecX(ctx context.Context) {
-	if err := skuo.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (skuo *ServiceKeyUpdateOne) sqlSave(ctx context.Context) (_node *ServiceKey, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   servicekey.Table,
-			Columns: servicekey.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: servicekey.FieldID,
-			},
-		},
-	}
-	id, ok := skuo.mutation.ID()
-	if !ok {
-		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ServiceKey.id" for update`)}
-	}
-	_spec.Node.ID.Value = id
-	if fields := skuo.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, servicekey.FieldID)
-		for _, f := range fields {
-			if !servicekey.ValidColumn(f) {
-				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-			}
-			if f != servicekey.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, f)
-			}
-		}
-	}
-	if ps := skuo.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if value, ok := skuo.mutation.Resource(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: servicekey.FieldResource,
-		})
-	}
-	if value, ok := skuo.mutation.NumUsed(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldNumUsed,
-		})
-	}
-	if value, ok := skuo.mutation.AddedNumUsed(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldNumUsed,
-		})
-	}
-	if value, ok := skuo.mutation.MaxUses(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldMaxUses,
-		})
-	}
-	if value, ok := skuo.mutation.AddedMaxUses(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldMaxUses,
-		})
-	}
-	if value, ok := skuo.mutation.Expires(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldExpires,
-		})
-	}
-	if value, ok := skuo.mutation.AddedExpires(); ok {
-		_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
-			Type:   field.TypeInt64,
-			Value:  value,
-			Column: servicekey.FieldExpires,
-		})
-	}
-	if skuo.mutation.OwnerCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   servicekey.OwnerTable,
-			Columns: []string{servicekey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := skuo.mutation.OwnerIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.M2O,
-			Inverse: true,
-			Table:   servicekey.OwnerTable,
-			Columns: []string{servicekey.OwnerColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: user.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	_node = &ServiceKey{config: skuo.config}
-	_spec.Assign = _node.assignValues
-	_spec.ScanValues = _node.scanValues
-	if err = sqlgraph.UpdateNode(ctx, skuo.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{servicekey.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	return _node, nil
-}
diff --git a/pkg/storage/default/ent/user.go b/pkg/storage/default/ent/user.go
deleted file mode 100644
index 78f3e92..0000000
--- a/pkg/storage/default/ent/user.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"entgo.io/ent/dialect/sql"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// User is the model entity for the User schema.
-type User struct {
-	config `json:"-"`
-	// ID of the ent.
-	ID int `json:"id,omitempty"`
-	// Username holds the value of the "username" field.
-	Username string `json:"username,omitempty"`
-	// CreatedAt holds the value of the "created_at" field.
-	CreatedAt time.Time `json:"created_at,omitempty"`
-	// Edges holds the relations/edges for other nodes in the graph.
-	// The values are being populated by the UserQuery when eager-loading is set.
-	Edges UserEdges `json:"edges"`
-}
-
-// UserEdges holds the relations/edges for other nodes in the graph.
-type UserEdges struct {
-	// Servicekeys holds the value of the servicekeys edge.
-	Servicekeys []*ServiceKey `json:"servicekeys,omitempty"`
-	// Apikeys holds the value of the apikeys edge.
-	Apikeys []*APIKey `json:"apikeys,omitempty"`
-	// loadedTypes holds the information for reporting if a
-	// type was loaded (or requested) in eager-loading or not.
-	loadedTypes [2]bool
-}
-
-// ServicekeysOrErr returns the Servicekeys value or an error if the edge
-// was not loaded in eager-loading.
-func (e UserEdges) ServicekeysOrErr() ([]*ServiceKey, error) {
-	if e.loadedTypes[0] {
-		return e.Servicekeys, nil
-	}
-	return nil, &NotLoadedError{edge: "servicekeys"}
-}
-
-// ApikeysOrErr returns the Apikeys value or an error if the edge
-// was not loaded in eager-loading.
-func (e UserEdges) ApikeysOrErr() ([]*APIKey, error) {
-	if e.loadedTypes[1] {
-		return e.Apikeys, nil
-	}
-	return nil, &NotLoadedError{edge: "apikeys"}
-}
-
-// scanValues returns the types for scanning values from sql.Rows.
-func (*User) scanValues(columns []string) ([]any, error) {
-	values := make([]any, len(columns))
-	for i := range columns {
-		switch columns[i] {
-		case user.FieldID:
-			values[i] = new(sql.NullInt64)
-		case user.FieldUsername:
-			values[i] = new(sql.NullString)
-		case user.FieldCreatedAt:
-			values[i] = new(sql.NullTime)
-		default:
-			return nil, fmt.Errorf("unexpected column %q for type User", columns[i])
-		}
-	}
-	return values, nil
-}
-
-// assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the User fields.
-func (u *User) assignValues(columns []string, values []any) error {
-	if m, n := len(values), len(columns); m < n {
-		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
-	}
-	for i := range columns {
-		switch columns[i] {
-		case user.FieldID:
-			value, ok := values[i].(*sql.NullInt64)
-			if !ok {
-				return fmt.Errorf("unexpected type %T for field id", value)
-			}
-			u.ID = int(value.Int64)
-		case user.FieldUsername:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field username", values[i])
-			} else if value.Valid {
-				u.Username = value.String
-			}
-		case user.FieldCreatedAt:
-			if value, ok := values[i].(*sql.NullTime); !ok {
-				return fmt.Errorf("unexpected type %T for field created_at", values[i])
-			} else if value.Valid {
-				u.CreatedAt = value.Time
-			}
-		}
-	}
-	return nil
-}
-
-// QueryServicekeys queries the "servicekeys" edge of the User entity.
-func (u *User) QueryServicekeys() *ServiceKeyQuery {
-	return (&UserClient{config: u.config}).QueryServicekeys(u)
-}
-
-// QueryApikeys queries the "apikeys" edge of the User entity.
-func (u *User) QueryApikeys() *APIKeyQuery {
-	return (&UserClient{config: u.config}).QueryApikeys(u)
-}
-
-// Update returns a builder for updating this User.
-// Note that you need to call User.Unwrap() before calling this method if this User
-// was returned from a transaction, and the transaction was committed or rolled back.
-func (u *User) Update() *UserUpdateOne {
-	return (&UserClient{config: u.config}).UpdateOne(u)
-}
-
-// Unwrap unwraps the User entity that was returned from a transaction after it was closed,
-// so that all future queries will be executed through the driver which created the transaction.
-func (u *User) Unwrap() *User {
-	_tx, ok := u.config.driver.(*txDriver)
-	if !ok {
-		panic("ent: User is not a transactional entity")
-	}
-	u.config.driver = _tx.drv
-	return u
-}
-
-// String implements the fmt.Stringer.
-func (u *User) String() string {
-	var builder strings.Builder
-	builder.WriteString("User(")
-	builder.WriteString(fmt.Sprintf("id=%v, ", u.ID))
-	builder.WriteString("username=")
-	builder.WriteString(u.Username)
-	builder.WriteString(", ")
-	builder.WriteString("created_at=")
-	builder.WriteString(u.CreatedAt.Format(time.ANSIC))
-	builder.WriteByte(')')
-	return builder.String()
-}
-
-// Users is a parsable slice of User.
-type Users []*User
-
-func (u Users) config(cfg config) {
-	for _i := range u {
-		u[_i].config = cfg
-	}
-}
diff --git a/pkg/storage/default/ent/user/user.go b/pkg/storage/default/ent/user/user.go
deleted file mode 100644
index 2413474..0000000
--- a/pkg/storage/default/ent/user/user.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package user
-
-import (
-	"time"
-)
-
-const (
-	// Label holds the string label denoting the user type in the database.
-	Label = "user"
-	// FieldID holds the string denoting the id field in the database.
-	FieldID = "id"
-	// FieldUsername holds the string denoting the username field in the database.
-	FieldUsername = "username"
-	// FieldCreatedAt holds the string denoting the created_at field in the database.
-	FieldCreatedAt = "created_at"
-	// EdgeServicekeys holds the string denoting the servicekeys edge name in mutations.
-	EdgeServicekeys = "servicekeys"
-	// EdgeApikeys holds the string denoting the apikeys edge name in mutations.
-	EdgeApikeys = "apikeys"
-	// Table holds the table name of the user in the database.
-	Table = "users"
-	// ServicekeysTable is the table that holds the servicekeys relation/edge.
-	ServicekeysTable = "service_keys"
-	// ServicekeysInverseTable is the table name for the ServiceKey entity.
-	// It exists in this package in order to avoid circular dependency with the "servicekey" package.
-	ServicekeysInverseTable = "service_keys"
-	// ServicekeysColumn is the table column denoting the servicekeys relation/edge.
-	ServicekeysColumn = "user_servicekeys"
-	// ApikeysTable is the table that holds the apikeys relation/edge.
-	ApikeysTable = "api_keys"
-	// ApikeysInverseTable is the table name for the APIKey entity.
-	// It exists in this package in order to avoid circular dependency with the "apikey" package.
-	ApikeysInverseTable = "api_keys"
-	// ApikeysColumn is the table column denoting the apikeys relation/edge.
-	ApikeysColumn = "user_apikeys"
-)
-
-// Columns holds all SQL columns for user fields.
-var Columns = []string{
-	FieldID,
-	FieldUsername,
-	FieldCreatedAt,
-}
-
-// ValidColumn reports if the column name is valid (part of the table columns).
-func ValidColumn(column string) bool {
-	for i := range Columns {
-		if column == Columns[i] {
-			return true
-		}
-	}
-	return false
-}
-
-var (
-	// DefaultUsername holds the default value on creation for the "username" field.
-	DefaultUsername string
-	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
-	DefaultCreatedAt func() time.Time
-)
diff --git a/pkg/storage/default/ent/user/where.go b/pkg/storage/default/ent/user/where.go
deleted file mode 100644
index 6935321..0000000
--- a/pkg/storage/default/ent/user/where.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package user
-
-import (
-	"time"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-)
-
-// ID filters vertices based on their ID field.
-func ID(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDEQ applies the EQ predicate on the ID field.
-func IDEQ(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldID), id))
-	})
-}
-
-// IDNEQ applies the NEQ predicate on the ID field.
-func IDNEQ(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldID), id))
-	})
-}
-
-// IDIn applies the In predicate on the ID field.
-func IDIn(ids ...int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.In(s.C(FieldID), v...))
-	})
-}
-
-// IDNotIn applies the NotIn predicate on the ID field.
-func IDNotIn(ids ...int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		v := make([]any, len(ids))
-		for i := range v {
-			v[i] = ids[i]
-		}
-		s.Where(sql.NotIn(s.C(FieldID), v...))
-	})
-}
-
-// IDGT applies the GT predicate on the ID field.
-func IDGT(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldID), id))
-	})
-}
-
-// IDGTE applies the GTE predicate on the ID field.
-func IDGTE(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldID), id))
-	})
-}
-
-// IDLT applies the LT predicate on the ID field.
-func IDLT(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldID), id))
-	})
-}
-
-// IDLTE applies the LTE predicate on the ID field.
-func IDLTE(id int) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldID), id))
-	})
-}
-
-// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ.
-func Username(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldUsername), v))
-	})
-}
-
-// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
-func CreatedAt(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// UsernameEQ applies the EQ predicate on the "username" field.
-func UsernameEQ(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameNEQ applies the NEQ predicate on the "username" field.
-func UsernameNEQ(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameIn applies the In predicate on the "username" field.
-func UsernameIn(vs ...string) predicate.User {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldUsername), v...))
-	})
-}
-
-// UsernameNotIn applies the NotIn predicate on the "username" field.
-func UsernameNotIn(vs ...string) predicate.User {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldUsername), v...))
-	})
-}
-
-// UsernameGT applies the GT predicate on the "username" field.
-func UsernameGT(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameGTE applies the GTE predicate on the "username" field.
-func UsernameGTE(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameLT applies the LT predicate on the "username" field.
-func UsernameLT(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameLTE applies the LTE predicate on the "username" field.
-func UsernameLTE(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameContains applies the Contains predicate on the "username" field.
-func UsernameContains(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.Contains(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameHasPrefix applies the HasPrefix predicate on the "username" field.
-func UsernameHasPrefix(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.HasPrefix(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameHasSuffix applies the HasSuffix predicate on the "username" field.
-func UsernameHasSuffix(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.HasSuffix(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameEqualFold applies the EqualFold predicate on the "username" field.
-func UsernameEqualFold(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EqualFold(s.C(FieldUsername), v))
-	})
-}
-
-// UsernameContainsFold applies the ContainsFold predicate on the "username" field.
-func UsernameContainsFold(v string) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.ContainsFold(s.C(FieldUsername), v))
-	})
-}
-
-// CreatedAtEQ applies the EQ predicate on the "created_at" field.
-func CreatedAtEQ(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
-func CreatedAtNEQ(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtIn applies the In predicate on the "created_at" field.
-func CreatedAtIn(vs ...time.Time) predicate.User {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.In(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
-func CreatedAtNotIn(vs ...time.Time) predicate.User {
-	v := make([]any, len(vs))
-	for i := range v {
-		v[i] = vs[i]
-	}
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
-	})
-}
-
-// CreatedAtGT applies the GT predicate on the "created_at" field.
-func CreatedAtGT(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtGTE applies the GTE predicate on the "created_at" field.
-func CreatedAtGTE(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLT applies the LT predicate on the "created_at" field.
-func CreatedAtLT(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LT(s.C(FieldCreatedAt), v))
-	})
-}
-
-// CreatedAtLTE applies the LTE predicate on the "created_at" field.
-func CreatedAtLTE(v time.Time) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
-	})
-}
-
-// HasServicekeys applies the HasEdge predicate on the "servicekeys" edge.
-func HasServicekeys() predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(ServicekeysTable, FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, ServicekeysTable, ServicekeysColumn),
-		)
-		sqlgraph.HasNeighbors(s, step)
-	})
-}
-
-// HasServicekeysWith applies the HasEdge predicate on the "servicekeys" edge with a given conditions (other predicates).
-func HasServicekeysWith(preds ...predicate.ServiceKey) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(ServicekeysInverseTable, FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, ServicekeysTable, ServicekeysColumn),
-		)
-		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
-			for _, p := range preds {
-				p(s)
-			}
-		})
-	})
-}
-
-// HasApikeys applies the HasEdge predicate on the "apikeys" edge.
-func HasApikeys() predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(ApikeysTable, FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, ApikeysTable, ApikeysColumn),
-		)
-		sqlgraph.HasNeighbors(s, step)
-	})
-}
-
-// HasApikeysWith applies the HasEdge predicate on the "apikeys" edge with a given conditions (other predicates).
-func HasApikeysWith(preds ...predicate.APIKey) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		step := sqlgraph.NewStep(
-			sqlgraph.From(Table, FieldID),
-			sqlgraph.To(ApikeysInverseTable, FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, ApikeysTable, ApikeysColumn),
-		)
-		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
-			for _, p := range preds {
-				p(s)
-			}
-		})
-	})
-}
-
-// And groups predicates with the AND operator between them.
-func And(predicates ...predicate.User) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for _, p := range predicates {
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Or groups predicates with the OR operator between them.
-func Or(predicates ...predicate.User) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		s1 := s.Clone().SetP(nil)
-		for i, p := range predicates {
-			if i > 0 {
-				s1.Or()
-			}
-			p(s1)
-		}
-		s.Where(s1.P())
-	})
-}
-
-// Not applies the not operator on the given predicate.
-func Not(p predicate.User) predicate.User {
-	return predicate.User(func(s *sql.Selector) {
-		p(s.Not())
-	})
-}
diff --git a/pkg/storage/default/ent/user_create.go b/pkg/storage/default/ent/user_create.go
deleted file mode 100644
index d7c2858..0000000
--- a/pkg/storage/default/ent/user_create.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// UserCreate is the builder for creating a User entity.
-type UserCreate struct {
-	config
-	mutation *UserMutation
-	hooks    []Hook
-}
-
-// SetUsername sets the "username" field.
-func (uc *UserCreate) SetUsername(s string) *UserCreate {
-	uc.mutation.SetUsername(s)
-	return uc
-}
-
-// SetNillableUsername sets the "username" field if the given value is not nil.
-func (uc *UserCreate) SetNillableUsername(s *string) *UserCreate {
-	if s != nil {
-		uc.SetUsername(*s)
-	}
-	return uc
-}
-
-// SetCreatedAt sets the "created_at" field.
-func (uc *UserCreate) SetCreatedAt(t time.Time) *UserCreate {
-	uc.mutation.SetCreatedAt(t)
-	return uc
-}
-
-// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
-func (uc *UserCreate) SetNillableCreatedAt(t *time.Time) *UserCreate {
-	if t != nil {
-		uc.SetCreatedAt(*t)
-	}
-	return uc
-}
-
-// AddServicekeyIDs adds the "servicekeys" edge to the ServiceKey entity by IDs.
-func (uc *UserCreate) AddServicekeyIDs(ids ...int) *UserCreate {
-	uc.mutation.AddServicekeyIDs(ids...)
-	return uc
-}
-
-// AddServicekeys adds the "servicekeys" edges to the ServiceKey entity.
-func (uc *UserCreate) AddServicekeys(s ...*ServiceKey) *UserCreate {
-	ids := make([]int, len(s))
-	for i := range s {
-		ids[i] = s[i].ID
-	}
-	return uc.AddServicekeyIDs(ids...)
-}
-
-// AddApikeyIDs adds the "apikeys" edge to the APIKey entity by IDs.
-func (uc *UserCreate) AddApikeyIDs(ids ...int) *UserCreate {
-	uc.mutation.AddApikeyIDs(ids...)
-	return uc
-}
-
-// AddApikeys adds the "apikeys" edges to the APIKey entity.
-func (uc *UserCreate) AddApikeys(a ...*APIKey) *UserCreate {
-	ids := make([]int, len(a))
-	for i := range a {
-		ids[i] = a[i].ID
-	}
-	return uc.AddApikeyIDs(ids...)
-}
-
-// Mutation returns the UserMutation object of the builder.
-func (uc *UserCreate) Mutation() *UserMutation {
-	return uc.mutation
-}
-
-// Save creates the User in the database.
-func (uc *UserCreate) Save(ctx context.Context) (*User, error) {
-	var (
-		err  error
-		node *User
-	)
-	uc.defaults()
-	if len(uc.hooks) == 0 {
-		if err = uc.check(); err != nil {
-			return nil, err
-		}
-		node, err = uc.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*UserMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			if err = uc.check(); err != nil {
-				return nil, err
-			}
-			uc.mutation = mutation
-			if node, err = uc.sqlSave(ctx); err != nil {
-				return nil, err
-			}
-			mutation.id = &node.ID
-			mutation.done = true
-			return node, err
-		})
-		for i := len(uc.hooks) - 1; i >= 0; i-- {
-			if uc.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = uc.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, uc.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*User)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from UserMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX calls Save and panics if Save returns an error.
-func (uc *UserCreate) SaveX(ctx context.Context) *User {
-	v, err := uc.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (uc *UserCreate) Exec(ctx context.Context) error {
-	_, err := uc.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (uc *UserCreate) ExecX(ctx context.Context) {
-	if err := uc.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-// defaults sets the default values of the builder before save.
-func (uc *UserCreate) defaults() {
-	if _, ok := uc.mutation.Username(); !ok {
-		v := user.DefaultUsername
-		uc.mutation.SetUsername(v)
-	}
-	if _, ok := uc.mutation.CreatedAt(); !ok {
-		v := user.DefaultCreatedAt()
-		uc.mutation.SetCreatedAt(v)
-	}
-}
-
-// check runs all checks and user-defined validators on the builder.
-func (uc *UserCreate) check() error {
-	if _, ok := uc.mutation.Username(); !ok {
-		return &ValidationError{Name: "username", err: errors.New(`ent: missing required field "User.username"`)}
-	}
-	if _, ok := uc.mutation.CreatedAt(); !ok {
-		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)}
-	}
-	return nil
-}
-
-func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) {
-	_node, _spec := uc.createSpec()
-	if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil {
-		if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	id := _spec.ID.Value.(int64)
-	_node.ID = int(id)
-	return _node, nil
-}
-
-func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
-	var (
-		_node = &User{config: uc.config}
-		_spec = &sqlgraph.CreateSpec{
-			Table: user.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: user.FieldID,
-			},
-		}
-	)
-	if value, ok := uc.mutation.Username(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: user.FieldUsername,
-		})
-		_node.Username = value
-	}
-	if value, ok := uc.mutation.CreatedAt(); ok {
-		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
-			Type:   field.TypeTime,
-			Value:  value,
-			Column: user.FieldCreatedAt,
-		})
-		_node.CreatedAt = value
-	}
-	if nodes := uc.mutation.ServicekeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges = append(_spec.Edges, edge)
-	}
-	if nodes := uc.mutation.ApikeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges = append(_spec.Edges, edge)
-	}
-	return _node, _spec
-}
-
-// UserCreateBulk is the builder for creating many User entities in bulk.
-type UserCreateBulk struct {
-	config
-	builders []*UserCreate
-}
-
-// Save creates the User entities in the database.
-func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) {
-	specs := make([]*sqlgraph.CreateSpec, len(ucb.builders))
-	nodes := make([]*User, len(ucb.builders))
-	mutators := make([]Mutator, len(ucb.builders))
-	for i := range ucb.builders {
-		func(i int, root context.Context) {
-			builder := ucb.builders[i]
-			builder.defaults()
-			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-				mutation, ok := m.(*UserMutation)
-				if !ok {
-					return nil, fmt.Errorf("unexpected mutation type %T", m)
-				}
-				if err := builder.check(); err != nil {
-					return nil, err
-				}
-				builder.mutation = mutation
-				nodes[i], specs[i] = builder.createSpec()
-				var err error
-				if i < len(mutators)-1 {
-					_, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation)
-				} else {
-					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
-					// Invoke the actual operation on the latest mutation in the chain.
-					if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil {
-						if sqlgraph.IsConstraintError(err) {
-							err = &ConstraintError{msg: err.Error(), wrap: err}
-						}
-					}
-				}
-				if err != nil {
-					return nil, err
-				}
-				mutation.id = &nodes[i].ID
-				if specs[i].ID.Value != nil {
-					id := specs[i].ID.Value.(int64)
-					nodes[i].ID = int(id)
-				}
-				mutation.done = true
-				return nodes[i], nil
-			})
-			for i := len(builder.hooks) - 1; i >= 0; i-- {
-				mut = builder.hooks[i](mut)
-			}
-			mutators[i] = mut
-		}(i, ctx)
-	}
-	if len(mutators) > 0 {
-		if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User {
-	v, err := ucb.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-// Exec executes the query.
-func (ucb *UserCreateBulk) Exec(ctx context.Context) error {
-	_, err := ucb.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (ucb *UserCreateBulk) ExecX(ctx context.Context) {
-	if err := ucb.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
diff --git a/pkg/storage/default/ent/user_delete.go b/pkg/storage/default/ent/user_delete.go
deleted file mode 100644
index 9865cc6..0000000
--- a/pkg/storage/default/ent/user_delete.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// UserDelete is the builder for deleting a User entity.
-type UserDelete struct {
-	config
-	hooks    []Hook
-	mutation *UserMutation
-}
-
-// Where appends a list predicates to the UserDelete builder.
-func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete {
-	ud.mutation.Where(ps...)
-	return ud
-}
-
-// Exec executes the deletion query and returns how many vertices were deleted.
-func (ud *UserDelete) Exec(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(ud.hooks) == 0 {
-		affected, err = ud.sqlExec(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*UserMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			ud.mutation = mutation
-			affected, err = ud.sqlExec(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(ud.hooks) - 1; i >= 0; i-- {
-			if ud.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = ud.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, ud.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (ud *UserDelete) ExecX(ctx context.Context) int {
-	n, err := ud.Exec(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return n
-}
-
-func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) {
-	_spec := &sqlgraph.DeleteSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table: user.Table,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: user.FieldID,
-			},
-		},
-	}
-	if ps := ud.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec)
-	if err != nil && sqlgraph.IsConstraintError(err) {
-		err = &ConstraintError{msg: err.Error(), wrap: err}
-	}
-	return affected, err
-}
-
-// UserDeleteOne is the builder for deleting a single User entity.
-type UserDeleteOne struct {
-	ud *UserDelete
-}
-
-// Exec executes the deletion query.
-func (udo *UserDeleteOne) Exec(ctx context.Context) error {
-	n, err := udo.ud.Exec(ctx)
-	switch {
-	case err != nil:
-		return err
-	case n == 0:
-		return &NotFoundError{user.Label}
-	default:
-		return nil
-	}
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (udo *UserDeleteOne) ExecX(ctx context.Context) {
-	udo.ud.ExecX(ctx)
-}
diff --git a/pkg/storage/default/ent/user_query.go b/pkg/storage/default/ent/user_query.go
deleted file mode 100644
index 46e5bdd..0000000
--- a/pkg/storage/default/ent/user_query.go
+++ /dev/null
@@ -1,684 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"database/sql/driver"
-	"fmt"
-	"math"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// UserQuery is the builder for querying User entities.
-type UserQuery struct {
-	config
-	limit           *int
-	offset          *int
-	unique          *bool
-	order           []OrderFunc
-	fields          []string
-	predicates      []predicate.User
-	withServicekeys *ServiceKeyQuery
-	withApikeys     *APIKeyQuery
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Where adds a new predicate for the UserQuery builder.
-func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery {
-	uq.predicates = append(uq.predicates, ps...)
-	return uq
-}
-
-// Limit adds a limit step to the query.
-func (uq *UserQuery) Limit(limit int) *UserQuery {
-	uq.limit = &limit
-	return uq
-}
-
-// Offset adds an offset step to the query.
-func (uq *UserQuery) Offset(offset int) *UserQuery {
-	uq.offset = &offset
-	return uq
-}
-
-// Unique configures the query builder to filter duplicate records on query.
-// By default, unique is set to true, and can be disabled using this method.
-func (uq *UserQuery) Unique(unique bool) *UserQuery {
-	uq.unique = &unique
-	return uq
-}
-
-// Order adds an order step to the query.
-func (uq *UserQuery) Order(o ...OrderFunc) *UserQuery {
-	uq.order = append(uq.order, o...)
-	return uq
-}
-
-// QueryServicekeys chains the current query on the "servicekeys" edge.
-func (uq *UserQuery) QueryServicekeys() *ServiceKeyQuery {
-	query := &ServiceKeyQuery{config: uq.config}
-	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
-		if err := uq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		selector := uq.sqlQuery(ctx)
-		if err := selector.Err(); err != nil {
-			return nil, err
-		}
-		step := sqlgraph.NewStep(
-			sqlgraph.From(user.Table, user.FieldID, selector),
-			sqlgraph.To(servicekey.Table, servicekey.FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, user.ServicekeysTable, user.ServicekeysColumn),
-		)
-		fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
-		return fromU, nil
-	}
-	return query
-}
-
-// QueryApikeys chains the current query on the "apikeys" edge.
-func (uq *UserQuery) QueryApikeys() *APIKeyQuery {
-	query := &APIKeyQuery{config: uq.config}
-	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
-		if err := uq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		selector := uq.sqlQuery(ctx)
-		if err := selector.Err(); err != nil {
-			return nil, err
-		}
-		step := sqlgraph.NewStep(
-			sqlgraph.From(user.Table, user.FieldID, selector),
-			sqlgraph.To(apikey.Table, apikey.FieldID),
-			sqlgraph.Edge(sqlgraph.O2M, false, user.ApikeysTable, user.ApikeysColumn),
-		)
-		fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step)
-		return fromU, nil
-	}
-	return query
-}
-
-// First returns the first User entity from the query.
-// Returns a *NotFoundError when no User was found.
-func (uq *UserQuery) First(ctx context.Context) (*User, error) {
-	nodes, err := uq.Limit(1).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nil, &NotFoundError{user.Label}
-	}
-	return nodes[0], nil
-}
-
-// FirstX is like First, but panics if an error occurs.
-func (uq *UserQuery) FirstX(ctx context.Context) *User {
-	node, err := uq.First(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return node
-}
-
-// FirstID returns the first User ID from the query.
-// Returns a *NotFoundError when no User ID was found.
-func (uq *UserQuery) FirstID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = uq.Limit(1).IDs(ctx); err != nil {
-		return
-	}
-	if len(ids) == 0 {
-		err = &NotFoundError{user.Label}
-		return
-	}
-	return ids[0], nil
-}
-
-// FirstIDX is like FirstID, but panics if an error occurs.
-func (uq *UserQuery) FirstIDX(ctx context.Context) int {
-	id, err := uq.FirstID(ctx)
-	if err != nil && !IsNotFound(err) {
-		panic(err)
-	}
-	return id
-}
-
-// Only returns a single User entity found by the query, ensuring it only returns one.
-// Returns a *NotSingularError when more than one User entity is found.
-// Returns a *NotFoundError when no User entities are found.
-func (uq *UserQuery) Only(ctx context.Context) (*User, error) {
-	nodes, err := uq.Limit(2).All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	switch len(nodes) {
-	case 1:
-		return nodes[0], nil
-	case 0:
-		return nil, &NotFoundError{user.Label}
-	default:
-		return nil, &NotSingularError{user.Label}
-	}
-}
-
-// OnlyX is like Only, but panics if an error occurs.
-func (uq *UserQuery) OnlyX(ctx context.Context) *User {
-	node, err := uq.Only(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// OnlyID is like Only, but returns the only User ID in the query.
-// Returns a *NotSingularError when more than one User ID is found.
-// Returns a *NotFoundError when no entities are found.
-func (uq *UserQuery) OnlyID(ctx context.Context) (id int, err error) {
-	var ids []int
-	if ids, err = uq.Limit(2).IDs(ctx); err != nil {
-		return
-	}
-	switch len(ids) {
-	case 1:
-		id = ids[0]
-	case 0:
-		err = &NotFoundError{user.Label}
-	default:
-		err = &NotSingularError{user.Label}
-	}
-	return
-}
-
-// OnlyIDX is like OnlyID, but panics if an error occurs.
-func (uq *UserQuery) OnlyIDX(ctx context.Context) int {
-	id, err := uq.OnlyID(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return id
-}
-
-// All executes the query and returns a list of Users.
-func (uq *UserQuery) All(ctx context.Context) ([]*User, error) {
-	if err := uq.prepareQuery(ctx); err != nil {
-		return nil, err
-	}
-	return uq.sqlAll(ctx)
-}
-
-// AllX is like All, but panics if an error occurs.
-func (uq *UserQuery) AllX(ctx context.Context) []*User {
-	nodes, err := uq.All(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return nodes
-}
-
-// IDs executes the query and returns a list of User IDs.
-func (uq *UserQuery) IDs(ctx context.Context) ([]int, error) {
-	var ids []int
-	if err := uq.Select(user.FieldID).Scan(ctx, &ids); err != nil {
-		return nil, err
-	}
-	return ids, nil
-}
-
-// IDsX is like IDs, but panics if an error occurs.
-func (uq *UserQuery) IDsX(ctx context.Context) []int {
-	ids, err := uq.IDs(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return ids
-}
-
-// Count returns the count of the given query.
-func (uq *UserQuery) Count(ctx context.Context) (int, error) {
-	if err := uq.prepareQuery(ctx); err != nil {
-		return 0, err
-	}
-	return uq.sqlCount(ctx)
-}
-
-// CountX is like Count, but panics if an error occurs.
-func (uq *UserQuery) CountX(ctx context.Context) int {
-	count, err := uq.Count(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return count
-}
-
-// Exist returns true if the query has elements in the graph.
-func (uq *UserQuery) Exist(ctx context.Context) (bool, error) {
-	if err := uq.prepareQuery(ctx); err != nil {
-		return false, err
-	}
-	return uq.sqlExist(ctx)
-}
-
-// ExistX is like Exist, but panics if an error occurs.
-func (uq *UserQuery) ExistX(ctx context.Context) bool {
-	exist, err := uq.Exist(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return exist
-}
-
-// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be
-// used to prepare common query builders and use them differently after the clone is made.
-func (uq *UserQuery) Clone() *UserQuery {
-	if uq == nil {
-		return nil
-	}
-	return &UserQuery{
-		config:          uq.config,
-		limit:           uq.limit,
-		offset:          uq.offset,
-		order:           append([]OrderFunc{}, uq.order...),
-		predicates:      append([]predicate.User{}, uq.predicates...),
-		withServicekeys: uq.withServicekeys.Clone(),
-		withApikeys:     uq.withApikeys.Clone(),
-		// clone intermediate query.
-		sql:    uq.sql.Clone(),
-		path:   uq.path,
-		unique: uq.unique,
-	}
-}
-
-// WithServicekeys tells the query-builder to eager-load the nodes that are connected to
-// the "servicekeys" edge. The optional arguments are used to configure the query builder of the edge.
-func (uq *UserQuery) WithServicekeys(opts ...func(*ServiceKeyQuery)) *UserQuery {
-	query := &ServiceKeyQuery{config: uq.config}
-	for _, opt := range opts {
-		opt(query)
-	}
-	uq.withServicekeys = query
-	return uq
-}
-
-// WithApikeys tells the query-builder to eager-load the nodes that are connected to
-// the "apikeys" edge. The optional arguments are used to configure the query builder of the edge.
-func (uq *UserQuery) WithApikeys(opts ...func(*APIKeyQuery)) *UserQuery {
-	query := &APIKeyQuery{config: uq.config}
-	for _, opt := range opts {
-		opt(query)
-	}
-	uq.withApikeys = query
-	return uq
-}
-
-// GroupBy is used to group vertices by one or more fields/columns.
-// It is often used with aggregate functions, like: count, max, mean, min, sum.
-//
-// Example:
-//
-//	var v []struct {
-//		Username string `json:"username,omitempty"`
-//		Count int `json:"count,omitempty"`
-//	}
-//
-//	client.User.Query().
-//		GroupBy(user.FieldUsername).
-//		Aggregate(ent.Count()).
-//		Scan(ctx, &v)
-func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
-	grbuild := &UserGroupBy{config: uq.config}
-	grbuild.fields = append([]string{field}, fields...)
-	grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
-		if err := uq.prepareQuery(ctx); err != nil {
-			return nil, err
-		}
-		return uq.sqlQuery(ctx), nil
-	}
-	grbuild.label = user.Label
-	grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
-	return grbuild
-}
-
-// Select allows the selection one or more fields/columns for the given query,
-// instead of selecting all fields in the entity.
-//
-// Example:
-//
-//	var v []struct {
-//		Username string `json:"username,omitempty"`
-//	}
-//
-//	client.User.Query().
-//		Select(user.FieldUsername).
-//		Scan(ctx, &v)
-func (uq *UserQuery) Select(fields ...string) *UserSelect {
-	uq.fields = append(uq.fields, fields...)
-	selbuild := &UserSelect{UserQuery: uq}
-	selbuild.label = user.Label
-	selbuild.flds, selbuild.scan = &uq.fields, selbuild.Scan
-	return selbuild
-}
-
-func (uq *UserQuery) prepareQuery(ctx context.Context) error {
-	for _, f := range uq.fields {
-		if !user.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-		}
-	}
-	if uq.path != nil {
-		prev, err := uq.path(ctx)
-		if err != nil {
-			return err
-		}
-		uq.sql = prev
-	}
-	return nil
-}
-
-func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) {
-	var (
-		nodes       = []*User{}
-		_spec       = uq.querySpec()
-		loadedTypes = [2]bool{
-			uq.withServicekeys != nil,
-			uq.withApikeys != nil,
-		}
-	)
-	_spec.ScanValues = func(columns []string) ([]any, error) {
-		return (*User).scanValues(nil, columns)
-	}
-	_spec.Assign = func(columns []string, values []any) error {
-		node := &User{config: uq.config}
-		nodes = append(nodes, node)
-		node.Edges.loadedTypes = loadedTypes
-		return node.assignValues(columns, values)
-	}
-	for i := range hooks {
-		hooks[i](ctx, _spec)
-	}
-	if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil {
-		return nil, err
-	}
-	if len(nodes) == 0 {
-		return nodes, nil
-	}
-	if query := uq.withServicekeys; query != nil {
-		if err := uq.loadServicekeys(ctx, query, nodes,
-			func(n *User) { n.Edges.Servicekeys = []*ServiceKey{} },
-			func(n *User, e *ServiceKey) { n.Edges.Servicekeys = append(n.Edges.Servicekeys, e) }); err != nil {
-			return nil, err
-		}
-	}
-	if query := uq.withApikeys; query != nil {
-		if err := uq.loadApikeys(ctx, query, nodes,
-			func(n *User) { n.Edges.Apikeys = []*APIKey{} },
-			func(n *User, e *APIKey) { n.Edges.Apikeys = append(n.Edges.Apikeys, e) }); err != nil {
-			return nil, err
-		}
-	}
-	return nodes, nil
-}
-
-func (uq *UserQuery) loadServicekeys(ctx context.Context, query *ServiceKeyQuery, nodes []*User, init func(*User), assign func(*User, *ServiceKey)) error {
-	fks := make([]driver.Value, 0, len(nodes))
-	nodeids := make(map[int]*User)
-	for i := range nodes {
-		fks = append(fks, nodes[i].ID)
-		nodeids[nodes[i].ID] = nodes[i]
-		if init != nil {
-			init(nodes[i])
-		}
-	}
-	query.withFKs = true
-	query.Where(predicate.ServiceKey(func(s *sql.Selector) {
-		s.Where(sql.InValues(user.ServicekeysColumn, fks...))
-	}))
-	neighbors, err := query.All(ctx)
-	if err != nil {
-		return err
-	}
-	for _, n := range neighbors {
-		fk := n.user_servicekeys
-		if fk == nil {
-			return fmt.Errorf(`foreign-key "user_servicekeys" is nil for node %v`, n.ID)
-		}
-		node, ok := nodeids[*fk]
-		if !ok {
-			return fmt.Errorf(`unexpected foreign-key "user_servicekeys" returned %v for node %v`, *fk, n.ID)
-		}
-		assign(node, n)
-	}
-	return nil
-}
-func (uq *UserQuery) loadApikeys(ctx context.Context, query *APIKeyQuery, nodes []*User, init func(*User), assign func(*User, *APIKey)) error {
-	fks := make([]driver.Value, 0, len(nodes))
-	nodeids := make(map[int]*User)
-	for i := range nodes {
-		fks = append(fks, nodes[i].ID)
-		nodeids[nodes[i].ID] = nodes[i]
-		if init != nil {
-			init(nodes[i])
-		}
-	}
-	query.withFKs = true
-	query.Where(predicate.APIKey(func(s *sql.Selector) {
-		s.Where(sql.InValues(user.ApikeysColumn, fks...))
-	}))
-	neighbors, err := query.All(ctx)
-	if err != nil {
-		return err
-	}
-	for _, n := range neighbors {
-		fk := n.user_apikeys
-		if fk == nil {
-			return fmt.Errorf(`foreign-key "user_apikeys" is nil for node %v`, n.ID)
-		}
-		node, ok := nodeids[*fk]
-		if !ok {
-			return fmt.Errorf(`unexpected foreign-key "user_apikeys" returned %v for node %v`, *fk, n.ID)
-		}
-		assign(node, n)
-	}
-	return nil
-}
-
-func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) {
-	_spec := uq.querySpec()
-	_spec.Node.Columns = uq.fields
-	if len(uq.fields) > 0 {
-		_spec.Unique = uq.unique != nil && *uq.unique
-	}
-	return sqlgraph.CountNodes(ctx, uq.driver, _spec)
-}
-
-func (uq *UserQuery) sqlExist(ctx context.Context) (bool, error) {
-	switch _, err := uq.FirstID(ctx); {
-	case IsNotFound(err):
-		return false, nil
-	case err != nil:
-		return false, fmt.Errorf("ent: check existence: %w", err)
-	default:
-		return true, nil
-	}
-}
-
-func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec {
-	_spec := &sqlgraph.QuerySpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   user.Table,
-			Columns: user.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: user.FieldID,
-			},
-		},
-		From:   uq.sql,
-		Unique: true,
-	}
-	if unique := uq.unique; unique != nil {
-		_spec.Unique = *unique
-	}
-	if fields := uq.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, user.FieldID)
-		for i := range fields {
-			if fields[i] != user.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
-			}
-		}
-	}
-	if ps := uq.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if limit := uq.limit; limit != nil {
-		_spec.Limit = *limit
-	}
-	if offset := uq.offset; offset != nil {
-		_spec.Offset = *offset
-	}
-	if ps := uq.order; len(ps) > 0 {
-		_spec.Order = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	return _spec
-}
-
-func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
-	builder := sql.Dialect(uq.driver.Dialect())
-	t1 := builder.Table(user.Table)
-	columns := uq.fields
-	if len(columns) == 0 {
-		columns = user.Columns
-	}
-	selector := builder.Select(t1.Columns(columns...)...).From(t1)
-	if uq.sql != nil {
-		selector = uq.sql
-		selector.Select(selector.Columns(columns...)...)
-	}
-	if uq.unique != nil && *uq.unique {
-		selector.Distinct()
-	}
-	for _, p := range uq.predicates {
-		p(selector)
-	}
-	for _, p := range uq.order {
-		p(selector)
-	}
-	if offset := uq.offset; offset != nil {
-		// limit is mandatory for offset clause. We start
-		// with default value, and override it below if needed.
-		selector.Offset(*offset).Limit(math.MaxInt32)
-	}
-	if limit := uq.limit; limit != nil {
-		selector.Limit(*limit)
-	}
-	return selector
-}
-
-// UserGroupBy is the group-by builder for User entities.
-type UserGroupBy struct {
-	config
-	selector
-	fields []string
-	fns    []AggregateFunc
-	// intermediate query (i.e. traversal path).
-	sql  *sql.Selector
-	path func(context.Context) (*sql.Selector, error)
-}
-
-// Aggregate adds the given aggregation functions to the group-by query.
-func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy {
-	ugb.fns = append(ugb.fns, fns...)
-	return ugb
-}
-
-// Scan applies the group-by query and scans the result into the given value.
-func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error {
-	query, err := ugb.path(ctx)
-	if err != nil {
-		return err
-	}
-	ugb.sql = query
-	return ugb.sqlScan(ctx, v)
-}
-
-func (ugb *UserGroupBy) sqlScan(ctx context.Context, v any) error {
-	for _, f := range ugb.fields {
-		if !user.ValidColumn(f) {
-			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
-		}
-	}
-	selector := ugb.sqlQuery()
-	if err := selector.Err(); err != nil {
-		return err
-	}
-	rows := &sql.Rows{}
-	query, args := selector.Query()
-	if err := ugb.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
-
-func (ugb *UserGroupBy) sqlQuery() *sql.Selector {
-	selector := ugb.sql.Select()
-	aggregation := make([]string, 0, len(ugb.fns))
-	for _, fn := range ugb.fns {
-		aggregation = append(aggregation, fn(selector))
-	}
-	// If no columns were selected in a custom aggregation function, the default
-	// selection is the fields used for "group-by", and the aggregation functions.
-	if len(selector.SelectedColumns()) == 0 {
-		columns := make([]string, 0, len(ugb.fields)+len(ugb.fns))
-		for _, f := range ugb.fields {
-			columns = append(columns, selector.C(f))
-		}
-		columns = append(columns, aggregation...)
-		selector.Select(columns...)
-	}
-	return selector.GroupBy(selector.Columns(ugb.fields...)...)
-}
-
-// UserSelect is the builder for selecting fields of User entities.
-type UserSelect struct {
-	*UserQuery
-	selector
-	// intermediate query (i.e. traversal path).
-	sql *sql.Selector
-}
-
-// Scan applies the selector query and scans the result into the given value.
-func (us *UserSelect) Scan(ctx context.Context, v any) error {
-	if err := us.prepareQuery(ctx); err != nil {
-		return err
-	}
-	us.sql = us.UserQuery.sqlQuery(ctx)
-	return us.sqlScan(ctx, v)
-}
-
-func (us *UserSelect) sqlScan(ctx context.Context, v any) error {
-	rows := &sql.Rows{}
-	query, args := us.sql.Query()
-	if err := us.driver.Query(ctx, query, args, rows); err != nil {
-		return err
-	}
-	defer rows.Close()
-	return sql.ScanSlice(rows, v)
-}
diff --git a/pkg/storage/default/ent/user_update.go b/pkg/storage/default/ent/user_update.go
deleted file mode 100644
index 203f3c7..0000000
--- a/pkg/storage/default/ent/user_update.go
+++ /dev/null
@@ -1,649 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"entgo.io/ent/dialect/sql"
-	"entgo.io/ent/dialect/sql/sqlgraph"
-	"entgo.io/ent/schema/field"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/apikey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/predicate"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/servicekey"
-	"github.com/loopholelabs/auth/pkg/storage/default/ent/user"
-)
-
-// UserUpdate is the builder for updating User entities.
-type UserUpdate struct {
-	config
-	hooks    []Hook
-	mutation *UserMutation
-}
-
-// Where appends a list predicates to the UserUpdate builder.
-func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate {
-	uu.mutation.Where(ps...)
-	return uu
-}
-
-// SetUsername sets the "username" field.
-func (uu *UserUpdate) SetUsername(s string) *UserUpdate {
-	uu.mutation.SetUsername(s)
-	return uu
-}
-
-// SetNillableUsername sets the "username" field if the given value is not nil.
-func (uu *UserUpdate) SetNillableUsername(s *string) *UserUpdate {
-	if s != nil {
-		uu.SetUsername(*s)
-	}
-	return uu
-}
-
-// AddServicekeyIDs adds the "servicekeys" edge to the ServiceKey entity by IDs.
-func (uu *UserUpdate) AddServicekeyIDs(ids ...int) *UserUpdate {
-	uu.mutation.AddServicekeyIDs(ids...)
-	return uu
-}
-
-// AddServicekeys adds the "servicekeys" edges to the ServiceKey entity.
-func (uu *UserUpdate) AddServicekeys(s ...*ServiceKey) *UserUpdate {
-	ids := make([]int, len(s))
-	for i := range s {
-		ids[i] = s[i].ID
-	}
-	return uu.AddServicekeyIDs(ids...)
-}
-
-// AddApikeyIDs adds the "apikeys" edge to the APIKey entity by IDs.
-func (uu *UserUpdate) AddApikeyIDs(ids ...int) *UserUpdate {
-	uu.mutation.AddApikeyIDs(ids...)
-	return uu
-}
-
-// AddApikeys adds the "apikeys" edges to the APIKey entity.
-func (uu *UserUpdate) AddApikeys(a ...*APIKey) *UserUpdate {
-	ids := make([]int, len(a))
-	for i := range a {
-		ids[i] = a[i].ID
-	}
-	return uu.AddApikeyIDs(ids...)
-}
-
-// Mutation returns the UserMutation object of the builder.
-func (uu *UserUpdate) Mutation() *UserMutation {
-	return uu.mutation
-}
-
-// ClearServicekeys clears all "servicekeys" edges to the ServiceKey entity.
-func (uu *UserUpdate) ClearServicekeys() *UserUpdate {
-	uu.mutation.ClearServicekeys()
-	return uu
-}
-
-// RemoveServicekeyIDs removes the "servicekeys" edge to ServiceKey entities by IDs.
-func (uu *UserUpdate) RemoveServicekeyIDs(ids ...int) *UserUpdate {
-	uu.mutation.RemoveServicekeyIDs(ids...)
-	return uu
-}
-
-// RemoveServicekeys removes "servicekeys" edges to ServiceKey entities.
-func (uu *UserUpdate) RemoveServicekeys(s ...*ServiceKey) *UserUpdate {
-	ids := make([]int, len(s))
-	for i := range s {
-		ids[i] = s[i].ID
-	}
-	return uu.RemoveServicekeyIDs(ids...)
-}
-
-// ClearApikeys clears all "apikeys" edges to the APIKey entity.
-func (uu *UserUpdate) ClearApikeys() *UserUpdate {
-	uu.mutation.ClearApikeys()
-	return uu
-}
-
-// RemoveApikeyIDs removes the "apikeys" edge to APIKey entities by IDs.
-func (uu *UserUpdate) RemoveApikeyIDs(ids ...int) *UserUpdate {
-	uu.mutation.RemoveApikeyIDs(ids...)
-	return uu
-}
-
-// RemoveApikeys removes "apikeys" edges to APIKey entities.
-func (uu *UserUpdate) RemoveApikeys(a ...*APIKey) *UserUpdate {
-	ids := make([]int, len(a))
-	for i := range a {
-		ids[i] = a[i].ID
-	}
-	return uu.RemoveApikeyIDs(ids...)
-}
-
-// Save executes the query and returns the number of nodes affected by the update operation.
-func (uu *UserUpdate) Save(ctx context.Context) (int, error) {
-	var (
-		err      error
-		affected int
-	)
-	if len(uu.hooks) == 0 {
-		affected, err = uu.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*UserMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			uu.mutation = mutation
-			affected, err = uu.sqlSave(ctx)
-			mutation.done = true
-			return affected, err
-		})
-		for i := len(uu.hooks) - 1; i >= 0; i-- {
-			if uu.hooks[i] == nil {
-				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = uu.hooks[i](mut)
-		}
-		if _, err := mut.Mutate(ctx, uu.mutation); err != nil {
-			return 0, err
-		}
-	}
-	return affected, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (uu *UserUpdate) SaveX(ctx context.Context) int {
-	affected, err := uu.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return affected
-}
-
-// Exec executes the query.
-func (uu *UserUpdate) Exec(ctx context.Context) error {
-	_, err := uu.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (uu *UserUpdate) ExecX(ctx context.Context) {
-	if err := uu.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   user.Table,
-			Columns: user.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: user.FieldID,
-			},
-		},
-	}
-	if ps := uu.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if value, ok := uu.mutation.Username(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: user.FieldUsername,
-		})
-	}
-	if uu.mutation.ServicekeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uu.mutation.RemovedServicekeysIDs(); len(nodes) > 0 && !uu.mutation.ServicekeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uu.mutation.ServicekeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	if uu.mutation.ApikeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uu.mutation.RemovedApikeysIDs(); len(nodes) > 0 && !uu.mutation.ApikeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uu.mutation.ApikeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{user.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return 0, err
-	}
-	return n, nil
-}
-
-// UserUpdateOne is the builder for updating a single User entity.
-type UserUpdateOne struct {
-	config
-	fields   []string
-	hooks    []Hook
-	mutation *UserMutation
-}
-
-// SetUsername sets the "username" field.
-func (uuo *UserUpdateOne) SetUsername(s string) *UserUpdateOne {
-	uuo.mutation.SetUsername(s)
-	return uuo
-}
-
-// SetNillableUsername sets the "username" field if the given value is not nil.
-func (uuo *UserUpdateOne) SetNillableUsername(s *string) *UserUpdateOne {
-	if s != nil {
-		uuo.SetUsername(*s)
-	}
-	return uuo
-}
-
-// AddServicekeyIDs adds the "servicekeys" edge to the ServiceKey entity by IDs.
-func (uuo *UserUpdateOne) AddServicekeyIDs(ids ...int) *UserUpdateOne {
-	uuo.mutation.AddServicekeyIDs(ids...)
-	return uuo
-}
-
-// AddServicekeys adds the "servicekeys" edges to the ServiceKey entity.
-func (uuo *UserUpdateOne) AddServicekeys(s ...*ServiceKey) *UserUpdateOne {
-	ids := make([]int, len(s))
-	for i := range s {
-		ids[i] = s[i].ID
-	}
-	return uuo.AddServicekeyIDs(ids...)
-}
-
-// AddApikeyIDs adds the "apikeys" edge to the APIKey entity by IDs.
-func (uuo *UserUpdateOne) AddApikeyIDs(ids ...int) *UserUpdateOne {
-	uuo.mutation.AddApikeyIDs(ids...)
-	return uuo
-}
-
-// AddApikeys adds the "apikeys" edges to the APIKey entity.
-func (uuo *UserUpdateOne) AddApikeys(a ...*APIKey) *UserUpdateOne {
-	ids := make([]int, len(a))
-	for i := range a {
-		ids[i] = a[i].ID
-	}
-	return uuo.AddApikeyIDs(ids...)
-}
-
-// Mutation returns the UserMutation object of the builder.
-func (uuo *UserUpdateOne) Mutation() *UserMutation {
-	return uuo.mutation
-}
-
-// ClearServicekeys clears all "servicekeys" edges to the ServiceKey entity.
-func (uuo *UserUpdateOne) ClearServicekeys() *UserUpdateOne {
-	uuo.mutation.ClearServicekeys()
-	return uuo
-}
-
-// RemoveServicekeyIDs removes the "servicekeys" edge to ServiceKey entities by IDs.
-func (uuo *UserUpdateOne) RemoveServicekeyIDs(ids ...int) *UserUpdateOne {
-	uuo.mutation.RemoveServicekeyIDs(ids...)
-	return uuo
-}
-
-// RemoveServicekeys removes "servicekeys" edges to ServiceKey entities.
-func (uuo *UserUpdateOne) RemoveServicekeys(s ...*ServiceKey) *UserUpdateOne {
-	ids := make([]int, len(s))
-	for i := range s {
-		ids[i] = s[i].ID
-	}
-	return uuo.RemoveServicekeyIDs(ids...)
-}
-
-// ClearApikeys clears all "apikeys" edges to the APIKey entity.
-func (uuo *UserUpdateOne) ClearApikeys() *UserUpdateOne {
-	uuo.mutation.ClearApikeys()
-	return uuo
-}
-
-// RemoveApikeyIDs removes the "apikeys" edge to APIKey entities by IDs.
-func (uuo *UserUpdateOne) RemoveApikeyIDs(ids ...int) *UserUpdateOne {
-	uuo.mutation.RemoveApikeyIDs(ids...)
-	return uuo
-}
-
-// RemoveApikeys removes "apikeys" edges to APIKey entities.
-func (uuo *UserUpdateOne) RemoveApikeys(a ...*APIKey) *UserUpdateOne {
-	ids := make([]int, len(a))
-	for i := range a {
-		ids[i] = a[i].ID
-	}
-	return uuo.RemoveApikeyIDs(ids...)
-}
-
-// Select allows selecting one or more fields (columns) of the returned entity.
-// The default is selecting all fields defined in the entity schema.
-func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne {
-	uuo.fields = append([]string{field}, fields...)
-	return uuo
-}
-
-// Save executes the query and returns the updated User entity.
-func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) {
-	var (
-		err  error
-		node *User
-	)
-	if len(uuo.hooks) == 0 {
-		node, err = uuo.sqlSave(ctx)
-	} else {
-		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
-			mutation, ok := m.(*UserMutation)
-			if !ok {
-				return nil, fmt.Errorf("unexpected mutation type %T", m)
-			}
-			uuo.mutation = mutation
-			node, err = uuo.sqlSave(ctx)
-			mutation.done = true
-			return node, err
-		})
-		for i := len(uuo.hooks) - 1; i >= 0; i-- {
-			if uuo.hooks[i] == nil {
-				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
-			}
-			mut = uuo.hooks[i](mut)
-		}
-		v, err := mut.Mutate(ctx, uuo.mutation)
-		if err != nil {
-			return nil, err
-		}
-		nv, ok := v.(*User)
-		if !ok {
-			return nil, fmt.Errorf("unexpected node type %T returned from UserMutation", v)
-		}
-		node = nv
-	}
-	return node, err
-}
-
-// SaveX is like Save, but panics if an error occurs.
-func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User {
-	node, err := uuo.Save(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-// Exec executes the query on the entity.
-func (uuo *UserUpdateOne) Exec(ctx context.Context) error {
-	_, err := uuo.Save(ctx)
-	return err
-}
-
-// ExecX is like Exec, but panics if an error occurs.
-func (uuo *UserUpdateOne) ExecX(ctx context.Context) {
-	if err := uuo.Exec(ctx); err != nil {
-		panic(err)
-	}
-}
-
-func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) {
-	_spec := &sqlgraph.UpdateSpec{
-		Node: &sqlgraph.NodeSpec{
-			Table:   user.Table,
-			Columns: user.Columns,
-			ID: &sqlgraph.FieldSpec{
-				Type:   field.TypeInt,
-				Column: user.FieldID,
-			},
-		},
-	}
-	id, ok := uuo.mutation.ID()
-	if !ok {
-		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)}
-	}
-	_spec.Node.ID.Value = id
-	if fields := uuo.fields; len(fields) > 0 {
-		_spec.Node.Columns = make([]string, 0, len(fields))
-		_spec.Node.Columns = append(_spec.Node.Columns, user.FieldID)
-		for _, f := range fields {
-			if !user.ValidColumn(f) {
-				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
-			}
-			if f != user.FieldID {
-				_spec.Node.Columns = append(_spec.Node.Columns, f)
-			}
-		}
-	}
-	if ps := uuo.mutation.predicates; len(ps) > 0 {
-		_spec.Predicate = func(selector *sql.Selector) {
-			for i := range ps {
-				ps[i](selector)
-			}
-		}
-	}
-	if value, ok := uuo.mutation.Username(); ok {
-		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
-			Type:   field.TypeString,
-			Value:  value,
-			Column: user.FieldUsername,
-		})
-	}
-	if uuo.mutation.ServicekeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uuo.mutation.RemovedServicekeysIDs(); len(nodes) > 0 && !uuo.mutation.ServicekeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uuo.mutation.ServicekeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ServicekeysTable,
-			Columns: []string{user.ServicekeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: servicekey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	if uuo.mutation.ApikeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uuo.mutation.RemovedApikeysIDs(); len(nodes) > 0 && !uuo.mutation.ApikeysCleared() {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
-	}
-	if nodes := uuo.mutation.ApikeysIDs(); len(nodes) > 0 {
-		edge := &sqlgraph.EdgeSpec{
-			Rel:     sqlgraph.O2M,
-			Inverse: false,
-			Table:   user.ApikeysTable,
-			Columns: []string{user.ApikeysColumn},
-			Bidi:    false,
-			Target: &sqlgraph.EdgeTarget{
-				IDSpec: &sqlgraph.FieldSpec{
-					Type:   field.TypeInt,
-					Column: apikey.FieldID,
-				},
-			},
-		}
-		for _, k := range nodes {
-			edge.Target.Nodes = append(edge.Target.Nodes, k)
-		}
-		_spec.Edges.Add = append(_spec.Edges.Add, edge)
-	}
-	_node = &User{config: uuo.config}
-	_spec.Assign = _node.assignValues
-	_spec.ScanValues = _node.scanValues
-	if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil {
-		if _, ok := err.(*sqlgraph.NotFoundError); ok {
-			err = &NotFoundError{user.Label}
-		} else if sqlgraph.IsConstraintError(err) {
-			err = &ConstraintError{msg: err.Error(), wrap: err}
-		}
-		return nil, err
-	}
-	return _node, nil
-}
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index 3ec76c8..c54779c 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -1,19 +1,200 @@
+/*
+	Copyright 2023 Loophole Labs
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		   http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
 package storage
 
 import (
-	dexStorage "github.com/dexidp/dex/storage"
-	"github.com/loopholelabs/auth/pkg/token"
+	"context"
+	"errors"
+	"github.com/loopholelabs/auth/pkg/apikey"
+	"github.com/loopholelabs/auth/pkg/claims"
+	"github.com/loopholelabs/auth/pkg/servicekey"
+	"github.com/loopholelabs/auth/pkg/session"
+)
+
+var (
+	ErrNotFound = errors.New("key not found")
 )
 
-type ServiceKeyValid func(key *token.ServiceKey) error
-type ServiceKeyUpdate func(key *token.ServiceKey)
+// SessionEvent is the event that is triggered when a session is created, updated, or deleted
+type SessionEvent struct {
+	// Session ID is the Session's unique identifier
+	SessionID string
+
+	// Deleted indicates whether the session was deleted
+	Deleted bool
+}
+
+// SecretKeyEvent is the event that is emitted when a secret key is rotated
+type SecretKeyEvent struct {
+	// SecretKey is the new secret key
+	SecretKey []byte
+}
+
+// RegistrationEvent is the event that is emitted when registration is enabled or disabled
+type RegistrationEvent struct {
+	// Enabled indicates whether registration is enabled
+	Enabled bool
+}
+
+// APIKeyEvent is the event that is emitted when an API key is created, updated, or deleted
+type APIKeyEvent struct {
+	// APIKeyID is the API Key Identifier
+	APIKeyID string
+
+	// Deleted indicates whether the API Key was deleted
+	Deleted bool
+
+	// APIKey is the API Key that was created or updated.
+	// This will be nil if the API Key was deleted.
+	APIKey *apikey.APIKey
+}
+
+// ServiceKeySessionEvent is the event that is triggered when a service key session is created, updated, or deleted
+type ServiceKeySessionEvent struct {
+	// ServiceKeySessionID is the Service Key Session's unique identifier
+	ServiceKeySessionID string
+
+	// Deleted indicates whether the session was deleted
+	Deleted bool
 
+	// ServiceKeySession is the Service Key Session that was created or updated.
+	// This will be nil if the Service Key Session was deleted.
+	ServiceKeySession *servicekey.Session
+}
+
+// Storage is the interface that must be implemented by the application
+// using this auth library for authentication and session handling.
 type Storage interface {
-	dexStorage.Storage
+	// UserExists verifies whether the given userID exists. If there is an error
+	// while checking if the user exists, an error is returned, otherwise
+	// the boolean indicates whether the user exists. An error should not be
+	// returned if the user does not exist.
+	UserExists(ctx context.Context, userID string) (bool, error)
+	// UserOrganizationExists verifies whether the given userID is part of the
+	// given organization. If there is an error while checking if the user
+	// exists, an error is returned, otherwise the boolean indicates whether
+	// the user exists. An error should not be returned if the user does not
+	// exist or if the user is not part of the organization.
+	UserOrganizationExists(ctx context.Context, userID string, organization string) (bool, error)
+	// NewUser creates a new user with the given claims. If the user already
+	// exists, an error is returned. If the user does not exist, the user is
+	// created and the claims are set. If there is an error while creating the
+	// user, an error is returned.
+	NewUser(ctx context.Context, claims *claims.Claims) error
+
+	// SubscribeToRegistration subscribes to registration events. When registration
+	// is enabled or disabled, the event is emitted on the given channel. Cancelling
+	// the provided context will unsubscribe from registration events.
+	SubscribeToRegistration(ctx context.Context) (<-chan *RegistrationEvent, error)
+	// GetRegistration returns whether registration is enabled. If there is an error
+	// while getting the registration status, an error is returned. If there is no
+	// error, the boolean indicates whether registration is enabled.
+	GetRegistration(ctx context.Context) (bool, error)
+	// SetRegistration sets whether registration is enabled. If there is an error
+	// while setting the registration status, an error is returned.
+	// If there is no error, the registration status was set successfully.
+	SetRegistration(ctx context.Context, enabled bool) error
+
+	// SubscribeToSecretKey subscribes to secret key events. When the secret key is
+	// rotated, the event is emitted on the given channel. Cancelling the provided
+	// context will unsubscribe from secret key events.
+	SubscribeToSecretKey(ctx context.Context) (<-chan *SecretKeyEvent, error)
+	// GetSecretKey returns the current secret key. If there is an error while
+	// getting the secret key, an error is returned.
+	// If there is no error, the secret key is returned.
+	// The secret key should be exactly 32 bytes long.
+	GetSecretKey(ctx context.Context) ([]byte, error)
+	// SetSecretKey sets the current secret key. If there is an error while
+	// setting the secret key, an error is returned.
+	// If there is no error, the secret key was set successfully.
+	// The secret key should be exactly 32 bytes long.
+	SetSecretKey(ctx context.Context, secretKey []byte) error
+
+	// SubscribeToSessionIDs subscribes to session events. When a session is created,
+	// updated, or deleted, the event is emitted on the given channel. Cancelling
+	// the provided context will unsubscribe from session events.
+	SubscribeToSessionIDs(ctx context.Context) (<-chan *SessionEvent, error)
+	// ListSessionIDs returns a list of all session IDs. If there is an error while
+	// listing the session IDs, an error is returned.
+	// If there is no error, the list of session IDs is returned.
+	ListSessionIDs(ctx context.Context) ([]string, error)
+	// SessionIDExists verifies whether the given sessionID exists. If there is an error
+	// while checking if the sessionID exists, an error is returned, otherwise
+	// the boolean indicates whether the sessionID exists. An error should not be
+	// returned if the sessionID does not exist.
+	SessionIDExists(ctx context.Context, sessionID string) (bool, error)
+
+	// SetSession sets the session for the given session.ID. If there is an error
+	// while setting the session, an error is returned. If the organization
+	// associated with the session is not empty, the session is associated with
+	// the organization. If the organization is empty, the session is associated
+	// with the user. If the session is associated with an organization and that
+	// organization is deleted, the session should also be deleted.
+	SetSession(ctx context.Context, session *session.Session) error
+	// DeleteSession deletes the session for the given sessionID. If
+	// there is an error while deleting the session, an error is returned.
+	// An error is returned if the session does not exist.
+	DeleteSession(ctx context.Context, sessionID string) error
 
-	UserExists(id string) (bool, error)
+	// SubscribeToAPIKeys subscribes to API key events. When an API key is created,
+	// updated, or deleted, the event is emitted on the given channel. Cancelling
+	// the provided context will unsubscribe from API key events.
+	SubscribeToAPIKeys(ctx context.Context) (<-chan *APIKeyEvent, error)
+	// ListAPIKeys returns a list of all API keys. If there is an error while
+	// listing the API keys, an error is returned. If there is no error, the list
+	// of API keys is returned.
+	ListAPIKeys(ctx context.Context) ([]*apikey.APIKey, error)
+	// GetAPIKey returns the API key for the given API key ID. If
+	// there is an error while getting the API key, an error is returned.
+	// If there is no error, the API key is returned.
+	GetAPIKey(ctx context.Context, id string) (*apikey.APIKey, error)
 
-	GetAPIKey(id string) (*token.APIKey, error)
+	// SubscribeToServiceKeySessions subscribes to service key session events.
+	// When a service key session is created, updated, or deleted, the event is
+	// emitted on the given channel. Cancelling the provided context will unsubscribe from
+	// service key session events.
+	SubscribeToServiceKeySessions(ctx context.Context) (<-chan *ServiceKeySessionEvent, error)
+	// ListServiceKeySessions returns a list of all service key session IDs. If there is an error while
+	// listing the service key session IDs, an error is returned. If there is no error, the list
+	// of service key session IDs is returned.
+	ListServiceKeySessions(ctx context.Context) ([]*servicekey.Session, error)
+	// SetServiceKeySession sets the service key session for the given servicekeySession.ID. If
+	// there is an error while setting the service key session, an error is returned.
+	// If the organization associated with the service key session is not empty, the service key session is associated with
+	// the organization. If the organization is empty, the service key session is associated
+	// with the user. If the service key session is associated with an organization and that
+	// organization is deleted, the service key session and the service key itself should be deleted.
+	// If the service key associated with the service key session is deleted, the service key session should be deleted.
+	SetServiceKeySession(ctx context.Context, servicekeySession *servicekey.Session) error
+	// GetServiceKeySession returns the service key session for the given servicekeySessionID. If
+	// there is an error while getting the service key session, an error is returned.
+	// If there is no error, the service key session is returned.
+	GetServiceKeySession(ctx context.Context, servicekeySessionID string) (*servicekey.Session, error)
+	// DeleteServiceKeySession deletes the service key session for the given servicekeySessionID. If
+	// there is an error while deleting the service key session, an error is returned.
+	// An error is returned if the service key session does not exist.
+	DeleteServiceKeySession(ctx context.Context, servicekeySessionID string) error
 
-	GetServiceKey(id string, valid ServiceKeyValid, update ServiceKeyUpdate) (*token.ServiceKey, error)
+	// GetServiceKey returns the service key for the given service key ID. If there is an error
+	// while getting the service key, an error is returned. If there is no error, the service key
+	// is returned.
+	GetServiceKey(ctx context.Context, servicekeyID string) (*servicekey.ServiceKey, error)
+	// IncrementServiceKeyNumUsed increments the number of times the service key has been used.
+	// If there is an error while incrementing the number of times the service key has been used,
+	// an error is returned. If the service key does not exist, an error is returned.
+	IncrementServiceKeyNumUsed(ctx context.Context, servicekeyID string) error
 }
diff --git a/pkg/token/api.go b/pkg/token/api.go
deleted file mode 100644
index f407a3a..0000000
--- a/pkg/token/api.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"github.com/google/uuid"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"github.com/loopholelabs/auth/pkg/utils"
-	"gopkg.in/square/go-jose.v2"
-	"time"
-)
-
-// APIKey should be of the form "A.<ID>.<SECRET>" - when the secret is stored in the database, it should be hashed
-// to a byte slice.
-//
-// The 'A' prefix is used to differentiate between API keys and Service Keys.
-type APIKey struct {
-	Created int64
-	Name    string
-	ID      string
-	Secret  []byte
-	User    string
-}
-
-func NewAPIKey(name string, user string) (*APIKey, string, error) {
-	id := uuid.New().String()
-	secret := uuid.New().String()
-	encoded := Encode("A", id, secret)
-	hashedSecret, err := Hash(secret)
-	if err != nil {
-		return nil, "", err
-	}
-	return &APIKey{
-		Created: utils.TimeToInt64(time.Now()),
-		Name:    name,
-		ID:      id,
-		Secret:  hashedSecret,
-		User:    user,
-	}, encoded, nil
-}
-
-type APIClaims struct {
-	ID   string `json:"id"`
-	Name string `json:"name"`
-}
-
-func (c *APIClaims) Valid() bool {
-	return len(c.ID) == 36
-}
-
-type APIToken struct {
-	BaseClaims
-	APIClaims
-}
-
-func NewAPIToken(issuer string, apiKey *APIKey, audience Audience) *APIToken {
-	return &APIToken{
-		BaseClaims: BaseClaims{
-			Issuer:   issuer,
-			Subject:  apiKey.User,
-			Audience: audience,
-			Expiry:   Time(time.Now().Add(time.Minute * 5)),
-			IssuedAt: Time(time.Now()),
-			Kind:     tokenKind.APITokenKind,
-		},
-		APIClaims: APIClaims{
-			ID:   apiKey.ID,
-			Name: apiKey.Name,
-		},
-	}
-}
-
-func (t *APIToken) Payload() ([]byte, error) {
-	return json.Marshal(t)
-}
-
-func (t *APIToken) Sign(keySet *keyset.Private, alg jose.SignatureAlgorithm) (string, error) {
-	payload, err := t.Payload()
-	if err != nil {
-		return "", err
-	}
-
-	return keySet.Sign(alg, payload)
-}
-
-func (t *APIToken) Populate(jws string, keySet *keyset.Public) error {
-	sig, err := jose.ParseSigned(jws)
-	if err != nil {
-		return err
-	}
-
-	payload, err := keySet.Verify(sig)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal(payload, t)
-	if err != nil {
-		return err
-	}
-
-	if t.Kind != tokenKind.APITokenKind {
-		return InvalidTokenKindError
-	}
-
-	return nil
-}
diff --git a/pkg/token/identity/identity.go b/pkg/token/identity/identity.go
deleted file mode 100644
index f1840ca..0000000
--- a/pkg/token/identity/identity.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package identity
-
-import "github.com/loopholelabs/auth/pkg/token"
-
-type FederatedIDClaims struct {
-	ConnectorID string `json:"connector_id,omitempty"`
-	UserID      string `json:"user_id,omitempty"`
-}
-
-type TokenResponse struct {
-	AccessToken  string `json:"access_token"`
-	TokenType    string `json:"token_type"`
-	ExpiresIn    int    `json:"expires_in"`
-	RefreshToken string `json:"refresh_token,omitempty"`
-	IDToken      string `json:"id_token"`
-}
-
-type IDClaims struct {
-	AuthorizingParty  string             `json:"azp,omitempty"`
-	Nonce             string             `json:"nonce,omitempty"`
-	AccessTokenHash   string             `json:"at_hash,omitempty"`
-	CodeHash          string             `json:"c_hash,omitempty"`
-	Email             string             `json:"email,omitempty"`
-	EmailVerified     bool               `json:"email_verified,omitempty"`
-	Groups            []string           `json:"groups,omitempty"`
-	Name              string             `json:"name,omitempty"`
-	PreferredUsername string             `json:"preferred_username,omitempty"`
-	FederatedIDClaims *FederatedIDClaims `json:"federated_claims,omitempty"`
-}
-
-type IDToken struct {
-	token.BaseClaims
-	IDClaims
-}
diff --git a/pkg/token/refresh.go b/pkg/token/refresh.go
deleted file mode 100644
index d5c0fb0..0000000
--- a/pkg/token/refresh.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"gopkg.in/square/go-jose.v2"
-	"time"
-)
-
-type RefreshClaims struct {
-	ID  string         `json:"id"`
-	For tokenKind.Kind `json:"for"`
-}
-
-func (c *RefreshClaims) Valid() bool {
-	return len(c.ID) == 36
-}
-
-type RefreshToken struct {
-	BaseClaims
-	RefreshClaims
-}
-
-func NewRefreshTokenForAPIKey(issuer string, apiKey *APIKey, audience Audience) *RefreshToken {
-	return &RefreshToken{
-		BaseClaims: BaseClaims{
-			Issuer:   issuer,
-			Subject:  apiKey.User,
-			Audience: audience,
-			Expiry:   Time(time.Now().Add(time.Hour * 24 * 7)),
-			IssuedAt: Time(time.Now()),
-			Kind:     tokenKind.RefreshTokenKind,
-		},
-		RefreshClaims: RefreshClaims{
-			ID:  apiKey.ID,
-			For: tokenKind.APITokenKind,
-		},
-	}
-}
-
-func NewRefreshTokenForServiceKey(issuer string, serviceKey *ServiceKey, audience Audience) *RefreshToken {
-	return &RefreshToken{
-		BaseClaims: BaseClaims{
-			Issuer:   issuer,
-			Subject:  serviceKey.User,
-			Audience: audience,
-			Expiry:   Time(time.Now().Add(time.Hour * 24 * 7)),
-			IssuedAt: Time(time.Now()),
-			Kind:     tokenKind.RefreshTokenKind,
-		},
-		RefreshClaims: RefreshClaims{
-			ID:  serviceKey.ID,
-			For: tokenKind.ServiceTokenKind,
-		},
-	}
-}
-
-func (t *RefreshToken) Payload() ([]byte, error) {
-	return json.Marshal(t)
-}
-
-func (t *RefreshToken) Sign(keySet *keyset.Private, alg jose.SignatureAlgorithm) (string, error) {
-	payload, err := t.Payload()
-	if err != nil {
-		return "", err
-	}
-
-	return keySet.Sign(alg, payload)
-}
-
-func (t *RefreshToken) Populate(jws string, keySet *keyset.Public) error {
-	sig, err := jose.ParseSigned(jws)
-	if err != nil {
-		return err
-	}
-
-	payload, err := keySet.Verify(sig)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal(payload, t)
-	if err != nil {
-		return err
-	}
-
-	if t.Kind != tokenKind.RefreshTokenKind {
-		return InvalidTokenKindError
-	}
-
-	return nil
-}
diff --git a/pkg/token/service.go b/pkg/token/service.go
deleted file mode 100644
index ea4afb7..0000000
--- a/pkg/token/service.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"github.com/google/uuid"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"github.com/loopholelabs/auth/pkg/utils"
-	"gopkg.in/square/go-jose.v2"
-	"time"
-)
-
-// ServiceKey should be of the form "S.<ID>.<SECRET>" - when the secret is stored in the database, it should be hashed
-// to a byte slice.
-//
-// The 'S' prefix is used to differentiate between API keys and Service Keys.
-type ServiceKey struct {
-	Created  int64
-	Name     string
-	ID       string
-	Secret   []byte
-	User     string
-	Resource string
-	NumUsed  int64
-	MaxUses  int64
-	Expires  int64
-}
-
-func NewServiceKey(name string, user string, resource string, maxUses int64, expires int64) (*ServiceKey, string, error) {
-	id := uuid.New().String()
-	secret := uuid.New().String()
-	encoded := Encode("S", id, secret)
-	hashedSecret, err := Hash(secret)
-	if err != nil {
-		return nil, "", err
-	}
-	return &ServiceKey{
-		Created:  utils.TimeToInt64(time.Now()),
-		Name:     name,
-		ID:       id,
-		Secret:   hashedSecret,
-		User:     user,
-		Resource: resource,
-		MaxUses:  maxUses,
-		Expires:  expires,
-	}, encoded, nil
-}
-
-type ServiceClaims struct {
-	ID       string `json:"id"`
-	Name     string `json:"name"`
-	Resource string `json:"resource"`
-}
-
-func (c *ServiceClaims) Valid() bool {
-	return len(c.ID) == 36
-}
-
-type ServiceToken struct {
-	BaseClaims
-	ServiceClaims
-}
-
-func NewServiceToken(issuer string, serviceKey *ServiceKey, audience Audience) *ServiceToken {
-	return &ServiceToken{
-		BaseClaims: BaseClaims{
-			Issuer:   issuer,
-			Subject:  serviceKey.User,
-			Audience: audience,
-			Expiry:   Time(time.Now().Add(time.Minute * 5)),
-			IssuedAt: Time(time.Now()),
-			Kind:     tokenKind.ServiceTokenKind,
-		},
-		ServiceClaims: ServiceClaims{
-			ID:       serviceKey.ID,
-			Name:     serviceKey.Name,
-			Resource: serviceKey.Resource,
-		},
-	}
-}
-
-func (t *ServiceToken) Payload() ([]byte, error) {
-	return json.Marshal(t)
-}
-
-func (t *ServiceToken) Sign(keySet *keyset.Private, alg jose.SignatureAlgorithm) (string, error) {
-	payload, err := t.Payload()
-	if err != nil {
-		return "", err
-	}
-
-	return keySet.Sign(alg, payload)
-}
-
-func (t *ServiceToken) Populate(jws string, keySet *keyset.Public) error {
-	sig, err := jose.ParseSigned(jws)
-	if err != nil {
-		return err
-	}
-
-	payload, err := keySet.Verify(sig)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal(payload, t)
-	if err != nil {
-		return err
-	}
-
-	if t.Kind != tokenKind.ServiceTokenKind {
-		return InvalidTokenKindError
-	}
-
-	return nil
-}
diff --git a/pkg/token/token.go b/pkg/token/token.go
deleted file mode 100644
index 40fd206..0000000
--- a/pkg/token/token.go
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/loopholelabs/auth/pkg/keyset"
-	"github.com/loopholelabs/auth/pkg/token/tokenKind"
-	"golang.org/x/crypto/bcrypt"
-	"gopkg.in/square/go-jose.v2"
-	"strings"
-	"time"
-)
-
-var (
-	InvalidTokenKindError = errors.New("invalid token kind")
-	MalformedTokenError   = errors.New("encoded token is malformed")
-)
-
-var (
-	Separator = "."
-)
-
-func Hash(secret string) ([]byte, error) {
-	return bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
-}
-
-func Encode(kind string, identifier string, secret string) string {
-	return fmt.Sprintf("%s%s%s%s%s", kind, Separator, identifier, Separator, secret)
-}
-
-func Decode(encoded string) (string, string, string, error) {
-	separated := strings.Split(encoded, Separator)
-	if len(separated) != 3 {
-		return "", "", "", MalformedTokenError
-	}
-
-	return separated[0], separated[1], separated[2], nil
-}
-
-func Verify(secret string, hash []byte) bool {
-	return bcrypt.CompareHashAndPassword(hash, []byte(secret)) == nil
-}
-
-type BaseClaims struct {
-	Issuer   string         `json:"iss"`
-	Subject  string         `json:"sub"`
-	Audience Audience       `json:"aud"`
-	Expiry   Time           `json:"exp"`
-	IssuedAt Time           `json:"iat"`
-	Kind     tokenKind.Kind `json:"kind"`
-}
-
-type OAuthTime struct {
-	Expiry   int64 `json:"exp"`
-	IssuedAt int64 `json:"iat"`
-}
-
-type PartialToken BaseClaims
-
-func PartialPopulate(keySet keyset.Verifier, token string) (*PartialToken, []byte, error) {
-	sig, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	payload, err := keySet.Verify(sig)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	partialToken := new(PartialToken)
-	err = json.Unmarshal(payload, partialToken)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	switch partialToken.Kind {
-	case tokenKind.OAuthKind:
-		oauthTime := new(OAuthTime)
-		err = json.Unmarshal(payload, oauthTime)
-		if err != nil {
-			return nil, nil, err
-		}
-		partialToken.Expiry = Time(time.Unix(oauthTime.Expiry, 0))
-		partialToken.IssuedAt = Time(time.Unix(oauthTime.IssuedAt, 0))
-		return partialToken, payload, nil
-	case tokenKind.APITokenKind, tokenKind.ServiceTokenKind:
-		return partialToken, payload, nil
-	}
-
-	return nil, nil, InvalidTokenKindError
-}
-
-func UnsafePartialPopulate(token string) (*PartialToken, []byte, error) {
-	sig, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	payload := sig.UnsafePayloadWithoutVerification()
-	partialToken := new(PartialToken)
-	return partialToken, payload, json.Unmarshal(payload, partialToken)
-}
-
-func (p *PartialToken) ValidExpiry() bool {
-	return time.Time(p.Expiry).After(time.Now())
-}
-
-func (p *PartialToken) ValidKind(kind tokenKind.Kind) bool {
-	return p.Kind == kind
-}
-
-func (p *PartialToken) ValidIssuer(issuer string) bool {
-	return p.Issuer == issuer
-}
-
-func (p *PartialToken) ValidAudience(audience string) bool {
-	for _, a := range p.Audience {
-		if a == audience {
-			return true
-		}
-	}
-	return false
-}
-
-func (p *PartialToken) ValidSubject(subject string) bool {
-	return p.Subject == subject
-}
diff --git a/pkg/token/types.go b/pkg/token/types.go
deleted file mode 100644
index 5ccf8d9..0000000
--- a/pkg/token/types.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"github.com/loopholelabs/auth/pkg/utils"
-	"time"
-)
-
-var _ json.Marshaler = (*Time)(nil)
-var _ json.Marshaler = Time{}
-var _ json.Unmarshaler = (*Time)(nil)
-
-var _ json.Marshaler = (*Audience)(nil)
-var _ json.Marshaler = Audience{}
-var _ json.Unmarshaler = (*Audience)(nil)
-
-type Audience []string
-
-func (a Audience) MarshalJSON() ([]byte, error) {
-	if len(a) == 1 {
-		return json.Marshal((a)[0])
-	}
-	return json.Marshal([]string(a))
-}
-
-func (a *Audience) UnmarshalJSON(bytes []byte) error {
-	var s string
-	if json.Unmarshal(bytes, &s) == nil {
-		*a = Audience{s}
-		return nil
-	}
-	var auds []string
-	if err := json.Unmarshal(bytes, &auds); err != nil {
-		return err
-	}
-	*a = auds
-	return nil
-}
-
-type Time time.Time
-
-func (t Time) MarshalJSON() ([]byte, error) {
-	return json.Marshal(utils.TimeToInt64(time.Time(t)))
-}
-
-func (t *Time) UnmarshalJSON(b []byte) error {
-	var n json.Number
-	if err := json.Unmarshal(b, &n); err != nil {
-		return err
-	}
-	var unix int64
-
-	if t, err := n.Int64(); err == nil {
-		unix = t
-	} else {
-		f, err := n.Float64()
-		if err != nil {
-			return err
-		}
-		unix = int64(f)
-	}
-	*t = Time(utils.Int64ToTime(unix))
-	return nil
-}
diff --git a/pkg/token/types_test.go b/pkg/token/types_test.go
deleted file mode 100644
index f01b158..0000000
--- a/pkg/token/types_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-	Copyright 2022 Loophole Labs
-
-	Licensed under the Apache License, Version 2.0 (the "License");
-	you may not use this file except in compliance with the License.
-	You may obtain a copy of the License at
-
-		   http://www.apache.org/licenses/LICENSE-2.0
-
-	Unless required by applicable law or agreed to in writing, software
-	distributed under the License is distributed on an "AS IS" BASIS,
-	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	See the License for the specific language governing permissions and
-	limitations under the License.
-*/
-
-package token
-
-import (
-	"encoding/json"
-	"github.com/stretchr/testify/assert"
-	"testing"
-	"time"
-)
-
-func TestAudience(t *testing.T) {
-	a := Audience{"a", "b"}
-	sa := Audience{"single"}
-
-	b, err := json.Marshal(a)
-	assert.NoError(t, err)
-
-	bt, err := a.MarshalJSON()
-	assert.NoError(t, err)
-	assert.Equal(t, b, bt)
-
-	sb, err := json.Marshal(sa)
-	assert.NoError(t, err)
-
-	sbt, err := sa.MarshalJSON()
-	assert.NoError(t, err)
-	assert.Equal(t, sb, sbt)
-
-	var ta Audience
-	err = json.Unmarshal(b, &ta)
-	assert.NoError(t, err)
-	assert.Equal(t, a, ta)
-
-	var tsa Audience
-	err = json.Unmarshal(sb, &tsa)
-	assert.NoError(t, err)
-	assert.Equal(t, sa, tsa)
-}
-
-func TestTime(t *testing.T) {
-	ti := Time(time.UnixMilli(time.Now().UnixMilli()))
-	bti, err := ti.MarshalJSON()
-	assert.NoError(t, err)
-
-	var tt Time
-	err = tt.UnmarshalJSON(bti)
-	assert.NoError(t, err)
-	assert.True(t, time.Time(tt).Equal(time.Time(ti)))
-
-	bti, err = json.Marshal(ti)
-	assert.NoError(t, err)
-
-	err = json.Unmarshal(bti, &tt)
-	assert.NoError(t, err)
-	assert.True(t, time.Time(tt).Equal(time.Time(ti)))
-}
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index b707960..4e730c6 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -1,5 +1,5 @@
 /*
-	Copyright 2022 Loophole Labs
+	Copyright 2023 Loophole Labs
 
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
@@ -16,18 +16,48 @@
 
 package utils
 
-import "time"
-
-// Int64ToTime converts an int64 to a time.Time in a standardized way
-func Int64ToTime(i int64) time.Time {
-	return time.UnixMilli(i).UTC()
+import (
+	"crypto/rand"
+	"encoding/json"
+	"github.com/gofiber/fiber/v2"
+	"math/big"
+	"time"
+	"unsafe"
+)
+
+const (
+	letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+var (
+	maxLetterBytes = big.NewInt(int64(len(letterBytes)))
+)
+
+// RandomBytes generates a random byte slice of length n
+func RandomBytes(n int) []byte {
+	b := make([]byte, n)
+	for i := 0; i < n; i++ {
+		num, _ := rand.Int(rand.Reader, maxLetterBytes)
+		b[i] = letterBytes[num.Int64()]
+	}
+
+	return b
 }
 
-// TimeToInt64 converts a time.Time to an int64 in a standardized way
-func TimeToInt64(t time.Time) int64 {
-	return t.UTC().UnixMilli()
+// RandomString generates a random string of length n
+func RandomString(n int) string {
+	b := RandomBytes(n)
+	return *(*string)(unsafe.Pointer(&b))
 }
 
-func TimeInt64Now() int64 {
-	return TimeToInt64(time.Now())
+// DefaultFiberApp returns a new fiber app with sensible defaults
+func DefaultFiberApp() *fiber.App {
+	return fiber.New(fiber.Config{
+		DisableStartupMessage: true,
+		ReadTimeout:           time.Second * 10,
+		WriteTimeout:          time.Second * 10,
+		IdleTimeout:           time.Second * 10,
+		JSONEncoder:           json.Marshal,
+		JSONDecoder:           json.Unmarshal,
+	})
 }