diff --git a/MIGRATION.md b/MIGRATION.md index 2e1ffb4..b69df1c 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,310 +1,458 @@ # Migration Guide: v0.3.0 to v2.0 -This guide helps you migrate from go-paging v0.3.0 to paging-go v2.0. +This guide helps you migrate from paging-go v0.3.0 to v2.0. ## Overview -v2.0 combines two major improvements: +v2.0 introduces major changes to the API: -1. **Modular architecture** - Separate packages for offset, cursor, and quota-fill pagination -2. **Repository rename** - Aligned with organizational naming standards: `go-paging` → `paging-go/v2` -3. **API consistency** - Type rename: `OrderBy` → `Sort` -4. **Schema pattern** - Ensures encoder/sort order consistency for cursor and quota-fill +1. **Modular Package Structure** - Strategies moved to separate packages (`offset/`, `cursor/`, `quotafill/`) +2. **Fetcher Pattern** - All strategies now take `Fetcher[T]` as their first parameter +3. **Unified Paginator Interface** - All strategies implement the same `Paginator[T].Paginate()` method +4. **Functional Options** - Page size limits moved from constructors to per-request options +5. **Simplified BuildConnection** - Connection builders work with `*Page[T]` result type +6. 
**Consistent Metadata** - All strategies return rich metadata for observability ## Breaking Changes Summary | Change | Old (v0.3.0) | New (v2.0) | -|--------|-------------|-----------| -| **Module path** | `github.com/nrfta/go-paging` | `github.com/nrfta/paging-go/v2` | -| **Constructor** | `paging.NewOffsetPaginator()` | `offset.New()` | -| **Type** | `paging.OffsetPaginator` | `offset.Paginator` | -| **Sort type** | `paging.OrderBy` | `paging.Sort` | -| **Cursor encoding** | `paging.EncodeOffsetCursor()` | `offset.EncodeCursor()` | +|--------|-----------|-----------| +| **Package imports** | `"github.com/nrfta/paging-go"` | `"github.com/nrfta/paging-go/v2/offset"` | +| **Constructor** | `paging.NewOffsetPaginator(pageArgs, totalCount)` | `offset.New(fetcher)` | +| **Page size config** | Constructor parameter | `Paginate()` options | +| **Pagination method** | Manual count + fetch + `QueryMods()` | `paginator.Paginate(ctx, page, opts...)` | +| **Result type** | Raw items + separate PageInfo | `*Page[T]` with Nodes, PageInfo, Metadata | +| **BuildConnection** | Manual edge/node array building | `offset.BuildConnection(result, transform)` | +| **Cursor strategy** | `paging.NewCursorPaginator(...)` | `cursor.New(fetcher, schema)` | +| **Quota-fill** | Not available | `quotafill.New(fetcher, filter, schema, opts...)` | ## Quick Summary **What you need to change:** -1. Update imports: `"github.com/nrfta/go-paging"` → `"github.com/nrfta/paging-go/v2/offset"` -2. Change constructor: `paging.NewOffsetPaginator(...)` → `offset.New(...)` -3. Change type: `paging.OffsetPaginator` → `offset.Paginator` -4. Rename sort type: `paging.OrderBy` → `paging.Sort` -5. **New (Recommended):** Use `offset.BuildConnection()` to eliminate 60-80% of boilerplate +1. Create a `Fetcher[T]` using `sqlboiler.NewFetcher()` with query and count functions +2. Pass fetcher to strategy constructor: `offset.New(fetcher)` instead of `offset.New(pageArgs, totalCount)` +3. 
Move page size config from constructor to `Paginate()` options: `paging.WithMaxSize(100)` +4. Replace manual fetch + `QueryMods()` with `paginator.Paginate(ctx, page, opts...)` +5. Update `BuildConnection()` calls to pass `*Page[T]` result instead of raw items **What stays the same:** - `paging.PageArgs` usage -- `paging.PageInfo` type -- `QueryMods()` method -- SQLBoiler integration +- `paging.PageInfo` type and accessors +- `paging.Connection[T]` and `Edge[T]` types +- Transform functions signature +- SQLBoiler integration pattern **What you gain:** -- ✨ **60-80% less boilerplate** with `offset.BuildConnection()` -- ✨ Generic `Connection[T]` and `Edge[T]` types -- ✨ Type-safe transformations with automatic error handling -- ✨ Modular architecture with cursor and quota-fill pagination support -- ✨ **Automatic N+1 pattern** - No more manual `limit + 1` with `cursor.BuildFetchParams()` +- ✨ **Consistent API** across all three pagination strategies +- ✨ **Reusable fetchers** - define once, use across multiple requests +- ✨ **Per-request page size limits** via functional options +- ✨ **Rich metadata** for observability (timing, strategy name, strategy-specific info) +- ✨ **Easier testing** with mockable `Fetcher[T]` interface +- ✨ **Simpler strategy switching** - change three lines of code to switch strategies -## Overview +## Core Concept: The Fetcher Pattern -The library has been refactored to use a modular package structure: +The biggest change in v2.0 is the introduction of the `Fetcher[T]` interface. Instead of passing `PageArgs` and `totalCount` to constructors, you now create a reusable fetcher that handles database queries. 
-- **`offset/`** package: Offset-based pagination with cursor encoding -- **`cursor/`** package: Cursor-based (keyset) pagination -- **`quotafill/`** package: Filter-aware iterative fetching -- **`sqlboiler/`** package: SQLBoiler ORM adapter (generic + strategy-specific) -- **Root package**: Shared types (`PageArgs`, `PageInfo`, `Connection[T]`, `Edge[T]`) +**Benefits:** +- **Reusable**: Define once, use across multiple requests with different page sizes +- **ORM-agnostic**: Works with SQLBoiler, GORM, sqlc, or raw SQL +- **Strategy-agnostic**: Same fetcher works for offset, cursor, and quota-fill +- **Testable**: Easy to mock for unit tests -## Breaking Changes +**The Fetcher interface:** -### Removed: `paging.NewOffsetPaginator()` +```go +type Fetcher[T any] interface { + Fetch(ctx context.Context, params FetchParams) ([]T, error) + Count(ctx context.Context, params FetchParams) (int64, error) +} +``` -The `NewOffsetPaginator()` function and `OffsetPaginator` type have been removed from the root package. +## Breaking Changes -### Removed: `paging.EncodeOffsetCursor()` / `paging.DecodeOffsetCursor()` +### Constructor signatures changed -Cursor functions moved to `offset` package (see [Cursor Functions](#cursor-functions)). +All strategy constructors now take `Fetcher[T]` as their first parameter: -## Migration Steps +- **Offset**: `offset.New(fetcher)` +- **Cursor**: `cursor.New(fetcher, schema)` +- **Quota-fill**: `quotafill.New(fetcher, filter, schema, opts...)` -### 1. Update Your Imports +### Page size limits moved to Paginate() options -Add the offset package import: +Constructor no longer accepts default limit parameter. Use functional options instead: ```go -import ( - "github.com/nrfta/paging-go/v2" - "github.com/nrfta/paging-go/v2/offset" // Add this +result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(100), // Cap at 100 items + paging.WithDefaultSize(25), // Default to 25 when First is nil ) ``` -### 2. 
Update Paginator Creation +### Paginate() method returns *Page[T] -**Before:** +Instead of manually fetching data and building connections, call `Paginate()`: ```go -paginator := paging.NewOffsetPaginator(pageArgs, totalCount) +// Returns *Page[T] with Nodes, PageInfo, and Metadata populated +result, err := paginator.Paginate(ctx, page, opts...) ``` -**After:** +### BuildConnection() takes *Page[T] instead of raw items + +Connection builders now work with the `*Page[T]` result: ```go -paginator := offset.New(pageArgs, totalCount) +// Old: pass paginator + raw items +return offset.BuildConnection(paginator, dbUsers, transform) + +// New: pass result from Paginate() +return offset.BuildConnection(result, transform) ``` -**With custom default limit:** +## Migration Steps + +### 1. Create a Fetcher + +First, create a `Fetcher[T]` using `sqlboiler.NewFetcher()`: ```go -// Before -defaultLimit := 100 -paginator := paging.NewOffsetPaginator(pageArgs, totalCount, &defaultLimit) +import ( + "github.com/nrfta/paging-go/v2/sqlboiler" + "github.com/volatiletech/sqlboiler/v4/queries/qm" +) -// After -defaultLimit := 100 -paginator := offset.New(pageArgs, totalCount, &defaultLimit) +fetcher := sqlboiler.NewFetcher( + // Query function: returns slice of items + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, r.DB) + }, + // Count function: returns total count + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return models.Users(mods...).Count(ctx, r.DB) + }, + // Converter function: transforms FetchParams to query mods + sqlboiler.OffsetToQueryMods, // For offset pagination + // OR: sqlboiler.CursorToQueryMods for cursor/quota-fill +) ``` -### 3. Update Type References +**Key points:** +- Query function includes filters, joins, etc. 
but NOT limit/offset/order (handled by converter) +- Count function uses same filters as query function +- Choose converter based on pagination strategy: `OffsetToQueryMods` or `CursorToQueryMods` -**Before:** +### 2. Update Constructor Calls + +Pass the fetcher to strategy constructors instead of PageArgs and count: + +**Offset pagination:** ```go -var paginator paging.OffsetPaginator +// Old v0.3.0 +totalCount, _ := models.Users().Count(ctx, r.DB) +paginator := offset.New(page, totalCount) + +// New v2.0 +fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.OffsetToQueryMods) +paginator := offset.New(fetcher) ``` -**After:** +**Cursor pagination:** + +```go +// Old v0.3.0 +fetchParams, _ := cursor.BuildFetchParams(page, schema) +users, _ := fetcher.Fetch(ctx, fetchParams) +paginator, _ := cursor.New(page, schema, users) + +// New v2.0 +fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.CursorToQueryMods) +paginator := cursor.New(fetcher, schema) +``` + +**Quota-fill pagination:** ```go -var paginator offset.Paginator +// Old v0.3.0 +paginator := quotafill.New(fetcher, filter, schema, opts...) +result, _ := paginator.Paginate(ctx, page) + +// New v2.0 (same, but fetcher creation is now explicit) +fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.CursorToQueryMods) +paginator := quotafill.New(fetcher, filter, schema, opts...) +result, _ := paginator.Paginate(ctx, page, paging.WithMaxSize(50)) ``` -### 4. Use BuildConnection (Recommended!) +### 3. Replace Manual Fetch with Paginate() -This is the **biggest improvement** in v1.0. 
Instead of manually building edges and nodes, use `offset.BuildConnection()`: +Instead of calling `QueryMods()` and manually fetching, call `Paginate()`: -**Before (Manual Boilerplate - 15+ lines):** +**Offset example:** ```go -func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*UserConnection, error) { - totalCount, _ := models.Users().Count(ctx, r.DB) - paginator := paging.NewOffsetPaginator(page, totalCount) - - dbUsers, _ := models.Users(paginator.QueryMods()...).All(ctx, r.DB) - - // Manual boilerplate - result := &UserConnection{PageInfo: &paginator.PageInfo} - for i, row := range dbUsers { - user, err := toDomainUser(row) - if err != nil { - return nil, err - } - result.Edges = append(result.Edges, &UserEdge{ - Cursor: *paging.EncodeOffsetCursor(paginator.Offset + i + 1), - Node: user, - }) - result.Nodes = append(result.Nodes, user) - } - return result, nil +// Old v0.3.0 +paginator := offset.New(page, totalCount) +dbUsers, err := models.Users(paginator.QueryMods()...).All(ctx, r.DB) +if err != nil { + return nil, err +} +return offset.BuildConnection(paginator, dbUsers, toDomainUser) + +// New v2.0 +paginator := offset.New(fetcher) +result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(100), + paging.WithDefaultSize(25), +) +if err != nil { + return nil, err } +return offset.BuildConnection(result, toDomainUser) ``` -**After (BuildConnection - 1 line!):** +**Cursor example:** ```go -func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { - totalCount, _ := models.Users().Count(ctx, r.DB) - paginator := offset.New(page, totalCount) +// Old v0.3.0 +fetchParams, _ := cursor.BuildFetchParams(page, schema) +users, _ := fetcher.Fetch(ctx, fetchParams) +paginator, _ := cursor.New(page, schema, users) +return cursor.BuildConnection(paginator, users, toDomainUser) + +// New v2.0 +paginator := cursor.New(fetcher, schema) +result, err := paginator.Paginate(ctx, page, 
paging.WithMaxSize(100)) +if err != nil { + return nil, err +} +return cursor.BuildConnection(result, schema, page, toDomainUser) +``` - dbUsers, _ := models.Users(paginator.QueryMods()...).All(ctx, r.DB) +### 4. Update BuildConnection() Calls - // One line - library handles everything! - return offset.BuildConnection(paginator, dbUsers, toDomainUser) -} +All `BuildConnection()` functions now take `*Page[T]` as first parameter: -// Transform function (database model → domain model) -func toDomainUser(db *models.User) (*User, error) { - return &User{ - ID: db.ID, - Name: db.Name, - Email: db.Email, - }, nil -} +**Offset:** + +```go +// Old: pass paginator + items +offset.BuildConnection(paginator, items, transform) + +// New: pass result from Paginate() +offset.BuildConnection(result, transform) ``` -**Benefits:** +**Cursor:** -- ✅ 60-80% less code -- ✅ No manual cursor encoding -- ✅ Automatic error handling -- ✅ Type-safe transformations -- ✅ Works with both `edges` and `nodes` fields +```go +// Old: pass paginator + items +cursor.BuildConnection(paginator, items, transform) -### 5. PageInfo Access (No Changes Required!) +// New: pass result + schema + page +cursor.BuildConnection(result, schema, page, transform) +``` -**Good news:** PageInfo usage is identical! The `offset.Paginator.PageInfo` field is `paging.PageInfo` - no conversion needed. +**Quota-fill:** ```go -// Works exactly the same as before -pageInfo := paginator.PageInfo +// Old: manual loop with edges +edges := make([]*paging.Edge[*Org], len(result.Nodes)) +for i, org := range result.Nodes { + domain, _ := toDomain(org) + cursorStr, _ := schema.Encode(org) + edges[i] = &paging.Edge[*Org]{Cursor: *cursorStr, Node: domain} +} +return &paging.Connection[*Org]{Edges: edges, Nodes: nodes, PageInfo: result.PageInfo}, nil + +// New: use BuildConnection helper +quotafill.BuildConnection(result, schema, page, toDomain) +``` -// Or use the helper method -pageInfo := paginator.GetPageInfo() +### 5. 
Move Page Size Config to Options -// Both return paging.PageInfo directly +Page size limits now passed via functional options to `Paginate()`: + +```go +// Old v0.3.0 (constructor parameter) +defaultLimit := 25 +paginator := offset.New(page, totalCount, &defaultLimit) + +// New v2.0 (Paginate options) +paginator := offset.New(fetcher) +result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(100), // Maximum allowed size + paging.WithDefaultSize(25), // Default when First is nil +) ``` -## Complete Example +**Benefits:** +- Configure page size per-request without creating new paginator +- Compose multiple options together +- Easier to add new options in the future + +## Complete Example: Offset Pagination -### Before (Old API - Manual Boilerplate) +### Before (v0.3.0) ```go -package main +package resolvers import ( "context" - "database/sql" - "github.com/nrfta/paging-go/v2" + "github.com/nrfta/paging-go/v2/offset" "github.com/my-user/my-app/models" ) -type UserConnection struct { - Edges []*UserEdge - PageInfo *paging.PageInfo -} - -type UserEdge struct { - Cursor string - Node *models.User -} - -func GetUsers(ctx context.Context, pageArgs *paging.PageArgs, db *sql.DB) (*UserConnection, error) { +func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { // Get total count - totalCount, err := models.Users().Count(ctx, db) + totalCount, err := models.Users().Count(ctx, r.DB) if err != nil { return nil, err } // Create paginator - paginator := paging.NewOffsetPaginator(pageArgs, totalCount) + paginator := offset.New(page, totalCount) // Fetch records - dbUsers, err := models.Users(paginator.QueryMods()...).All(ctx, db) + dbUsers, err := models.Users(paginator.QueryMods()...).All(ctx, r.DB) if err != nil { return nil, err } - // Manual boilerplate - 15+ lines - result := &UserConnection{PageInfo: &paginator.PageInfo} - for i, row := range dbUsers { - result.Edges = append(result.Edges, &UserEdge{ - Cursor: 
*paging.EncodeOffsetCursor(paginator.Offset + i + 1), - Node: row, - }) - } + // Build connection + return offset.BuildConnection(paginator, dbUsers, toDomainUser) +} - return result, nil +func toDomainUser(db *models.User) (*User, error) { + return &User{ID: db.ID, Name: db.Name, Email: db.Email}, nil } ``` -### After (New API - BuildConnection) +### After (v2.0) ```go -package main +package resolvers import ( "context" - "database/sql" - "github.com/nrfta/paging-go/v2" "github.com/nrfta/paging-go/v2/offset" + "github.com/nrfta/paging-go/v2/sqlboiler" "github.com/my-user/my-app/models" + "github.com/volatiletech/sqlboiler/v4/queries/qm" ) -func GetUsers(ctx context.Context, pageArgs *paging.PageArgs, db *sql.DB) (*paging.Connection[*models.User], error) { - // Get total count - totalCount, err := models.Users().Count(ctx, db) +func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { + // 1. Create fetcher (once, reusable) + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, r.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return models.Users(mods...).Count(ctx, r.DB) + }, + sqlboiler.OffsetToQueryMods, + ) + + // 2. Create paginator (once, reusable) + paginator := offset.New(fetcher) + + // 3. Paginate with per-request options + result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(100), + paging.WithDefaultSize(25), + ) if err != nil { return nil, err } - // Create paginator - paginator := offset.New(pageArgs, totalCount) + // 4. 
Build connection + return offset.BuildConnection(result, toDomainUser) +} - // Fetch records - dbUsers, err := models.Users(paginator.QueryMods()...).All(ctx, db) +func toDomainUser(db *models.User) (*User, error) { + return &User{ID: db.ID, Name: db.Name, Email: db.Email}, nil +} +``` + +## Complete Example: Cursor Pagination + +### Before (v0.3.0) + +```go +func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { + schema := cursor.NewSchema[*models.User](). + Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). + FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) + + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + mods = append([]qm.QueryMod{qm.Where("is_active = ?", true)}, mods...) + return models.Users(mods...).All(ctx, r.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return 0, nil + }, + sqlboiler.CursorToQueryMods, + ) + + fetchParams, err := cursor.BuildFetchParams(page, schema) if err != nil { return nil, err } - // One line - automatic edge/node building with identity transform - return offset.BuildConnection(paginator, dbUsers, func(u *models.User) (*models.User, error) { - return u, nil // No transformation needed - }) + users, err := fetcher.Fetch(ctx, fetchParams) + if err != nil { + return nil, err + } + + paginator, err := cursor.New(page, schema, users) + if err != nil { + return nil, err + } + + return cursor.BuildConnection(paginator, users, toDomainUser) } ``` -**With transformation (database model → domain model):** +### After (v2.0) ```go -type DomainUser struct { - ID string - FullName string -} +func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { + // 1. Define schema (once, reusable) + schema := cursor.NewSchema[*models.User](). 
+ Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). + FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) + + // 2. Create fetcher (once, reusable) + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + mods = append([]qm.QueryMod{qm.Where("is_active = ?", true)}, mods...) + return models.Users(mods...).All(ctx, r.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return 0, nil + }, + sqlboiler.CursorToQueryMods, + ) + + // 3. Create paginator (once, reusable) + paginator := cursor.New(fetcher, schema) + + // 4. Paginate + result, err := paginator.Paginate(ctx, page, paging.WithMaxSize(100)) + if err != nil { + return nil, err + } -func GetUsers(ctx context.Context, pageArgs *paging.PageArgs, db *sql.DB) (*paging.Connection[*DomainUser], error) { - totalCount, _ := models.Users().Count(ctx, db) - paginator := offset.New(pageArgs, totalCount) - dbUsers, _ := models.Users(paginator.QueryMods()...).All(ctx, db) - - // Automatic transformation with error handling - return offset.BuildConnection(paginator, dbUsers, func(db *models.User) (*DomainUser, error) { - return &DomainUser{ - ID: db.ID, - FullName: db.Name, - }, nil - }) + // 5. Build connection + return cursor.BuildConnection(result, schema, page, toDomainUser) } ``` @@ -321,7 +469,7 @@ These parts of the API remain unchanged: Cursor encoding/decoding functions have moved to the offset package and been renamed: -| Old (v0.3.0) | New (v1.0) | +| Old (v0.3.0) | New (v2.0) | |--------------|------------| | `paging.EncodeOffsetCursor()` | `offset.EncodeCursor()` | | `paging.DecodeOffsetCursor()` | `offset.DecodeCursor()` | @@ -344,7 +492,7 @@ offsetValue := offset.DecodeCursor(cursor) ## New: Automatic N+1 Pattern for Cursor Pagination -v1.0 introduces `cursor.BuildFetchParams()` which automatically handles the N+1 pattern, eliminating the need to manually add +1 to your limit. 
+v2.0's unified `Paginate()` method automatically handles the N+1 pattern for all strategies, eliminating the need to manually add +1 to your limit. **Before (Manual N+1):** @@ -390,90 +538,118 @@ This makes cursor pagination consistent with offset and quota-fill pagination, w - Decodes the After cursor - Returns ready-to-use FetchParams -## Advanced: Generic Connection Types +## Reusability Benefits + +The new API encourages reusability at every level: -v1.0 introduces generic `Connection[T]` and `Edge[T]` types: +**Fetcher reusability:** ```go -// Built-in generic types -type Connection[T any] struct { - Edges []Edge[T] - Nodes []T - PageInfo PageInfo +// Define once (e.g., in a repository struct) +type UserRepository struct { + db *sql.DB + fetcher paging.Fetcher[*models.User] } -type Edge[T any] struct { - Cursor string - Node T +func NewUserRepository(db *sql.DB) *UserRepository { + return &UserRepository{ + db: db, + fetcher: sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, db) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return models.Users(mods...).Count(ctx, db) + }, + sqlboiler.OffsetToQueryMods, + ), + } } -``` - -**GraphQL Schema:** -```graphql -# Use these built-in types with gqlgen -type UserConnection { - edges: [UserEdge!]! - nodes: [User!]! - pageInfo: PageInfo! +// Use across multiple methods +func (r *UserRepository) ListWithOffset(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { + paginator := offset.New(r.fetcher) + result, _ := paginator.Paginate(ctx, page, paging.WithMaxSize(100)) + return offset.BuildConnection(result, toDomain) } -type UserEdge { - cursor: String! - node: User! +func (r *UserRepository) ListWithCursor(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { + // Same fetcher, different strategy! 
+ paginator := cursor.New(r.fetcher, userSchema) + result, _ := paginator.Paginate(ctx, page, paging.WithMaxSize(100)) + return cursor.BuildConnection(result, userSchema, page, toDomain) } ``` -**gqlgen.yml:** - -```yaml -models: - UserConnection: - model: github.com/nrfta/paging-go/v2.Connection[github.com/my-user/my-app/domain.User] -``` +**Schema reusability (cursor/quota-fill):** -## Advanced: SQLBoiler Adapter (for library authors) - -The SQLBoiler adapter has been refactored for extensibility. **Most users don't need to change anything** - this only affects advanced use cases. - -**What changed:** +```go +// Define once at package level +var userSchema = cursor.NewSchema[*models.User](). + Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). + Field("name", "n", func(u *models.User) any { return u.Name }). + FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) -- Split into `fetcher.go` (generic ORM integration) + `offset.go` (strategy-specific queries) -- Enables future support for cursor pagination and other ORMs (GORM, sqlc, etc.) +// Reuse in both cursor and quota-fill paginators +``` -**If you were using internal SQLBoiler functions directly:** +**Per-request configuration:** ```go -// Before -mods := sqlboiler.ToQueryMods(params) +// Same paginator, different page sizes per endpoint +adminPaginator := offset.New(fetcher) + +// Admin endpoint: large pages +result, _ := adminPaginator.Paginate(ctx, page, paging.WithMaxSize(500)) -// After -mods := sqlboiler.OffsetToQueryMods(params) +// Public API endpoint: smaller pages +result, _ := adminPaginator.Paginate(ctx, page, paging.WithMaxSize(50)) ``` ## Why This Change? -The new modular architecture provides: +The unified API with Fetcher pattern provides significant benefits: -1. **Eliminates boilerplate**: `BuildConnection()` reduces resolver code by 60-80% -2. **Type-safe transformations**: Generic transform functions with automatic error handling -3. 
**Clearer separation of concerns**: Each pagination strategy in its own package -4. **Easier to extend**: New strategies (cursor-based, quota-fill) can be added without conflicts -5. **ORM flexibility**: Easy to add support for GORM, sqlc, or custom ORMs -6. **Better documentation**: Each package documented independently -7. **Production-ready**: Comprehensive tests, optimized code, clear patterns +1. **Consistent API**: All three strategies work the same way - learn once, use everywhere +2. **Better reusability**: Define fetchers, paginators, and schemas once, use across multiple requests +3. **Per-request configuration**: No need to create new paginators for different page sizes +4. **Easier testing**: Mock the `Fetcher[T]` interface for unit tests +5. **Simpler strategy switching**: Change three lines of code to switch from offset to cursor +6. **Rich metadata**: Observability for monitoring, debugging, and performance optimization +7. **ORM-agnostic**: Same pattern works with SQLBoiler, GORM, sqlc, or raw SQL +8. 
**Less boilerplate**: Single `Paginate()` call replaces manual fetch + trim logic ## Migration Checklist -- [ ] Update module import: `"github.com/nrfta/go-paging"` → `"github.com/nrfta/paging-go/v2"` -- [ ] Update subpackage imports: `"github.com/nrfta/go-paging/offset"` → `"github.com/nrfta/paging-go/v2/offset"` -- [ ] Replace `paging.NewOffsetPaginator()` with `offset.New()` -- [ ] Update type references: `paging.OffsetPaginator` → `offset.Paginator` -- [ ] Rename sort type: `paging.OrderBy` → `paging.Sort` -- [ ] Replace manual edge/node building with `offset.BuildConnection()` -- [ ] Update cursor functions (if used): `paging.EncodeOffsetCursor` → `offset.EncodeCursor` +### For Offset Pagination: +- [ ] Add `sqlboiler` package import +- [ ] Create `Fetcher[T]` using `sqlboiler.NewFetcher()` +- [ ] Update constructor: `offset.New(page, totalCount)` → `offset.New(fetcher)` +- [ ] Replace manual fetch with `paginator.Paginate(ctx, page, opts...)` +- [ ] Move page size config from constructor to `WithMaxSize()` / `WithDefaultSize()` options +- [ ] Update `BuildConnection()`: pass `result` instead of `paginator` + `items` +- [ ] Run tests to verify everything works + +### For Cursor Pagination: +- [ ] Create `Fetcher[T]` using `sqlboiler.NewFetcher()` with `CursorToQueryMods` +- [ ] Update constructor: `cursor.New(page, schema, items)` → `cursor.New(fetcher, schema)` +- [ ] Remove manual `BuildFetchParams()` and `fetcher.Fetch()` calls +- [ ] Replace with `paginator.Paginate(ctx, page, opts...)` +- [ ] Update `BuildConnection()`: pass `result, schema, page, transform` instead of `paginator, items, transform` - [ ] Run tests to verify everything works -- [ ] Enjoy 60-80% less boilerplate code! 
🎉 + +### For Quota-fill Pagination: +- [ ] Create `Fetcher[T]` using `sqlboiler.NewFetcher()` with `CursorToQueryMods` +- [ ] Constructor already takes fetcher (no change needed) +- [ ] Add per-request options to `Paginate()` call +- [ ] Replace manual connection building loop with `quotafill.BuildConnection()` +- [ ] Run tests to verify everything works + +### General: +- [ ] Review metadata access patterns if needed +- [ ] Consider refactoring to reuse fetchers, paginators, and schemas +- [ ] Update documentation and comments to reflect new API +- [ ] Enjoy the cleaner, more consistent API! 🎉 ## Need Help? diff --git a/README.md b/README.md index 7a57839..6929de3 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,30 @@ go get -u "github.com/nrfta/paging-go/v2" ## Migration from v0.3.0 -Breaking changes in v1.0 moved from monolithic API to modular package structure. See [MIGRATION.md](./MIGRATION.md) for details. +Breaking changes in v2.0 moved from monolithic API to modular packages and unified all strategies around a common `Paginator[T]` interface. See [MIGRATION.md](./MIGRATION.md) for details. Quick summary: -1. Add strategy import: `"github.com/nrfta/paging-go/v2/offset"` -2. Change constructor: `paging.NewOffsetPaginator()` → `offset.New()` -3. Use builder: `offset.BuildConnection()` eliminates 60-80% of boilerplate +1. Import strategy packages: `"github.com/nrfta/paging-go/v2/offset"` +2. Create a `Fetcher[T]` using `sqlboiler.NewFetcher()` +3. Change constructors: `paging.NewOffsetPaginator()` → `offset.New(fetcher)` +4. Call `Paginate()` method instead of manual fetching +5. Use `BuildConnection()` helpers to eliminate boilerplate + +The new API is consistent across all three strategies, making it easier to switch between them or use multiple strategies in the same codebase. + +## API Design Philosophy + +All three pagination strategies follow the same unified pattern, making them easy to learn and switch between: + +**1. 
Fetcher Pattern:** Create a reusable `Fetcher[T]` that handles database queries. The fetcher is ORM-agnostic and strategy-agnostic - it works with SQLBoiler, GORM, sqlc, or raw SQL. Define it once, use it across multiple requests. + +**2. Paginator Interface:** Each strategy implements `Paginator[T]` with the same `Paginate()` method signature. The only difference is constructor parameters (offset needs nothing, cursor needs schema, quota-fill needs filter and schema). + +**3. Functional Options:** Configure page size limits per-request using options like `WithMaxSize(100)` and `WithDefaultSize(25)`. No need to create new paginators for different limits. + +**4. BuildConnection Helpers:** Eliminate 60-80% of boilerplate by using strategy-specific `BuildConnection()` functions that handle edge creation, cursor generation, and model transformation. + +This design means you can switch from offset to cursor pagination by changing three lines of code, not rewriting your entire resolver. ## Quick Start @@ -57,25 +75,37 @@ import ( "context" "github.com/nrfta/paging-go/v2" "github.com/nrfta/paging-go/v2/offset" + "github.com/nrfta/paging-go/v2/sqlboiler" "github.com/my-user/my-app/models" + "github.com/volatiletech/sqlboiler/v4/queries/qm" ) func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { - // Get total count - totalCount, err := models.Users().Count(ctx, r.DB) - if err != nil { - return nil, err - } + // 1. Create fetcher (once, reusable across requests) + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, r.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return models.Users(mods...).Count(ctx, r.DB) + }, + sqlboiler.OffsetToQueryMods, + ) + + // 2. 
Create paginator (once, reusable) + paginator := offset.New(fetcher) - // Create paginator and fetch - paginator := offset.New(page, totalCount) - dbUsers, err := models.Users(paginator.QueryMods()...).All(ctx, r.DB) + // 3. Paginate with per-request options + result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(100), // Cap at 100 items + paging.WithDefaultSize(25), // Default to 25 when First is nil + ) if err != nil { return nil, err } - // Build connection with automatic edge/node creation - return offset.BuildConnection(paginator, dbUsers, toDomainUser) + // 4. Build connection with automatic edge/node creation + return offset.BuildConnection(result, toDomainUser) } // Transform database model to domain model @@ -103,11 +133,13 @@ Traditional LIMIT/OFFSET with page numbers. Best for small-to-medium datasets wh **Custom configuration:** ```go -// Custom default limit -defaultLimit := 25 -paginator := offset.New(pageArgs, totalCount, &defaultLimit) +// Page size limits are passed via options to Paginate() +result, err := paginator.Paginate(ctx, page, + paging.WithMaxSize(200), // Cap at 200 items + paging.WithDefaultSize(50), // Default to 50 when First is nil +) -// Single column sort +// Single column sort (use PageArgs helpers) pageArgs := paging.WithSortBy(nil, "created_at", true) // Multi-column sort @@ -135,8 +167,10 @@ High-performance keyset pagination using composite indexes. Provides O(1) perfor ```go import ( + "github.com/nrfta/paging-go/v2" "github.com/nrfta/paging-go/v2/cursor" "github.com/nrfta/paging-go/v2/sqlboiler" + "github.com/volatiletech/sqlboiler/v4/queries/qm" ) func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*User], error) { @@ -145,7 +179,7 @@ func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*pagi Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). 
FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) - // 2. Create fetcher with cursor strategy + // 2. Create fetcher (once, reusable) fetcher := sqlboiler.NewFetcher( func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { // Add filters only - NO qm.OrderBy here @@ -160,34 +194,25 @@ func (r *queryResolver) Users(ctx context.Context, page *paging.PageArgs) (*pagi sqlboiler.CursorToQueryMods, ) - // 3. Build fetch params with automatic N+1 - fetchParams, err := cursor.BuildFetchParams(page, schema) - if err != nil { - return nil, err - } + // 3. Create paginator (once, reusable) + paginator := cursor.New(fetcher, schema) - // 4. Fetch data - users, err := fetcher.Fetch(ctx, fetchParams) + // 4. Paginate with per-request options + result, err := paginator.Paginate(ctx, page, paging.WithMaxSize(100)) if err != nil { return nil, err } - // 5. Create paginator (trims to requested limit) - paginator, err := cursor.New(page, schema, users) - if err != nil { - return nil, err - } - - // 6. Build connection - return cursor.BuildConnection(paginator, users, toDomainUser) + // 5. Build connection with transformation + return cursor.BuildConnection(result, schema, page, toDomainUser) } ``` **Critical: ORDER BY rules** -ORDER BY clauses must be defined in `FetchParams.OrderBy`, not in query mods. Adding `qm.OrderBy()` to the fetcher causes duplicate records and incorrect results. +ORDER BY clauses must be defined in the schema, not in query mods. Adding `qm.OrderBy()` to the fetcher causes duplicate records and incorrect results. -**Why:** Cursor pagination generates WHERE clauses based on sort columns. If WHERE filters by `created_at` but ORDER BY sorts by `name`, the query returns wrong results. +**Why:** Cursor pagination generates WHERE clauses based on sort columns from the schema. If you add `qm.OrderBy()` in the fetcher function, you'll have conflicting ORDER BY clauses that produce wrong results. 
```go // WRONG - Causes duplicates @@ -202,11 +227,11 @@ fetcher := sqlboiler.NewFetcher( sqlboiler.CursorToQueryMods, ) -// CORRECT - Define sorting in FetchParams +// CORRECT - Define sorting in schema fetcher := sqlboiler.NewFetcher( func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { mods = append([]qm.QueryMod{ - qm.Where("is_active = ?", true), + qm.Where("is_active = ?", true), // Filters only }, mods...) return models.Users(mods...).All(ctx, r.DB) }, @@ -214,13 +239,10 @@ fetcher := sqlboiler.NewFetcher( sqlboiler.CursorToQueryMods, ) -// Define sorting here -fetchParams := paging.FetchParams{ - OrderBy: []paging.Sort{ - {Column: "name", Desc: false}, - {Column: "id", Desc: false}, - }, -} +// Define sorting in schema +schema := cursor.NewSchema[*models.User](). + Field("name", "n", func(u *models.User) any { return u.Name }). + FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) ``` **Required database index:** @@ -337,18 +359,21 @@ This creates poor UX: uneven layouts, unpredictable "Load More" behavior, multip ```go import ( + "time" + "github.com/nrfta/paging-go/v2" "github.com/nrfta/paging-go/v2/cursor" "github.com/nrfta/paging-go/v2/quotafill" "github.com/nrfta/paging-go/v2/sqlboiler" + "github.com/volatiletech/sqlboiler/v4/queries/qm" ) func (r *queryResolver) Organizations(ctx context.Context, page *paging.PageArgs) (*paging.Connection[*Organization], error) { - // 1. Create schema + // 1. Define schema (reusable) schema := cursor.NewSchema[*models.Organization](). Field("created_at", "c", func(o *models.Organization) any { return o.CreatedAt }). FixedField("id", cursor.DESC, "i", func(o *models.Organization) any { return o.ID }) - // 2. Create fetcher with database filters + // 2. 
Create fetcher (once, reusable) fetcher := sqlboiler.NewFetcher( func(ctx context.Context, mods ...qm.QueryMod) ([]*models.Organization, error) { mods = append([]qm.QueryMod{ @@ -362,49 +387,31 @@ func (r *queryResolver) Organizations(ctx context.Context, page *paging.PageArgs sqlboiler.CursorToQueryMods, ) - // 3. Define authorization filter + // 3. Define authorization filter (applied after DB fetch) authFilter := func(ctx context.Context, orgs []*models.Organization) ([]*models.Organization, error) { return r.AuthzClient.FilterAuthorized(ctx, r.CurrentUser(ctx), orgs) } - // 4. Create quota-fill paginator + // 4. Create quota-fill paginator with strategy-specific options paginator := quotafill.New(fetcher, authFilter, schema, quotafill.WithMaxIterations(5), quotafill.WithMaxRecordsExamined(100), + quotafill.WithTimeout(5 * time.Second), ) - // 5. Paginate with quota-fill - result, err := paginator.Paginate(ctx, page) + // 5. Paginate with per-request options + result, err := paginator.Paginate(ctx, page, paging.WithMaxSize(50)) if err != nil { return nil, err } - // 6. Log metadata for monitoring + // 6. Check metadata for safeguard hits if result.Metadata.SafeguardHit != nil { - log.Warnf("Quota-fill safeguard hit: %s", *result.Metadata.SafeguardHit) + log.Warnf("Safeguard hit: %s", *result.Metadata.SafeguardHit) } - // 7. Build connection - edges := make([]*paging.Edge[*Organization], len(result.Nodes)) - nodes := make([]*Organization, len(result.Nodes)) - for i, org := range result.Nodes { - domain, err := toDomainOrg(org) - if err != nil { - return nil, err - } - cursorStr, _ := schema.Encode(org) - edges[i] = &paging.Edge[*Organization]{ - Cursor: *cursorStr, - Node: domain, - } - nodes[i] = domain - } - - return &paging.Connection[*Organization]{ - Edges: edges, - Nodes: nodes, - PageInfo: result.PageInfo, - }, nil + // 7. 
Build connection (quotafill has BuildConnection helper now) + return quotafill.BuildConnection(result, schema, page, toDomainOrganization) } ``` @@ -444,17 +451,28 @@ When triggered, partial results are returned with metadata indicating which safe **Metadata tracking:** -Provides observability for performance monitoring: +All strategies return metadata in `result.Metadata` for observability: ```go -page, err := paginator.Paginate(ctx, pageArgs) - -fmt.Printf("Strategy: %s\n", page.Metadata.Strategy) // "quotafill" -fmt.Printf("Query Time: %dms\n", page.Metadata.QueryTimeMs) // 42 -fmt.Printf("Items Examined: %d\n", page.Metadata.ItemsExamined) // 15 -fmt.Printf("Iterations Used: %d\n", page.Metadata.IterationsUsed) // 2 -if page.Metadata.SafeguardHit != nil { - fmt.Printf("Safeguard Hit: %s\n", *page.Metadata.SafeguardHit) // "max_iterations" +result, err := paginator.Paginate(ctx, page) + +fmt.Printf("Strategy: %s\n", result.Metadata.Strategy) // "quotafill" +fmt.Printf("Query Time: %dms\n", result.Metadata.QueryTimeMs) // 42 + +// Quota-fill specific metadata +if result.Metadata.ItemsExamined != nil { + fmt.Printf("Items Examined: %d\n", *result.Metadata.ItemsExamined) +} +if result.Metadata.IterationsUsed != nil { + fmt.Printf("Iterations Used: %d\n", *result.Metadata.IterationsUsed) +} +if result.Metadata.SafeguardHit != nil { + fmt.Printf("Safeguard Hit: %s\n", *result.Metadata.SafeguardHit) // "max_iterations" +} + +// Offset-specific metadata +if result.Metadata.Offset != nil { + fmt.Printf("Current Offset: %d\n", *result.Metadata.Offset) } ``` @@ -493,28 +511,42 @@ authFilter := func(ctx context.Context, users []*models.User) ([]*models.User, e ``` go-paging/ -├── connection.go # Generic Connection[T] and Edge[T] types +├── connection.go # Generic Connection[T], Edge[T], and Page[T] types ├── interfaces.go # Core interfaces (Paginator[T], Fetcher[T], FilterFunc[T]) -├── models.go # PageArgs, PageInfo, Metadata +├── models.go # PageArgs, PageInfo, Metadata, 
PaginateOption ├── offset/ # Offset-based pagination -│ ├── paginator.go # Offset paginator + BuildConnection +│ ├── paginator.go # Offset Paginator[T] + BuildConnection │ └── cursor.go # Offset cursor encoding ├── cursor/ # Cursor-based (keyset) pagination -│ ├── paginator.go # Cursor paginator + BuildConnection -│ └── encoder.go # Composite cursor encoding/decoding -├── quotafill/ # Quota-fill pagination (decorator pattern) -│ └── wrapper.go # Wraps any paginator with iterative filtering +│ ├── paginator.go # Cursor Paginator[T] + BuildConnection +│ ├── encoder.go # Composite cursor encoding/decoding +│ └── schema.go # Schema definition for cursor fields +├── quotafill/ # Quota-fill pagination (iterative fetching) +│ ├── paginator.go # Quota-fill Paginator[T] + BuildConnection +│ └── strategy.go # Adaptive backoff and safeguards └── sqlboiler/ # SQLBoiler ORM adapter - ├── fetcher.go # Generic Fetcher[T] - ├── offset.go # Offset query builder - └── cursor.go # Cursor query builder + ├── fetcher.go # Generic Fetcher[T] implementation + ├── offset.go # OffsetToQueryMods converter + └── cursor.go # CursorToQueryMods converter ``` -Modular architecture with clear separation: -- ORM adapters (sqlboiler/) are generic and strategy-agnostic -- Pagination strategies (offset/, cursor/) are independent packages -- Decorators (quotafill/) wrap any paginator to add capabilities -- Core types (connection.go, interfaces.go) shared across strategies +The architecture reflects the unified API design: + +**Core Abstractions:** +- `Paginator[T]` interface: All strategies implement the same `Paginate(ctx, PageArgs, ...PaginateOption)` method +- `Fetcher[T]` interface: ORM-agnostic data fetching with query mods +- `Page[T]`: Result container with Nodes, PageInfo, and Metadata + +**Strategy Packages:** +- Each strategy (offset/, cursor/, quotafill/) is an independent package +- All implement `Paginator[T]` with consistent API +- Each provides `BuildConnection()` helper to eliminate 
boilerplate +- Strategy-specific options passed to constructors, page size options passed to `Paginate()` + +**ORM Adapters:** +- sqlboiler/ package provides `Fetcher[T]` implementation +- Converter functions (`OffsetToQueryMods`, `CursorToQueryMods`) transform `FetchParams` to ORM query mods +- Same fetcher pattern can be implemented for GORM, sqlc, or raw SQL ## Comparison: Offset vs Cursor diff --git a/connection_test.go b/connection_test.go index c95d23b..f7d7c60 100644 --- a/connection_test.go +++ b/connection_test.go @@ -1,6 +1,7 @@ package paging_test import ( + "context" "fmt" . "github.com/onsi/ginkgo/v2" @@ -25,7 +26,42 @@ type DomainUser struct { EmailAddr string } +// mockOffsetFetcher creates a simple in-memory fetcher for testing +func mockOffsetFetcher(allUsers []DBUser, totalCount int64) paging.Fetcher[DBUser] { + return &offsetTestFetcher{ + allUsers: allUsers, + totalCount: totalCount, + } +} + +type offsetTestFetcher struct { + allUsers []DBUser + totalCount int64 +} + +func (f *offsetTestFetcher) Fetch(ctx context.Context, params paging.FetchParams) ([]DBUser, error) { + start := params.Offset + end := start + params.Limit + if start >= len(f.allUsers) { + return []DBUser{}, nil + } + if end > len(f.allUsers) { + end = len(f.allUsers) + } + return f.allUsers[start:end], nil +} + +func (f *offsetTestFetcher) Count(ctx context.Context, params paging.FetchParams) (int64, error) { + return f.totalCount, nil +} + var _ = Describe("Connection and Edge", func() { + var ctx context.Context + + BeforeEach(func() { + ctx = context.Background() + }) + Describe("BuildConnection", func() { It("should build a connection with edges and nodes", func() { // Setup: Create mock database records @@ -146,20 +182,27 @@ var _ = Describe("Connection and Edge", func() { Describe("offset.BuildConnection", func() { It("should build connection with offset-based cursors", func() { - // Setup: Create paginator - first := 2 - pageArgs := &paging.PageArgs{ - First: &first, - } 
- totalCount := int64(10) - paginator := offset.New(pageArgs, totalCount) - - // Setup: Mock database records - dbUsers := []DBUser{ + // Setup: Create mock data + allUsers := []DBUser{ {ID: 1, Name: "Alice", Email: "alice@example.com"}, {ID: 2, Name: "Bob", Email: "bob@example.com"}, + {ID: 3, Name: "Charlie", Email: "charlie@example.com"}, + {ID: 4, Name: "Diana", Email: "diana@example.com"}, + {ID: 5, Name: "Eve", Email: "eve@example.com"}, } + // Create paginator with fetcher + fetcher := mockOffsetFetcher(allUsers, 10) + paginator := offset.New(fetcher) + + // Paginate first page (2 items) + first := 2 + pageArgs := &paging.PageArgs{First: &first} + + page, err := paginator.Paginate(ctx, pageArgs) + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(2)) + // Setup: Transform function transform := func(db DBUser) (*DomainUser, error) { return &DomainUser{ @@ -170,7 +213,7 @@ var _ = Describe("Connection and Edge", func() { } // Execute: Build connection using offset helper - conn, err := offset.BuildConnection(paginator, dbUsers, transform) + conn, err := offset.BuildConnection(page, transform) // Assert: No error Expect(err).ToNot(HaveOccurred()) @@ -183,8 +226,6 @@ var _ = Describe("Connection and Edge", func() { // Assert: Edges have sequential cursors Expect(conn.Edges).To(HaveLen(2)) - // First item at offset 0 → cursor encodes offset 1 - // Second item at offset 1 → cursor encodes offset 2 cursor1 := offset.DecodeCursor(&conn.Edges[0].Cursor) cursor2 := offset.DecodeCursor(&conn.Edges[1].Cursor) Expect(cursor1).To(Equal(1)) @@ -198,21 +239,32 @@ var _ = Describe("Connection and Edge", func() { }) It("should handle second page with offset", func() { - // Setup: Second page starting at offset 2 + // Setup: Create mock data + allUsers := []DBUser{ + {ID: 1, Name: "Alice", Email: "alice@example.com"}, + {ID: 2, Name: "Bob", Email: "bob@example.com"}, + {ID: 3, Name: "Charlie", Email: "charlie@example.com"}, + {ID: 4, Name: "Diana", Email: 
"diana@example.com"}, + {ID: 5, Name: "Eve", Email: "eve@example.com"}, + } + + // Create paginator + fetcher := mockOffsetFetcher(allUsers, 10) + paginator := offset.New(fetcher) + + // Paginate second page (starting at offset 2) first := 2 cursor := offset.EncodeCursor(2) pageArgs := &paging.PageArgs{ First: &first, After: cursor, } - totalCount := int64(10) - paginator := offset.New(pageArgs, totalCount) - // Setup: Mock records for second page - dbUsers := []DBUser{ - {ID: 3, Name: "Charlie", Email: "charlie@example.com"}, - {ID: 4, Name: "Diana", Email: "diana@example.com"}, - } + page, err := paginator.Paginate(ctx, pageArgs) + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(2)) + Expect(page.Nodes[0].ID).To(Equal(3)) // 3rd user (offset 2) + Expect(page.Nodes[1].ID).To(Equal(4)) // 4th user (offset 3) transform := func(db DBUser) (*DomainUser, error) { return &DomainUser{ @@ -221,7 +273,7 @@ var _ = Describe("Connection and Edge", func() { }, nil } - conn, err := offset.BuildConnection(paginator, dbUsers, transform) + conn, err := offset.BuildConnection(page, transform) Expect(err).ToNot(HaveOccurred()) @@ -235,79 +287,55 @@ var _ = Describe("Connection and Edge", func() { Describe("Real-world use case", func() { It("should eliminate repository boilerplate", func() { - // This test demonstrates the before/after from the research document - - // BEFORE: Manual boilerplate (what users had to write) - beforeConnection := func(dbUsers []DBUser, paginator offset.Paginator) (*paging.Connection[*DomainUser], error) { - result := &paging.Connection[*DomainUser]{ - PageInfo: paginator.PageInfo, - } - - for i, row := range dbUsers { - // Manual transformation - user := &DomainUser{ - ID: fmt.Sprintf("user-%d", row.ID), - FullName: row.Name, - EmailAddr: row.Email, - } - - // Manual cursor encoding - cursor := *offset.EncodeCursor(paginator.Offset + i + 1) - - // Manual edge building - result.Edges = append(result.Edges, paging.Edge[*DomainUser]{ - 
Cursor: cursor, - Node: user, - }) - - // Manual nodes building - result.Nodes = append(result.Nodes, user) - } - - return result, nil + // Setup: Create mock data + allUsers := []DBUser{ + {ID: 1, Name: "Alice", Email: "alice@example.com"}, + {ID: 2, Name: "Bob", Email: "bob@example.com"}, + {ID: 3, Name: "Charlie", Email: "charlie@example.com"}, + {ID: 4, Name: "Diana", Email: "diana@example.com"}, + {ID: 5, Name: "Eve", Email: "eve@example.com"}, } - // AFTER: Using BuildConnection (new API) - afterConnection := func(dbUsers []DBUser, paginator offset.Paginator) (*paging.Connection[*DomainUser], error) { - return offset.BuildConnection(paginator, dbUsers, func(db DBUser) (*DomainUser, error) { - return &DomainUser{ - ID: fmt.Sprintf("user-%d", db.ID), - FullName: db.Name, - EmailAddr: db.Email, - }, nil - }) - } + fetcher := mockOffsetFetcher(allUsers, 10) + paginator := offset.New(fetcher) - // Test: Both approaches produce identical results first := 3 pageArgs := &paging.PageArgs{First: &first} - totalCount := int64(10) - paginator := offset.New(pageArgs, totalCount) - dbUsers := []DBUser{ - {ID: 1, Name: "Alice", Email: "alice@example.com"}, - {ID: 2, Name: "Bob", Email: "bob@example.com"}, - {ID: 3, Name: "Charlie", Email: "charlie@example.com"}, - } + page, err := paginator.Paginate(ctx, pageArgs) + Expect(err).ToNot(HaveOccurred()) - beforeResult, beforeErr := beforeConnection(dbUsers, paginator) - afterResult, afterErr := afterConnection(dbUsers, paginator) + // AFTER: Using BuildConnection (new API) + // This is now the ONLY way to build a connection + conn, err := offset.BuildConnection(page, func(db DBUser) (*DomainUser, error) { + return &DomainUser{ + ID: fmt.Sprintf("user-%d", db.ID), + FullName: db.Name, + EmailAddr: db.Email, + }, nil + }) + + // Assert: Succeeds + Expect(err).ToNot(HaveOccurred()) - // Assert: Both succeed - Expect(beforeErr).ToNot(HaveOccurred()) - Expect(afterErr).ToNot(HaveOccurred()) + // Assert: Results are correct + 
Expect(conn.Nodes).To(HaveLen(3)) + Expect(conn.Edges).To(HaveLen(3)) - // Assert: Results are identical - Expect(afterResult.Nodes).To(HaveLen(len(beforeResult.Nodes))) - Expect(afterResult.Edges).To(HaveLen(len(beforeResult.Edges))) + Expect(conn.Nodes[0].ID).To(Equal("user-1")) + Expect(conn.Nodes[1].ID).To(Equal("user-2")) + Expect(conn.Nodes[2].ID).To(Equal("user-3")) - for i := range beforeResult.Nodes { - Expect(afterResult.Nodes[i].ID).To(Equal(beforeResult.Nodes[i].ID)) - Expect(afterResult.Edges[i].Cursor).To(Equal(beforeResult.Edges[i].Cursor)) - } + // Verify cursors + cursor1 := offset.DecodeCursor(&conn.Edges[0].Cursor) + cursor2 := offset.DecodeCursor(&conn.Edges[1].Cursor) + cursor3 := offset.DecodeCursor(&conn.Edges[2].Cursor) + Expect(cursor1).To(Equal(1)) + Expect(cursor2).To(Equal(2)) + Expect(cursor3).To(Equal(3)) - // The key difference: AFTER is 1 line vs BEFORE is 15+ lines - // This is the 60-80% boilerplate reduction mentioned in the research + // The key difference: BuildConnection is 1 line vs manual boilerplate of 15+ lines + // This achieves the 60-80% boilerplate reduction mentioned in the research }) }) }) diff --git a/cursor/paginator.go b/cursor/paginator.go index 8094a38..49e3ec5 100644 --- a/cursor/paginator.go +++ b/cursor/paginator.go @@ -1,297 +1,110 @@ package cursor import ( - "fmt" + "context" "github.com/nrfta/paging-go/v2" ) -const defaultLimitVal = 50 - // PageArgs represents pagination arguments. -// This is a subset of the main PageArgs type to avoid import cycles. -// Implementations should provide the page size (First), cursor position (After), -// and sorting configuration (SortBy). type PageArgs interface { GetFirst() *int GetAfter() *string GetSortBy() []paging.Sort } -// Paginator is the paginator for cursor-based pagination. -// It encapsulates limit, cursor position, and page metadata for keyset queries. 
-// -// Unlike offset pagination, cursor pagination: -// - Does not require totalCount -// - Uses cursor position instead of offset -// - Provides O(1) performance regardless of page depth -// - Requires composite indexes on sort columns -// -// Paginator now requires a Schema to enforce encoder/OrderBy matching. -type Paginator struct { - limit int - cursor *paging.CursorPosition - PageInfo paging.PageInfo - orderBy []paging.Sort - encoder interface{} // Stored as interface{} to avoid making Paginator generic +// Paginator implements paging.Paginator[T] for cursor-based pagination. +type Paginator[T any] struct { + fetcher paging.Fetcher[T] + schema *Schema[T] } -// parsedParams holds the common parsed state from PageArgs and Schema. -type parsedParams[T any] struct { - encoder paging.CursorEncoder[T] - orderBy []paging.Sort - limit int - cursor *paging.CursorPosition +// New creates a cursor paginator that implements paging.Paginator[T]. +// +// Example: +// +// fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.CursorToQueryMods) +// paginator := cursor.New(fetcher, schema) +// result, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +func New[T any](fetcher paging.Fetcher[T], schema *Schema[T]) paging.Paginator[T] { + return &Paginator[T]{ + fetcher: fetcher, + schema: schema, + } } -// parsePageArgs extracts and validates common parameters from PageArgs and Schema. -func parsePageArgs[T any](page PageArgs, schema *Schema[T], defaultLimit *int) (parsedParams[T], error) { - encoder, err := schema.EncoderFor(page) +// Paginate executes cursor-based pagination and returns a Page[T]. +func (p *Paginator[T]) Paginate( + ctx context.Context, + args *paging.PageArgs, + opts ...paging.PaginateOption, +) (*paging.Page[T], error) { + // Apply page size config + pageConfig := paging.ApplyPaginateOptions(args, opts...) 
+ limit := pageConfig.EffectiveLimit(args) + + // Get encoder for current sort + encoder, err := p.schema.EncoderFor(args) if err != nil { - return parsedParams[T]{}, err + return nil, err } - var sortBy []paging.Sort - if page != nil && page.GetSortBy() != nil { - sortBy = page.GetSortBy() + // Decode cursor + var cursorPos *paging.CursorPosition + if args != nil && args.GetAfter() != nil { + cursorPos, _ = encoder.Decode(*args.GetAfter()) } - orderBy := schema.BuildOrderBy(sortBy) - limit := defaultLimitVal - if defaultLimit != nil { - limit = *defaultLimit - } - if page != nil && page.GetFirst() != nil && *page.GetFirst() > 0 { - limit = *page.GetFirst() - } - if limit == 0 { - limit = defaultLimitVal - } - - var cursor *paging.CursorPosition - if page != nil && page.GetAfter() != nil { - cursor, _ = encoder.Decode(*page.GetAfter()) - } + // Build ORDER BY + orderBy := p.schema.BuildOrderBy(getSortBy(args)) - return parsedParams[T]{ - encoder: encoder, - orderBy: orderBy, - limit: limit, - cursor: cursor, - }, nil -} - -// New creates a new cursor paginator using a Schema. 
-// -// Parameters: -// - page: Pagination arguments including page size, cursor, and sorting -// - schema: Schema that defines sortable fields, fixed fields, and cursor encoding -// - items: The items fetched from the database (should be LIMIT+1 for accurate HasNextPage) -// - defaultLimit: Optional default page size (defaults to 50 if not provided) -// -// The paginator automatically handles: -// - PageArgs validation: Returns error if sort fields are invalid -// - Encoder/OrderBy matching: Schema guarantees they match -// - N+1 pattern: Detects if you fetched LIMIT+1 for accurate HasNextPage detection -// - Item trimming: Trims results to requested limit if N+1 was fetched -// - Default page size of 50 records -// - Zero-value protection to prevent divide-by-zero errors -// - Cursor decoding using the schema's encoder -// - PageInfo generation based on fetched results -// -// Best Practice (N+1 Pattern): -// - Fetch LIMIT+1 records from database (e.g., if limit=10, fetch 11) -// - Pass all fetched items to New() -// - Paginator will detect N+1 and set HasNextPage accurately -// - BuildConnection will automatically trim to LIMIT items -// -// Example usage: -// -// // Define schema once at app startup -// schema := cursor.NewSchema[*models.User](). -// Field("name", "n", func(u *models.User) any { return u.Name }). 
-// FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) -// -// // Fetch LIMIT+1 for accurate HasNextPage -// fetchParams := cursor.BuildFetchParams(pageArgs, schema) -// users, _ := fetcher.Fetch(ctx, fetchParams) -// -// paginator, err := cursor.New(pageArgs, schema, users) -// if err != nil { -// return nil, err // Invalid sort field in pageArgs -// } -// conn, _ := cursor.BuildConnection(paginator, users, toDomainUser) -func New[T any]( - page PageArgs, - schema *Schema[T], - items []T, - defaultLimit ...*int, -) (Paginator, error) { - var defLimit *int - if len(defaultLimit) > 0 { - defLimit = defaultLimit[0] + // Fetch with N+1 pattern + params := paging.FetchParams{ + Limit: limit + 1, + Cursor: cursorPos, + OrderBy: orderBy, } - - params, err := parsePageArgs(page, schema, defLimit) + items, err := p.fetcher.Fetch(ctx, params) if err != nil { - return Paginator{}, err + return nil, err } - // N+1 pattern: Check if we got more items than requested - hasNextPage := len(items) > params.limit - - // Trim items to the requested limit - trimmedItems := items + // Detect hasNextPage and trim + hasNextPage := len(items) > limit if hasNextPage { - trimmedItems = items[:params.limit] + items = items[:limit] } - pageInfo := newCursorBasedPageInfo(params.encoder, trimmedItems, params.limit, params.cursor, hasNextPage) + // Build PageInfo + pageInfo := buildCursorPageInfo(encoder, items, cursorPos, hasNextPage) - return Paginator{ - limit: params.limit, - cursor: params.cursor, - PageInfo: pageInfo, - orderBy: params.orderBy, - encoder: params.encoder, + return &paging.Page[T]{ + Nodes: items, + PageInfo: &pageInfo, + Metadata: paging.Metadata{Strategy: "cursor"}, }, nil } -// BuildFetchParams creates FetchParams with automatic N+1 pattern for accurate HasNextPage detection. -// It uses the schema to validate PageArgs, get the encoder, and build the complete OrderBy clause. 
-func BuildFetchParams[T any]( - page PageArgs, - schema *Schema[T], -) (paging.FetchParams, error) { - params, err := parsePageArgs(page, schema, nil) - if err != nil { - return paging.FetchParams{}, err - } - - return paging.FetchParams{ - Limit: params.limit + 1, // N+1 for HasNextPage detection - Cursor: params.cursor, - OrderBy: params.orderBy, - }, nil -} - -// GetPageInfo returns the PageInfo for this paginator. -// PageInfo contains functions to retrieve pagination metadata like -// cursors and whether next/previous pages exist. -// -// Note: TotalCount always returns nil for cursor pagination. -func (p *Paginator) GetPageInfo() paging.PageInfo { - return p.PageInfo -} - -// GetLimit returns the page size limit. -func (p *Paginator) GetLimit() int { - return p.limit -} - -// GetCursor returns the cursor position for this paginator. -// Returns nil if this is the first page. -func (p *Paginator) GetCursor() *paging.CursorPosition { - return p.cursor -} - -// GetOrderBy returns the OrderBy directives for this paginator. -func (p *Paginator) GetOrderBy() []paging.Sort { - return p.orderBy -} - -// BuildConnection creates a Relay-compliant GraphQL connection from a slice of items. -// It handles transformation from database models to domain models and automatically -// generates composite key cursors for each item. -// -// This function eliminates the manual boilerplate of building edges and nodes arrays, -// reducing repository code by 60-80%. -// -// The encoder is obtained from the paginator (which got it from the schema), -// ensuring encoder/OrderBy matching is enforced. 
-// -// Type parameters: -// - From: Source type (e.g., *models.User from SQLBoiler) -// - To: Target type (e.g., *domain.User for GraphQL) -// -// Parameters: -// - paginator: The cursor paginator containing pagination state and encoder -// - items: Slice of database records to transform -// - transform: Function that converts database model to domain model -// -// Returns a Connection with edges, nodes, and pageInfo populated. -// -// Example usage: -// -// // Before (manual boilerplate - 25+ lines): -// result := &domain.UserConnection{PageInfo: &paginator.PageInfo} -// for _, row := range dbUsers { -// user, err := toDomainUser(row) -// if err != nil { return nil, err } -// cursor, _ := encoder.Encode(row) -// result.Edges = append(result.Edges, domain.Edge{ -// Cursor: *cursor, -// Node: user, -// }) -// result.Nodes = append(result.Nodes, user) -// } -// -// // After (using BuildConnection - 1 line): -// return cursor.BuildConnection(paginator, dbUsers, toDomainUser) -func BuildConnection[From any, To any]( - paginator Paginator, - items []From, - transform func(From) (To, error), -) (*paging.Connection[To], error) { - // Get encoder from paginator (type assert) - encoder, ok := paginator.encoder.(paging.CursorEncoder[From]) - if !ok { - return nil, fmt.Errorf("paginator encoder type mismatch") - } - - // N+1 pattern: Trim items to the requested limit - // If caller fetched LIMIT+1, we only want to return LIMIT items in the connection - trimmedItems := items - if len(items) > paginator.limit { - trimmedItems = items[:paginator.limit] +// getSortBy safely extracts SortBy from args. 
+func getSortBy(args *paging.PageArgs) []paging.Sort { + if args == nil || args.GetSortBy() == nil { + return nil } - - return paging.BuildConnection( - trimmedItems, - paginator.PageInfo, - func(i int, item From) string { - cursor, _ := encoder.Encode(item) - if cursor == nil { - return "" - } - return *cursor - }, - transform, - ) + return args.GetSortBy() } -// newCursorBasedPageInfo creates PageInfo for cursor-based pagination. -// It uses the fetched items to generate cursors and determine page boundaries. -// -// Key differences from offset pagination: -// - TotalCount always returns nil (cursor pagination doesn't need total count) -// - HasNextPage uses N+1 pattern: passed in based on whether we got LIMIT+1 records -// - StartCursor/EndCursor encode the first/last items' sort columns -// - HasPreviousPage checks if cursor is not nil (has cursor = not first page) -func newCursorBasedPageInfo[T any]( +// buildCursorPageInfo creates PageInfo for cursor-based pagination. +func buildCursorPageInfo[T any]( encoder paging.CursorEncoder[T], items []T, - limit int, currentCursor *paging.CursorPosition, hasNextPage bool, ) paging.PageInfo { return paging.PageInfo{ - // TotalCount: Not available for cursor pagination TotalCount: func() (*int, error) { return nil, nil }, - // StartCursor: Encode first item (or nil if empty) StartCursor: func() (*string, error) { if len(items) == 0 { return nil, nil @@ -299,7 +112,6 @@ func newCursorBasedPageInfo[T any]( return encoder.Encode(items[0]) }, - // EndCursor: Encode last item (or nil if empty) EndCursor: func() (*string, error) { if len(items) == 0 { return nil, nil @@ -307,15 +119,44 @@ func newCursorBasedPageInfo[T any]( return encoder.Encode(items[len(items)-1]) }, - // HasNextPage: Determined by N+1 pattern (caller fetches LIMIT+1) - // True if we got more items than the requested limit HasNextPage: func() (bool, error) { return hasNextPage, nil }, - // HasPreviousPage: True if we have a cursor (implies we're not on the 
first page) HasPreviousPage: func() (bool, error) { return currentCursor != nil, nil }, } } + +// BuildConnection transforms a Page[From] to a Connection[To] for GraphQL. +// Uses schema's encoder to generate composite key cursors. +// +// Example: +// +// result, _ := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +// conn, _ := cursor.BuildConnection(result, schema, args, toDomainUser) +func BuildConnection[From any, To any]( + page *paging.Page[From], + schema *Schema[From], + args *paging.PageArgs, + transform func(From) (To, error), +) (*paging.Connection[To], error) { + encoder, err := schema.EncoderFor(args) + if err != nil { + return nil, err + } + + return paging.BuildConnection( + page.Nodes, + *page.PageInfo, + func(i int, item From) string { + cursor, _ := encoder.Encode(item) + if cursor == nil { + return "" + } + return *cursor + }, + transform, + ) +} diff --git a/cursor/paginator_test.go b/cursor/paginator_test.go index 9e2577a..9e3522c 100644 --- a/cursor/paginator_test.go +++ b/cursor/paginator_test.go @@ -1,7 +1,7 @@ package cursor_test import ( - "errors" + "context" "time" "github.com/nrfta/paging-go/v2" @@ -11,18 +11,65 @@ import ( . 
"github.com/onsi/gomega" ) +// mockFetcher creates a simple in-memory fetcher for testing cursor pagination +func mockFetcher(allItems []*testUser) paging.Fetcher[*testUser] { + return &testFetcher{allItems: allItems} +} + +type testFetcher struct { + allItems []*testUser +} + +func (f *testFetcher) Fetch(ctx context.Context, params paging.FetchParams) ([]*testUser, error) { + // Simple in-memory filtering based on cursor + var result []*testUser + startIdx := 0 + + // If cursor exists, find where to start + if params.Cursor != nil { + if idVal, ok := params.Cursor.Values["id"]; ok { + if id, ok := idVal.(string); ok { + for i, u := range f.allItems { + if u.ID == id { + startIdx = i + 1 // Start after cursor position + break + } + } + } + } + } + + // Collect items + for i := startIdx; i < len(f.allItems) && len(result) < params.Limit; i++ { + result = append(result, f.allItems[i]) + } + + return result, nil +} + +func (f *testFetcher) Count(ctx context.Context, params paging.FetchParams) (int64, error) { + return int64(len(f.allItems)), nil +} + var _ = Describe("Paginator", func() { var ( - schema *cursor.Schema[*testUser] - users []*testUser + ctx context.Context + schema *cursor.Schema[*testUser] + users []*testUser + fetcher paging.Fetcher[*testUser] + paginator paging.Paginator[*testUser] ) BeforeEach(func() { + ctx = context.Background() + // Create test users users = []*testUser{ - {ID: "user-1", CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Age: 25}, - {ID: "user-2", CreatedAt: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), Age: 30}, - {ID: "user-3", CreatedAt: time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), Age: 35}, + {ID: "user-1", Name: "Alice", Email: "alice@example.com", CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Age: 25}, + {ID: "user-2", Name: "Bob", Email: "bob@example.com", CreatedAt: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), Age: 30}, + {ID: "user-3", Name: "Charlie", Email: "charlie@example.com", CreatedAt: time.Date(2024, 
1, 3, 0, 0, 0, 0, time.UTC), Age: 35}, + {ID: "user-4", Name: "Diana", Email: "diana@example.com", CreatedAt: time.Date(2024, 1, 4, 0, 0, 0, 0, time.UTC), Age: 40}, + {ID: "user-5", Name: "Eve", Email: "eve@example.com", CreatedAt: time.Date(2024, 1, 5, 0, 0, 0, 0, time.UTC), Age: 45}, } // Create schema with all sortable fields @@ -31,244 +78,179 @@ var _ = Describe("Paginator", func() { Field("name", "n", func(u *testUser) any { return u.Name }). Field("email", "e", func(u *testUser) any { return u.Email }). FixedField("id", cursor.DESC, "i", func(u *testUser) any { return u.ID }) + + fetcher = mockFetcher(users) + paginator = cursor.New(fetcher, schema) }) Describe("Basic functionality", func() { It("uses the default limit when no pageArgs.First is provided", func() { - page := &paging.PageArgs{} + args := &paging.PageArgs{} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - Expect(paginator.GetLimit()).To(Equal(50)) - Expect(paginator.GetCursor()).To(BeNil()) + // Should return all 5 users (less than default 50) + Expect(page.Nodes).To(HaveLen(5)) + Expect(page.Metadata.Strategy).To(Equal("cursor")) }) - It("parses the pageArgs correctly", func() { - // Create PageArgs with sort field to ensure it's in cursor - pageArgsForCursor := paging.WithSortBy(&paging.PageArgs{}, "created_at", true) - - // Get encoder and encode a cursor - encoder, _ := schema.EncoderFor(pageArgsForCursor) - cursorStr, _ := encoder.Encode(users[0]) + It("parses the pageArgs correctly with cursor", func() { + // First, get a cursor from the first page + first := 2 + args := &paging.PageArgs{First: &first} - first := 10 - page := &paging.PageArgs{ - First: &first, - After: cursorStr, - SortBy: []paging.Sort{{Column: "created_at", Desc: true}}, - } - - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) + 
Expect(page.Nodes).To(HaveLen(2)) - Expect(paginator.GetLimit()).To(Equal(10)) - Expect(paginator.GetCursor()).ToNot(BeNil()) - Expect(paginator.GetCursor().Values).To(HaveKey("id")) - Expect(paginator.GetCursor().Values).To(HaveKey("created_at")) - }) - - It("handles nil cursor gracefully", func() { - first := 10 - page := &paging.PageArgs{ - First: &first, - } - - paginator, err := cursor.New(page, schema, users) - Expect(err).ToNot(HaveOccurred()) - - Expect(paginator.GetLimit()).To(Equal(10)) - Expect(paginator.GetCursor()).To(BeNil()) - }) + // Get the end cursor + endCursor, _ := page.PageInfo.EndCursor() + Expect(endCursor).ToNot(BeNil()) - It("handles zero limit with default", func() { - first := 0 - page := &paging.PageArgs{ + // Use it for next page + nextArgs := &paging.PageArgs{ First: &first, + After: endCursor, } - paginator, err := cursor.New(page, schema, users) + nextPage, err := paginator.Paginate(ctx, nextArgs) Expect(err).ToNot(HaveOccurred()) - - Expect(paginator.GetLimit()).To(Equal(50)) // Falls back to default + Expect(nextPage.Nodes).To(HaveLen(2)) + // Should start after user-2 + Expect(nextPage.Nodes[0].ID).To(Equal("user-3")) }) - It("uses custom default limit when provided", func() { - customDefault := 25 - page := &paging.PageArgs{} + It("handles nil cursor gracefully", func() { + first := 3 + args := &paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users, &customDefault) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - Expect(paginator.GetLimit()).To(Equal(25)) + Expect(page.Nodes).To(HaveLen(3)) + Expect(page.Nodes[0].ID).To(Equal("user-1")) }) }) Describe("PageInfo", func() { It("creates a page info with correct metadata", func() { - first := 10 - page := &paging.PageArgs{ - First: &first, - } + first := 2 + args := &paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args) 
Expect(err).ToNot(HaveOccurred()) // TotalCount should return nil for cursor pagination - totalCount, err := paginator.PageInfo.TotalCount() + totalCount, err := page.PageInfo.TotalCount() Expect(err).ToNot(HaveOccurred()) Expect(totalCount).To(BeNil()) - // HasNextPage should be false (only 3 items, limit is 10) - hasNextPage, err := paginator.PageInfo.HasNextPage() + // HasNextPage should be true (5 users, limit 2) + hasNextPage, err := page.PageInfo.HasNextPage() Expect(err).ToNot(HaveOccurred()) - Expect(hasNextPage).To(BeFalse()) + Expect(hasNextPage).To(BeTrue()) // HasPreviousPage should be false (no cursor) - hasPreviousPage, err := paginator.PageInfo.HasPreviousPage() + hasPreviousPage, err := page.PageInfo.HasPreviousPage() Expect(err).ToNot(HaveOccurred()) Expect(hasPreviousPage).To(BeFalse()) // StartCursor should encode first item - startCursor, err := paginator.PageInfo.StartCursor() + startCursor, err := page.PageInfo.StartCursor() Expect(err).ToNot(HaveOccurred()) Expect(startCursor).ToNot(BeNil()) // EndCursor should encode last item - endCursor, err := paginator.PageInfo.EndCursor() + endCursor, err := page.PageInfo.EndCursor() Expect(err).ToNot(HaveOccurred()) Expect(endCursor).ToNot(BeNil()) }) - It("indicates HasNextPage when more items exist (N+1 pattern)", func() { - first := 2 // Request 2 items - page := &paging.PageArgs{ - First: &first, - } - - // N+1 pattern: Pass 3 items (LIMIT+1) to signal there's a next page - paginator, err := cursor.New(page, schema, users) - Expect(err).ToNot(HaveOccurred()) - - hasNextPage, _ := paginator.PageInfo.HasNextPage() - Expect(hasNextPage).To(BeTrue()) // len(items) > limit means HasNextPage = true - }) - - It("indicates no HasNextPage when exactly limit items exist", func() { - first := 3 // Request 3 items, we have exactly 3 - page := &paging.PageArgs{ - First: &first, - } + It("indicates no HasNextPage when all items fetched", func() { + first := 10 // More than we have + args := 
&paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - hasNextPage, _ := paginator.PageInfo.HasNextPage() - Expect(hasNextPage).To(BeFalse()) // len(items) == limit means HasNextPage = false + hasNextPage, _ := page.PageInfo.HasNextPage() + Expect(hasNextPage).To(BeFalse()) }) It("indicates HasPreviousPage when cursor is provided", func() { - encoder, _ := schema.EncoderFor(&paging.PageArgs{}) - cursorStr, _ := encoder.Encode(users[1]) - - first := 10 - page := &paging.PageArgs{ + // Get cursor from first page + first := 2 + args := &paging.PageArgs{First: &first} + firstPage, _ := paginator.Paginate(ctx, args) + endCursor, _ := firstPage.PageInfo.EndCursor() + + // Second page should have HasPreviousPage = true + nextArgs := &paging.PageArgs{ First: &first, - After: cursorStr, + After: endCursor, } - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, nextArgs) Expect(err).ToNot(HaveOccurred()) - hasPreviousPage, _ := paginator.PageInfo.HasPreviousPage() - Expect(hasPreviousPage).To(BeTrue()) // Has cursor = not first page + hasPreviousPage, _ := page.PageInfo.HasPreviousPage() + Expect(hasPreviousPage).To(BeTrue()) }) It("handles empty results", func() { - emptyUsers := []*testUser{} - page := &paging.PageArgs{} + emptyFetcher := mockFetcher([]*testUser{}) + emptyPaginator := cursor.New(emptyFetcher, schema) + args := &paging.PageArgs{} - paginator, err := cursor.New(page, schema, emptyUsers) + page, err := emptyPaginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - startCursor, _ := paginator.PageInfo.StartCursor() + startCursor, _ := page.PageInfo.StartCursor() Expect(startCursor).To(BeNil()) - endCursor, _ := paginator.PageInfo.EndCursor() + endCursor, _ := page.PageInfo.EndCursor() Expect(endCursor).To(BeNil()) - hasNextPage, _ := paginator.PageInfo.HasNextPage() + hasNextPage, _ := 
page.PageInfo.HasNextPage() Expect(hasNextPage).To(BeFalse()) }) }) - Describe("OrderBy", func() { - It("should include fixed field when no sort columns provided", func() { - page := &paging.PageArgs{} + Describe("PaginateOption", func() { + It("should use WithDefaultSize when First is nil", func() { + args := &paging.PageArgs{} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args, paging.WithDefaultSize(3)) Expect(err).ToNot(HaveOccurred()) - - // Schema automatically includes fixed "id" field - orderBy := paginator.GetOrderBy() - Expect(orderBy).To(HaveLen(1)) - Expect(orderBy[0].Column).To(Equal("id")) - Expect(orderBy[0].Desc).To(BeTrue()) // Fixed field is DESC + Expect(page.Nodes).To(HaveLen(3)) }) - It("should include user sorts and fixed field", func() { - page := paging.WithSortBy(&paging.PageArgs{}, "created_at", true) + It("should cap page size with WithMaxSize", func() { + first := 10 + args := &paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args, paging.WithMaxSize(2)) Expect(err).ToNot(HaveOccurred()) - - // Schema includes user sort + fixed "id" field - orderBy := paginator.GetOrderBy() - Expect(orderBy).To(HaveLen(2)) - Expect(orderBy[0].Column).To(Equal("created_at")) - Expect(orderBy[0].Desc).To(BeTrue()) - Expect(orderBy[1].Column).To(Equal("id")) - Expect(orderBy[1].Desc).To(BeTrue()) // Fixed field + Expect(page.Nodes).To(HaveLen(2)) }) - It("should support multiple user-sortable columns", func() { - page := paging.WithMultiSort(&paging.PageArgs{}, - paging.Sort{Column: "name", Desc: false}, - paging.Sort{Column: "email", Desc: true}, - ) + It("should allow page size within MaxSize", func() { + first := 3 + args := &paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args, paging.WithMaxSize(5)) Expect(err).ToNot(HaveOccurred()) - - // Schema includes user 
sorts + fixed "id" field - orderBy := paginator.GetOrderBy() - Expect(orderBy).To(HaveLen(3)) - Expect(orderBy[0].Column).To(Equal("name")) - Expect(orderBy[0].Desc).To(BeFalse()) - Expect(orderBy[1].Column).To(Equal("email")) - Expect(orderBy[1].Desc).To(BeTrue()) - Expect(orderBy[2].Column).To(Equal("id")) - Expect(orderBy[2].Desc).To(BeTrue()) // Fixed field - }) - - It("should validate sort fields and return error for invalid fields", func() { - page := paging.WithSortBy(&paging.PageArgs{}, "invalid_field", true) - - _, err := cursor.New(page, schema, users) - - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("invalid sort field: invalid_field")) + Expect(page.Nodes).To(HaveLen(3)) }) }) Describe("BuildConnection", func() { It("should build a connection with edges and nodes", func() { first := 3 - page := &paging.PageArgs{ - First: &first, - } + args := &paging.PageArgs{First: &first} - paginator, err := cursor.New(page, schema, users) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) // Transform function (just return same user for testing) @@ -276,7 +258,7 @@ var _ = Describe("Paginator", func() { return u, nil } - conn, err := cursor.BuildConnection(paginator, users, transform) + conn, err := cursor.BuildConnection(page, schema, args, transform) Expect(err).ToNot(HaveOccurred()) Expect(conn).ToNot(BeNil()) @@ -291,38 +273,19 @@ var _ = Describe("Paginator", func() { Expect(conn.PageInfo).ToNot(BeZero()) }) - It("should handle transform errors", func() { - first := 3 - page := &paging.PageArgs{ - First: &first, - } - - paginator, err := cursor.New(page, schema, users) - Expect(err).ToNot(HaveOccurred()) - - // Transform function that returns error - transform := func(u *testUser) (*testUser, error) { - return nil, errors.New("transform error") - } - - conn, err := cursor.BuildConnection(paginator, users, transform) - - Expect(err).To(HaveOccurred()) - Expect(conn).To(BeNil()) - }) - It("should handle empty 
results", func() { - emptyUsers := []*testUser{} - page := &paging.PageArgs{} + emptyFetcher := mockFetcher([]*testUser{}) + emptyPaginator := cursor.New(emptyFetcher, schema) + args := &paging.PageArgs{} - paginator, err := cursor.New(page, schema, emptyUsers) + page, err := emptyPaginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) transform := func(u *testUser) (*testUser, error) { return u, nil } - conn, err := cursor.BuildConnection(paginator, emptyUsers, transform) + conn, err := cursor.BuildConnection(page, schema, args, transform) Expect(err).ToNot(HaveOccurred()) Expect(conn.Nodes).To(HaveLen(0)) diff --git a/interfaces.go b/interfaces.go index 538f5ea..9c5cb1f 100644 --- a/interfaces.go +++ b/interfaces.go @@ -9,12 +9,21 @@ import "context" // // Example implementations: // - offset.Paginator: Traditional offset/limit pagination -// - cursor.Paginator: High-performance cursor-based pagination (Phase 2) -// - quotafill.Wrapper: Filtering-aware pagination (Phase 3) +// - cursor.Paginator: High-performance cursor-based pagination +// - quotafill.Wrapper: Filtering-aware pagination +// +// Example usage: +// +// paginator := offset.New(fetcher) +// result, err := paginator.Paginate(ctx, args, +// paging.WithMaxSize(100), +// paging.WithDefaultSize(25), +// ) type Paginator[T any] interface { // Paginate executes pagination and returns a page of results. // The PageArgs contain the page size (First) and cursor position (After). - Paginate(ctx context.Context, args *PageArgs) (*Page[T], error) + // Options like WithMaxSize and WithDefaultSize configure per-request page size limits. + Paginate(ctx context.Context, args *PageArgs, opts ...PaginateOption) (*Page[T], error) } // Page represents a single page of paginated results. @@ -54,6 +63,11 @@ type Metadata struct { // SafeguardHit indicates if a safeguard was triggered during quota-fill. 
// Values: nil (no safeguard), "max_iterations", "max_records", "timeout" SafeguardHit *string + + // Offset is the current offset position for offset-based pagination. + // This field is only populated when Strategy is "offset". + // Used by BuildConnection to generate accurate cursors for multi-page results. + Offset int } // Fetcher abstracts database queries for any ORM or database layer. diff --git a/offset/offset_suite_test.go b/offset/offset_suite_test.go index 8168dec..2f518cc 100644 --- a/offset/offset_suite_test.go +++ b/offset/offset_suite_test.go @@ -1,10 +1,8 @@ package offset_test import ( - "reflect" "testing" - "github.com/aarondl/sqlboiler/v4/queries/qm" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -13,8 +11,3 @@ func TestOffset(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Offset Suite") } - -// modTypeName returns the type name of a query mod for assertion purposes. -func modTypeName(mod qm.QueryMod) string { - return reflect.TypeOf(mod).String() -} diff --git a/offset/paginator.go b/offset/paginator.go index e76f317..0a58f36 100644 --- a/offset/paginator.go +++ b/offset/paginator.go @@ -1,26 +1,23 @@ // Package offset provides offset-based pagination functionality. // // This package implements traditional offset/limit pagination with support for -// sorting and cursor encoding. It is designed to work with SQLBoiler query mods -// and provides a clean interface for paginating database results. +// sorting and cursor encoding. It implements the paging.Paginator[T] interface +// and works with the Fetcher pattern for database abstraction. 
// // Example usage: // -// paginator := offset.New(pageArgs, totalCount) -// mods := paginator.QueryMods() -// results, err := models.Items(mods...).All(ctx, db) +// fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.OffsetToQueryMods) +// paginator := offset.New(fetcher) +// result, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +// conn, err := offset.BuildConnection(result, toDomainModel) package offset import ( - "strings" + "context" "github.com/nrfta/paging-go/v2" - - "github.com/aarondl/sqlboiler/v4/queries/qm" ) -const defaultLimitVal = 50 - // PageArgs represents pagination arguments. // This is a subset of the main PageArgs type to avoid import cycles. // Implementations should provide the page size (First), cursor position (After), @@ -31,174 +28,173 @@ type PageArgs interface { GetSortBy() []paging.Sort } -// Paginator is the paginator for offset-based pagination. -// It encapsulates limit, offset, and page metadata for database queries. -type Paginator struct { - Limit int - Offset int - PageInfo paging.PageInfo - orderBy string +// Paginator implements paging.Paginator[T] for offset-based pagination. +// It wraps a Fetcher[T] and handles limit/offset calculation, ordering, +// and page metadata generation. +type Paginator[T any] struct { + fetcher paging.Fetcher[T] } -// New creates a new offset paginator. +// New creates an offset paginator that implements paging.Paginator[T]. +// Takes a Fetcher[T] which handles database queries. 
// -// Parameters: -// - page: Pagination arguments including page size, cursor, and sorting -// - totalCount: Total number of records available -// - defaultLimit: Optional default page size (defaults to 50 if not provided) -// -// The paginator automatically handles: -// - Default page size of 50 records -// - Zero-value protection to prevent divide-by-zero errors -// - Sorting with default "created_at" column -// - Descending order when specified -// - Cursor encoding/decoding using base64 -func New( - page PageArgs, - totalCount int64, - defaultLimit ...*int, -) Paginator { - limit := defaultLimitVal - if len(defaultLimit) > 0 && defaultLimit[0] != nil { - limit = *defaultLimit[0] - } +// The paginator is reusable across multiple requests - each Paginate() call +// can have different page size limits. +// +// Example: +// +// fetcher := sqlboiler.NewFetcher(queryFunc, countFunc, sqlboiler.OffsetToQueryMods) +// paginator := offset.New(fetcher) +// result, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +func New[T any](fetcher paging.Fetcher[T]) paging.Paginator[T] { + return &Paginator[T]{fetcher: fetcher} +} - if page != nil && page.GetFirst() != nil && *page.GetFirst() > 0 { - limit = *page.GetFirst() +// Paginate executes offset-based pagination and returns a Page[T]. +// +// The method: +// 1. Applies page size configuration from options (WithMaxSize, WithDefaultSize) +// 2. Calculates offset from cursor +// 3. Builds ORDER BY clause from sort directives +// 4. Fetches total count +// 5. Fetches items with limit/offset +// 6. 
Returns Page[T] with items, PageInfo, and metadata +// +// Options: +// - WithMaxSize(n): Cap page size to maximum of n +// - WithDefaultSize(n): Use n as default when First is nil +// +// Example: +// +// result, err := paginator.Paginate(ctx, args, +// paging.WithMaxSize(1000), +// paging.WithDefaultSize(50), +// ) +func (p *Paginator[T]) Paginate( + ctx context.Context, + args *paging.PageArgs, + opts ...paging.PaginateOption, +) (*paging.Page[T], error) { + // Apply page size config from options + pageConfig := paging.ApplyPaginateOptions(args, opts...) + limit := pageConfig.EffectiveLimit(args) + + // Calculate offset from cursor + offset := 0 + if args != nil && args.GetAfter() != nil { + offset = DecodeCursor(args.GetAfter()) } - // Ensure limit is never 0 to avoid divide by zero - if limit == 0 { - limit = defaultLimitVal - } + // Build ORDER BY clause + orderBy := buildOrderBy(args) - var offset int - if page != nil { - offset = DecodeCursor(page.GetAfter()) + // Get total count + totalCount, err := p.fetcher.Count(ctx, paging.FetchParams{}) + if err != nil { + return nil, err } - orderBy := "created_at" - if page != nil && len(page.GetSortBy()) > 0 { - // Build ORDER BY from sort specifications - var parts []string - for _, sort := range page.GetSortBy() { - part := sort.Column - if sort.Desc { - part += " DESC" - } - parts = append(parts, part) - } - orderBy = strings.Join(parts, ", ") + // Fetch items + params := paging.FetchParams{ + Limit: limit, + Offset: offset, + OrderBy: orderBy, + } + items, err := p.fetcher.Fetch(ctx, params) + if err != nil { + return nil, err } - pageInfo := newOffsetBasedPageInfo(&limit, totalCount, offset) + // Build PageInfo + pageInfo := buildOffsetPageInfo(limit, totalCount, offset) - return Paginator{ - Limit: limit, - Offset: offset, - PageInfo: pageInfo, - orderBy: orderBy, - } + return &paging.Page[T]{ + Nodes: items, + PageInfo: &pageInfo, + Metadata: paging.Metadata{ + Strategy: "offset", + QueryTimeMs: 0, // TODO: 
track timing + Offset: offset, + }, + }, nil } -// QueryMods returns SQLBoiler query modifiers for pagination. -// These mods apply offset, limit, and order by clauses to a query. -// -// Example usage: -// -// items, err := models.Items(paginator.QueryMods()...).All(ctx, db) -func (p *Paginator) QueryMods() []qm.QueryMod { - return []qm.QueryMod{ - qm.Offset(p.Offset), - qm.Limit(p.Limit), - qm.OrderBy(p.orderBy), +// buildOrderBy constructs the ORDER BY directives from PageArgs. +// Defaults to "created_at" if no sort is specified. +func buildOrderBy(args *paging.PageArgs) []paging.Sort { + if args == nil || args.GetSortBy() == nil || len(args.GetSortBy()) == 0 { + return []paging.Sort{{Column: "created_at", Desc: false}} } + return args.GetSortBy() } -// GetPageInfo returns the PageInfo for this paginator. -// PageInfo contains functions to retrieve pagination metadata like -// total count, cursors, and whether next/previous pages exist. -func (p *Paginator) GetPageInfo() paging.PageInfo { - return p.PageInfo -} +// buildOffsetPageInfo creates PageInfo for offset-based pagination. +// It calculates page boundaries and provides functions to query pagination state. +// +// The endOffset calculation ensures the last page cursor points to the start +// of the final complete page of results. +func buildOffsetPageInfo( + pageSize int, + totalCount int64, + currentOffset int, +) paging.PageInfo { + count := int(totalCount) + endOffset := count - (count % pageSize) + + if endOffset == count { + endOffset = count - pageSize + } + if endOffset < 0 { + endOffset = 0 + } -// GetOrderBy returns the ORDER BY clause used by this paginator. -// This includes the column names and DESC modifier if applicable. 
-func (p *Paginator) GetOrderBy() string { - return p.orderBy + return paging.PageInfo{ + TotalCount: func() (*int, error) { return &count, nil }, + StartCursor: func() (*string, error) { return EncodeCursor(0), nil }, + EndCursor: func() (*string, error) { return EncodeCursor(endOffset), nil }, + HasNextPage: func() (bool, error) { return (currentOffset + pageSize) < count, nil }, + HasPreviousPage: func() (bool, error) { return currentOffset > 0, nil }, + } } -// BuildConnection creates a Relay-compliant GraphQL connection from a slice of items. -// It handles transformation from database models to domain models and automatically -// generates sequential offset-based cursors for each item. +// BuildConnection transforms a Page[From] to a Connection[To] for GraphQL. +// It handles transformation from database models to domain models and generates +// sequential offset-based cursors for each item. // -// This function eliminates the manual boilerplate of building edges and nodes arrays, -// reducing repository code by 60-80%. +// This function eliminates the manual boilerplate of building edges and nodes arrays. // // Type parameters: // - From: Source type (e.g., *models.User from SQLBoiler) // - To: Target type (e.g., *domain.User for GraphQL) // // Parameters: -// - paginator: The offset paginator containing pagination state -// - items: Slice of database records to transform +// - page: The Page[From] returned from Paginate() // - transform: Function that converts database model to domain model // // Returns a Connection with edges, nodes, and pageInfo populated. 
// -// Example usage: +// Example: // -// // Before (manual boilerplate - 25+ lines): -// result := &domain.UserConnection{PageInfo: &paginator.PageInfo} -// for i, row := range dbUsers { -// user, err := toDomainUser(row) -// if err != nil { return nil, err } -// result.Edges = append(result.Edges, domain.Edge{ -// Cursor: *offset.EncodeCursor(paginator.Offset + i + 1), -// Node: user, -// }) -// result.Nodes = append(result.Nodes, user) -// } -// -// // After (using BuildConnection - 1 line): -// return offset.BuildConnection(paginator, dbUsers, toDomainUser) +// result, _ := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +// conn, _ := offset.BuildConnection(result, toDomainUser) func BuildConnection[From any, To any]( - paginator Paginator, - items []From, + page *paging.Page[From], transform func(From) (To, error), ) (*paging.Connection[To], error) { + // Get starting offset from page metadata + // This allows correct cursor generation for pages beyond the first one + startOffset := page.Metadata.Offset + return paging.BuildConnection( - items, - paginator.PageInfo, + page.Nodes, + *page.PageInfo, func(i int, _ From) string { - return *EncodeCursor(paginator.Offset + i + 1) + cursor := EncodeCursor(startOffset + i + 1) + if cursor == nil { + return "" + } + return *cursor }, transform, ) } - -// newOffsetBasedPageInfo creates PageInfo for offset-based pagination. -// It calculates page boundaries and provides functions to query pagination state. -// -// The endOffset calculation ensures the last page cursor points to the start -// of the final complete page of results. 
-func newOffsetBasedPageInfo( - pageSize *int, - totalCount int64, - currentOffset int, -) paging.PageInfo { - count := int(totalCount) - endOffset := count - (count % *pageSize) - - if endOffset == count { - endOffset = count - *pageSize - } - - return paging.PageInfo{ - TotalCount: func() (*int, error) { return &count, nil }, - StartCursor: func() (*string, error) { return EncodeCursor(0), nil }, - EndCursor: func() (*string, error) { return EncodeCursor(endOffset), nil }, - HasNextPage: func() (bool, error) { return (currentOffset+*pageSize < count), nil }, - HasPreviousPage: func() (bool, error) { return currentOffset > 0, nil }, - } -} diff --git a/offset/paginator_test.go b/offset/paginator_test.go index e719077..e8cfede 100644 --- a/offset/paginator_test.go +++ b/offset/paginator_test.go @@ -1,6 +1,8 @@ package offset_test import ( + "context" + "github.com/nrfta/paging-go/v2" "github.com/nrfta/paging-go/v2/offset" @@ -8,136 +10,156 @@ import ( . "github.com/onsi/gomega" ) +type testUser struct { + ID int + Name string +} + +// mockFetcher creates a simple in-memory fetcher for testing +func mockFetcher(totalCount int64, allItems []*testUser) paging.Fetcher[*testUser] { + return &testFetcher{ + totalCount: totalCount, + allItems: allItems, + } +} + +type testFetcher struct { + totalCount int64 + allItems []*testUser +} + +func (f *testFetcher) Fetch(ctx context.Context, params paging.FetchParams) ([]*testUser, error) { + start := params.Offset + end := start + params.Limit + if start >= len(f.allItems) { + return []*testUser{}, nil + } + if end > len(f.allItems) { + end = len(f.allItems) + } + return f.allItems[start:end], nil +} + +func (f *testFetcher) Count(ctx context.Context, params paging.FetchParams) (int64, error) { + return f.totalCount, nil +} + +// generateTestUsers creates a slice of test users +func generateTestUsers(count int) []*testUser { + users := make([]*testUser, count) + for i := 0; i < count; i++ { + users[i] = &testUser{ID: i + 1, 
Name: "User"} + } + return users +} + var _ = Describe("Paginator", func() { + var ( + ctx context.Context + fetcher paging.Fetcher[*testUser] + paginator paging.Paginator[*testUser] + ) + + BeforeEach(func() { + ctx = context.Background() + // Create 100 test users + allUsers := generateTestUsers(100) + fetcher = mockFetcher(100, allUsers) + paginator = offset.New(fetcher) + }) + Describe("Basic functionality", func() { It("uses the default limit when no pageArgs.First is provided", func() { - page := &paging.PageArgs{} + args := &paging.PageArgs{} + + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) - paginator := offset.New(page, 100) + // Should return 50 items (default page size) + Expect(page.Nodes).To(HaveLen(50)) - Expect(paginator.Limit).To(Equal(50)) - Expect(paginator.Offset).To(Equal(0)) + totalCount, _ := page.PageInfo.TotalCount() + Expect(*totalCount).To(Equal(100)) }) It("parses the pageArgs correctly", func() { first := 10 - page := &paging.PageArgs{ + args := &paging.PageArgs{ First: &first, After: offset.EncodeCursor(20), } - paginator := offset.New(page, 100) + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) - Expect(paginator.Limit).To(Equal(10)) - Expect(paginator.Offset).To(Equal(20)) + // Should return 10 items starting at offset 20 + Expect(page.Nodes).To(HaveLen(10)) + Expect(page.Nodes[0].ID).To(Equal(21)) // offset 20 = ID 21 (1-indexed) }) - It("creates a page info with provided info", func() { + It("creates a page info with correct pagination metadata", func() { first := 10 - page := &paging.PageArgs{ + args := &paging.PageArgs{ First: &first, After: offset.EncodeCursor(20), } - paginator := offset.New(page, 100) + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) - totalCount, _ := paginator.PageInfo.TotalCount() + totalCount, _ := page.PageInfo.TotalCount() Expect(*totalCount).To(Equal(100)) - hasNextPage, _ := paginator.PageInfo.HasNextPage() + 
hasNextPage, _ := page.PageInfo.HasNextPage() Expect(hasNextPage).To(Equal(true)) - hasPreviousPage, _ := paginator.PageInfo.HasPreviousPage() + hasPreviousPage, _ := page.PageInfo.HasPreviousPage() Expect(hasPreviousPage).To(Equal(true)) - startCursor, _ := paginator.PageInfo.StartCursor() + startCursor, _ := page.PageInfo.StartCursor() Expect(startCursor).To(Equal(offset.EncodeCursor(0))) - endCursor, _ := paginator.PageInfo.EndCursor() + endCursor, _ := page.PageInfo.EndCursor() Expect(endCursor).To(Equal(offset.EncodeCursor(90))) }) - - It("returns the sqlboiler query mods", func() { - first := 10 - page := &paging.PageArgs{ - First: &first, - After: offset.EncodeCursor(20), - } - - paginator := offset.New(page, 100) - - mods := paginator.QueryMods() - - Expect(modTypeName(mods[0])).To(Equal("qm.offsetQueryMod")) - Expect(modTypeName(mods[1])).To(Equal("qm.limitQueryMod")) - Expect(modTypeName(mods[2])).To(Equal("qm.orderByQueryMod")) - }) }) - Describe("Order By", func() { - var pa *paging.PageArgs + Describe("PaginateOption", func() { + It("should use WithDefaultSize when First is nil", func() { + args := &paging.PageArgs{} - BeforeEach(func() { - first := 0 - after := "after" - pa = &paging.PageArgs{ - After: &after, - First: &first, - } + page, err := paginator.Paginate(ctx, args, paging.WithDefaultSize(25)) + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(25)) }) - Describe("Default", func() { - It("should use `created_at` for default orderby column", func() { - sut := offset.New(pa, 5) + It("should cap page size with WithMaxSize", func() { + first := 500 + args := &paging.PageArgs{First: &first} - Expect(sut.GetOrderBy()).To(Equal("created_at")) - }) + page, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) + Expect(err).ToNot(HaveOccurred()) + // Capped to 100, but only 100 total items exist + Expect(page.Nodes).To(HaveLen(100)) }) - Describe("Multiple Columns", func() { - Describe("With DESC", func() { - It("should set the 
Paginator orderBy field", func() { - pa = paging.WithMultiSort(pa, - paging.Sort{Column: "col1", Desc: true}, - paging.Sort{Column: "col2", Desc: true}, - ) - sut := offset.New(pa, 5) - - Expect(sut.GetOrderBy()).To(Equal("col1 DESC, col2 DESC")) - }) - }) - - Describe("With ASC", func() { - It("should set the Paginator orderBy field", func() { - pa = paging.WithMultiSort(pa, - paging.Sort{Column: "col1", Desc: false}, - paging.Sort{Column: "col2", Desc: false}, - ) - sut := offset.New(pa, 5) - - Expect(sut.GetOrderBy()).To(Equal("col1, col2")) - }) - }) - }) + It("should allow page size within MaxSize", func() { + first := 50 + args := &paging.PageArgs{First: &first} - Describe("Single Column", func() { - Describe("With DESC", func() { - It("should set the Paginator orderBy field", func() { - pa = paging.WithSortBy(pa, "created_at", true) - sut := offset.New(pa, 5) - - Expect(sut.GetOrderBy()).To(Equal("created_at DESC")) - }) - }) + page, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(50)) + }) - Describe("With ASC", func() { - It("should set the Paginator orderBy field", func() { - pa = paging.WithSortBy(pa, "created_at", false) - sut := offset.New(pa, 5) + It("should cap large requests to DefaultMaxPageSize by default", func() { + first := 5000 + args := &paging.PageArgs{First: &first} - Expect(sut.GetOrderBy()).To(Equal("created_at")) - }) - }) + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) + // Capped to DefaultMaxPageSize (1000), but only 100 items exist + Expect(page.Nodes).To(HaveLen(100)) }) }) }) diff --git a/page_args.go b/page_args.go index 3fd9cb4..19c76da 100644 --- a/page_args.go +++ b/page_args.go @@ -1,5 +1,116 @@ package paging +import "fmt" + +const ( + // DefaultPageSize is the default number of items per page when not specified. + DefaultPageSize = 50 + + // DefaultMaxPageSize is the default maximum page size allowed. 
+ // This protects against resource exhaustion from unreasonably large page requests. + DefaultMaxPageSize = 1000 +) + +// PageConfig holds pagination configuration options. +// Use NewPageConfig() to create a config with sensible defaults, +// then customize using the With* methods. +// +// Example: +// +// config := paging.NewPageConfig().WithMaxSize(500) +// limit := config.EffectiveLimit(args) +type PageConfig struct { + // DefaultSize is the page size used when not specified in PageArgs. + DefaultSize int + + // MaxSize is the maximum allowed page size. Requests exceeding this + // will be capped to MaxSize (not rejected). + MaxSize int +} + +// NewPageConfig creates a PageConfig with sensible defaults: +// - DefaultSize: 50 +// - MaxSize: 1000 +func NewPageConfig() *PageConfig { + return &PageConfig{ + DefaultSize: DefaultPageSize, + MaxSize: DefaultMaxPageSize, + } +} + +// WithDefaultSize sets the default page size and returns the config for chaining. +func (c *PageConfig) WithDefaultSize(size int) *PageConfig { + if size > 0 { + c.DefaultSize = size + } + return c +} + +// WithMaxSize sets the maximum page size and returns the config for chaining. +func (c *PageConfig) WithMaxSize(size int) *PageConfig { + if size > 0 { + c.MaxSize = size + } + return c +} + +// EffectiveLimit returns the page size to use, applying defaults and caps. 
+// - If args is nil or First is nil/zero, returns DefaultSize +// - If First exceeds MaxSize, returns MaxSize +// - Otherwise returns First +func (c *PageConfig) EffectiveLimit(args *PageArgs) int { + if c == nil { + c = NewPageConfig() + } + + defaultSize := c.DefaultSize + if defaultSize <= 0 { + defaultSize = DefaultPageSize + } + + maxSize := c.MaxSize + if maxSize <= 0 { + maxSize = DefaultMaxPageSize + } + + if args == nil || args.First == nil || *args.First <= 0 { + return defaultSize + } + + if *args.First > maxSize { + return maxSize + } + + return *args.First +} + +// Validate checks if the page size exceeds MaxSize and returns an error if so. +// Unlike EffectiveLimit which caps silently, Validate returns an error for +// explicit rejection of invalid requests. +func (c *PageConfig) Validate(args *PageArgs) error { + if c == nil { + c = NewPageConfig() + } + + if args == nil || args.First == nil { + return nil + } + + maxSize := c.MaxSize + if maxSize <= 0 { + maxSize = DefaultMaxPageSize + } + + if *args.First > maxSize { + return &PageSizeError{ + Requested: *args.First, + Maximum: maxSize, + } + } + + return nil +} + // PageArgs represents pagination query parameters. // It follows the Relay cursor pagination specification with First (page size), // After (cursor), and SortBy (sort configuration) fields. @@ -60,3 +171,132 @@ func (pa *PageArgs) GetAfter() *string { func (pa *PageArgs) GetSortBy() []Sort { return pa.SortBy } + +// ValidatePageSize validates that the requested page size does not exceed the maximum. +// Returns an error if First is set and exceeds maxPageSize. +// +// Deprecated: Use PageConfig.Validate() instead for clearer configuration. +// This function is kept for backwards compatibility. +// +// If maxPageSize is 0, uses DefaultMaxPageSize (1000). 
+// +// Example with custom limit: +// +// func (r *resolver) Users(ctx context.Context, args *paging.PageArgs) (*UserConnection, error) { +// if err := paging.ValidatePageSize(args, 500); err != nil { +// return nil, err +// } +// // ... proceed with pagination +// } +// +// Preferred approach using PageConfig: +// +// config := paging.NewPageConfig().WithMaxSize(500) +// if err := config.Validate(args); err != nil { +// return nil, err +// } +func ValidatePageSize(args *PageArgs, maxPageSize int) error { + config := NewPageConfig() + if maxPageSize > 0 { + config.WithMaxSize(maxPageSize) + } + return config.Validate(args) +} + +// Validate validates the PageArgs using DefaultMaxPageSize (1000). +// This is a convenience method that uses the default PageConfig. +// +// For custom limits, use ValidateWith: +// +// config := paging.NewPageConfig().WithMaxSize(500) +// if err := args.ValidateWith(config); err != nil { +// return nil, err +// } +func (pa *PageArgs) Validate() error { + return NewPageConfig().Validate(pa) +} + +// ValidateWith validates the PageArgs using a custom PageConfig. +// This allows specifying custom maximum page sizes per endpoint. +// +// Example: +// +// config := paging.NewPageConfig().WithMaxSize(100) +// if err := args.ValidateWith(config); err != nil { +// return nil, err // Page size too large +// } +func (pa *PageArgs) ValidateWith(config *PageConfig) error { + return config.Validate(pa) +} + +// PageSizeError is returned when the requested page size exceeds the maximum allowed. +type PageSizeError struct { + Requested int + Maximum int +} + +func (e *PageSizeError) Error() string { + return fmt.Sprintf("requested page size %d exceeds maximum allowed page size of %d", + e.Requested, e.Maximum) +} + +// PaginateOption configures page size limits for a pagination request. +// Options are passed to Paginate() to configure per-request limits. 
+// +// Example: +// +// result, err := paginator.Paginate(ctx, args, +// paging.WithMaxSize(100), +// paging.WithDefaultSize(25), +// ) +type PaginateOption func(*paginateConfig) + +// paginateConfig holds page size configuration for a pagination request. +type paginateConfig struct { + maxSize int + defaultSize int +} + +// WithMaxSize sets the maximum page size for this request. +// If the requested size exceeds this, it will be capped to maxSize. +// +// Example: +// +// result, err := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) +func WithMaxSize(size int) PaginateOption { + return func(c *paginateConfig) { + if size > 0 { + c.maxSize = size + } + } +} + +// WithDefaultSize sets the default page size for this request. +// Used when args.First is nil or zero. +// +// Example: +// +// result, err := paginator.Paginate(ctx, args, paging.WithDefaultSize(25)) +func WithDefaultSize(size int) PaginateOption { + return func(c *paginateConfig) { + if size > 0 { + c.defaultSize = size + } + } +} + +// ApplyPaginateOptions applies functional options and returns a PageConfig. +// This is an internal helper used by all paginators. 
+func ApplyPaginateOptions(args *PageArgs, opts ...PaginateOption) *PageConfig { + cfg := &paginateConfig{ + maxSize: DefaultMaxPageSize, + defaultSize: DefaultPageSize, + } + for _, opt := range opts { + opt(cfg) + } + return &PageConfig{ + MaxSize: cfg.maxSize, + DefaultSize: cfg.defaultSize, + } +} diff --git a/page_args_test.go b/page_args_test.go index 15cb69f..da2fcb5 100644 --- a/page_args_test.go +++ b/page_args_test.go @@ -99,3 +99,289 @@ var _ = Describe("NewEmptyPageInfo", func() { Expect(hasPrev).To(BeFalse()) }) }) + +var _ = Describe("ValidatePageSize", func() { + It("should accept nil args", func() { + err := paging.ValidatePageSize(nil, 100) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept args with nil First", func() { + args := &paging.PageArgs{} + err := paging.ValidatePageSize(args, 100) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept page size within limit", func() { + first := 50 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 100) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept page size equal to limit", func() { + first := 100 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 100) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should reject page size exceeding limit", func() { + first := 1010 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 1000) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("1010")) + Expect(err.Error()).To(ContainSubstring("1000")) + Expect(err.Error()).To(ContainSubstring("exceeds maximum")) + }) + + It("should return PageSizeError with correct values", func() { + first := 150 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 100) + Expect(err).To(HaveOccurred()) + + var pageSizeErr *paging.PageSizeError + Expect(err).To(BeAssignableToTypeOf(pageSizeErr)) + + pageSizeErr = err.(*paging.PageSizeError) + 
Expect(pageSizeErr.Requested).To(Equal(150)) + Expect(pageSizeErr.Maximum).To(Equal(100)) + }) + + It("should handle very large page size requests", func() { + first := 999999 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 100) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("999999")) + }) + + It("should use DefaultMaxPageSize when maxPageSize is 0", func() { + first := 1500 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 0) // Should use DefaultMaxPageSize (1000) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("1500")) + Expect(err.Error()).To(ContainSubstring("1000")) + }) + + It("should accept page size within DefaultMaxPageSize when using 0", func() { + first := 500 + args := &paging.PageArgs{First: &first} + err := paging.ValidatePageSize(args, 0) // Should use DefaultMaxPageSize (1000) + Expect(err).ToNot(HaveOccurred()) + }) +}) + +var _ = Describe("PageArgs.Validate", func() { + It("should validate using DefaultMaxPageSize", func() { + first := 500 + args := &paging.PageArgs{First: &first} + err := args.Validate() + Expect(err).ToNot(HaveOccurred()) + }) + + It("should reject page size exceeding DefaultMaxPageSize", func() { + first := 1500 + args := &paging.PageArgs{First: &first} + err := args.Validate() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("1500")) + Expect(err.Error()).To(ContainSubstring("1000")) + }) + + It("should accept nil args", func() { + var args *paging.PageArgs + err := args.Validate() + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept page size equal to DefaultMaxPageSize", func() { + first := 1000 + args := &paging.PageArgs{First: &first} + err := args.Validate() + Expect(err).ToNot(HaveOccurred()) + }) +}) + +var _ = Describe("PageConfig", func() { + Describe("NewPageConfig", func() { + It("should create config with default values", func() { + config := 
paging.NewPageConfig() + Expect(config.DefaultSize).To(Equal(paging.DefaultPageSize)) + Expect(config.MaxSize).To(Equal(paging.DefaultMaxPageSize)) + }) + }) + + Describe("WithDefaultSize", func() { + It("should set default size", func() { + config := paging.NewPageConfig().WithDefaultSize(25) + Expect(config.DefaultSize).To(Equal(25)) + }) + + It("should ignore zero or negative values", func() { + config := paging.NewPageConfig().WithDefaultSize(0) + Expect(config.DefaultSize).To(Equal(paging.DefaultPageSize)) + + config = paging.NewPageConfig().WithDefaultSize(-10) + Expect(config.DefaultSize).To(Equal(paging.DefaultPageSize)) + }) + + It("should support method chaining", func() { + config := paging.NewPageConfig(). + WithDefaultSize(25). + WithMaxSize(500) + Expect(config.DefaultSize).To(Equal(25)) + Expect(config.MaxSize).To(Equal(500)) + }) + }) + + Describe("WithMaxSize", func() { + It("should set max size", func() { + config := paging.NewPageConfig().WithMaxSize(500) + Expect(config.MaxSize).To(Equal(500)) + }) + + It("should ignore zero or negative values", func() { + config := paging.NewPageConfig().WithMaxSize(0) + Expect(config.MaxSize).To(Equal(paging.DefaultMaxPageSize)) + + config = paging.NewPageConfig().WithMaxSize(-10) + Expect(config.MaxSize).To(Equal(paging.DefaultMaxPageSize)) + }) + }) + + Describe("EffectiveLimit", func() { + It("should return default size when args is nil", func() { + config := paging.NewPageConfig().WithDefaultSize(25) + limit := config.EffectiveLimit(nil) + Expect(limit).To(Equal(25)) + }) + + It("should return default size when First is nil", func() { + config := paging.NewPageConfig().WithDefaultSize(25) + args := &paging.PageArgs{} + limit := config.EffectiveLimit(args) + Expect(limit).To(Equal(25)) + }) + + It("should return default size when First is zero", func() { + config := paging.NewPageConfig().WithDefaultSize(25) + zero := 0 + args := &paging.PageArgs{First: &zero} + limit := config.EffectiveLimit(args) + 
Expect(limit).To(Equal(25)) + }) + + It("should return default size when First is negative", func() { + config := paging.NewPageConfig().WithDefaultSize(25) + negative := -10 + args := &paging.PageArgs{First: &negative} + limit := config.EffectiveLimit(args) + Expect(limit).To(Equal(25)) + }) + + It("should return First when within limits", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 50 + args := &paging.PageArgs{First: &first} + limit := config.EffectiveLimit(args) + Expect(limit).To(Equal(50)) + }) + + It("should cap First to MaxSize when exceeded", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 500 + args := &paging.PageArgs{First: &first} + limit := config.EffectiveLimit(args) + Expect(limit).To(Equal(100)) + }) + + It("should handle nil config gracefully", func() { + var config *paging.PageConfig + first := 50 + args := &paging.PageArgs{First: &first} + limit := config.EffectiveLimit(args) + Expect(limit).To(Equal(50)) + }) + + It("should use system defaults when config values are zero", func() { + config := &paging.PageConfig{DefaultSize: 0, MaxSize: 0} + limit := config.EffectiveLimit(nil) + Expect(limit).To(Equal(paging.DefaultPageSize)) + }) + }) + + Describe("Validate", func() { + It("should accept valid page size", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 50 + args := &paging.PageArgs{First: &first} + err := config.Validate(args) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept page size equal to max", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 100 + args := &paging.PageArgs{First: &first} + err := config.Validate(args) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should reject page size exceeding max", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 150 + args := &paging.PageArgs{First: &first} + err := config.Validate(args) + Expect(err).To(HaveOccurred()) + + var pageSizeErr *paging.PageSizeError + 
Expect(err).To(BeAssignableToTypeOf(pageSizeErr)) + pageSizeErr = err.(*paging.PageSizeError) + Expect(pageSizeErr.Requested).To(Equal(150)) + Expect(pageSizeErr.Maximum).To(Equal(100)) + }) + + It("should accept nil args", func() { + config := paging.NewPageConfig() + err := config.Validate(nil) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should accept nil First", func() { + config := paging.NewPageConfig() + args := &paging.PageArgs{} + err := config.Validate(args) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should handle nil config gracefully", func() { + var config *paging.PageConfig + first := 500 + args := &paging.PageArgs{First: &first} + err := config.Validate(args) + Expect(err).ToNot(HaveOccurred()) + }) + }) +}) + +var _ = Describe("PageArgs.ValidateWith", func() { + It("should validate using custom config", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 50 + args := &paging.PageArgs{First: &first} + err := args.ValidateWith(config) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should reject page size exceeding custom max", func() { + config := paging.NewPageConfig().WithMaxSize(100) + first := 150 + args := &paging.PageArgs{First: &first} + err := args.ValidateWith(config) + Expect(err).To(HaveOccurred()) + }) +}) diff --git a/quotafill/quotafill.go b/quotafill/quotafill.go index 27d6b1f..65aa705 100644 --- a/quotafill/quotafill.go +++ b/quotafill/quotafill.go @@ -78,17 +78,16 @@ func (w *Wrapper[T]) getMultiplier(iteration int) int { return w.backoffMultipliers[min(iteration, len(w.backoffMultipliers)-1)] } -// getRequestedSize extracts the requested page size from args, defaulting to defaultPageSize. -func getRequestedSize(args *paging.PageArgs) int { - if args != nil && args.GetFirst() != nil && *args.GetFirst() > 0 { - return *args.GetFirst() - } - return defaultPageSize -} - // New creates a quota-fill paginator that adapts a fetcher with filtering. 
// The schema parameter provides both the cursor encoder and sort ordering, // ensuring they are always synchronized. +// +// Page size limits are configured per-request via Paginate() options: +// +// paginator := quotafill.New(fetcher, filter, schema, +// quotafill.WithMaxIterations(10), +// ) +// result, _ := paginator.Paginate(ctx, args, paging.WithMaxSize(100)) func New[T any]( fetcher paging.Fetcher[T], filter paging.FilterFunc[T], @@ -117,13 +116,15 @@ func New[T any]( } } -func (w *Wrapper[T]) Paginate(ctx context.Context, args *paging.PageArgs) (*paging.Page[T], error) { +func (w *Wrapper[T]) Paginate(ctx context.Context, args *paging.PageArgs, opts ...paging.PaginateOption) (*paging.Page[T], error) { startTime := time.Now() timeoutCtx, cancel := context.WithTimeout(ctx, w.timeout) defer cancel() - requestedSize := getRequestedSize(args) + // Apply per-request page size config + pageConfig := paging.ApplyPaginateOptions(args, opts...) + requestedSize := pageConfig.EffectiveLimit(args) targetSize := requestedSize + 1 state := &paginationState[T]{ diff --git a/quotafill/quotafill_test.go b/quotafill/quotafill_test.go index e806a88..f29ab41 100644 --- a/quotafill/quotafill_test.go +++ b/quotafill/quotafill_test.go @@ -564,3 +564,78 @@ func rejectAllFilter() func(context.Context, []testItem) ([]testItem, error) { return []testItem{}, nil } } + +var _ = Describe("PaginateOption", func() { + It("should use WithDefaultSize when First is nil", func() { + fetcher := newMockFetcher([]testItem{ + {ID: 1}, {ID: 2}, {ID: 3}, {ID: 4}, {ID: 5}, + {ID: 6}, {ID: 7}, {ID: 8}, {ID: 9}, {ID: 10}, + {ID: 11}, {ID: 12}, {ID: 13}, {ID: 14}, {ID: 15}, + {ID: 16}, {ID: 17}, {ID: 18}, {ID: 19}, {ID: 20}, + {ID: 21}, {ID: 22}, {ID: 23}, {ID: 24}, {ID: 25}, + {ID: 26}, + }) + + wrapper := quotafill.New[testItem](fetcher, passAllFilter(), nil) + + args := &paging.PageArgs{} + page, err := wrapper.Paginate(context.Background(), args, + paging.WithDefaultSize(25), + ) + + 
Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(25)) + }) + + It("should cap page size with WithMaxSize", func() { + fetcher := newMockFetcher([]testItem{ + {ID: 1}, {ID: 2}, {ID: 3}, {ID: 4}, {ID: 5}, + }) + + wrapper := quotafill.New[testItem](fetcher, passAllFilter(), nil) + + first := 100 + args := &paging.PageArgs{First: &first} + page, err := wrapper.Paginate(context.Background(), args, + paging.WithMaxSize(3), + ) + + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(3)) + }) + + It("should allow page size within MaxSize", func() { + fetcher := newMockFetcher([]testItem{ + {ID: 1}, {ID: 2}, {ID: 3}, {ID: 4}, {ID: 5}, + }) + + wrapper := quotafill.New[testItem](fetcher, passAllFilter(), nil) + + first := 3 + args := &paging.PageArgs{First: &first} + page, err := wrapper.Paginate(context.Background(), args, + paging.WithMaxSize(100), + ) + + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(3)) + }) + + It("should not enforce max when no options are provided", func() { + // Generate 60 items (more than default max of 50) + items := make([]testItem, 60) + for i := range items { + items[i] = testItem{ID: i + 1} + } + fetcher := newMockFetcher(items) + + wrapper := quotafill.New[testItem](fetcher, passAllFilter(), nil) + + first := 55 + args := &paging.PageArgs{First: &first} + page, err := wrapper.Paginate(context.Background(), args) + + Expect(err).ToNot(HaveOccurred()) + Expect(page.Nodes).To(HaveLen(55)) + }) +}) diff --git a/tests/cursor_integration_test.go b/tests/cursor_integration_test.go index 8706867..503ef9b 100644 --- a/tests/cursor_integration_test.go +++ b/tests/cursor_integration_test.go @@ -16,6 +16,20 @@ import ( var _ = Describe("Cursor Pagination Integration Tests", func() { var userIDs []string var userSchema *cursor.Schema[*models.User] + var userPaginator paging.Paginator[*models.User] + + // Helper to create a standard user fetcher with cursor strategy + createUserFetcher := func() 
paging.Fetcher[*models.User] { + return sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, container.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return 0, nil // Count not used for cursor pagination + }, + sqlboiler.CursorToQueryMods, + ) + } BeforeEach(func() { // Clean tables before each test @@ -32,20 +46,11 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). Field("email", "e", func(u *models.User) any { return u.Email }). FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) - }) - // Helper to create a standard user fetcher with cursor strategy - createUserFetcher := func() paging.Fetcher[*models.User] { - return sqlboiler.NewFetcher( - func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { - return models.Users(mods...).All(ctx, container.DB) - }, - func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { - return 0, nil // Count not used for cursor pagination - }, - sqlboiler.CursorToQueryMods, - ) - } + // Create paginator (reusable) + fetcher := createUserFetcher() + userPaginator = cursor.New(fetcher, userSchema) + }) Describe("Basic Cursor Pagination", func() { It("should paginate users with default page size using SQLBoiler", func() { @@ -55,42 +60,33 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { First: &first, }, "created_at", true) - // Create fetcher - fetcher := createUserFetcher() - - // Fetch with pagination using BuildFetchParams for consistent cursor encoding - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // Create paginator - paginator, err := cursor.New(pageArgs, userSchema, users) + // Paginate + page, err := 
userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // Verify results (N+1: we fetch 11, paginator trims to 10) - Expect(paginator.GetLimit()).To(Equal(10)) - Expect(paginator.GetCursor()).To(BeNil()) // First page + // Verify results + Expect(page.Nodes).To(HaveLen(10)) + Expect(page.Metadata.Strategy).To(Equal("cursor")) // Verify PageInfo - hasNext, err := paginator.PageInfo.HasNextPage() + hasNext, err := page.PageInfo.HasNextPage() Expect(err).ToNot(HaveOccurred()) Expect(hasNext).To(BeTrue()) // Full page implies more data - hasPrev, err := paginator.PageInfo.HasPreviousPage() + hasPrev, err := page.PageInfo.HasPreviousPage() Expect(err).ToNot(HaveOccurred()) Expect(hasPrev).To(BeFalse()) // No cursor = first page - totalCount, err := paginator.PageInfo.TotalCount() + totalCount, err := page.PageInfo.TotalCount() Expect(err).ToNot(HaveOccurred()) Expect(totalCount).To(BeNil()) // Cursor pagination doesn't provide total count // Verify cursors are populated - startCursor, err := paginator.PageInfo.StartCursor() + startCursor, err := page.PageInfo.StartCursor() Expect(err).ToNot(HaveOccurred()) Expect(startCursor).ToNot(BeNil()) - endCursor, err := paginator.PageInfo.EndCursor() + endCursor, err := page.PageInfo.EndCursor() Expect(err).ToNot(HaveOccurred()) Expect(endCursor).ToNot(BeNil()) }) @@ -104,51 +100,35 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { First: &first, }, "created_at", true) - fetcher := createUserFetcher() - - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - firstPageUsers, err := fetcher.Fetch(ctx, fetchParams) + firstPage, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - - paginator, err := cursor.New(pageArgs, userSchema, firstPageUsers) - Expect(err).ToNot(HaveOccurred()) - endCursor, _ := paginator.PageInfo.EndCursor() + Expect(firstPage.Nodes).To(HaveLen(10)) + endCursor, _ := firstPage.PageInfo.EndCursor() 
// Fetch second page using EndCursor pageArgs.After = endCursor - fetchParams, err = cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - secondPageUsers, err := fetcher.Fetch(ctx, fetchParams) + secondPage, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // Create second paginator - paginator2, err := cursor.New(pageArgs, userSchema, secondPageUsers) - Expect(err).ToNot(HaveOccurred()) - - // Verify N+1 pattern: fetched LIMIT+1 records (11) because there's a 3rd page - Expect(secondPageUsers).To(HaveLen(11), "N+1: should fetch LIMIT+1 when there's a next page") - - // Verify BuildConnection trims to LIMIT + // Verify BuildConnection works transform := func(u *models.User) (*models.User, error) { return u, nil } - conn, err := cursor.BuildConnection(paginator2, secondPageUsers, transform) + conn, err := cursor.BuildConnection(secondPage, userSchema, pageArgs, transform) Expect(err).ToNot(HaveOccurred()) - Expect(conn.Nodes).To(HaveLen(10), "BuildConnection should trim to LIMIT") + Expect(conn.Nodes).To(HaveLen(10)) - // Verify no overlap with first page (trim to limit for comparison) - limit := 10 - for _, u2 := range TrimToLimit(secondPageUsers, limit) { - for _, u1 := range TrimToLimit(firstPageUsers, limit) { + // Verify no overlap with first page + for _, u2 := range secondPage.Nodes { + for _, u1 := range firstPage.Nodes { Expect(u2.ID).ToNot(Equal(u1.ID)) } } // Still has next page (25 total, we're on second page) - hasNext, _ := paginator2.PageInfo.HasNextPage() + hasNext, _ := secondPage.PageInfo.HasNextPage() Expect(hasNext).To(BeTrue()) // Has previous page now - hasPrev, _ := paginator2.PageInfo.HasPreviousPage() + hasPrev, _ := secondPage.PageInfo.HasPreviousPage() Expect(hasPrev).To(BeTrue()) }) }) @@ -159,8 +139,6 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { first := 10 var currentCursor *string - fetcher := createUserFetcher() - // Get to page 3 (after 20 records, 
should get last 5) for i := 0; i < 2; i++ { pageArgs := paging.WithSortBy(&paging.PageArgs{ @@ -168,14 +146,9 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { After: currentCursor, }, "created_at", true) - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - paginator, err := cursor.New(pageArgs, userSchema, users) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - currentCursor, _ = paginator.PageInfo.EndCursor() + currentCursor, _ = page.PageInfo.EndCursor() } // Now fetch the last page @@ -184,23 +157,18 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { After: currentCursor, }, "created_at", true) - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - lastPageUsers, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - paginator, err := cursor.New(pageArgs, userSchema, lastPageUsers) + lastPage, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) // Last page has 5 items (25 total - 20 already fetched) - Expect(lastPageUsers).To(HaveLen(5)) + Expect(lastPage.Nodes).To(HaveLen(5)) // No next page - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := lastPage.PageInfo.HasNextPage() Expect(hasNext).To(BeFalse()) // Has previous page - hasPrev, _ := paginator.PageInfo.HasPreviousPage() + hasPrev, _ := lastPage.PageInfo.HasPreviousPage() Expect(hasPrev).To(BeTrue()) }) }) @@ -208,8 +176,6 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { Describe("Empty Results", func() { It("should handle cursor beyond all data", func() { // Create a cursor that's beyond all data for DESC ordering - // With DESC order, we need a cursor in the PAST (before all records) - // to get zero results, since < operator gets records BEFORE the cursor 
pastUser := &models.User{ ID: "00000000-0000-0000-0000-000000000000", CreatedAt: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), @@ -224,28 +190,21 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { pastCursor, _ := enc.Encode(pastUser) pageArgs.After = pastCursor - fetcher := createUserFetcher() - - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - paginator, err := cursor.New(pageArgs, userSchema, users) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) // Should return 0 items - Expect(users).To(HaveLen(0)) + Expect(page.Nodes).To(HaveLen(0)) // No next page - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := page.PageInfo.HasNextPage() Expect(hasNext).To(BeFalse()) // Start and End cursors should be nil - startCursor, _ := paginator.PageInfo.StartCursor() + startCursor, _ := page.PageInfo.StartCursor() Expect(startCursor).To(BeNil()) - endCursor, _ := paginator.PageInfo.EndCursor() + endCursor, _ := page.PageInfo.EndCursor() Expect(endCursor).To(BeNil()) }) }) @@ -257,37 +216,24 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { paging.Sort{Column: "email", Desc: false}, ) - fetcher := createUserFetcher() - - // Use schema's BuildFetchParams to get complete FetchParams - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) + // Should get 5 users + Expect(page.Nodes).To(HaveLen(5)) - paginator, err := cursor.New(pageArgs, userSchema, users) - Expect(err).ToNot(HaveOccurred()) - - // Verify N+1 pattern: fetched LIMIT+1 records (6) because there are 25 users - Expect(users).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") - - // Verify 
paginator correctly detects next page from N+1 - hasNext, _ := paginator.PageInfo.HasNextPage() - Expect(hasNext).To(BeTrue(), "HasNextPage should be true when we got LIMIT+1 records") + // Paginator correctly detects next page + hasNext, _ := page.PageInfo.HasNextPage() + Expect(hasNext).To(BeTrue()) // Verify sorted order - Expect(users[0].Email).To(HaveSuffix("@example.com")) + Expect(page.Nodes[0].Email).To(HaveSuffix("@example.com")) }) }) Describe("Composite Key Uniqueness", func() { It("should prevent duplicates with same created_at timestamps", func() { - // This test verifies that the ID tiebreaker prevents duplicates - // when multiple users have the same created_at timestamp - first := 10 - fetcher := createUserFetcher() // Fetch multiple pages and collect all IDs allIDs := make(map[string]bool) @@ -299,26 +245,22 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { After: currentCursor, }, "created_at", true) - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - if len(users) == 0 { + if len(page.Nodes) == 0 { break } - // Check for duplicates (trim to limit for N+1 pattern) - for _, u := range TrimToLimit(users, 10) { + // Check for duplicates + for _, u := range page.Nodes { if allIDs[u.ID] { Fail("Found duplicate ID: " + u.ID) } allIDs[u.ID] = true } - paginator, err := cursor.New(pageArgs, userSchema, users) - Expect(err).ToNot(HaveOccurred()) - currentCursor, _ = paginator.PageInfo.EndCursor() + currentCursor, _ = page.PageInfo.EndCursor() } // Should have collected 25 unique IDs @@ -336,8 +278,9 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { Expect(err).ToNot(HaveOccurred()) Expect(userIDs).To(HaveLen(100)) - // Create fetcher once + // Create new paginator for larger dataset fetcher := createUserFetcher() + paginator := 
cursor.New(fetcher, userSchema) // Paginate through all pages pageSize := 25 @@ -351,20 +294,11 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { After: currentCursor, }, "created_at", true) - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - // Note: We fetch pageSize+1 but paginator will trim to pageSize - - paginator, err := cursor.New(pageArgs, userSchema, users) + result, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // After paginator processes, we should have pageSize items - // (trimmed from pageSize+1 if we got that many) - totalFetched += pageSize - - currentCursor, _ = paginator.PageInfo.EndCursor() + totalFetched += len(result.Nodes) + currentCursor, _ = result.PageInfo.EndCursor() } Expect(totalFetched).To(Equal(100)) @@ -385,18 +319,12 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { cursorPos, _ := enc.Decode(invalid) Expect(cursorPos).To(BeNil()) - // Fetch should work as if it's the first page (BuildFetchParams will return nil cursor) - fetcher := createUserFetcher() - - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - Expect(fetchParams.Cursor).To(BeNil()) // Invalid cursor decoded as nil - users, err := fetcher.Fetch(ctx, fetchParams) + // Paginate should work as if it's the first page + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // N+1 pattern: With invalid cursor treated as first page, we fetch LIMIT+1 - // We have 25 users, so we should get 11 records (10+1) - Expect(users).To(HaveLen(11), "N+1: should fetch LIMIT+1 records for first page") + // Should get first page results + Expect(page.Nodes).To(HaveLen(10)) }) It("should handle invalid JSON in cursor", func() { @@ -458,100 +386,41 @@ var _ = Describe("Cursor Pagination Integration Tests", 
func() { sqlboiler.CursorToQueryMods, ) - fetchParams, err := cursor.BuildFetchParams(pageArgs, postSchema) - Expect(err).ToNot(HaveOccurred()) - posts, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - paginator, err := cursor.New(pageArgs, postSchema, posts) + paginator := cursor.New(fetcher, postSchema) + page, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // Verify N+1 pattern: Verify we fetched LIMIT+1 when there's a next page - // 25 users * 3 posts * 2/3 published = 50 posts, so first page should have 11 records - Expect(posts).To(HaveLen(11), "N+1: should fetch LIMIT+1 (10+1=11) when there's a next page") - Expect(paginator.GetLimit()).To(Equal(10)) + // Should get 10 posts + Expect(page.Nodes).To(HaveLen(10)) - // Verify BuildConnection trims to LIMIT + // Verify BuildConnection works transform := func(p *models.Post) (*models.Post, error) { return p, nil } - conn, err := cursor.BuildConnection(paginator, posts, transform) + conn, err := cursor.BuildConnection(page, postSchema, pageArgs, transform) Expect(err).ToNot(HaveOccurred()) - Expect(conn.Nodes).To(HaveLen(10), "BuildConnection should trim to LIMIT") + Expect(conn.Nodes).To(HaveLen(10)) // Verify all posts are published - for _, post := range posts { + for _, post := range page.Nodes { Expect(post.PublishedAt).ToNot(BeNil()) } - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := page.PageInfo.HasNextPage() Expect(hasNext).To(BeTrue()) // 25 users * 3 posts * 2/3 published = 50 posts }) }) - Describe("Sorting Conflicts", func() { - It("should work correctly when ORDER BY is only in FetchParams", func() { - // This is the CORRECT way - don't add qm.OrderBy, let cursor strategy handle it - - first := 5 - pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "created_at", true) - - // Correct fetcher - no manual ORDER BY - fetcher := createUserFetcher() - - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - 
Expect(err).ToNot(HaveOccurred()) - firstPage, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // N+1 pattern: Verify we fetched LIMIT+1 records - Expect(firstPage).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") - - paginator, err := cursor.New(pageArgs, userSchema, firstPage) - Expect(err).ToNot(HaveOccurred()) - endCursor, _ := paginator.PageInfo.EndCursor() - - // Fetch second page - pageArgs.After = endCursor - fetchParams, err = cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - - secondPage, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // N+1 pattern: Verify we fetched LIMIT+1 for second page too - Expect(secondPage).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") - - // Verify no overlap (trim to limit for N+1 pattern) - limit := 5 - for _, u2 := range TrimToLimit(secondPage, limit) { - for _, u1 := range TrimToLimit(firstPage, limit) { - Expect(u2.ID).ToNot(Equal(u1.ID)) - } - } - }) - }) - Describe("SQL Generation with Filters", func() { It("should combine user filters with cursor conditions correctly", func() { // First, fetch page without filter to get a cursor first := 5 pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "created_at", true) - fetcher := createUserFetcher() - - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - firstPageUsers, err := fetcher.Fetch(ctx, fetchParams) + firstPage, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - - // N+1 pattern: Verify we fetched LIMIT+1 records - Expect(firstPageUsers).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") - - paginator, err := cursor.New(pageArgs, userSchema, firstPageUsers) - Expect(err).ToNot(HaveOccurred()) - endCursor, _ := paginator.PageInfo.EndCursor() + Expect(firstPage.Nodes).To(HaveLen(5)) + endCursor, _ := 
firstPage.PageInfo.EndCursor() // Now apply a filter AND use the cursor - // This tests that the SQL is: WHERE email LIKE ? AND (cursor conditions) fetcherWithFilter := sqlboiler.NewFetcher( func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { // Prepend user's filter before cursor mods @@ -564,29 +433,24 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { sqlboiler.CursorToQueryMods, ) + paginatorWithFilter := cursor.New(fetcherWithFilter, userSchema) + // Fetch with filter using cursor from first page pageArgs.After = endCursor - fetchParams, err = cursor.BuildFetchParams(pageArgs, userSchema) + filteredPage, err := paginatorWithFilter.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - filteredUsers, err := fetcherWithFilter.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // Should get users that match BOTH conditions: - // 1. email LIKE '%@example.com' - // 2. (created_at, id) < cursor position - // N+1 pattern: Verify we fetched LIMIT+1 records (filter matches all users) - Expect(filteredUsers).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") + // Should get users that match BOTH conditions + Expect(filteredPage.Nodes).To(HaveLen(5)) // Verify all users match the filter - for _, u := range filteredUsers { + for _, u := range filteredPage.Nodes { Expect(u.Email).To(HaveSuffix("@example.com")) } - // Verify no overlap with first page (trim to limit for N+1 pattern) - limit := 5 - for _, u2 := range TrimToLimit(filteredUsers, limit) { - for _, u1 := range TrimToLimit(firstPageUsers, limit) { + // Verify no overlap with first page + for _, u2 := range filteredPage.Nodes { + for _, u1 := range firstPage.Nodes { Expect(u2.ID).ToNot(Equal(u1.ID)) } } @@ -609,25 +473,23 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { sqlboiler.CursorToQueryMods, ) + paginator := cursor.New(fetcher, userSchema) + first := 5 pageArgs := paging.WithSortBy(&paging.PageArgs{First: 
&first}, "created_at", true) - fetchParams, err := cursor.BuildFetchParams(pageArgs, userSchema) - Expect(err).ToNot(HaveOccurred()) - users, err := fetcher.Fetch(ctx, fetchParams) + page, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // N+1 pattern: Verify we fetched LIMIT+1 records with multiple filters - Expect(users).To(HaveLen(6), "N+1: should fetch LIMIT+1 (5+1=6) when there's a next page") + // Should get 5 users with multiple filters + Expect(page.Nodes).To(HaveLen(5)) - // Verify HasNextPage is correctly set based on N+1 result - paginator, err := cursor.New(pageArgs, userSchema, users) - Expect(err).ToNot(HaveOccurred()) - hasNext, _ := paginator.PageInfo.HasNextPage() - Expect(hasNext).To(BeTrue(), "HasNextPage should be true when we got LIMIT+1 records") + // Verify HasNextPage is correctly set + hasNext, _ := page.PageInfo.HasNextPage() + Expect(hasNext).To(BeTrue()) // Verify all users match the filters - for _, u := range users { + for _, u := range page.Nodes { Expect(u.Email).To(HaveSuffix("@example.com")) Expect(u.Name).ToNot(BeNil()) } @@ -653,10 +515,7 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { }) It("should paginate JOIN queries with qualified column names in ORDER BY", func() { - // CRITICAL: Create schema with QUALIFIED column names - // This ensures the cursor strategy generates WHERE clauses like: - // WHERE (posts.created_at, posts.id) < ($1, $2) - // instead of ambiguous: WHERE (created_at, id) < ($1, $2) + // Create schema with QUALIFIED column names joinSchema := cursor.NewSchema[*UserWithPost](). Field("posts.created_at", "c", func(uwp *UserWithPost) any { return uwp.PostCreatedAt }). 
FixedField("posts.id", cursor.DESC, "i", func(uwp *UserWithPost) any { return uwp.PostID }) @@ -665,7 +524,6 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "posts.created_at", true) // Create fetcher using SQLBoiler query mods with INNER JOIN - // CRITICAL: Must use qualified column names in qm.Select to avoid ambiguity fetcher := sqlboiler.NewFetcher( func(ctx context.Context, mods ...qm.QueryMod) ([]*UserWithPost, error) { var results []*UserWithPost @@ -694,190 +552,55 @@ var _ = Describe("Cursor Pagination Integration Tests", func() { sqlboiler.CursorToQueryMods, ) - // Fetch first page using BuildFetchParams for consistent cursor encoding - fetchParams, err := cursor.BuildFetchParams(pageArgs, joinSchema) - Expect(err).ToNot(HaveOccurred()) - firstPageResults, err := fetcher.Fetch(ctx, fetchParams) + paginator := cursor.New(fetcher, joinSchema) + + // Fetch first page + firstPage, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - // N+1 pattern: We have 25 users * 2 posts = 50 total, so should get LIMIT+1 records - Expect(firstPageResults).To(Or( - HaveLen(11), // Full page with N+1 - HaveLen(10), // Exact page - ), "N+1: should fetch at least LIMIT records") + // Should get results + Expect(firstPage.Nodes).To(HaveLen(10)) - // Verify data integrity - all results should have valid user and post data - for _, result := range firstPageResults { + // Verify data integrity + for _, result := range firstPage.Nodes { Expect(result.UserID).ToNot(BeEmpty()) Expect(result.PostID).ToNot(BeEmpty()) Expect(result.UserName).ToNot(BeEmpty()) Expect(result.PostTitle).ToNot(BeEmpty()) } - // Create paginator - paginator, err := cursor.New(pageArgs, joinSchema, firstPageResults) - Expect(err).ToNot(HaveOccurred()) - endCursor, _ := paginator.PageInfo.EndCursor() + endCursor, _ := firstPage.PageInfo.EndCursor() Expect(endCursor).ToNot(BeNil()) // Fetch second page 
using the cursor pageArgs.After = endCursor - fetchParams, err = cursor.BuildFetchParams(pageArgs, joinSchema) + secondPage, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - secondPageResults, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // Should have at least some results (not necessarily LIMIT+1) - Expect(secondPageResults).ToNot(BeEmpty(), "Second page should have results") - - // Verify no overlap between pages (check post IDs) - // Use the actual length of results to trim, accounting for N+1 pattern - firstPageTrimmed := TrimToLimit(firstPageResults, 10) - secondPageTrimmed := TrimToLimit(secondPageResults, 10) + // Should have at least some results + Expect(secondPage.Nodes).ToNot(BeEmpty()) + // Verify no overlap between pages firstPagePostIDs := make(map[string]bool) - for _, r := range firstPageTrimmed { + for _, r := range firstPage.Nodes { firstPagePostIDs[r.PostID] = true } overlaps := []string{} - for _, r := range secondPageTrimmed { + for _, r := range secondPage.Nodes { if firstPagePostIDs[r.PostID] { overlaps = append(overlaps, r.PostID) } } - Expect(overlaps).To(BeEmpty(), "Found overlapping post IDs between pages: %v", overlaps) + Expect(overlaps).To(BeEmpty(), "Found overlapping post IDs between pages") // Verify pagination metadata - paginator2, err := cursor.New(pageArgs, joinSchema, secondPageResults) - Expect(err).ToNot(HaveOccurred()) - hasNext, _ := paginator2.PageInfo.HasNextPage() - Expect(hasNext).To(BeTrue()) // Still have more pages (50 total posts) - - hasPrev, _ := paginator2.PageInfo.HasPreviousPage() - Expect(hasPrev).To(BeTrue()) // We're on page 2 - }) - - It("should handle ORDER BY with unqualified column names causing ambiguity", func() { - // This test intentionally uses UNQUALIFIED column names to verify error handling - // Both users and posts tables have 'created_at' and 'id' columns - - // Create fetcher using SQLBoiler with INNER JOIN - fetcher := 
sqlboiler.NewFetcher( - func(ctx context.Context, mods ...qm.QueryMod) ([]*UserWithPost, error) { - var results []*UserWithPost - - // Build query mods with explicit SELECT and INNER JOIN - queryMods := []qm.QueryMod{ - qm.Select( - "users.id AS user_id", - "users.name AS user_name", - "users.email AS user_email", - "users.created_at AS user_created_at", - "posts.id AS post_id", - "posts.title AS post_title", - "posts.created_at AS post_created_at", - ), - qm.InnerJoin("users ON posts.user_id = users.id"), - } - queryMods = append(queryMods, mods...) - - err := models.Posts(queryMods...).Bind(ctx, container.DB, &results) - return results, err - }, - func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { - return 0, nil - }, - sqlboiler.CursorToQueryMods, - ) - - // Use UNQUALIFIED column names in OrderBy - // This will cause "column reference 'created_at' is ambiguous" error - fetchParams := paging.FetchParams{ - Limit: 10 + 1, - Cursor: nil, - OrderBy: []paging.Sort{ - {Column: "created_at", Desc: true}, // UNQUALIFIED - ambiguous! - {Column: "id", Desc: true}, // UNQUALIFIED - ambiguous! - }, - } - - _, err := fetcher.Fetch(ctx, fetchParams) - - // Should get an error about ambiguous column reference - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Or( - ContainSubstring("ambiguous"), - ContainSubstring("column"), - )) - }) - - It("should correctly order by user columns when specified", func() { - // This test verifies we can sort by user columns instead of post columns - // Create schema and paginator to verify cursor generation works - joinSchema := cursor.NewSchema[*UserWithPost](). - Field("users.created_at", "c", func(uwp *UserWithPost) any { return uwp.UserCreatedAt }). 
- FixedField("users.id", cursor.DESC, "i", func(uwp *UserWithPost) any { return uwp.UserID }) - - first := 10 - pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "users.created_at", true) - - // Create fetcher using SQLBoiler with INNER JOIN - fetcher := sqlboiler.NewFetcher( - func(ctx context.Context, mods ...qm.QueryMod) ([]*UserWithPost, error) { - var results []*UserWithPost - - // Build query mods with explicit SELECT and INNER JOIN - queryMods := []qm.QueryMod{ - qm.Select( - "users.id AS user_id", - "users.name AS user_name", - "users.email AS user_email", - "users.created_at AS user_created_at", - "posts.id AS post_id", - "posts.title AS post_title", - "posts.created_at AS post_created_at", - ), - qm.InnerJoin("users ON posts.user_id = users.id"), - } - queryMods = append(queryMods, mods...) - - err := models.Posts(queryMods...).Bind(ctx, container.DB, &results) - return results, err - }, - func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { - return 0, nil - }, - sqlboiler.CursorToQueryMods, - ) - - // Order by USERS columns (qualified) using BuildFetchParams - fetchParams, err := cursor.BuildFetchParams(pageArgs, joinSchema) - Expect(err).ToNot(HaveOccurred()) - results, err := fetcher.Fetch(ctx, fetchParams) - Expect(err).ToNot(HaveOccurred()) - - // N+1 pattern: Should get LIMIT+1 records - Expect(results).To(HaveLen(11), "N+1: should fetch LIMIT+1 (10+1=11)") - - paginator, err := cursor.New(pageArgs, joinSchema, results) - Expect(err).ToNot(HaveOccurred()) - - // Verify HasNextPage is set correctly - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := secondPage.PageInfo.HasNextPage() Expect(hasNext).To(BeTrue()) - // Verify results are sorted by user creation time - // Each user has 2 posts, so consecutive posts might have the same user - for i := 1; i < len(results)-1; i++ { - prev := results[i-1] - curr := results[i] - - // User created_at should be DESC (newer or equal) - 
Expect(prev.UserCreatedAt.After(curr.UserCreatedAt) || prev.UserCreatedAt.Equal(curr.UserCreatedAt)).To(BeTrue()) - } + hasPrev, _ := secondPage.PageInfo.HasPreviousPage() + Expect(hasPrev).To(BeTrue()) }) }) }) diff --git a/tests/offset_integration_test.go b/tests/offset_integration_test.go index bc13797..8c91dba 100644 --- a/tests/offset_integration_test.go +++ b/tests/offset_integration_test.go @@ -14,17 +14,7 @@ import ( var _ = Describe("Offset Pagination Integration Tests", func() { var userIDs []string - - BeforeEach(func() { - // Clean tables before each test - err := CleanupTables(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - - // Seed test data - userIDs, err = SeedUsers(ctx, container.DB, 25) - Expect(err).ToNot(HaveOccurred()) - Expect(userIDs).To(HaveLen(25)) - }) + var userPaginator paging.Paginator[*models.User] // Helper to create a standard user fetcher with offset strategy createUserFetcher := func() paging.Fetcher[*models.User] { @@ -39,145 +29,111 @@ var _ = Describe("Offset Pagination Integration Tests", func() { ) } + BeforeEach(func() { + // Clean tables before each test + err := CleanupTables(ctx, container.DB) + Expect(err).ToNot(HaveOccurred()) + + // Seed test data + userIDs, err = SeedUsers(ctx, container.DB, 25) + Expect(err).ToNot(HaveOccurred()) + Expect(userIDs).To(HaveLen(25)) + + // Create paginator (reusable) + fetcher := createUserFetcher() + userPaginator = offset.New(fetcher) + }) + Describe("Basic Offset Pagination", func() { It("should paginate users with default page size using SQLBoiler", func() { - // Get total count using SQLBoiler - totalCount, err := models.Users().Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - Expect(totalCount).To(Equal(int64(25))) - // Create paginator (first page) first := 10 - pageArgs := &paging.PageArgs{ + pageArgs := paging.WithSortBy(&paging.PageArgs{ First: &first, - } - paginator := offset.New(pageArgs, totalCount) + }, "created_at", true) - // Create fetcher - 
fetcher := createUserFetcher() - - // Fetch with pagination - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "created_at", Desc: true}}, - } - users, err := fetcher.Fetch(ctx, fetchParams) + // Paginate + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) // Verify results - Expect(users).To(HaveLen(10)) - Expect(paginator.Limit).To(Equal(10)) - Expect(paginator.Offset).To(Equal(0)) + Expect(page.Nodes).To(HaveLen(10)) + Expect(page.Metadata.Strategy).To(Equal("offset")) // Verify PageInfo - hasNext, err := paginator.PageInfo.HasNextPage() + hasNext, err := page.PageInfo.HasNextPage() Expect(err).ToNot(HaveOccurred()) Expect(hasNext).To(BeTrue()) - hasPrev, err := paginator.PageInfo.HasPreviousPage() + hasPrev, err := page.PageInfo.HasPreviousPage() Expect(err).ToNot(HaveOccurred()) Expect(hasPrev).To(BeFalse()) - total, err := paginator.PageInfo.TotalCount() + total, err := page.PageInfo.TotalCount() Expect(err).ToNot(HaveOccurred()) Expect(*total).To(Equal(25)) }) It("should paginate to second page", func() { - totalCount, err := models.Users().Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - // Create second page cursor first := 10 cursor := offset.EncodeCursor(10) // After first 10 records - pageArgs := &paging.PageArgs{ + pageArgs := paging.WithSortBy(&paging.PageArgs{ First: &first, After: cursor, - } - paginator := offset.New(pageArgs, totalCount) - - fetcher := createUserFetcher() + }, "created_at", true) - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "created_at", Desc: true}}, - } - users, err := fetcher.Fetch(ctx, fetchParams) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) // Verify - Expect(users).To(HaveLen(10)) - Expect(paginator.Offset).To(Equal(10)) + Expect(page.Nodes).To(HaveLen(10)) // Still has next page (25 
total, we're at 10-19) - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := page.PageInfo.HasNextPage() Expect(hasNext).To(BeTrue()) - hasPrev, _ := paginator.PageInfo.HasPreviousPage() + hasPrev, _ := page.PageInfo.HasPreviousPage() Expect(hasPrev).To(BeTrue()) }) It("should handle last page correctly", func() { - totalCount, err := models.Users().Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - // Go to last page first := 10 cursor := offset.EncodeCursor(20) // After 20 records, should get last 5 - pageArgs := &paging.PageArgs{ + pageArgs := paging.WithSortBy(&paging.PageArgs{ First: &first, After: cursor, - } - paginator := offset.New(pageArgs, totalCount) - - fetcher := createUserFetcher() + }, "created_at", true) - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "created_at", Desc: true}}, - } - users, err := fetcher.Fetch(ctx, fetchParams) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) // Last page has 5 items (25 total - 20 offset) - Expect(users).To(HaveLen(5)) + Expect(page.Nodes).To(HaveLen(5)) // No next page - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := page.PageInfo.HasNextPage() Expect(hasNext).To(BeFalse()) // Has previous page - hasPrev, _ := paginator.PageInfo.HasPreviousPage() + hasPrev, _ := page.PageInfo.HasPreviousPage() Expect(hasPrev).To(BeTrue()) }) }) Describe("Custom Sorting", func() { It("should sort by email ascending", func() { - totalCount, err := models.Users().Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - // Sort by email first := 5 pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "email", false) - paginator := offset.New(pageArgs, totalCount) - fetcher := createUserFetcher() - - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "email", Desc: false}}, - } - users, err := 
fetcher.Fetch(ctx, fetchParams) + page, err := userPaginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - Expect(users).To(HaveLen(5)) + Expect(page.Nodes).To(HaveLen(5)) // Verify sorted order (user1@, user10@, user11@, ...) - Expect(users[0].Email).To(HaveSuffix("@example.com")) + Expect(page.Nodes[0].Email).To(HaveSuffix("@example.com")) }) }) @@ -191,34 +147,28 @@ var _ = Describe("Offset Pagination Integration Tests", func() { Expect(err).ToNot(HaveOccurred()) Expect(userIDs).To(HaveLen(100)) - totalCount, err := models.Users().Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - Expect(totalCount).To(Equal(int64(100))) - + // Create new paginator for larger dataset fetcher := createUserFetcher() + paginator := offset.New(fetcher) // Paginate through all pages pageSize := 25 - pageArgs := &paging.PageArgs{ - First: &pageSize, - } + var currentCursor *string for page := 0; page < 4; page++ { - paginator := offset.New(pageArgs, totalCount) + pageArgs := paging.WithSortBy(&paging.PageArgs{ + First: &pageSize, + After: currentCursor, + }, "created_at", true) - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "created_at", Desc: true}}, - } - users, err := fetcher.Fetch(ctx, fetchParams) + result, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - Expect(users).To(HaveLen(25)) + Expect(result.Nodes).To(HaveLen(25)) // Advance cursor for next page if page < 3 { nextCursor := offset.EncodeCursor((page + 1) * pageSize) - pageArgs.After = nextCursor + currentCursor = nextCursor } } }) @@ -232,13 +182,8 @@ var _ = Describe("Offset Pagination Integration Tests", func() { }) It("should paginate posts with published filter", func() { - // Get count of published posts using SQLBoiler - totalCount, err := models.Posts(qm.Where("published_at IS NOT NULL")).Count(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) - first := 10 - pageArgs := 
&paging.PageArgs{First: &first} - paginator := offset.New(pageArgs, totalCount) + pageArgs := paging.WithSortBy(&paging.PageArgs{First: &first}, "published_at", true) // Create fetcher with WHERE filter fetcher := sqlboiler.NewFetcher( @@ -254,21 +199,17 @@ var _ = Describe("Offset Pagination Integration Tests", func() { sqlboiler.OffsetToQueryMods, ) - fetchParams := paging.FetchParams{ - Offset: paginator.Offset, - Limit: paginator.Limit, - OrderBy: []paging.Sort{{Column: "published_at", Desc: true}}, - } - posts, err := fetcher.Fetch(ctx, fetchParams) + paginator := offset.New(fetcher) + page, err := paginator.Paginate(ctx, pageArgs) Expect(err).ToNot(HaveOccurred()) - Expect(posts).To(HaveLen(10)) + Expect(page.Nodes).To(HaveLen(10)) // Verify all posts are published - for _, post := range posts { + for _, post := range page.Nodes { Expect(post.PublishedAt).ToNot(BeNil()) } - hasNext, _ := paginator.PageInfo.HasNextPage() + hasNext, _ := page.PageInfo.HasNextPage() Expect(hasNext).To(BeTrue()) // 25 users * 3 posts * 2/3 published = 50 posts }) }) diff --git a/tests/security_test.go b/tests/security_test.go index b408773..f37f5cf 100644 --- a/tests/security_test.go +++ b/tests/security_test.go @@ -2,6 +2,7 @@ package paging_test import ( "context" + "fmt" "strings" "github.com/nrfta/paging-go/v2" @@ -256,61 +257,95 @@ var _ = Describe("Security Tests", func() { negative := -10 args := &paging.PageArgs{First: &negative} - paginator := offset.New(args, 100) - Expect(paginator).ToNot(BeNil()) - // Should normalize to safe default or minimum + // Create mock fetcher + fetcher := &mockSecurityFetcher{totalCount: 100} + paginator := offset.New(fetcher) + + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) + // Should normalize to safe default + Expect(page.Nodes).ToNot(BeEmpty()) }) It("should handle zero page size", func() { zero := 0 args := &paging.PageArgs{First: &zero} - paginator := offset.New(args, 100) - 
Expect(paginator).ToNot(BeNil()) - // Should use default page size + fetcher := &mockSecurityFetcher{totalCount: 100} + paginator := offset.New(fetcher) + + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) + // Should use default page size (50) + Expect(page.Nodes).To(HaveLen(50)) }) It("should enforce maximum page size limits", func() { huge := 999999 args := &paging.PageArgs{First: &huge} - paginator := offset.New(args, 100) - Expect(paginator).ToNot(BeNil()) - // Implementation should cap at reasonable maximum + fetcher := &mockSecurityFetcher{totalCount: 100} + paginator := offset.New(fetcher) + + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) + // Should cap at DefaultMaxPageSize (1000), but only 100 items exist + Expect(page.Nodes).To(HaveLen(100)) }) }) Context("Cursor Validation", func() { It("should handle nil cursors gracefully", func() { - args := &paging.PageArgs{After: nil} + first := 10 + args := &paging.PageArgs{First: &first, After: nil} schema := cursor.NewSchema[*models.User](). Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). 
FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) - users, err := models.Users(qm.Limit(10)).All(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) + // Create fetcher and paginator + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, container.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return 0, nil + }, + sqlboiler.CursorToQueryMods, + ) + paginator := cursor.New(fetcher, schema) - paginator, err := cursor.New(args, schema, users) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - Expect(paginator).ToNot(BeZero()) + Expect(page).ToNot(BeNil()) + Expect(page.Nodes).To(HaveLen(10)) }) It("should handle empty string cursors", func() { empty := "" - args := &paging.PageArgs{After: &empty} + first := 10 + args := &paging.PageArgs{First: &first, After: &empty} schema := cursor.NewSchema[*models.User](). Field("created_at", "c", func(u *models.User) any { return u.CreatedAt }). 
FixedField("id", cursor.DESC, "i", func(u *models.User) any { return u.ID }) - users, err := models.Users(qm.Limit(10)).All(ctx, container.DB) - Expect(err).ToNot(HaveOccurred()) + // Create fetcher and paginator + fetcher := sqlboiler.NewFetcher( + func(ctx context.Context, mods ...qm.QueryMod) ([]*models.User, error) { + return models.Users(mods...).All(ctx, container.DB) + }, + func(ctx context.Context, mods ...qm.QueryMod) (int64, error) { + return 0, nil + }, + sqlboiler.CursorToQueryMods, + ) + paginator := cursor.New(fetcher, schema) - // Should handle empty cursor gracefully - paginator, err := cursor.New(args, schema, users) + // Should handle empty cursor gracefully (treats as nil) + page, err := paginator.Paginate(ctx, args) Expect(err).ToNot(HaveOccurred()) - Expect(paginator).ToNot(BeZero()) + Expect(page).ToNot(BeNil()) }) }) }) @@ -419,12 +454,43 @@ var _ = Describe("Security Tests", func() { huge := 1000000 args := &paging.PageArgs{First: &huge} - totalCount := int64(25) - paginator := offset.New(args, totalCount) + fetcher := &mockSecurityFetcher{totalCount: 25} + paginator := offset.New(fetcher) - // Should cap at reasonable limit - Expect(paginator).ToNot(BeNil()) + page, err := paginator.Paginate(ctx, args) + Expect(err).ToNot(HaveOccurred()) + // Should cap at DefaultMaxPageSize (1000), but only 25 items exist + Expect(page.Nodes).To(HaveLen(25)) }) }) }) }) + +// mockSecurityFetcher is a simple in-memory fetcher for security tests +type mockSecurityFetcher struct { + totalCount int64 +} + +func (f *mockSecurityFetcher) Fetch(ctx context.Context, params paging.FetchParams) ([]*models.User, error) { + // Generate mock users up to the limit + count := params.Limit + if params.Offset+count > int(f.totalCount) { + count = int(f.totalCount) - params.Offset + } + if count < 0 { + count = 0 + } + + users := make([]*models.User, count) + for i := 0; i < count; i++ { + users[i] = &models.User{ + ID: fmt.Sprintf("user-%d", params.Offset+i+1), + Email: 
fmt.Sprintf("user%d@example.com", params.Offset+i+1), + } + } + return users, nil +} + +func (f *mockSecurityFetcher) Count(ctx context.Context, params paging.FetchParams) (int64, error) { + return f.totalCount, nil +}