repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
river
github_2023
go
140
riverqueue
brandur
@@ -265,7 +267,7 @@ func TestScheduler(t *testing.T) { statusUpdate := func(status componentstatus.Status) { statusUpdateCh <- status } - notify := notifier.New(&scheduler.Archetype, dbPool.Config().ConnConfig, statusUpdate) + notify := notifier.New(&scheduler.Archetype, dbPool.Config().ConnConfig, statusUpdate, slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}))
Can you use a `riverinternaltest.Logger(t)` here instead? Makes sure that log output is properly collated with log output.
river
github_2023
go
140
riverqueue
brandur
@@ -34,7 +36,7 @@ func TestNotifierReceivesNotification(t *testing.T) { statusUpdateCh <- status } - notifier := New(riverinternaltest.BaseServiceArchetype(t), db.Config().ConnConfig, statusUpdate) + notifier := New(riverinternaltest.BaseServiceArchetype(t), db.Config().ConnConfig, statusUpdate, slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}))
Same here: `riverinternaltest.Logger(t)`.
river
github_2023
go
140
riverqueue
brandur
@@ -172,7 +174,7 @@ func Test_Producer_Run(t *testing.T) { workers := NewWorkers() - notifier := notifier.New(archetype, dbPool.Config().ConnConfig, func(componentstatus.Status) {}) + notifier := notifier.New(archetype, dbPool.Config().ConnConfig, func(componentstatus.Status) {}, slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}))
And same for both of these as well.
river
github_2023
go
140
riverqueue
brandur
@@ -135,7 +137,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { if errors.Is(err, context.Canceled) { return } - log.Printf("error establishing connection from pool: %v", err) + n.logger.Error("error establishing connection from pool", "err", err)
Okay this one's a bit tricky in that it's unfortunately failing example tests when changed to slog because especially in CI tests can finish before the notifier's able to connect, and by the time it's done trying to, it may produce a connection error (rather than a context cancelled error which is handled above), which shows up in test output and fails example tests. Change this line to this: ``` go // Log at a lower verbosity level in case an error is received when the // context is already done (probably because the client is stopping). // Example tests can finish before the notifier connects and starts // listening, and on client stop may produce a connection error that // would otherwise pollute output and fail the test. select { case <-ctx.Done(): n.logger.Info("error establishing connection from pool", "err", err) default: n.logger.Error("error establishing connection from pool", "err", err) } ``` It's a little hacky, but it seems to get the tests passing again.
river
github_2023
go
138
riverqueue
brandur
@@ -682,6 +682,10 @@ func (c *Client[TTx]) signalStopComplete(ctx context.Context) { // by cancelling the context passed to Start or by calling StopAndCancel. func (c *Client[TTx]) Stop(ctx context.Context) error { c.baseService.Logger.InfoContext(ctx, c.baseService.Name+": Stop started") + if c.fetchNewWorkCancel != nil { + return errors.New("client not started") + }
@pmenglund What do you think about moving this to be above the log info line? I think it makes more sense for the log line to only be written in cases where a stop really is starting (i.e. not a false start).
river
github_2023
go
138
riverqueue
brandur
@@ -389,6 +390,15 @@ func Test_Client_Stop(t *testing.T) { } } + t.Run("not started", func(t *testing.T) { + t.Parallel() + client := newTestClient(ctx, t, newTestConfig(t, nil)) + + err := client.Stop(ctx) + require.Error(t, err) + assert.Equal(t, "client not started", err.Error())
See lint error here, but we've tried to standardize around using `require` over `assert` everywhere.
river
github_2023
go
138
riverqueue
brandur
@@ -389,6 +389,15 @@ func Test_Client_Stop(t *testing.T) { } } + t.Run("not started", func(t *testing.T) { + t.Parallel() + client := newTestClient(ctx, t, newTestConfig(t, nil)) + + err := client.Stop(ctx) + require.Error(t, err) + require.Equal(t, "client not started", err.Error())
Yeah. I think it's okay not to give them a full error type until we're more sure that someone will need it. This case is pretty clearly a misuse problem that'll rarely occur outside of dev and not need normal handling.
river
github_2023
others
134
riverqueue
brandur
@@ -40,7 +40,7 @@ WITH currently_held_leaders AS ( ), notified_resignations AS ( SELECT - pg_notify('river_leaderhip', json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), + pg_notify('river_leadership', json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text),
Yeah, good idea. It's already referenced in code anyway, so couldn't hurt just to use the same ref here.
river
github_2023
others
114
riverqueue
bgentry
@@ -27,15 +27,27 @@ queries. After changing an sqlc `.sql` file, generate Go with: 1. First, prepare a PR with a `CHANGELOG.md` update describing the changes, and update the root `go.mod` to point to the `riverpgxv5` version that is about to be released. 2. Merge the above PR. -3. Next, fetch the repo locally and push new tags: +3. Next, fetch the repo locally, bump dependency versions, tag each submodule, and push those tags: ```shell git checkout master && git pull --rebase -VERSION=v0.0.x +export VERSION=v0.0.x +go run ./internal/cmd/update-submodule-versions/main.go git tag cmd/river/$VERSION -m "release cmd/river/$VERSION" git tag riverdriver/$VERSION -m "release riverdriver/$VERSION" git tag riverdriver/riverpgxv5/$VERSION -m "release riverdriver/riverpgxv5/$VERSION" git tag riverdriver/riverdatabasesql/$VERSION -m "release riverdriver/riverdatabasesql/$VERSION" git tag $VERSION git push --tags ``` + +### Releasing River CLI + +The CLI (`./cmd/river`) is different than other River submodules in that it doesn't use any `replace` directives so that it can stay installable with `go install ...@latest`. + +If changes to it don't require updates to its other River dependencies (i.e. they're internal to the CLI only), it can be released normally as shown above. + +If updates to River dependencies _are_ required, then a two-phase update is necessary: + +1. Release River dependencies with an initial version (e.g. `v0.0.14`). +2. From `./cmd/river`, `go get` to upgrade to the version from (1), run `go mod tidy`, then release another version (e.g. `v0.0.15`).
I think we can tweak this to avoid the double versioning as I did in my testing. All you have to do is tag & push everything else first as above but skip the `cmd/river` tag. Once that's done, you can follow step 2 here to bump the versions in `cmd/river/go.sum` to the newly released version, commit & push that, then tag it as `cmd/river/v0.0.14`. The released versions won't have the same commit, but the numbers will at least align this way.
river
github_2023
others
98
riverqueue
bgentry
@@ -0,0 +1,25 @@ +module github.com/riverqueue/river/riverdriver/riversql
I have some qualms with this package name. Primarily worried that it could be confusing or take away a name that we might want to use for other purposes. However the best alternative I can come up with is `riverdbsql` which I don't love either 😕
river
github_2023
go
98
riverqueue
bgentry
@@ -32,10 +42,95 @@ import ( type Driver[TTx any] interface { // GetDBPool returns a database pool.This doesn't make sense in a world // where multiple drivers are supported and is subject to change. + // + // API is not stable. DO NOT USE. GetDBPool() *pgxpool.Pool + // GetExecutor gets an executor for the driver. + // + // API is not stable. DO NOT USE. + GetExecutor() Executor + + // UnwrapExecutor gets unwraps executor from a driver transaction. + // + // API is not stable. DO NOT USE. + UnwrapExecutor(tx TTx) Executor + // UnwrapTx turns a generically typed transaction into a pgx.Tx for use with // internal infrastructure. This doesn't make sense in a world where // multiple drivers are supported and is subject to change. + // + // API is not stable. DO NOT USE. UnwrapTx(tx TTx) pgx.Tx } + +// Executor provides River operations against a database. It may be a database +// pool or transaction. +type Executor interface { + // Begin begins a new subtransaction. ErrSubTxNotSupported may be returned + // if the executor is a transaction and the driver doesn't support + // subtransactions (like riverdriver/riversql for database/sql). + // + // API is not stable. DO NOT USE. + Begin(ctx context.Context) (ExecutorTx, error) + + // Exec executes raw SQL. Used for migrations. + // + // API is not stable. DO NOT USE. + Exec(ctx context.Context, sql string) (struct{}, error) + + // MigrationDeleteByVersionMany deletes many migration versions. + // + // API is not stable. DO NOT USE. + MigrationDeleteByVersionMany(ctx context.Context, versions []int) ([]*Migration, error) + + // MigrationGetAll gets all currently applied migrations. + // + // API is not stable. DO NOT USE. + MigrationGetAll(ctx context.Context) ([]*Migration, error) + + // MigrationInsertMany inserts many migration versions. + // + // API is not stable. DO NOT USE. 
+ MigrationInsertMany(ctx context.Context, versions []int) ([]*Migration, error) + + // TableExists checks whether a table exists for the schema in the current + // search schema. + // + // API is not stable. DO NOT USE. + TableExists(ctx context.Context, tableName string) (bool, error) +} + +// ExecutorTx is an executor which is a transaction. In addition to standard +// Executor operations, it may be commited or rolled back.
```suggestion // Executor operations, it may be committed or rolled back. ```
river
github_2023
others
98
riverqueue
bgentry
@@ -0,0 +1,24 @@ +version: "2" +sql: + - engine: "postgresql" + queries: + - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql + schema: + - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql
Is the thought that in general we should be able to share queries between drivers and let sqlc abstract away the differences? More generated code to execute or compile, but less code to actually maintain? After reading your comment about how each driver would need to implement the full set of query methods I had some concerns about the amount of duplication we'd end up with, but if we can share the underlying schema and query definitions then that should help considerably.
river
github_2023
go
98
riverqueue
bgentry
@@ -29,8 +34,96 @@ type Driver struct { // in testing so that inserts can be performed and verified on a test // transaction that will be rolled back. func New(dbPool *pgxpool.Pool) *Driver { - return &Driver{dbPool: dbPool} + return &Driver{dbPool: dbPool, queries: dbsqlc.New()} +} + +func (d *Driver) GetDBPool() *pgxpool.Pool { return d.dbPool } +func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, dbsqlc.New()} } +func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.Executor { return &Executor{tx, dbsqlc.New()} } +func (d *Driver) UnwrapTx(tx pgx.Tx) pgx.Tx { return tx } + +type Executor struct { + dbtx interface { + dbsqlc.DBTX + Begin(ctx context.Context) (pgx.Tx, error) + } + queries *dbsqlc.Queries +} + +func (e *Executor) Begin(ctx context.Context) (riverdriver.ExecutorTx, error) { + tx, err := e.dbtx.Begin(ctx) + if err != nil { + return nil, err + } + return &ExecutorTx{Executor: Executor{tx, e.queries}, tx: tx}, nil +} + +func (e *Executor) Exec(ctx context.Context, sql string) (struct{}, error) { + _, err := e.dbtx.Exec(ctx, sql) + return struct{}{}, interpretError(err) +} + +func (e *Executor) MigrationDeleteByVersionMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { + migrations, err := e.queries.RiverMigrationDeleteByVersionMany(ctx, e.dbtx, mapSlice(versions, func(v int) int64 { return int64(v) })) + return mapMigrations(migrations), interpretError(err) +} + +func (e *Executor) MigrationGetAll(ctx context.Context) ([]*riverdriver.Migration, error) { + migrations, err := e.queries.RiverMigrationGetAll(ctx, e.dbtx) + return mapMigrations(migrations), interpretError(err) +} + +func (e *Executor) MigrationInsertMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { + migrations, err := e.queries.RiverMigrationInsertMany(ctx, e.dbtx, mapSlice(versions, func(v int) int64 { return int64(v) })) + return mapMigrations(migrations), interpretError(err) +} + +func (e *Executor) 
TableExists(ctx context.Context, tableName string) (bool, error) { + exists, err := e.queries.TableExists(ctx, e.dbtx, tableName) + return exists, interpretError(err) +} + +type ExecutorTx struct { + Executor + tx pgx.Tx +} + +func (t *ExecutorTx) Commit(ctx context.Context) error { + return t.tx.Commit(ctx) +} + +func (t *ExecutorTx) Rollback(ctx context.Context) error { + return t.tx.Rollback(ctx) } -func (d *Driver) GetDBPool() *pgxpool.Pool { return d.dbPool } -func (d *Driver) UnwrapTx(tx pgx.Tx) pgx.Tx { return tx } +func interpretError(err error) error { + if errors.Is(err, pgx.ErrNoRows) { + return riverdriver.ErrNoRows + } + return err +} + +func mapMigrations(migrations []*dbsqlc.RiverMigration) []*riverdriver.Migration { + if migrations == nil { + return nil + } + + return mapSlice(migrations, func(m *dbsqlc.RiverMigration) *riverdriver.Migration { + return &riverdriver.Migration{ + ID: int(m.ID), + CreatedAt: m.CreatedAt, + Version: int(m.Version), + } + }) +} + +// mapSlice manipulates a slice and transforms it to a slice of another type. +func mapSlice[T any, R any](collection []T, mapFunc func(T) R) []R { + result := make([]R, len(collection)) + + for i, item := range collection { + result[i] = mapFunc(item) + } + + return result +}
I assume this is included in the pgxv5 driver to avoid a circular dependency on `internal/util/sliceutil`?
river
github_2023
others
98
riverqueue
bgentry
@@ -32,7 +32,9 @@ queries. After changing an sqlc `.sql` file, generate Go with: ```shell git checkout master && git pull --rebase VERSION=v0.0.x +git tag riverdriver/VERSION -m "release riverdriver/VERSION" git tag riverdriver/riverpgxv5/$VERSION -m "release riverdriver/riverpgxv5/$VERSION" +git tag riverdriver/riversql/$VERSION -m "release riverdriver/riversql/$VERSION"
Getting closer to the point where this might warrant a little bit of effort to automate.
river
github_2023
others
105
riverqueue
bgentry
@@ -72,6 +72,7 @@ WITH locked_jobs AS ( WHERE state = 'available'::river_job_state AND queue = @queue::text + AND scheduled_at <= now()
This should be totally fine from a query performance perspective because it's part of the compound fetching index and is already being used for sorting.
river
github_2023
go
101
riverqueue
bgentry
@@ -377,66 +352,53 @@ func (a *StandardAdapter) JobGetAvailableTx(ctx context.Context, tx pgx.Tx, queu return jobs, nil } -func (a *StandardAdapter) JobSetCancelledIfRunning(ctx context.Context, id int64, finalizedAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCancelledIfRunning(ctx, a.executor, dbsqlc.JobSetCancelledIfRunningParams{ - ID: id, - Error: err, - FinalizedAt: finalizedAt, - }) +// JobSetStateParams are parameters to update the state of a currently running +// job. Use one of the constructors below to ensure a correct combination of +// parameters. +type JobSetStateParams struct { + ID int64 + errData []byte + finalizedAt *time.Time + maxAttempts *int + scheduledAt *time.Time + state dbsqlc.JobState } -func (a *StandardAdapter) JobSetCompletedIfRunning(ctx context.Context, job JobToComplete) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCompletedIfRunning(ctx, a.executor, dbsqlc.JobSetCompletedIfRunningParams{ - ID: job.ID, - FinalizedAt: job.FinalizedAt.UTC(), - }) +func JobSetStateCancelled(id int64, finalizedAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCancelled} } -func (a *StandardAdapter) JobSetCompletedTx(ctx context.Context, tx pgx.Tx, id int64, completedAt time.Time) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCompleted(ctx, tx, dbsqlc.JobSetCompletedParams{ - ID: id, - FinalizedAt: completedAt.UTC(), - }) +func JobSetStateCompleted(id int64, finalizedAt time.Time) *JobSetStateParams { + return &JobSetStateParams{ID: id, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCompleted} } -func (a *StandardAdapter) JobSetDiscardedIfRunning(ctx context.Context, 
id int64, finalizedAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetDiscardedIfRunning(ctx, a.executor, dbsqlc.JobSetDiscardedIfRunningParams{ - ID: id, - Error: err, - FinalizedAt: finalizedAt, - }) +func JobSetStateDiscarded(id int64, finalizedAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateDiscarded} } -func (a *StandardAdapter) JobSetErroredIfRunning(ctx context.Context, id int64, scheduledAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() +func JobSetStateErrored(id int64, scheduledAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, scheduledAt: &scheduledAt, state: dbsqlc.JobStateRetryable} +} - return a.queries.JobSetErroredIfRunning(ctx, a.executor, dbsqlc.JobSetErroredIfRunningParams{ - ID: id, - Error: err, - ScheduledAt: scheduledAt, - }) +func JobSetStateSnoozed(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateParams { + return &JobSetStateParams{ID: id, maxAttempts: &maxAttempts, scheduledAt: &scheduledAt, state: dbsqlc.JobStateScheduled} } -func (a *StandardAdapter) JobSetSnoozedIfRunning(ctx context.Context, id int64, scheduledAt time.Time) (*dbsqlc.RiverJob, error) { +func (a *StandardAdapter) JobSetState(ctx context.Context, params *JobSetStateParams) (*dbsqlc.RiverJob, error) {
The loss of the `IsRunning` suffix confused me a bit here, because you have a `JobSetState` query which _does not_ ensure that the job is still running, whereas you also have a `JobSetStateIfRunning` variant that still does ensure it's running before updating it. Given the comment on `JobSetState` in the interface makes clear it's only supposed to make changes if the job is still running, I think it might be clearer & less confusing to keep the suffix throughout. Thoughts?
river
github_2023
go
101
riverqueue
bgentry
@@ -377,66 +352,53 @@ func (a *StandardAdapter) JobGetAvailableTx(ctx context.Context, tx pgx.Tx, queu return jobs, nil } -func (a *StandardAdapter) JobSetCancelledIfRunning(ctx context.Context, id int64, finalizedAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCancelledIfRunning(ctx, a.executor, dbsqlc.JobSetCancelledIfRunningParams{ - ID: id, - Error: err, - FinalizedAt: finalizedAt, - }) +// JobSetStateParams are parameters to update the state of a currently running +// job. Use one of the constructors below to ensure a correct combination of +// parameters. +type JobSetStateParams struct { + ID int64 + errData []byte + finalizedAt *time.Time + maxAttempts *int + scheduledAt *time.Time + state dbsqlc.JobState } -func (a *StandardAdapter) JobSetCompletedIfRunning(ctx context.Context, job JobToComplete) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCompletedIfRunning(ctx, a.executor, dbsqlc.JobSetCompletedIfRunningParams{ - ID: job.ID, - FinalizedAt: job.FinalizedAt.UTC(), - }) +func JobSetStateCancelled(id int64, finalizedAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCancelled} } -func (a *StandardAdapter) JobSetCompletedTx(ctx context.Context, tx pgx.Tx, id int64, completedAt time.Time) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetCompleted(ctx, tx, dbsqlc.JobSetCompletedParams{ - ID: id, - FinalizedAt: completedAt.UTC(), - }) +func JobSetStateCompleted(id int64, finalizedAt time.Time) *JobSetStateParams { + return &JobSetStateParams{ID: id, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCompleted} } -func (a *StandardAdapter) JobSetDiscardedIfRunning(ctx context.Context, 
id int64, finalizedAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetDiscardedIfRunning(ctx, a.executor, dbsqlc.JobSetDiscardedIfRunningParams{ - ID: id, - Error: err, - FinalizedAt: finalizedAt, - }) +func JobSetStateDiscarded(id int64, finalizedAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateDiscarded} } -func (a *StandardAdapter) JobSetErroredIfRunning(ctx context.Context, id int64, scheduledAt time.Time, err []byte) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() +func JobSetStateErrored(id int64, scheduledAt time.Time, errData []byte) *JobSetStateParams { + return &JobSetStateParams{ID: id, errData: errData, scheduledAt: &scheduledAt, state: dbsqlc.JobStateRetryable} +} - return a.queries.JobSetErroredIfRunning(ctx, a.executor, dbsqlc.JobSetErroredIfRunningParams{ - ID: id, - Error: err, - ScheduledAt: scheduledAt, - }) +func JobSetStateSnoozed(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateParams { + return &JobSetStateParams{ID: id, maxAttempts: &maxAttempts, scheduledAt: &scheduledAt, state: dbsqlc.JobStateScheduled} } -func (a *StandardAdapter) JobSetSnoozedIfRunning(ctx context.Context, id int64, scheduledAt time.Time) (*dbsqlc.RiverJob, error) { +func (a *StandardAdapter) JobSetState(ctx context.Context, params *JobSetStateParams) (*dbsqlc.RiverJob, error) { ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) defer cancel() - return a.queries.JobSetSnoozedIfRunning(ctx, a.executor, dbsqlc.JobSetSnoozedIfRunningParams{ - ID: id, - ScheduledAt: scheduledAt, + return a.queries.JobSetStateIfRunning(ctx, a.executor, dbsqlc.JobSetStateIfRunningParams{ + ID: params.ID, + ErrorDoUpdate: params.errData != nil, + Error: params.errData, + FinalizedAtDoUpdate: 
params.finalizedAt != nil, + FinalizedAt: params.finalizedAt, + MaxAttemptsUpdate: params.maxAttempts != nil, + MaxAttempts: int16(min(ptrutil.ValOrDefault(params.maxAttempts, 0), math.MaxInt16)), // default never used
This line is confusing me a bit. What is the `min` achieving?
river
github_2023
go
67
riverqueue
bgentry
@@ -0,0 +1,502 @@ +// Package rivermigrate provides a Go API for running migrations as alternative +// to migrating via the bundled CLI. +package rivermigrate + +import ( + "context" + "embed" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "maps" + "os" + "slices" + "strconv" + "strings" + "time" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/dbsqlc" + "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/internal/util/maputil" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" +) + +// A bundled migration containing a version (1, 2, 3), and SQL for up and down +// directions. +type migrationBundle struct { + Version int + Up string + Down string +} + +//nolint:gochecknoglobals +var ( + //go:embed migration/*.sql + migrationFS embed.FS + + riverMigrations = mustMigrationsFromFS(migrationFS) + riverMigrationsMap = validateAndInit(riverMigrations) +) + +// Config contains configuration for Migrator. +type Config struct { + // Logger is the structured logger to use for logging purposes. If none is + // specified, logs will be emitted to STDOUT with messages at warn level + // or higher. + Logger *slog.Logger +} + +// Migrator is a database migration tool for River which can run up or down +// migrations in order to establish the schema that the queue needs to run. +type Migrator[TTx any] struct { + baseservice.BaseService + + driver riverdriver.Driver[TTx] + migrations map[int]*migrationBundle // allows us to inject test migrations + queries *dbsqlc.Queries +} + +// New returns a new migrator with the given database driver and configuration. +// The config parameter may be omitted as nil. +// +// Currently only one driver is supported, which is Pgx v5. See package +// riverpgxv5. 
+// +// The function takes a generic parameter TTx representing a transaction type, +// but it can be omitted because it'll generally always be inferred from the +// driver. For example: +// +// import "github.com/riverqueue/river/riverdriver/riverpgxv5" +// import "github.com/riverqueue/rivermigrate" +// +// ... +// +// dbPool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL")) +// if err != nil { +// // handle error +// } +// defer dbPool.Close() +// +// migrator, err := rivermigrate.New(riverpgxv5.New(dbPool), nil) +// if err != nil { +// // handle error +// } +func New[TTx any](driver riverdriver.Driver[TTx], config *Config) *Migrator[TTx] { + if config == nil { + config = &Config{} + } + + logger := config.Logger + if logger == nil { + logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelWarn, + })) + } + + archetype := &baseservice.Archetype{ + Logger: logger, + TimeNowUTC: func() time.Time { return time.Now().UTC() }, + } + + return baseservice.Init(archetype, &Migrator[TTx]{ + driver: driver, + migrations: riverMigrationsMap, + queries: dbsqlc.New(), + }) +} + +// MigrateOpts are options for a migrate operation. +type MigrateOpts struct { + // MaxSteps is the maximum number of migrations to apply either up or down. + // Leave zero for an unlimited number. Set to -1 to apply no migrations (for + // testing/checking purposes). + MaxSteps int + + // TargetVersion is a specific migration version to apply migrations to. The + // version must exist and it must be in the possible list of migrations to + // apply. e.g. If requesting an up migration with version 3, version 3 not + // already be applied. + // + // When applying migrations up, migrations are applied including the target + // version, so when starting at version 0 and requesting version 3, versions + // 1, 2, and 3 would be applied. 
When applying migrations down, down + // migrations are applied excluding the target version, so when starting at + // version 5 an requesting version 3, down migrations for versions 5 and 4 + // would be applied, leaving the final schema at version 3. + TargetVersion int +} + +// MigrateResult is the result of a migrate operation. +type MigrateResult struct { + // Versions are migration versions that were added (for up migrations) or + // removed (for down migrations) for this run. + Versions []MigrateVersion +} + +// MigrateVersion is the result for a single applied migration. +type MigrateVersion struct { + // Version is the version of the migration applied. + Version int +} + +func migrateVersionToInt(version MigrateVersion) int { return version.Version } +func migrateVersionToInt64(version MigrateVersion) int64 { return int64(version.Version) } + +type Direction string + +const ( + DirectionDown Direction = "down" + DirectionUp Direction = "up" +) + +// Migrate migrates the database in the given direction (up or down). The opts +// parameter may be omitted for convenience. +// +// By default, applies all outstanding migrations possible in either direction. +// When migrating up all outstanding migrations are applied, and when migrating +// down all existing migrations are unapplied. +// +// When migrating down, use with caution. MigrateOpts.MaxSteps should be set to +// 1 to only migrate down one step.
This seems like a bit of a footgun. I would have been inclined to default to running a single migration at a time just to avoid this. Do you think it's too risky? At the very least we might want the examples to show the single-migration usage.
river
github_2023
go
67
riverqueue
bgentry
@@ -0,0 +1,502 @@ +// Package rivermigrate provides a Go API for running migrations as alternative +// to migrating via the bundled CLI. +package rivermigrate + +import ( + "context" + "embed" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "maps" + "os" + "slices" + "strconv" + "strings" + "time" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/dbsqlc" + "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/internal/util/maputil" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" +) + +// A bundled migration containing a version (1, 2, 3), and SQL for up and down +// directions. +type migrationBundle struct { + Version int + Up string + Down string +} + +//nolint:gochecknoglobals +var ( + //go:embed migration/*.sql + migrationFS embed.FS + + riverMigrations = mustMigrationsFromFS(migrationFS) + riverMigrationsMap = validateAndInit(riverMigrations) +) + +// Config contains configuration for Migrator. +type Config struct { + // Logger is the structured logger to use for logging purposes. If none is + // specified, logs will be emitted to STDOUT with messages at warn level + // or higher. + Logger *slog.Logger +} + +// Migrator is a database migration tool for River which can run up or down +// migrations in order to establish the schema that the queue needs to run. +type Migrator[TTx any] struct { + baseservice.BaseService + + driver riverdriver.Driver[TTx] + migrations map[int]*migrationBundle // allows us to inject test migrations + queries *dbsqlc.Queries +} + +// New returns a new migrator with the given database driver and configuration. +// The config parameter may be omitted as nil. +// +// Currently only one driver is supported, which is Pgx v5. See package +// riverpgxv5. 
+// +// The function takes a generic parameter TTx representing a transaction type, +// but it can be omitted because it'll generally always be inferred from the +// driver. For example: +// +// import "github.com/riverqueue/river/riverdriver/riverpgxv5" +// import "github.com/riverqueue/rivermigrate" +// +// ... +// +// dbPool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL")) +// if err != nil { +// // handle error +// } +// defer dbPool.Close() +// +// migrator, err := rivermigrate.New(riverpgxv5.New(dbPool), nil) +// if err != nil { +// // handle error +// } +func New[TTx any](driver riverdriver.Driver[TTx], config *Config) *Migrator[TTx] { + if config == nil { + config = &Config{} + } + + logger := config.Logger + if logger == nil { + logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelWarn, + })) + } + + archetype := &baseservice.Archetype{ + Logger: logger, + TimeNowUTC: func() time.Time { return time.Now().UTC() }, + } + + return baseservice.Init(archetype, &Migrator[TTx]{ + driver: driver, + migrations: riverMigrationsMap, + queries: dbsqlc.New(), + }) +} + +// MigrateOpts are options for a migrate operation. +type MigrateOpts struct { + // MaxSteps is the maximum number of migrations to apply either up or down. + // Leave zero for an unlimited number. Set to -1 to apply no migrations (for + // testing/checking purposes). + MaxSteps int + + // TargetVersion is a specific migration version to apply migrations to. The + // version must exist and it must be in the possible list of migrations to + // apply. e.g. If requesting an up migration with version 3, version 3 not + // already be applied. + // + // When applying migrations up, migrations are applied including the target + // version, so when starting at version 0 and requesting version 3, versions + // 1, 2, and 3 would be applied. 
When applying migrations down, down + // migrations are applied excluding the target version, so when starting at + // version 5 an requesting version 3, down migrations for versions 5 and 4 + // would be applied, leaving the final schema at version 3. + TargetVersion int +} + +// MigrateResult is the result of a migrate operation. +type MigrateResult struct { + // Versions are migration versions that were added (for up migrations) or + // removed (for down migrations) for this run. + Versions []MigrateVersion +} + +// MigrateVersion is the result for a single applied migration. +type MigrateVersion struct { + // Version is the version of the migration applied. + Version int +} + +func migrateVersionToInt(version MigrateVersion) int { return version.Version } +func migrateVersionToInt64(version MigrateVersion) int64 { return int64(version.Version) } + +type Direction string + +const ( + DirectionDown Direction = "down" + DirectionUp Direction = "up" +) + +// Migrate migrates the database in the given direction (up or down). The opts +// parameter may be omitted for convenience. +// +// By default, applies all outstanding migrations possible in either direction. +// When migrating up all outstanding migrations are applied, and when migrating +// down all existing migrations are unapplied. +// +// When migrating down, use with caution. MigrateOpts.MaxSteps should be set to +// 1 to only migrate down one step. +// +// res, err := migrator.Migrate(ctx, rivermigrate.DirectionUp, nil) +// if err != nil { +// // handle error +// } +func (m *Migrator[TTx]) Migrate(ctx context.Context, direction Direction, opts *MigrateOpts) (*MigrateResult, error) {
Related to the other comment, I'm not quite sure about the API here. I'm thinking in terms of trying to make this foolproof and as difficult as possible to accidentally mess up your prod database, while also being sure exactly what's being run at a given step. Rather than stating _how many_ migrations to run, do you think it might be safer to specify _which_ migration you want to run? This would require a bit of a different API, because it'd be something like `RunUp(ctx, 1)` or `Run(ctx, rivermigrate.DirectionUp, 3)`. That forces the use case of running multiple migrations into a different API like `RunN()`, but I expect that use case to be more unusual. Also if you went that route, the `N` arg in `RunN()` could be an explicit arg and not something with a default zero value. My thought process here is that if I'm running an app with River and I see that there's a new version out, I'll check the changelog or website for notes about new migrations in that version. If I see one, I'm going to make a new migration file in my goose/golang-migrate/etc migration setup where I have it call out to `rivermigrate`. I would want to be extra sure that I'm running exactly the migration I expect to be running so that I'm sure I'm prepared and have followed any related instructions if necessary.
river
github_2023
go
67
riverqueue
bgentry
@@ -0,0 +1,502 @@ +// Package rivermigrate provides a Go API for running migrations as alternative +// to migrating via the bundled CLI. +package rivermigrate + +import ( + "context" + "embed" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "maps" + "os" + "slices" + "strconv" + "strings" + "time" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/dbsqlc" + "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/internal/util/maputil" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" +) + +// A bundled migration containing a version (1, 2, 3), and SQL for up and down +// directions. +type migrationBundle struct { + Version int + Up string + Down string +} + +//nolint:gochecknoglobals +var ( + //go:embed migration/*.sql + migrationFS embed.FS + + riverMigrations = mustMigrationsFromFS(migrationFS) + riverMigrationsMap = validateAndInit(riverMigrations) +) + +// Config contains configuration for Migrator. +type Config struct { + // Logger is the structured logger to use for logging purposes. If none is + // specified, logs will be emitted to STDOUT with messages at warn level + // or higher. + Logger *slog.Logger +} + +// Migrator is a database migration tool for River which can run up or down +// migrations in order to establish the schema that the queue needs to run. +type Migrator[TTx any] struct { + baseservice.BaseService + + driver riverdriver.Driver[TTx] + migrations map[int]*migrationBundle // allows us to inject test migrations + queries *dbsqlc.Queries +} + +// New returns a new migrator with the given database driver and configuration. +// The config parameter may be omitted as nil. +// +// Currently only one driver is supported, which is Pgx v5. See package +// riverpgxv5. 
+// +// The function takes a generic parameter TTx representing a transaction type, +// but it can be omitted because it'll generally always be inferred from the +// driver. For example: +// +// import "github.com/riverqueue/river/riverdriver/riverpgxv5" +// import "github.com/riverqueue/rivermigrate" +// +// ... +// +// dbPool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL")) +// if err != nil { +// // handle error +// } +// defer dbPool.Close() +// +// migrator, err := rivermigrate.New(riverpgxv5.New(dbPool), nil) +// if err != nil { +// // handle error +// } +func New[TTx any](driver riverdriver.Driver[TTx], config *Config) *Migrator[TTx] { + if config == nil { + config = &Config{} + } + + logger := config.Logger + if logger == nil { + logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelWarn, + })) + } + + archetype := &baseservice.Archetype{ + Logger: logger, + TimeNowUTC: func() time.Time { return time.Now().UTC() }, + } + + return baseservice.Init(archetype, &Migrator[TTx]{ + driver: driver, + migrations: riverMigrationsMap, + queries: dbsqlc.New(), + }) +} + +// MigrateOpts are options for a migrate operation. +type MigrateOpts struct { + // MaxSteps is the maximum number of migrations to apply either up or down. + // Leave zero for an unlimited number. Set to -1 to apply no migrations (for + // testing/checking purposes). + MaxSteps int + + // TargetVersion is a specific migration version to apply migrations to. The + // version must exist and it must be in the possible list of migrations to + // apply. e.g. If requesting an up migration with version 3, version 3 not + // already be applied.
```suggestion // apply. e.g. If requesting an up migration with version 3, version 3 must // not already be applied. ```
river
github_2023
go
93
riverqueue
brandur
@@ -1146,7 +1146,7 @@ func Test_Client_ErrorHandler(t *testing.T) { HandleErrorFunc: func(ctx context.Context, job *rivertype.JobRow, err error) *ErrorHandlerResult { var unknownJobKindErr *UnknownJobKindError require.ErrorAs(t, err, &unknownJobKindErr) - require.Equal(t, *unknownJobKindErr, UnknownJobKindError{Kind: "RandomWorkerNameThatIsNeverRegistered"})
Gotta say that I really like this new lint — hard to catch the reversal during PR reviews so should shore up consistency nicely.
river
github_2023
go
93
riverqueue
brandur
@@ -111,8 +111,8 @@ func (s *JobCleaner) Start(ctx context.Context) error { //nolint:dupl // races. defer close(stopped) - s.Logger.InfoContext(ctx, s.Name+": Run loop started") - defer s.Logger.InfoContext(ctx, s.Name+": Run loop stopped") + s.Logger.InfoContext(ctx, s.Name+logPrefixRunLoopStarted)
I could take or leave this one for sure. Let's try it for a while and see how it feels.
river
github_2023
go
93
riverqueue
brandur
@@ -16,7 +16,7 @@ import ( // `riverinternaltest_test`, except that the test below uses internal variables // like `dbPool`. type Executor interface { - Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Exec(ctx context.Context, query string, args ...interface{}) (pgconn.CommandTag, error)
Like this one too. The nice part about having default param names is that when you tell your IDE to autocomplete an implementation scaffold, it can give you nice-looking names in it instead of something generically generated.
river
github_2023
go
93
riverqueue
brandur
@@ -70,8 +70,8 @@ func TestMap(t *testing.T) { return strconv.FormatInt(x, 10) }) - require.Equal(t, len(result1), 4) - require.Equal(t, len(result2), 4) - require.Equal(t, result1, []string{"Hello", "Hello", "Hello", "Hello"}) - require.Equal(t, result2, []string{"1", "2", "3", "4"}) + require.Len(t, result1, 4)
Another good one IMO.
river
github_2023
others
92
riverqueue
bgentry
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.0.11] - 2023-12-02
Sorry, but I put my changelog entry for `Stopped` in the prior version again 🤦‍♂️ mind fixing that too before merging?
river
github_2023
go
90
riverqueue
brandur
@@ -492,6 +492,46 @@ func Test_Client_Stop(t *testing.T) { }) } +func Test_Client_Stopped(t *testing.T) { + t.Parallel() + ctx := context.Background() + + t.Run("ClosesWhenStopCompletes", func(t *testing.T) { + t.Parallel() + + jobStartedChan := make(chan int64) + jobDoneChan := make(chan struct{}) + callbackFunc := func(ctx context.Context, job *Job[callbackArgs]) error { + jobStartedChan <- job.ID + <-ctx.Done() + close(jobDoneChan) + return nil + } + client := runNewTestClient(ctx, t, newTestConfig(t, callbackFunc)) + + insertedJob, err := client.Insert(ctx, callbackArgs{}, nil) + require.NoError(t, err) + + startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertedJob.ID, startedJobID) + + select { + case <-client.Stopped(): + t.Fatal("expected client to not be stopped yet") + default: + } + + stopCtx, stopCancel := context.WithTimeout(ctx, 5*time.Second) + t.Cleanup(stopCancel) + + if err := client.StopAndCancel(stopCtx); err != nil { + t.Fatal(err) + } + + riverinternaltest.WaitOrTimeout(t, client.Stopped()) + }) +}
Probably not worth rewriting for this, but it might not be the worst idea to put this sort of thing in the general `Test_Client` suite for other changes of this sort. Normalizes code a little more, and needs less bespoke set up code for every client function.
river
github_2023
go
90
riverqueue
brandur
@@ -705,6 +705,12 @@ func (c *Client[TTx]) StopAndCancel(ctx context.Context) error { return c.awaitStop(ctx) } +// Stopped returns a channel that will be closed when the Client has stopped. +// It can be used to wait for a graceful shutdown to complete.
Maybe add a sentence on what happens with it when a stop context is cancelled?
river
github_2023
others
79
riverqueue
brandur
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed
IMO, it might be better to describe how an app might be broken by this change and why we did it. Might not be the best habit to get into to start including a regular "Breaking" section in the semver doc heh.
river
github_2023
go
79
riverqueue
brandur
@@ -105,52 +105,40 @@ func Example_gracefulShutdown() { <-sigintOrTerm fmt.Printf("Received SIGINT/SIGTERM; initiating soft stop (try to wait for jobs to finish)\n") - softStopSucceeded := make(chan struct{}) + softStopCtx, softStopCtxCancel := context.WithTimeout(ctx, 10*time.Second)
This example's fully reproduced in the docs, so worth updating it over there too.
river
github_2023
go
37
riverqueue
bgentry
@@ -69,7 +68,7 @@ type JobRow struct { // Errors is a set of errors that occurred when the job was worked, one for // each attempt. Ordered from earliest error to the latest error. - Errors []AttemptError + Errors [][]byte
This seems like a fairly big downside and it's the reason I think #36 is the only viable option. I know we'll also be making use of metadata in the future and I think it's important that we keep these both as usable as possible.
river
github_2023
go
53
riverqueue
brandur
@@ -298,6 +298,10 @@ func (a *StandardAdapter) JobInsertManyTx(ctx context.Context, tx pgx.Tx, params metadata = []byte("{}") } + tags := params.Tags + if tags == nil { + tags = []string{} + }
Doh. COPY/FROM.
river
github_2023
others
53
riverqueue
brandur
@@ -0,0 +1,2 @@ +ALTER TABLE river_job ALTER COLUMN tags DROP NOT NULL,
Doh, this is still kinda bad and could be changed, but you still need to add these to a list of migrations here along with `go:embed` statements: https://github.com/riverqueue/river/blob/master/internal/dbmigrate/db_migrate.go#L47-L48 We should fix that, but it was the fastest/easiest way to introduce migrations at the time.
river
github_2023
go
43
riverqueue
bgentry
@@ -108,9 +110,21 @@ const ( JobStateScheduled JobState = "scheduled" ) +// AttemptError is an error from a single job attempt that failed due to an +// error or a panic. type AttemptError struct { - At time.Time `json:"at"` - Error string `json:"error"` - Num int `json:"num"` - Trace string `json:"trace"` + // At is the time at which the error occurred. + At time.Time `json:"at"` + + // Attempt is the attempt number on which the error occurred (maps to + // Attempt on a job row). + Attempt int `json:"attempt"`
I think it’s a nice ergonomic improvement at the small cost of a few more bytes for each error in the db. Given we’re serializing stack traces that’s an insignificant difference. Related, should we be limiting the buffer size for stack traces somehow? These could result in very large payloads.
river
github_2023
go
36
riverqueue
bgentry
@@ -0,0 +1,116 @@ +// Package rivertype stores some of the lowest level River primitives so they +// can be shared amongst a number of packages including the top-level river +// package, database drivers, and internal utilities. +package rivertype + +import ( + "time" +) + +// JobRow contains the properties of a job that are persisted to the database. +// Use of `Job[T]` will generally be preferred in user-facing code like worker +// interfaces. +type JobRow struct { + // ID of the job. Generated as part of a Postgres sequence and generally + // ascending in nature, but there may be gaps in it as transactions roll + // back. + ID int64 + + // Attempt is the attempt number of the job. Jobs are inserted at 0, the + // number is incremented to 1 the first time work its worked, and may + // increment further if it's either snoozed or errors. + Attempt int + + // AttemptedAt is the time that the job was last worked. Starts out as `nil` + // on a new insert. + AttemptedAt *time.Time + + // AttemptedBy is the set of worker IDs that have worked this job. A worker + // ID differs between different programs, but is shared by all executors + // within any given one. (i.e. Different Go processes have different IDs, + // but IDs are shared within any given process.) A process generates a new + // ULID (an ordered UUID) worker ID when it starts up. + AttemptedBy []string
Post launch we are probably going to want to think more about the worker ID thing and maybe offer a way for users to set this per client to match an internal identifier they already have for the node.
river
github_2023
others
35
riverqueue
bgentry
@@ -23,6 +23,8 @@ github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNs github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.0.4 h1:336o1gbjpsSe1I9Es0eoJkBagXTmkXdzZ1M8j9kwLUU= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.0.4/go.mod h1:k6hsPkW9Fl3qURzyLHbvxUCqWDpit0WrZ3oEaKezD3E=
1. What's the process for cutting a new release if we go this route? 2. As part of cutting a new release, will the `go.mod` and `go.sum` disagree with each other around the required version of `riverpgxv5`? 3. After shipping this change, if a user installs `river@latest`, will they also get the latest `riverpgxv5` no matter what this `go.sum` says? If we need to be certain about any of this we can experiment w/ my other repo first.
river
github_2023
go
29
riverqueue
bgentry
@@ -54,7 +54,13 @@ func Example_requireInserted() { panic(err) } - _, err = riverClient.Insert(ctx, RequiredArgs{ + tx, err := dbPool.Begin(ctx) + if err != nil { + panic(err) + } + defer func() { _ = tx.Rollback(ctx) }()
Is this actually safe in pgx? From [the docs](https://pkg.go.dev/github.com/jackc/pgx/v5#Conn.Begin): > Begin starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation. Implies to me that if your context is cancelled or times out, the tx will be left open with the code here.
river
github_2023
go
31
riverqueue
bgentry
@@ -203,6 +214,54 @@ func Test_Client(t *testing.T) { rivertest.WaitOrTimeout(t, workedChan) }) + t.Run("JobCancel", func(t *testing.T) { + t.Parallel() + + client, bundle := setup(t) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + return JobCancel(fmt.Errorf("a persisted internal error")) + }))
Should we check the job in the database after this is all done for completeness? i.e. make sure it is cancelled and the error is saved
river
github_2023
go
31
riverqueue
bgentry
@@ -203,6 +214,54 @@ func Test_Client(t *testing.T) { rivertest.WaitOrTimeout(t, workedChan) }) + t.Run("JobCancel", func(t *testing.T) { + t.Parallel() + + client, bundle := setup(t) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + return JobCancel(fmt.Errorf("a persisted internal error")) + })) + + startClient(ctx, t, client) + + _, err := client.Insert(ctx, &JobArgs{}, nil) + require.NoError(t, err) + + event := rivertest.WaitOrTimeout(t, bundle.subscribeChan) + require.Equal(t, EventKindJobCancelled, event.Kind) + require.Equal(t, JobStateCancelled, event.Job.State) + require.WithinDuration(t, time.Now(), *event.Job.FinalizedAt, 2*time.Second) + }) + + t.Run("JobSnooze", func(t *testing.T) { + t.Parallel() + + client, bundle := setup(t) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + return JobSnooze(15 * time.Minute) + }))
Likewise, should we check anything about the job in the DB after this executes? Since this is an integration test it might be a good safety check.
river
github_2023
go
30
riverqueue
bgentry
@@ -433,7 +433,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client // There are a number of internal components that are only needed/desired if // we're actually going to be working jobs (as opposed to just enqueueing // them): - if config.willExecuteJobs() {
Should we also make it a config error to specify `Queues` with no pool? `Queues` implies there will be workers working on those queues, which doesn't make sense if those workers could never be enabled.
river
github_2023
go
30
riverqueue
bgentry
@@ -697,6 +710,21 @@ func Test_Client_InsertTx(t *testing.T) { require.Equal(t, []string{"custom"}, jobRow.Tags) }) + // A client's allowed to send nil to their drive so they can, for example,
```suggestion // A client's allowed to send nil to their driver so they can, for example, ```
river
github_2023
go
30
riverqueue
bgentry
@@ -856,6 +900,24 @@ func Test_Client_InsertManyTx(t *testing.T) { require.Len(t, jobs, 2, fmt.Sprintf("Expected to find exactly two jobs of kind: %s", (noOpArgs{}).Kind())) }) + // A client's allowed to send nil to their drive so they can, for example, + // easily use test transactions in their test suite.
```suggestion // A client's allowed to send nil to their driver so they can, for example, // easily use test transactions in their test suite. ```
river
github_2023
others
19
riverqueue
bgentry
@@ -0,0 +1,28 @@ +# River + +River is an experimental Postgres queue for Go. + +## Development + +### Run tests + +Raise test databases: + + go run ./internal/cmd/testdbman create + +Run tests: + + go test ./...
Unfortunately our current `-p 1` requirement makes this not quite right 😞
river
github_2023
go
17
riverqueue
bgentry
@@ -0,0 +1,120 @@ +package river_test + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/slogutil" + "github.com/riverqueue/river/riverdriver/riverpgxv5" +) + +type SubscriptionArgs struct { + Cancel bool `json:"cancel"` + Fail bool `json:"fail"` +} + +func (SubscriptionArgs) Kind() string { return "subscription" } + +type SubscriptionWorker struct { + river.WorkerDefaults[SubscriptionArgs] +} + +func (w *SubscriptionWorker) Work(ctx context.Context, job *river.Job[SubscriptionArgs]) error { + switch { + case job.Args.Cancel: + return river.JobCancel(fmt.Errorf("cancelling job")) + case job.Args.Fail: + return fmt.Errorf("failing job") + } + return nil +} + +// Example_subscription demonstrates the use of client subscriptions to receive +// events containing information about worked jobs. +func Example_subscription() { + ctx := context.Background() + + dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_testdb_example")) + if err != nil { + panic(err) + } + defer dbPool.Close() + + // Required for the purpose of this test, but not necessary in real usage. + if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { + panic(err) + } + + workers := river.NewWorkers() + river.AddWorker(workers, &SubscriptionWorker{}) + + riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ + Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: 9}), // Suppress logging so example output is cleaner (9 > slog.LevelError). + Queues: map[string]river.QueueConfig{ + river.DefaultQueue: {MaxWorkers: 100}, + }, + Workers: workers, + }) + if err != nil { + panic(err) + } + + // Subscribers tell the River client the kinds of events they'd like to receive. 
+ completedChan, completedSubscribeCancel := riverClient.Subscribe(river.EventKindJobCompleted) + defer completedSubscribeCancel() + + // Multiple simultaneous subscriptions are allowed. + failedChan, failedSubscribeCancel := riverClient.Subscribe(river.EventKindJobFailed) + defer failedSubscribeCancel() + + otherChan, otherSubscribeCancel := riverClient.Subscribe(river.EventKindJobCancelled, river.EventKindJobSnoozed) + defer otherSubscribeCancel() + + if err := riverClient.Start(ctx); err != nil { + panic(err) + } + + // Insert one job for each subscription above: one to succeed, one to fail, + // and one that's cancelled that'll arrive on the "other" channel. + _, err = riverClient.Insert(ctx, SubscriptionArgs{}, nil) + if err != nil { + panic(err) + } + _, err = riverClient.Insert(ctx, SubscriptionArgs{Fail: true}, nil) + if err != nil { + panic(err) + } + _, err = riverClient.Insert(ctx, SubscriptionArgs{Cancel: true}, nil) + if err != nil { + panic(err) + } + + waitForJob := func(subscribeChan <-chan *river.Event) {
Yep, that's the right call, keep everything important inline for an example like this 👍
river
github_2023
go
16
riverqueue
bgentry
@@ -0,0 +1,113 @@ +package river_test + +import ( + "context" + "fmt" + "log/slog" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/slogutil" + "github.com/riverqueue/river/riverdriver/riverpgxv5" +) + +type CustomErrorHandler struct{} + +func (*CustomErrorHandler) HandleError(ctx context.Context, job *river.JobRow, err error) *river.ErrorHandlerResult { + fmt.Printf("Job errored with: %s\n", err) + + // Either function can also set the job to be immediately cancelled, which + // we take advantage of here to make sure it's not retried in the example. + // Can also be `return nil`. + return &river.ErrorHandlerResult{SetCancelled: true} +} + +func (*CustomErrorHandler) HandlePanic(ctx context.Context, job *river.JobRow, panicVal any) *river.ErrorHandlerResult { + fmt.Printf("Job panicked with: %v\n", panicVal) + return &river.ErrorHandlerResult{SetCancelled: true} +} + +type ErroringArgs struct { + ShouldError bool + ShouldPanic bool +} + +func (args ErroringArgs) Kind() string { return "erroring" } + +type ErroringWorker struct { + river.WorkerDefaults[ErroringArgs] +} + +func (w *ErroringWorker) Work(ctx context.Context, j *river.Job[ErroringArgs]) error { + switch { + case j.Args.ShouldError: + return fmt.Errorf("this job errored") + case j.Args.ShouldPanic: + panic("this job panicked") + } + return nil +} + +// Example_errorHandler demonstrates how to use the ErrorHandler interface for +// custom application telemetry. +func Example_errorHandler() { + ctx := context.Background() + + dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_testdb_example")) + if err != nil { + panic(err) + } + defer dbPool.Close() + + // Required for the purpose of this test, but not necessary in real usage. 
+ if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { + panic(err) + } + + workers := river.NewWorkers() + river.AddWorker(workers, &ErroringWorker{}) + + riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ + ErrorHandler: &CustomErrorHandler{}, + Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: 9}), // Suppress logging so example output is cleaner (9 > slog.LevelError).
The verbose error/panic output might be something we need to take a look at. I don't think there was much intentional design there, more that it just evolved that way as we pieced together functionality.
river
github_2023
go
16
riverqueue
bgentry
@@ -0,0 +1,113 @@ +package river_test + +import ( + "context" + "fmt" + "log/slog" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/slogutil" + "github.com/riverqueue/river/riverdriver/riverpgxv5" +) + +type CustomErrorHandler struct{} + +func (*CustomErrorHandler) HandleError(ctx context.Context, job *river.JobRow, err error) *river.ErrorHandlerResult { + fmt.Printf("Job errored with: %s\n", err) + + // Either function can also set the job to be immediately cancelled, which + // we take advantage of here to make sure it's not retried in the example. + // Can also be `return nil`. + return &river.ErrorHandlerResult{SetCancelled: true}
I'm torn. It's useful for this example to also demonstrate `SetCancelled` because there's nowhere else we'd do that. However I worry a bit about people copy-pasta'ing this without realizing what they're doing, particularly in `HandlePanic` where there's no comment about it. Maybe the answer is to do both? If we set MaxAttempts to 1 for the entire worker, then we could easily show off both the `SetCancelled: true` usage as well as the more common usage where you probably don't want to cancel any job that panics or errors. If we go that route, I'd say it makes the most sense to put the `SetCancelled` in `HandlePanic` so that `HandleError` can demonstrate the most common usage.
river
github_2023
go
11
riverqueue
bgentry
@@ -14,44 +14,47 @@ import ( "github.com/riverqueue/river/riverdriver/riverpgxv5" ) -// Account represents a minimal account containing a unique identifier, recent -// expenditures, and a remaining total. +// Account represents a minimal account recent expenditures and remaining total.
typo or missing words in here I think? "account recent expenditures"
river
github_2023
go
7
riverqueue
bgentry
@@ -1087,6 +1091,10 @@ func (c *Client[TTx]) InsertManyTx(ctx context.Context, tx TTx, params []InsertM return 0, err } + if err := c.validateManyInsertOpts(param.InsertOpts); err != nil { + return 0, err + } +
These method bodies are getting to be fairly long for being entirely duplicated, thoughts on extracting everything except the last line of each into a shared unexported method or helper?
river
github_2023
others
3
riverqueue
bgentry
@@ -22,9 +22,10 @@ require ( github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.0.0-20231110014757-1a7176abdf3c // indirect
Hmm, does `bun` [get away](https://github.com/uptrace/bun/blob/master/go.mod) with not needing this because they don't directly use their driver packages within the main package? I'm just wondering if this is going to cause more pain than it's worth right now, like will we have to bump this with every version and always run into a chicken & egg issue?
versatile_audio_super_resolution
github_2023
python
24
haoheliu
haoheliu
@@ -277,10 +307,12 @@ def save_wave(waveform, savepath, name="outwav", samplerate=16000): if len(fname) > 255: fname = f"{hex(hash(fname))}.wav" - path = os.path.join(savepath, fname) + save_path = os.path.join(savepath, fname) + temp_path = os.path.join(tempfile.gettempdir(), fname) print("\033[98m {}\033[00m" .format("Don't forget to try different seeds by setting --seed <int> so that AudioSR can have optimal performance on your hardware.")) - print("Save audio to %s." % path) - sf.write(path, waveform[i, 0], samplerate=samplerate) + print("Save audio to %s." % save_path) + sf.write(temp_path, waveform[i, 0], samplerate=samplerate) + strip_silence(inputpath, temp_path, save_path)
Good work
EasyAlbum
github_2023
others
5
BillyWei01
BillyWei01
@@ -55,11 +60,15 @@ class MainActivity : AppCompatActivity() { private class TestMediaFilter(private val opt: Option) : MediaFilter { override fun accept(media: MediaData): Boolean { - return when (opt) { + val mediaType = when (opt) { Option.VIDEO -> media.isVideo Option.IMAGE -> !media.isVideo else -> true } + val valid = ((media.fileSize in ((minSize + 1) until maxSize)) + && (media.width > minWidth) + && (media.height > minHeight)) + return if (opt == Option.ALL) valid else mediaType && valid
'width' and 'height' from MediaStore (the database) may sometimes be 0 even though the real width and height of the file are not 0 (the sizes just weren't set in MediaStore, but the file itself is fine). So it's not recommended to filter media with conditions like "media.width > minWidth"; "media.realWidth > minWidth" would be better, even though 'realWidth' might take some time to compute. In my opinion, filtering media by fileSize alone is fine — is there any image with 0px width or height whose fileSize is not 0?
PowerInfer
github_2023
c
73
SJTU-IPADS
akemimadoka
@@ -14407,7 +14392,8 @@ static void ggml_compute_forward_mul_mat_axpy_dense( // int *gid = (int *)(dst->src[3]->data); // printf("down %d up %d ne00 %d\n", ir10, ir11, ne00); - float vec[ne00*4]; + // float vec[ne00*4]; + float* vec = (float *)malloc(ne00 * 4 * sizeof(float));
[malloc 速度有可能很慢,尤其在 msvc 下](https://developercommunity.visualstudio.com/t/mallocfree-dramatic-performance-slowdown/552439),或许可以考虑使用 alloca(msvc 下 [_alloca](https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/alloca?view=msvc-170),有替代 [_malloca](https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/malloca?view=msvc-170))在栈上分配内存代替? 另外 vla 在 gcc/clang/msvc 中[只有 msvc 不支持](https://devblogs.microsoft.com/cppblog/c11-and-c17-standard-support-arriving-in-msvc/),或许此处加个对 msvc 的条件编译更好,避免影响其他编译器下的性能?
PowerInfer
github_2023
others
187
SJTU-IPADS
hodlen
@@ -4614,6 +4614,44 @@ static __global__ void dequantize_mul_mat_axpy_sparse_batch(const void * __restr } } +// nrows: 11008(or 32 * x < 11008), ncols: 4096 +template <int qk, int qr, dequantize_kernel_t dequantize_kernel> +static __global__ void dequantize_mul_mat_axpy_sparse_batch_lessatom(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows, int *lst, float *idx) { + int warp_id = threadIdx.y; + int tid = threadIdx.x + blockIdx.x * 32; + int col = tid * 2; + dfloat2 v; + int iqs = (col % qk) / qr; + float tmp[2]; + tmp[0] = 0.0; + tmp[1] = 0.0; + __shared__ float res[64]; + res[threadIdx.x] = 0.0; + res[threadIdx.x + 32] = 0.0; + +#pragma unroll 32 + for (int row = warp_id; row < nrows; row += 32) { + int raw_row = lst ? lst[row] : row; + // int raw_row = row; + dfloat y_row = y[raw_row]; + if (y_row == 0.0) { + continue; + } + const int ib = (row * ncols + col) / qk; + dequantize_kernel(vx, ib, iqs, v); + tmp[0] += v.x * y_row; + tmp[1] += v.y * y_row; + } + const int adder_loc = threadIdx.x % 16 + threadIdx.x / 16 * 32; + atomicAdd(res + adder_loc, tmp[0]); + atomicAdd(res + adder_loc + 16, tmp[1]); + __syncthreads(); + if (warp_id <= 1) {
```suggestion if (warp_id < 1) { ```
PowerInfer
github_2023
others
187
SJTU-IPADS
hodlen
@@ -5598,13 +5636,10 @@ static void dequantize_axpy_vec_q4_0_cuda(const void * vx, const dfloat * y, flo } static void dequantize_axpy_sparse_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream, int *lst, float *idx) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); - const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); - // dequantize_mul_mat_axpy<QK4_0, QR4_0, dequantize_q4_0> - // <<<block_nums, block_dims, ncols*sizeof(float), stream>>>(vx, y, dst, ncols, nrows); - dequantize_mul_mat_axpy_sparse<QK4_0, QR4_0, dequantize_q4_0> - <<<block_nums, block_dims, ncols*sizeof(float), stream>>>(vx, y, dst, ncols, nrows, lst, idx); + const dim3 block_dim = dim3(32, 32); + const int block_num = ncols / 64;
```suggestion const int block_num = (ncols + 63) / 64; ```
PowerInfer
github_2023
others
44
SJTU-IPADS
hodlen
@@ -173,9 +173,16 @@ Please refer to our [paper](https://ipads.se.sjtu.edu.cn/_media/publications/pow ## FAQs 1. What if I encountered `CUDA_ERROR_OUT_OF_MEMORY`? - You can try to run with `--reset-gpu-index` argument to rebuild the GPU index for this model to avoid any stale cache. - - Due to our current implementation, model offloading might not be as accurate as expected. You can try with `--vram-budget` with a slightly lower value or `--disable-gpu-index` to disable FFN offloading. -2. What if... - - Issues are welcomed! Please feel free to open an issue and attach your running environment and running parameters. We will try our best to help you. + - Due to our current implementation, model offloading might not be as accurate as expected. You can try with `--vram-budget` with a slightly lower value or `--disable-gpu-index` to disable FFN offloading. + +2. Does PowerInfer support mistral, original llama, Qwen, ...? + - Now we only support models with ReLU/ReGLU/Squared ReLU activation function. So we do not support these models now. It's worth mentioning that a [paper](https://arxiv.org/pdf/2310.04564.pdf) has demonstrated that using the ReLU/ReGLU activation function has a negligible impact on convergence and performance. + +3. Why is there a noticeable downgrade in the performance metrics of our current ReLU model, particularly the 70B model? + - Due to the typical requirement of around 2T tokens for LLM training, our model's fine-tuning was conducted with only 5B tokens. This insufficient retraining has resulted in the model's inability to regain its original performance. We are actively working on updating to a more capable model, so please stay tuned.
```suggestion - In contrast to the typical requirement of around 2T tokens for LLM training, our model's fine-tuning was conducted with only 5B tokens. This insufficient retraining has resulted in the model's inability to regain its original performance. We are actively working on updating to a more capable model, so please stay tuned. ```
PowerInfer
github_2023
others
44
SJTU-IPADS
hodlen
@@ -173,9 +173,16 @@ Please refer to our [paper](https://ipads.se.sjtu.edu.cn/_media/publications/pow ## FAQs 1. What if I encountered `CUDA_ERROR_OUT_OF_MEMORY`? - You can try to run with `--reset-gpu-index` argument to rebuild the GPU index for this model to avoid any stale cache. - - Due to our current implementation, model offloading might not be as accurate as expected. You can try with `--vram-budget` with a slightly lower value or `--disable-gpu-index` to disable FFN offloading. -2. What if... - - Issues are welcomed! Please feel free to open an issue and attach your running environment and running parameters. We will try our best to help you. + - Due to our current implementation, model offloading might not be as accurate as expected. You can try with `--vram-budget` with a slightly lower value or `--disable-gpu-index` to disable FFN offloading. + +2. Does PowerInfer support mistral, original llama, Qwen, ...? + - Now we only support models with ReLU/ReGLU/Squared ReLU activation function. So we do not support these models now. It's worth mentioning that a [paper](https://arxiv.org/pdf/2310.04564.pdf) has demonstrated that using the ReLU/ReGLU activation function has a negligible impact on convergence and performance. + +3. Why is there a noticeable downgrade in the performance metrics of our current ReLU model, particularly the 70B model? + - Due to the typical requirement of around 2T tokens for LLM training, our model's fine-tuning was conducted with only 5B tokens. This insufficient retraining has resulted in the model's inability to regain its original performance. We are actively working on updating to a more capable model, so please stay tuned. + +4. What if... + - Issues are welcomed! Please feel free to open an issu,e and attach your running environment and running parameters. We will try our best to help you.
```suggestion - Issues are welcomed! Please feel free to open an issue and attach your running environment and running parameters. We will try our best to help you. ```