repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
river
github_2023
others
456
riverqueue
bgentry
@@ -15,4 +15,35 @@ INSERT INTO river_migration SELECT created_at, 'main', version FROM river_migration_old; -DROP TABLE river_migration_old; \ No newline at end of file +DROP TABLE river_migration_old; + +-- +-- Create `river_client` and derivative. +-- +-- This feature hasn't quite yet been implemented, but we're taking advantage of +-- the migration to add the schema early so that we can add it later without an +-- additional migration. +-- + +CREATE UNLOGGED TABLE river_client ( + name text PRIMARY KEY NOT NULL,
notably this creates a hard constraint on unique client names, which may be breaking when rolled out
river
github_2023
go
458
riverqueue
bgentry
@@ -359,17 +359,33 @@ func migrateDown(ctx context.Context, logger *slog.Logger, out io.Writer, opts * return true, nil } +// Rounds a duration so that it doesn't show so much cluttered and not useful +// precision in printf output. +func roundDuration(duration time.Duration) time.Duration { + switch { + case duration > 1*time.Second: + return duration.Truncate(10 * time.Millisecond) + case duration < 1*time.Millisecond: + return duration.Truncate(10 * time.Nanosecond) + default: + return duration.Truncate(10 * time.Microsecond) + } +}
hah, thanks, I noticed this the other day and I'm glad you did too took the initiative to fix it!
river
github_2023
others
443
riverqueue
bgentry
@@ -1,6 +1,22 @@ -DROP INDEX river_migration_line_version_idx; -CREATE UNIQUE INDEX river_migration_version_idx ON river_migration USING btree(version); +-- +-- If any non-main migration are present, 005 should be considered irreversible. +--
Any way we could make this intentionally error? Thinking of something like selecting from a nonexistent column `irreversible_migration` if any values are in the set of `SELECT FROM river_migration WHERE line != 'main'` or something
river
github_2023
others
443
riverqueue
bgentry
@@ -1,12 +1,18 @@ ALTER TABLE river_migration - ADD COLUMN line text; + RENAME TO river_migration_old; -UPDATE river_migration -SET line = 'main'; +CREATE TABLE river_migration( + created_at timestamptz NOT NULL DEFAULT NOW(), + line TEXT NOT NULL, + version bigint NOT NULL,
imo it's a little funky to not put the PK fields first just to make the default `SELECT * FROM river_migration` a bit cleaner. Seems like a warranted variance from the typical desire to alphabetize.
river
github_2023
go
447
riverqueue
bgentry
@@ -151,6 +151,13 @@ See the [`InsertAndWork` example] for complete code. - [Work functions] for simplified worker implementation. +## Cross language enqueueing + +River supports inserting jobs in some non-Go languages which are then be worked by Go implementations. This may be desirable in performance sensitive cases so that jobs can take advantage of Go's fast runtime.
```suggestion River supports inserting jobs in some non-Go languages which are then worked by Go implementations. This may be desirable in performance sensitive cases so that jobs can take advantage of Go's fast runtime. ```
river
github_2023
go
438
riverqueue
bgentry
@@ -427,14 +428,16 @@ func (s *PeriodicJobEnqueuer) insertParamsFromConstructor(ctx context.Context, c return insertParams, uniqueOpts, true } +const periodicJobEnqueurVeryLongDuration = 24 * time.Hour
Small typo in the name of this long constant: ```suggestion const periodicJobEnqueuerVeryLongDuration = 24 * time.Hour ```
river
github_2023
go
435
riverqueue
bgentry
@@ -324,9 +325,21 @@ func (m *Migrator[TTx]) migrateDown(ctx context.Context, exec riverdriver.Execut return res, nil } - if !opts.DryRun { - if _, err := exec.MigrationDeleteByVersionMany(ctx, sliceutil.Map(res.Versions, migrateVersionToInt)); err != nil { - return nil, fmt.Errorf("error deleting migration rows for versions %+v: %w", res.Versions, err) + if !opts.DryRun && len(res.Versions) > 0 { + versions := sliceutil.Map(res.Versions, migrateVersionToInt) + + // Version 005 is hard-coded here because that's the version in which + // the migration `line` comes in. If migration to a point equal or above + // 005, we can remove migrations with a line included, but otherwise we + // must omit the `line` column from queries because it doesn't exist. + if m.line == riverdriver.MigrationLineMain && slices.Min(versions) <= 5 {
The value 5 here might be clearer if put in a named constant
river
github_2023
go
430
riverqueue
bgentry
@@ -0,0 +1,25 @@ +package river + +import ( + "github.com/riverqueue/river/rivershared/baseservice" + "github.com/riverqueue/river/rivershared/startstop" +) + +// A plugin API that drivers may implemented to extend a River client. Driver
```suggestion // A plugin API that drivers may implement to extend a River client. Driver ```
river
github_2023
go
430
riverqueue
bgentry
@@ -0,0 +1,99 @@ +package river + +import ( + "context" + "testing" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/baseservice" + "github.com/riverqueue/river/rivershared/riversharedtest" + "github.com/riverqueue/river/rivershared/startstop" +) + +func TestClientDriverPlugin(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testBundle struct { + pluginDriver *TestDriverWithPlugin + } + + setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { + t.Helper() + + pluginDriver := newDriverWithPlugin(t, riverinternaltest.TestDB(ctx, t)) + + client, err := NewClient(pluginDriver, newTestConfig(t, nil)) + require.NoError(t, err) + + return client, &testBundle{ + pluginDriver: pluginDriver, + } + } + + t.Run("ServicesStart", func(t *testing.T) { + t.Parallel() + + client, bundle := setup(t) + + startClient(ctx, t, client) + + riversharedtest.WaitOrTimeout(t, startstop.WaitAllStartedC( + bundle.pluginDriver.maintenanceService, + bundle.pluginDriver.service, + )) + }) +} + +var _ driverPlugin[pgx.Tx] = &TestDriverWithPlugin{} + +type TestDriverWithPlugin struct { + *riverpgxv5.Driver + maintenanceService startstop.Service + service startstop.Service +} + +func newDriverWithPlugin(t *testing.T, dbPool *pgxpool.Pool) *TestDriverWithPlugin { + t.Helper() + + newService := func(name string) startstop.Service { + return startstop.StartStopFunc(func(ctx context.Context, shouldStart bool, started, stopped func()) error { + if !shouldStart { + return nil + } + + go func() { + started() + defer stopped() // this defer should come first so it's last out + + t.Logf("Test service started: %s", name) + + <-ctx.Done() + }() + + return nil + }) + } + + return &TestDriverWithPlugin{ + Driver: riverpgxv5.New(dbPool), + maintenanceService: 
newService("maintenance service"), + service: newService("other service"), + } +} + +func (d *TestDriverWithPlugin) PluginInit(archetype *baseservice.Archetype, client *Client[pgx.Tx]) {} + +func (d *TestDriverWithPlugin) PluginMaintenanceServices() []startstop.Service { + return []startstop.Service{d.maintenanceService} +} + +func (d *TestDriverWithPlugin) PluginServices() []startstop.Service { + return []startstop.Service{d.service} +}
Might be helpful to have the test driver panic or `t.Fatal()` if it hasn't been init'd yet when these service methods are called.
river
github_2023
go
432
riverqueue
bgentry
@@ -164,7 +164,7 @@ func DrainContinuously[T any](drainChan <-chan T) func() []T { func TestDB(ctx context.Context, tb testing.TB) *pgxpool.Pool { tb.Helper() - ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
It's so weird that this is not long enough. Are we creating enough test DBs for the `GOMAXPROCS`? They really shouldn't be waiting at all if so, at least not longer than whatever the time is to close the pool and wait for `TruncateRiverTables` 🤔
river
github_2023
go
429
riverqueue
bgentry
@@ -0,0 +1,183 @@ +package riversharedtest + +import ( + "fmt" + "log/slog" + "os" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "github.com/riverqueue/river/rivershared/baseservice" + "github.com/riverqueue/river/rivershared/slogtest" + "github.com/riverqueue/river/rivershared/util/randutil" +) + +// Shared rand instance for archetypes. Random number generation is rare +// enough that it's not likely to produce much contention. +var rand = randutil.NewCryptoSeededConcurrentSafeRand() //nolint:gochecknoglobals + +// BaseServiceArchetype returns a new base service suitable for use in tests. +// Returns a new instance so that it's not possible to accidentally taint a +// shared object. +func BaseServiceArchetype(tb testing.TB) *baseservice.Archetype { + tb.Helper() + + return &baseservice.Archetype{ + Logger: Logger(tb), + Rand: rand, + Time: &TimeStub{}, + } +} + +// Logger returns a logger suitable for use in tests. +// +// Defaults to informational verbosity. If env is set with `RIVER_DEBUG=true`, +// debug level verbosity is activated. +func Logger(tb testing.TB) *slog.Logger { + tb.Helper() + + if os.Getenv("RIVER_DEBUG") == "1" || os.Getenv("RIVER_DEBUG") == "true" { + return slogtest.NewLogger(tb, &slog.HandlerOptions{Level: slog.LevelDebug}) + } + + return slogtest.NewLogger(tb, nil) +} + +// Logger returns a logger suitable for use in tests which outputs only at warn +// or above. Useful in tests where particularly noisy log output is expected. +func LoggerWarn(tb testing.TB) *slog.Logger { + tb.Helper() + return slogtest.NewLogger(tb, &slog.HandlerOptions{Level: slog.LevelWarn}) +} + +// TimeStub implements baseservice.TimeGenerator to allow time to be stubbed in +// tests. 
+type TimeStub struct { + mu sync.RWMutex + nowUTC *time.Time +} + +func (t *TimeStub) NowUTC() time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + + if t.nowUTC == nil { + return time.Now().UTC() + } + + return *t.nowUTC +} + +func (t *TimeStub) NowUTCOrNil() *time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + + return t.nowUTC +} + +func (t *TimeStub) StubNowUTC(nowUTC time.Time) time.Time { + t.mu.Lock() + defer t.mu.Unlock() + + t.nowUTC = &nowUTC + return nowUTC +} + +// WaitOrTimeout tries to wait on the given channel for a value to come through, +// and returns it if one does, but times out after a reasonable amount of time. +// Useful to guarantee that test cases don't hang forever, even in the event of +// something wrong. +func WaitOrTimeout[T any](tb testing.TB, waitChan <-chan T) T { + tb.Helper() + + timeout := WaitTimeout() + + select { + case value := <-waitChan: + return value + case <-time.After(timeout): + require.FailNowf(tb, "WaitOrTimeout timed out", + "WaitOrTimeout timed out after waiting %s", timeout) + } + return *new(T) // unreachable +} + +// WaitOrTimeoutN tries to wait on the given channel for N values to come +// through, and returns it if they do, but times out after a reasonable amount +// of time. Useful to guarantee that test cases don't hang forever, even in the +// event of something wrong. 
+func WaitOrTimeoutN[T any](tb testing.TB, waitChan <-chan T, numValues int) []T { + tb.Helper() + + var ( + timeout = WaitTimeout() + deadline = time.Now().Add(timeout) + values = make([]T, 0, numValues) + ) + + for { + select { + case value := <-waitChan: + values = append(values, value) + + if len(values) >= numValues { + return values + } + + case <-time.After(time.Until(deadline)): + require.FailNowf(tb, "WaitOrTimeout timed out", + "WaitOrTimeout timed out after waiting %s (received %d value(s), wanted %d)", timeout, len(values), numValues) + return nil + } + } +} + +// WaitTimeout returns a duration broadly appropriate for waiting on an expected +// event in a test, and which is used for `TestSignal.WaitOrTimeout` in the main +// package and `WaitOrTimeout` above. It's main purpose is to allow a little
```suggestion // package and `WaitOrTimeout` above. Its main purpose is to allow a little ```
river
github_2023
go
410
riverqueue
bgentry
@@ -1647,6 +1644,10 @@ func (c *Client[TTx]) JobListTx(ctx context.Context, tx TTx, params *JobListPara // client, and can be used to add new ones or remove existing ones. func (c *Client[TTx]) PeriodicJobs() *PeriodicJobBundle { return c.periodicJobs } +// Queues returns the currently configured set of queues for the client, and can +// be used to add new ones. +func (c *Client[TTx]) Queues() *queueBundle { return c.queues }
I don't think we want to have an exported method that returns an unexported type like this. Whether or not it works, it looks bad/undiscoverable in docs. I think the type either needs to be exported with minimal methods exposed on it, or else we need to return an exported interface type here.
river
github_2023
go
410
riverqueue
bgentry
@@ -57,6 +57,8 @@ func (m *clientMonitor) Start(ctx context.Context) error { // uninitialized. Unlike SetProducerStatus, it does not broadcast the change // and is only meant to be used during initial client startup. func (m *clientMonitor) InitializeProducerStatus(queueName string) { + m.statusSnapshotMu.Lock() + defer m.statusSnapshotMu.Unlock()
Makes sense. As this wasn't dynamic before it wasn't really needed.
river
github_2023
go
351
riverqueue
bgentry
@@ -1264,6 +1268,18 @@ func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*rive } if tags == nil { tags = []string{} + } else { + for _, tag := range tags { + if len(tag) > 255 { + return nil, nil, errors.New("tags should be a maximum of 255 characters long") + } + // Restricted commas because we need those for batch inserts with + // the riverdatabasesql driver. We may want to restrict other + // special characters as well. + if strings.Contains(tag, ",") { + return nil, nil, errors.New("tags should not contain commas") + }
maybe tags should be restricted to `/a-z_\-/i`? _possibly_ include single spaces in that but I don't think it would harm any use cases to leave that out.
river
github_2023
others
351
riverqueue
bgentry
@@ -22,10 +24,19 @@ sql: emit_result_struct_pointers: true rename: - river_job_state: "JobState" ttl: "TTL" overrides: + # `database/sql` really does not play nicely with json/jsonb. If it's + # left as `[]byte` or `json.RawMessage`, `database/sql` will try to + # encode it as binary (with a \x) which Postgres won't accept as + # json/jsonb at all. Using a custom struct crashed and burned, even + # with a custom scanner implementation. This is the only way I could + # get it to work: strings are compatible with our use of bytes slices, + # but Postgrs will also accept them as json/jsonb.
```suggestion # but Postgres will also accept them as json/jsonb. ```
river
github_2023
go
351
riverqueue
bgentry
@@ -1264,6 +1268,15 @@ func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*rive } if tags == nil { tags = []string{} + } else { + for _, tag := range tags { + if len(tag) > 255 { + return nil, nil, errors.New("tags should be a maximum of 255 characters long") + } + if !tagRE.MatchString(tag) { + return nil, nil, errors.New("tags should match regex " + tagRE.String()) + } + }
This probably requires its own changelog entry.
river
github_2023
go
351
riverqueue
bgentry
@@ -70,107 +73,392 @@ func (e *Executor) Exec(ctx context.Context, sql string) (struct{}, error) { } func (e *Executor) JobCancel(ctx context.Context, params *riverdriver.JobCancelParams) (*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + cancelledAt, err := params.CancelAttemptedAt.MarshalJSON() + if err != nil { + return nil, err + } + + job, err := e.queries.JobCancel(ctx, e.dbtx, &dbsqlc.JobCancelParams{ + ID: params.ID, + CancelAttemptedAt: string(cancelledAt), + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job) } func (e *Executor) JobCountByState(ctx context.Context, state rivertype.JobState) (int, error) { - return 0, riverdriver.ErrNotImplemented + numJobs, err := e.queries.JobCountByState(ctx, e.dbtx, dbsqlc.RiverJobState(state)) + if err != nil { + return 0, err + } + return int(numJobs), nil } func (e *Executor) JobDeleteBefore(ctx context.Context, params *riverdriver.JobDeleteBeforeParams) (int, error) { - return 0, riverdriver.ErrNotImplemented + numDeleted, err := e.queries.JobDeleteBefore(ctx, e.dbtx, &dbsqlc.JobDeleteBeforeParams{ + CancelledFinalizedAtHorizon: params.CancelledFinalizedAtHorizon, + CompletedFinalizedAtHorizon: params.CompletedFinalizedAtHorizon, + DiscardedFinalizedAtHorizon: params.DiscardedFinalizedAtHorizon, + Max: int64(params.Max), + }) + return int(numDeleted), interpretError(err) } func (e *Executor) JobGetAvailable(ctx context.Context, params *riverdriver.JobGetAvailableParams) ([]*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + jobs, err := e.queries.JobGetAvailable(ctx, e.dbtx, &dbsqlc.JobGetAvailableParams{ + AttemptedBy: params.AttemptedBy, + Max: int32(params.Max), + Queue: params.Queue, + }) + if err != nil { + return nil, interpretError(err) + } + return mapSliceError(jobs, jobRowFromInternal) } func (e *Executor) JobGetByID(ctx context.Context, id int64) (*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented 
+ job, err := e.queries.JobGetByID(ctx, e.dbtx, id) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job) } func (e *Executor) JobGetByIDMany(ctx context.Context, id []int64) ([]*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + jobs, err := e.queries.JobGetByIDMany(ctx, e.dbtx, id) + if err != nil { + return nil, interpretError(err) + } + return mapSliceError(jobs, jobRowFromInternal) } func (e *Executor) JobGetByKindAndUniqueProperties(ctx context.Context, params *riverdriver.JobGetByKindAndUniquePropertiesParams) (*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + job, err := e.queries.JobGetByKindAndUniqueProperties(ctx, e.dbtx, &dbsqlc.JobGetByKindAndUniquePropertiesParams{ + Args: valOrDefault(string(params.Args), "{}"), + ByArgs: params.ByArgs, + ByCreatedAt: params.ByCreatedAt, + ByQueue: params.ByQueue, + ByState: params.ByState, + CreatedAtBegin: params.CreatedAtBegin, + CreatedAtEnd: params.CreatedAtEnd, + Kind: params.Kind, + Queue: params.Queue, + State: params.State, + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job) } func (e *Executor) JobGetByKindMany(ctx context.Context, kind []string) ([]*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + jobs, err := e.queries.JobGetByKindMany(ctx, e.dbtx, kind) + if err != nil { + return nil, interpretError(err) + } + return mapSliceError(jobs, jobRowFromInternal) } func (e *Executor) JobGetStuck(ctx context.Context, params *riverdriver.JobGetStuckParams) ([]*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + jobs, err := e.queries.JobGetStuck(ctx, e.dbtx, &dbsqlc.JobGetStuckParams{Max: int32(params.Max), StuckHorizon: params.StuckHorizon}) + if err != nil { + return nil, interpretError(err) + } + return mapSliceError(jobs, jobRowFromInternal) } func (e *Executor) JobInsertFast(ctx context.Context, params *riverdriver.JobInsertFastParams) 
(*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented + job, err := e.queries.JobInsertFast(ctx, e.dbtx, &dbsqlc.JobInsertFastParams{ + Args: string(params.EncodedArgs), + Kind: params.Kind, + MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), + Metadata: valOrDefault(string(params.Metadata), "{}"), + Priority: int16(min(params.Priority, math.MaxInt16)), + Queue: params.Queue, + ScheduledAt: params.ScheduledAt, + State: dbsqlc.RiverJobState(params.State), + Tags: params.Tags, + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job) } func (e *Executor) JobInsertFastMany(ctx context.Context, params []*riverdriver.JobInsertFastParams) (int, error) { - return 0, riverdriver.ErrNotImplemented + insertJobsParams := &dbsqlc.JobInsertFastManyParams{ + Args: make([]string, len(params)), + Kind: make([]string, len(params)), + MaxAttempts: make([]int16, len(params)), + Metadata: make([]string, len(params)), + Priority: make([]int16, len(params)), + Queue: make([]string, len(params)), + ScheduledAt: make([]time.Time, len(params)), + State: make([]dbsqlc.RiverJobState, len(params)), + Tags: make([]string, len(params)), + } + + for i := 0; i < len(params); i++ { + params := params[i] + + var scheduledAt time.Time + if params.ScheduledAt != nil { + scheduledAt = *params.ScheduledAt + } + + tags := params.Tags + if tags == nil { + tags = []string{} + } + + insertJobsParams.Args[i] = valOrDefault(string(params.EncodedArgs), "{}") + insertJobsParams.Kind[i] = params.Kind + insertJobsParams.MaxAttempts[i] = int16(min(params.MaxAttempts, math.MaxInt16)) + insertJobsParams.Metadata[i] = valOrDefault(string(params.Metadata), "{}") + insertJobsParams.Priority[i] = int16(min(params.Priority, math.MaxInt16)) + insertJobsParams.Queue[i] = params.Queue + insertJobsParams.ScheduledAt[i] = scheduledAt + insertJobsParams.State[i] = dbsqlc.RiverJobState(params.State) + insertJobsParams.Tags[i] = strings.Join(tags, ",") + } + + 
numInserted, err := e.queries.JobInsertFastMany(ctx, e.dbtx, insertJobsParams) + if err != nil { + return 0, interpretError(err) + } + + return int(numInserted), nil } func (e *Executor) JobInsertFull(ctx context.Context, params *riverdriver.JobInsertFullParams) (*rivertype.JobRow, error) { - return nil, riverdriver.ErrNotImplemented -} + job, err := e.queries.JobInsertFull(ctx, e.dbtx, &dbsqlc.JobInsertFullParams{ + Attempt: int16(params.Attempt), + AttemptedAt: params.AttemptedAt, + Args: string(params.EncodedArgs), + CreatedAt: params.CreatedAt, + Errors: mapSlice(params.Errors, func(e []byte) string { return string(e) }), + FinalizedAt: params.FinalizedAt, + Kind: params.Kind, + MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), + Metadata: valOrDefault(string(params.Metadata), "{}"), + Priority: int16(min(params.Priority, math.MaxInt16)), + Queue: params.Queue, + ScheduledAt: params.ScheduledAt, + State: dbsqlc.RiverJobState(params.State), + Tags: params.Tags, + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job) +} + +func (e *Executor) JobList(ctx context.Context, query string, namedArgs map[string]any) ([]*rivertype.JobRow, error) { + // `database/sql` has an `sql.Named` system that should theoretically work + // for named parameters, but neither Pgx or lib/pq implement it, so just use + // dumb string replacement given we're only injecting a very basic value + // anyway. + for name, value := range namedArgs { + newQuery := strings.Replace(query, "@"+name, fmt.Sprintf("%v", value), 1) + if newQuery == query { + return nil, fmt.Errorf("named query parameter @%s not found in query", name) + } + query = newQuery + }
😵‍💫
river
github_2023
others
423
riverqueue
bgentry
@@ -11,6 +11,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `Config.TestOnly` has been added. It disables various features in the River client like staggered maintenance service start that are useful in production, but may be somewhat harmful in tests because they make start/stop slower. [PR #414](https://github.com/riverqueue/river/pull/414). +### Changed + +⚠️ Version 0.8.0 has a small breaking change in `ErrorHandler`. As before, we try never to make breaking changes, but this one was deemed quite important because `ErrorHandler` was fundamentally lacking important functionality.
Wrong version number?
river
github_2023
go
423
riverqueue
bgentry
@@ -234,7 +234,7 @@ func (e *jobExecutor) invokeErrorHandler(ctx context.Context, res *jobExecutorRe case res.PanicVal != nil: errorHandlerRes = invokeAndHandlePanic("HandlePanic", func() *ErrorHandlerResult { - return e.ErrorHandler.HandlePanic(ctx, e.JobRow, res.PanicVal) + return e.ErrorHandler.HandlePanic(ctx, e.JobRow, res.PanicVal, string(res.PanicTrace))
Should we convert this to string once when saving it to the result so that it doesn’t have to be cast here and by pgx when writing to the db?
river
github_2023
go
399
riverqueue
bgentry
@@ -616,6 +618,36 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client return client, nil } +func (c *Client[TTx]) AddQueue(queueName string, queueConfig QueueConfig) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + c.producersByQueueName[queueName] = newProducer(&c.baseService.Archetype, c.driver.GetExecutor(), &producerConfig{ + ClientID: c.config.ID, + Completer: c.completer, + ErrorHandler: c.config.ErrorHandler, + FetchCooldown: c.config.FetchCooldown, + FetchPollInterval: c.config.FetchPollInterval, + JobTimeout: c.config.JobTimeout, + MaxWorkers: queueConfig.MaxWorkers, + Notifier: c.notifier, + Queue: queueName, + RetryPolicy: c.config.RetryPolicy, + SchedulerInterval: c.config.schedulerInterval, + StatusFunc: c.monitor.SetProducerStatus, + Workers: c.config.Workers, + }) + c.monitor.InitializeProducerStatus(queueName) +}
This method is going to need to do some validation on the `QueueConfig` as [we currently do when initializing the client](https://github.com/riverqueue/river/blob/220a636820f37b9a7b16309d2863ef94124dc3db/client.go#L260-L267), and it will also need to return an `error` if validation fails. We should probably extract that to an internal `func (c QueueConfig) validate() error` similar to what we have for the top level `Config` so that we can use the same logic in each place. And of course we'll need to make sure this new surface area (add/remove) is fully covered by tests in this regard.
river
github_2023
go
399
riverqueue
bgentry
@@ -616,6 +618,36 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client return client, nil } +func (c *Client[TTx]) AddQueue(queueName string, queueConfig QueueConfig) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + c.producersByQueueName[queueName] = newProducer(&c.baseService.Archetype, c.driver.GetExecutor(), &producerConfig{ + ClientID: c.config.ID, + Completer: c.completer, + ErrorHandler: c.config.ErrorHandler, + FetchCooldown: c.config.FetchCooldown, + FetchPollInterval: c.config.FetchPollInterval, + JobTimeout: c.config.JobTimeout, + MaxWorkers: queueConfig.MaxWorkers, + Notifier: c.notifier, + Queue: queueName, + RetryPolicy: c.config.RetryPolicy, + SchedulerInterval: c.config.schedulerInterval, + StatusFunc: c.monitor.SetProducerStatus, + Workers: c.config.Workers, + }) + c.monitor.InitializeProducerStatus(queueName) +} + +func (c *Client[TTx]) RemoveQueue(queueName string) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + delete(c.producersByQueueName, queueName)
Should this one also return an `error`? We could reuse `rivertype.ErrNotFound` in case the queue does not exist.
river
github_2023
go
399
riverqueue
bgentry
@@ -616,6 +618,36 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client return client, nil } +func (c *Client[TTx]) AddQueue(queueName string, queueConfig QueueConfig) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + c.producersByQueueName[queueName] = newProducer(&c.baseService.Archetype, c.driver.GetExecutor(), &producerConfig{ + ClientID: c.config.ID, + Completer: c.completer, + ErrorHandler: c.config.ErrorHandler, + FetchCooldown: c.config.FetchCooldown, + FetchPollInterval: c.config.FetchPollInterval, + JobTimeout: c.config.JobTimeout, + MaxWorkers: queueConfig.MaxWorkers, + Notifier: c.notifier, + Queue: queueName, + RetryPolicy: c.config.RetryPolicy, + SchedulerInterval: c.config.schedulerInterval, + StatusFunc: c.monitor.SetProducerStatus, + Workers: c.config.Workers, + }) + c.monitor.InitializeProducerStatus(queueName) +} + +func (c *Client[TTx]) RemoveQueue(queueName string) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + delete(c.producersByQueueName, queueName) + + // Remove queue from currentSnapshot.Producers + c.monitor.RemoveProducerStatus(queueName)
Safe removal is a bit trickier than this. We need to actually shut down the producer somehow, and then the producer itself should be responsible for reporting its status to the monitor until it is shut down. At that point, yeah, we need to somehow remove it from the monitor's map so we don't keep old state around in that map forever. Maybe `SetProducerStatus` should just be smart enough to do that last part automatically when the producer is stopped? @brandur thoughts on how to safely trigger the single producer to shut down? And is this API suitable for an operation which may take awhile (until all running jobs go away)? Keeping in mind there is no obvious way to cancel the context just for a single producer, and we may also need to differentiate between `Stop` and `StopAndCancel` for each individual producer—otherwise how will the producer know when it needs to cancel the context and try to more aggressively shut down running jobs? In either case we still need to somehow wait for clean shutdown to complete, which may actually never happen before the program exits if the running jobs never exit.
river
github_2023
go
399
riverqueue
bgentry
@@ -67,6 +67,13 @@ func (m *clientMonitor) SetProducerStatus(queueName string, status componentstat m.bufferStatusUpdate() } +func (m *clientMonitor) RemoveProducerStatus(queueName string) { + m.statusSnapshotMu.Lock() + defer m.statusSnapshotMu.Unlock() + delete(m.currentSnapshot.Producers, queueName) + m.bufferStatusUpdate() +}
I think we can probably merge this into `SetProducer` and decide whether or not to remove based upon the status.
river
github_2023
go
399
riverqueue
brandur
@@ -616,6 +618,36 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client return client, nil } +func (c *Client[TTx]) AddQueue(queueName string, queueConfig QueueConfig) { + c.producersByQueueNameMu.Lock() + defer c.producersByQueueNameMu.Unlock() + c.producersByQueueName[queueName] = newProducer(&c.baseService.Archetype, c.driver.GetExecutor(), &producerConfig{ + ClientID: c.config.ID, + Completer: c.completer, + ErrorHandler: c.config.ErrorHandler, + FetchCooldown: c.config.FetchCooldown, + FetchPollInterval: c.config.FetchPollInterval, + JobTimeout: c.config.JobTimeout, + MaxWorkers: queueConfig.MaxWorkers, + Notifier: c.notifier, + Queue: queueName, + RetryPolicy: c.config.RetryPolicy, + SchedulerInterval: c.config.schedulerInterval, + StatusFunc: c.monitor.SetProducerStatus, + Workers: c.config.Workers, + }) + c.monitor.InitializeProducerStatus(queueName)
Just given this is a lot of duplicative code, let's move this to a new `addProducer(queueName string, queueConfig QueueConfig)` internal helper that can share it with `Start`.
river
github_2023
go
415
riverqueue
bgentry
@@ -718,23 +718,36 @@ func (c *Client[TTx]) Start(ctx context.Context) error { producer := producer if err := producer.StartWorkContext(fetchCtx, workCtx); err != nil { - stopProducers() + startstop.StopAllParallel(producersAsServices()) stopServicesOnError() return err } } return nil }(); err != nil { - defer close(stopped) + defer stopped() if errors.Is(context.Cause(fetchCtx), startstop.ErrStop) { return rivercommon.ErrShutdown } return err } go func() { - defer close(stopped) + // Wait for all subservices to start up before signaling our own start. + // This isn't strictly needed, but gives tests a way to fully confirm + // that all goroutines for subservices are spun up before continuing. + // + // Stops also cancel the "started" channel, so in case of a context
At first I thought there was a typo, but maybe it's just worded awkwardly. This feels a bit clearer imo, up to you: ```suggestion // Stop also cancels the "started" channel, so in case of a context ```
river
github_2023
go
415
riverqueue
bgentry
@@ -97,19 +103,37 @@ type BaseStartStop struct { // } // // ... -func (s *BaseStartStop) StartInit(ctx context.Context) (context.Context, bool, chan struct{}) { +func (s *BaseStartStop) StartInit(ctx context.Context) (context.Context, bool, func(), func()) {
This is where I start disliking the lint rule discouraging named return values. I mainly just want to see a label on things when there are this many, especially when there are consecutive values of the same type.
river
github_2023
others
408
riverqueue
bgentry
@@ -48,16 +48,14 @@ LIMIT @limit_count::integer; -- name: QueuePause :execresult WITH queue_to_update AS ( - SELECT name + SELECT name, paused_at FROM river_queue WHERE CASE WHEN @name::text = '*' THEN true ELSE river_queue.name = @name::text END - AND paused_at IS NULL FOR UPDATE ) - UPDATE river_queue SET - paused_at = now(), + paused_at = coalesce(queue_to_update.paused_at, now()), updated_at = now() FROM queue_to_update WHERE river_queue.name = queue_to_update.name;
To avoid editing a row for no reason, we could take an approach we've done in some other queries: do the update in a CTE, and then for the final return do a union w/ the updated row and original row to return only one of them. Not a high throughput query so it doesn't matter much, but it would be a nice tweak to stay consistent in our approach.
river
github_2023
go
403
riverqueue
brandur
@@ -375,14 +375,14 @@ func (e *Elector) attemptResignLoop(ctx context.Context) { // This does not inherit the parent context because we want to give up leadership // even during a shutdown. There is no way to short-circuit this.
Maybe tweak this comment a little?
river
github_2023
go
395
riverqueue
bgentry
@@ -1112,7 +1119,7 @@ func (c *Client[TTx]) ID() string { return c.config.ID } -func insertParamsFromConfigArgsAndOptions(config *Config, args JobArgs, insertOpts *InsertOpts) (*riverdriver.JobInsertFastParams, *dbunique.UniqueOpts, error) { +func insertParamsFromConfigArgsAndOptions(archetype *baseservice.Archetype, config *Config, args JobArgs, insertOpts *InsertOpts) (*riverdriver.JobInsertFastParams, *dbunique.UniqueOpts, error) {
Now that we're passing in a couple of fields from `Client` to this (neither of which was needed 1mo ago), it might be cleaner and less confusing to make it a method on `Client` instead. Borderline 🤷‍♂️
river
github_2023
go
390
riverqueue
brandur
@@ -99,6 +99,20 @@ func (e *Executor) JobCountByState(ctx context.Context, state rivertype.JobState return int(numJobs), nil } +func (e *Executor) JobDelete(ctx context.Context, id int64) (bool, error) { + job, err := e.queries.JobDelete(ctx, e.dbtx, id) + if err != nil { + return false, interpretError(err) + } + if job == nil { + return false, rivertype.ErrNotFound
Doesn't sqlc always return a not found error for a `:one` query rather than return a `nil`? What do you think about adding a driver test case that verifies that a `JobDelete` on a non-existent row just to make sure the right thing happens here?
river
github_2023
go
390
riverqueue
brandur
@@ -1047,6 +1047,22 @@ func (c *Client[TTx]) jobCancel(ctx context.Context, exec riverdriver.Executor, }) } +// JobDelete deletes the job with the given ID from the database, returning +// whether or not the job was deleted along with a possible error. Jobs in the +// running state are not deleted. +func (c *Client[TTx]) JobDelete(ctx context.Context, id int64) (bool, error) {
Thoughts on returning the deleted job row instead of a bool? Feels like it gives you gives you more information and not a lot of downside.
river
github_2023
go
390
riverqueue
brandur
@@ -13,6 +13,10 @@ import ( // return this error. var ErrNotFound = errors.New("not found") +// ErrJobRunning is returned when a job is attempted to be deleted while it's +// running. +var ErrJobRunning = errors.New("job is running")
What do you think about a slightly more descriptive error message here in case this error bubbles up to an end user somewhere? Something like: ```suggestion var ErrJobRunning = errors.New("running jobs cannot be deleted") ```
river
github_2023
go
379
riverqueue
brandur
@@ -0,0 +1,271 @@ +package river + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/jobcompleter" + "github.com/riverqueue/river/internal/jobstats" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/rivertype" +) + +type subscriptionManager struct { + baseservice.BaseService + + subscribeCh <-chan []jobcompleter.CompleterJobUpdated + wg sync.WaitGroup + + statsMu sync.Mutex // protects stats fields + statsAggregate jobstats.JobStatistics + statsNumJobs int + + mu sync.Mutex // protects subscription fields + subscriptions map[int]*eventSubscription + subscriptionsSeq int // used for generating simple IDs +} + +func newSubscriptionManager(archetype *baseservice.Archetype, subscribeCh <-chan []jobcompleter.CompleterJobUpdated) *subscriptionManager { + return baseservice.Init(archetype, &subscriptionManager{ + subscribeCh: subscribeCh, + subscriptions: make(map[int]*eventSubscription), + }) +} + +// ResetSubscribeChan is used to change the channel that the subscription +// manager listens on. It must only be called when the subscription manager is +// stopped. +func (sm *subscriptionManager) ResetSubscribeChan(subscribeCh <-chan []jobcompleter.CompleterJobUpdated) { + sm.subscribeCh = subscribeCh +} + +func (sm *subscriptionManager) Start(ctx context.Context) error { + sm.wg.Add(1) + go func() { + defer sm.wg.Done() + + for updates := range sm.subscribeCh { + sm.distributeJobEvents(updates) + } + }() + + return nil +} + +func (sm *subscriptionManager) Stop() { + sm.wg.Wait() + + // Remove all subscriptions and close corresponding channels. 
+ func() { + sm.mu.Lock() + defer sm.mu.Unlock() + + for subID, sub := range sm.subscriptions { + close(sub.Chan) + delete(sm.subscriptions, subID) + } + }() +} + +func (sm *subscriptionManager) logStats(ctx context.Context, svcName string) { + // TODO: don't use mutex + struct internals externally like this
I'm having a hard time understanding this TODO — can we fix it?
river
github_2023
go
379
riverqueue
brandur
@@ -0,0 +1,271 @@ +package river + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/jobcompleter" + "github.com/riverqueue/river/internal/jobstats" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/rivertype" +) + +type subscriptionManager struct { + baseservice.BaseService + + subscribeCh <-chan []jobcompleter.CompleterJobUpdated + wg sync.WaitGroup + + statsMu sync.Mutex // protects stats fields + statsAggregate jobstats.JobStatistics + statsNumJobs int + + mu sync.Mutex // protects subscription fields + subscriptions map[int]*eventSubscription + subscriptionsSeq int // used for generating simple IDs +} + +func newSubscriptionManager(archetype *baseservice.Archetype, subscribeCh <-chan []jobcompleter.CompleterJobUpdated) *subscriptionManager { + return baseservice.Init(archetype, &subscriptionManager{ + subscribeCh: subscribeCh, + subscriptions: make(map[int]*eventSubscription), + }) +} + +// ResetSubscribeChan is used to change the channel that the subscription +// manager listens on. It must only be called when the subscription manager is +// stopped. +func (sm *subscriptionManager) ResetSubscribeChan(subscribeCh <-chan []jobcompleter.CompleterJobUpdated) { + sm.subscribeCh = subscribeCh +} + +func (sm *subscriptionManager) Start(ctx context.Context) error { + sm.wg.Add(1) + go func() { + defer sm.wg.Done() + + for updates := range sm.subscribeCh { + sm.distributeJobEvents(updates) + } + }() + + return nil +} + +func (sm *subscriptionManager) Stop() { + sm.wg.Wait() + + // Remove all subscriptions and close corresponding channels. 
+ func() { + sm.mu.Lock() + defer sm.mu.Unlock() + + for subID, sub := range sm.subscriptions { + close(sub.Chan) + delete(sm.subscriptions, subID) + } + }() +} + +func (sm *subscriptionManager) logStats(ctx context.Context, svcName string) { + // TODO: don't use mutex + struct internals externally like this + sm.statsMu.Lock() + defer sm.statsMu.Unlock() + + sm.Logger.InfoContext(ctx, svcName+": Job stats (since last stats line)", + "num_jobs_run", sm.statsNumJobs, + "average_complete_duration", sm.safeDurationAverage(sm.statsAggregate.CompleteDuration, sm.statsNumJobs), + "average_queue_wait_duration", sm.safeDurationAverage(sm.statsAggregate.QueueWaitDuration, sm.statsNumJobs), + "average_run_duration", sm.safeDurationAverage(sm.statsAggregate.RunDuration, sm.statsNumJobs)) + + sm.statsAggregate = jobstats.JobStatistics{} + sm.statsNumJobs = 0 +} + +// Handles a potential divide by zero. +func (sm *subscriptionManager) safeDurationAverage(d time.Duration, n int) time.Duration { + if n == 0 { + return 0 + } + return d / time.Duration(n) +} + +// Receives updates from the completer and prompts the client to update +// statistics and distribute jobs into any listening subscriber channels. +// (Subscriber channels are non-blocking so this should be quite fast.) +func (sm *subscriptionManager) distributeJobEvents(updates []jobcompleter.CompleterJobUpdated) { + func() { + sm.statsMu.Lock() + defer sm.statsMu.Unlock() + + for _, update := range updates { + stats := update.JobStats + sm.statsAggregate.CompleteDuration += stats.CompleteDuration + sm.statsAggregate.QueueWaitDuration += stats.QueueWaitDuration + sm.statsAggregate.RunDuration += stats.RunDuration + sm.statsNumJobs++ + } + }() + + sm.mu.Lock() + defer sm.mu.Unlock() + + // Quick path so we don't need to allocate anything if no one is listening. 
+ if len(sm.subscriptions) < 1 { + return + } + + for _, update := range updates { + sm._distributeJobEvent(update.Job, jobStatisticsFromInternal(update.JobStats)) + } +} + +// Distribute a single event into any listening subscriber channels. +// +// Job events should specify the job and stats, while queue events should only specify +// the queue. +// +// MUST be called with sm.mu already held. +func (sm *subscriptionManager) _distributeJobEvent(job *rivertype.JobRow, stats *JobStatistics) {
Hmm, can we drop this underscore convention? I don't think it's really a thing. Out of 22.5k struct functions in the Go codebase, I did find three instances of one, but they're all kind of weird special cases: ``` $ ag --no-break --nofilename 'func \([^)]+\) [A-Za-z]' | wc -l 22438 $ ag --no-break --nofilename 'func \([^)]+\) _[A-Za-z]' | wc -l 3 ``` ``` src/go/types/scope.go 119:func (s *Scope) _InsertLazy(name string, resolve func() Object) bool { src/runtime/symtab.go 751:func (f funcInfo) _Func() *Func { src/vendor/golang.org/x/text/unicode/norm/input.go 32:func (in *input) _byte(p int) byte { ``` How about renaming: * `distributeJobEvents` -> `distributeJobUpdates` * `_distributeJobEvent` -> `distributeJobEvent` Makes them a little more distinct. If the concern is other places calling private members of this class, the underscore doesn't stop it, and the right fix is to try and get this into its own package.
river
github_2023
go
379
riverqueue
brandur
@@ -0,0 +1,271 @@ +package river + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/jobcompleter" + "github.com/riverqueue/river/internal/jobstats" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/rivertype" +) + +type subscriptionManager struct { + baseservice.BaseService + + subscribeCh <-chan []jobcompleter.CompleterJobUpdated + wg sync.WaitGroup + + statsMu sync.Mutex // protects stats fields + statsAggregate jobstats.JobStatistics + statsNumJobs int + + mu sync.Mutex // protects subscription fields + subscriptions map[int]*eventSubscription + subscriptionsSeq int // used for generating simple IDs +} + +func newSubscriptionManager(archetype *baseservice.Archetype, subscribeCh <-chan []jobcompleter.CompleterJobUpdated) *subscriptionManager { + return baseservice.Init(archetype, &subscriptionManager{ + subscribeCh: subscribeCh, + subscriptions: make(map[int]*eventSubscription), + }) +} + +// ResetSubscribeChan is used to change the channel that the subscription +// manager listens on. It must only be called when the subscription manager is +// stopped. +func (sm *subscriptionManager) ResetSubscribeChan(subscribeCh <-chan []jobcompleter.CompleterJobUpdated) { + sm.subscribeCh = subscribeCh +} + +func (sm *subscriptionManager) Start(ctx context.Context) error { + sm.wg.Add(1) + go func() { + defer sm.wg.Done() + + for updates := range sm.subscribeCh { + sm.distributeJobEvents(updates) + } + }() + + return nil +} + +func (sm *subscriptionManager) Stop() { + sm.wg.Wait() + + // Remove all subscriptions and close corresponding channels. 
+ func() { + sm.mu.Lock() + defer sm.mu.Unlock() + + for subID, sub := range sm.subscriptions { + close(sub.Chan) + delete(sm.subscriptions, subID) + } + }() +} + +func (sm *subscriptionManager) logStats(ctx context.Context, svcName string) { + // TODO: don't use mutex + struct internals externally like this + sm.statsMu.Lock() + defer sm.statsMu.Unlock() + + sm.Logger.InfoContext(ctx, svcName+": Job stats (since last stats line)", + "num_jobs_run", sm.statsNumJobs, + "average_complete_duration", sm.safeDurationAverage(sm.statsAggregate.CompleteDuration, sm.statsNumJobs), + "average_queue_wait_duration", sm.safeDurationAverage(sm.statsAggregate.QueueWaitDuration, sm.statsNumJobs), + "average_run_duration", sm.safeDurationAverage(sm.statsAggregate.RunDuration, sm.statsNumJobs)) + + sm.statsAggregate = jobstats.JobStatistics{} + sm.statsNumJobs = 0 +} + +// Handles a potential divide by zero. +func (sm *subscriptionManager) safeDurationAverage(d time.Duration, n int) time.Duration { + if n == 0 { + return 0 + } + return d / time.Duration(n) +} + +// Receives updates from the completer and prompts the client to update +// statistics and distribute jobs into any listening subscriber channels. +// (Subscriber channels are non-blocking so this should be quite fast.) +func (sm *subscriptionManager) distributeJobEvents(updates []jobcompleter.CompleterJobUpdated) { + func() { + sm.statsMu.Lock() + defer sm.statsMu.Unlock() + + for _, update := range updates { + stats := update.JobStats + sm.statsAggregate.CompleteDuration += stats.CompleteDuration + sm.statsAggregate.QueueWaitDuration += stats.QueueWaitDuration + sm.statsAggregate.RunDuration += stats.RunDuration + sm.statsNumJobs++ + } + }() + + sm.mu.Lock() + defer sm.mu.Unlock() + + // Quick path so we don't need to allocate anything if no one is listening. 
+ if len(sm.subscriptions) < 1 { + return + } + + for _, update := range updates { + sm._distributeJobEvent(update.Job, jobStatisticsFromInternal(update.JobStats)) + } +} + +// Distribute a single event into any listening subscriber channels. +// +// Job events should specify the job and stats, while queue events should only specify +// the queue. +// +// MUST be called with sm.mu already held. +func (sm *subscriptionManager) _distributeJobEvent(job *rivertype.JobRow, stats *JobStatistics) { + var event *Event + switch job.State { + case rivertype.JobStateCancelled: + event = &Event{Kind: EventKindJobCancelled, Job: job, JobStats: stats} + case rivertype.JobStateCompleted: + event = &Event{Kind: EventKindJobCompleted, Job: job, JobStats: stats} + case rivertype.JobStateScheduled: + event = &Event{Kind: EventKindJobSnoozed, Job: job, JobStats: stats} + case rivertype.JobStateAvailable, rivertype.JobStateDiscarded, rivertype.JobStateRetryable, rivertype.JobStateRunning: + event = &Event{Kind: EventKindJobFailed, Job: job, JobStats: stats} + case rivertype.JobStatePending: + panic("completion subscriber unexpectedly received job in pending state, river bug") + default: + // linter exhaustive rule prevents this from being reached + panic("unreachable state to distribute, river bug") + } + + // All subscription channels are non-blocking so this is always fast and + // there's no risk of falling behind what producers are sending. + for _, sub := range sm.subscriptions { + if sub.ListensFor(event.Kind) { + // TODO: THIS IS UNSAFE AND WILL LEAD TO DROPPED EVENTS. + // + // We are allocating subscriber channels with a fixed size of 1000, but + // potentially processing job events in batches of 5000 (batch completer + // max batch size). It's probably not possible for the subscriber to keep + // up with these bursts. 
+ select { + case sub.Chan <- event: + default: + } + } + } +} + +func (sm *subscriptionManager) distributeQueueEvent(event *Event) { + sm.mu.Lock() + defer sm.mu.Unlock() + + // All subscription channels are non-blocking so this is always fast and + // there's no risk of falling behind what producers are sending. + for _, sub := range sm.subscriptions { + if sub.ListensFor(event.Kind) { + select { + case sub.Chan <- event: + default: + } + } + } +} + +// Special internal variant that lets us inject an overridden size. +func (sm *subscriptionManager) SubscribeConfig(config *SubscribeConfig) (<-chan *Event, func()) { + if config.ChanSize < 0 { + panic("SubscribeConfig.ChanSize must be greater or equal to 1") + } + if config.ChanSize == 0 { + config.ChanSize = subscribeChanSizeDefault + } + + for _, kind := range config.Kinds { + if _, ok := allKinds[kind]; !ok { + panic(fmt.Errorf("unknown event kind: %s", kind)) + } + } + + subChan := make(chan *Event, config.ChanSize) + + sm.mu.Lock() + defer sm.mu.Unlock() + + // Just gives us an easy way of removing the subscription again later. + subID := sm.subscriptionsSeq + sm.subscriptionsSeq++ + + sm.subscriptions[subID] = &eventSubscription{ + Chan: subChan, + Kinds: sliceutil.KeyBy(config.Kinds, func(k EventKind) (EventKind, struct{}) { return k, struct{}{} }), + } + + cancel := func() { + sm.mu.Lock() + defer sm.mu.Unlock() + + // May no longer be present in case this was called after a stop. + sub, ok := sm.subscriptions[subID] + if !ok { + return + } + + close(sub.Chan) + + delete(sm.subscriptions, subID) + } + + return subChan, cancel +} + +// func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int, subscribeCh SubscribeChan) *AsyncCompleter {
Lots of dead code here. Maybe copy/paste error?
river
github_2023
go
379
riverqueue
brandur
@@ -0,0 +1,271 @@ +package river + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/jobcompleter" + "github.com/riverqueue/river/internal/jobstats" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/rivertype" +) + +type subscriptionManager struct { + baseservice.BaseService + + subscribeCh <-chan []jobcompleter.CompleterJobUpdated + wg sync.WaitGroup + + statsMu sync.Mutex // protects stats fields + statsAggregate jobstats.JobStatistics + statsNumJobs int + + mu sync.Mutex // protects subscription fields + subscriptions map[int]*eventSubscription + subscriptionsSeq int // used for generating simple IDs +} + +func newSubscriptionManager(archetype *baseservice.Archetype, subscribeCh <-chan []jobcompleter.CompleterJobUpdated) *subscriptionManager { + return baseservice.Init(archetype, &subscriptionManager{ + subscribeCh: subscribeCh, + subscriptions: make(map[int]*eventSubscription), + }) +} + +// ResetSubscribeChan is used to change the channel that the subscription +// manager listens on. It must only be called when the subscription manager is +// stopped. +func (sm *subscriptionManager) ResetSubscribeChan(subscribeCh <-chan []jobcompleter.CompleterJobUpdated) { + sm.subscribeCh = subscribeCh +} + +func (sm *subscriptionManager) Start(ctx context.Context) error { + sm.wg.Add(1)
Can we make this use the normal stop convention instead with `StartInit`? Protects against double starts, but also gives the client a way to check whether all its services are started.
river
github_2023
go
379
riverqueue
brandur
@@ -0,0 +1,126 @@ +package river + +import ( + "context" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/riverqueue/river/internal/jobcompleter" + "github.com/riverqueue/river/internal/jobstats" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/riverinternaltest/testfactory" + "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" + "github.com/stretchr/testify/require" +) + +func Test_SubscriptionManager(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testBundle struct { + exec riverdriver.Executor + subscribeCh chan []jobcompleter.CompleterJobUpdated + tx pgx.Tx + } + + setup := func(t *testing.T) (*subscriptionManager, *testBundle) { + t.Helper() + + tx := riverinternaltest.TestTx(ctx, t) + exec := riverpgxv5.New(nil).UnwrapExecutor(tx) + + subscribeCh := make(chan []jobcompleter.CompleterJobUpdated, 1) + manager := newSubscriptionManager(riverinternaltest.BaseServiceArchetype(t), subscribeCh) + + require.NoError(t, manager.Start(ctx)) + t.Cleanup(manager.Stop) + + return manager, &testBundle{ + exec: exec, + subscribeCh: subscribeCh, + tx: tx, + } + } + + t.Run("DistributesRequestedEventsToSubscribers", func(t *testing.T) { + t.Parallel() + + manager, bundle := setup(t) + t.Cleanup(func() { close(bundle.subscribeCh) }) + + sub, cancelSub := manager.SubscribeConfig(&SubscribeConfig{ChanSize: 10, Kinds: []EventKind{EventKindJobCompleted, EventKindJobSnoozed}}) + t.Cleanup(cancelSub) + + // Send some events + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(time.Now())}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(time.Now())}) + job3 := 
testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRetryable)}) + job4 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateScheduled)}) + + makeStats := func(complete, wait, run time.Duration) *jobstats.JobStatistics { + return &jobstats.JobStatistics{ + CompleteDuration: complete, + QueueWaitDuration: wait, + RunDuration: run, + } + } + + bundle.subscribeCh <- []jobcompleter.CompleterJobUpdated{ + {Job: job1, JobStats: makeStats(101, 102, 103)}, // completed, should be sent + {Job: job2, JobStats: makeStats(201, 202, 203)}, // cancelled, should be skipped + } + bundle.subscribeCh <- []jobcompleter.CompleterJobUpdated{ + {Job: job3, JobStats: makeStats(301, 302, 303)}, // retryable, should be skipped + {Job: job4, JobStats: makeStats(401, 402, 403)}, // snoozed/scheduled, should be sent + } + + received := riverinternaltest.WaitOrTimeoutN(t, sub, 2) + require.Equal(t, job1.ID, received[0].Job.ID) + require.Equal(t, rivertype.JobStateCompleted, received[0].Job.State) + require.Equal(t, time.Duration(101), received[0].JobStats.CompleteDuration) + require.Equal(t, time.Duration(102), received[0].JobStats.QueueWaitDuration) + require.Equal(t, time.Duration(103), received[0].JobStats.RunDuration) + require.Equal(t, job4.ID, received[1].Job.ID) + require.Equal(t, rivertype.JobStateScheduled, received[1].Job.State) + require.Equal(t, time.Duration(401), received[1].JobStats.CompleteDuration) + require.Equal(t, time.Duration(402), received[1].JobStats.QueueWaitDuration) + require.Equal(t, time.Duration(403), received[1].JobStats.RunDuration) + + cancelSub() + select { + case value, stillOpen := <-sub: + require.False(t, stillOpen, "subscription channel should be closed") + require.Nil(t, value, "subscription channel should be closed") + default: + require.Fail(t, "subscription channel should have been closed") + } + }) + + t.Run("StartStopRepeatedly", func(t *testing.T) { + // This 
service does not use the typical `startstoptest.Stress()` test + // because there are some additional steps required after a `Stop` for the + // subsequent `Start` to succeed. It's also not friendly for multiple + // concurrent calls to `Start` and `Stop`, but this is fine because the only + // usage within `Client` is already protected by a mutex. + t.Parallel() + + manager, bundle := setup(t) + + subscribeCh := bundle.subscribeCh + for i := 0; i < 100; i++ { + go func() { close(subscribeCh) }()
This is racy because the goroutine can run after `subscribeCh` is reset below, which will result in a panic on double-channel close.
river
github_2023
go
376
riverqueue
brandur
@@ -644,39 +644,83 @@ func Test_Client(t *testing.T) { t.Run("StopAndCancel", func(t *testing.T) { t.Parallel() - client, _ := setup(t) - jobStartedChan := make(chan int64) - jobDoneChan := make(chan struct{}) - - type JobArgs struct { - JobArgsReflectKind[JobArgs] + type testBundle struct { + jobDoneChan chan struct{} + jobStartedChan chan int64 } - AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { - jobStartedChan <- job.ID - <-ctx.Done() - require.ErrorIs(t, context.Cause(ctx), rivercommon.ErrShutdown) - close(jobDoneChan) - return nil - })) + setupStopAndCancel := func(t *testing.T) (*Client[pgx.Tx], *testBundle) {
If a test case ends up needing its own setup function, what do you think about denesting it to the stop instead of wiring it in deeper and deeper? It might seem minor, but feels cleaner, requires a lot less indentation, and makes the test case name easier to address because there's fewer segments to find.
river
github_2023
go
376
riverqueue
brandur
@@ -644,39 +644,83 @@ func Test_Client(t *testing.T) { t.Run("StopAndCancel", func(t *testing.T) { t.Parallel() - client, _ := setup(t) - jobStartedChan := make(chan int64) - jobDoneChan := make(chan struct{}) - - type JobArgs struct { - JobArgsReflectKind[JobArgs] + type testBundle struct { + jobDoneChan chan struct{} + jobStartedChan chan int64 } - AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { - jobStartedChan <- job.ID - <-ctx.Done() - require.ErrorIs(t, context.Cause(ctx), rivercommon.ErrShutdown) - close(jobDoneChan) - return nil - })) + setupStopAndCancel := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { + t.Helper() - startClient(ctx, t, client) + client, _ := setup(t) + jobStartedChan := make(chan int64) + jobDoneChan := make(chan struct{}) - insertRes, err := client.Insert(ctx, &JobArgs{}, nil) - require.NoError(t, err) + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } - startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) - require.Equal(t, insertRes.Job.ID, startedJobID) + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + jobStartedChan <- job.ID + <-ctx.Done() + require.ErrorIs(t, context.Cause(ctx), rivercommon.ErrShutdown) + close(jobDoneChan) + return nil + })) - select { - case <-client.Stopped(): - t.Fatal("expected client to not be stopped yet") - default: + startClient(ctx, t, client) + + insertRes, err := client.Insert(ctx, &JobArgs{}, nil) + require.NoError(t, err) + + startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertRes.Job.ID, startedJobID) + + select { + case <-client.Stopped(): + t.Fatal("expected client to not be stopped yet") + default: + } + + return client, &testBundle{ + jobDoneChan: jobDoneChan, + jobStartedChan: jobStartedChan, + } } - require.NoError(t, client.StopAndCancel(ctx)) + t.Run("OnItsOwn", func(t *testing.T) { + t.Parallel() + + client, _ := 
setupStopAndCancel(t) + + require.NoError(t, client.StopAndCancel(ctx)) + riverinternaltest.WaitOrTimeout(t, client.Stopped()) + }) + + t.Run("AfterStop", func(t *testing.T) { + t.Parallel() + + client, bundle := setupStopAndCancel(t) + + go func() { + require.NoError(t, client.Stop(ctx)) + }() + + select { + case <-client.Stopped(): + t.Fatal("expected client to not be stopped yet") + case <-time.After(500 * time.Millisecond):
Instead of my usual broad repartee about sleep statements in tests, I'll try to quantify it a little more by showing how slow River's test suite is getting. Here's a test run of one of my largest packages at work, encompassing the entire API layer: ``` $ gotestsum ./server/api ✓ server/api (2.155s) DONE 1445 tests in 6.285s ``` Here's River's top level package tests running: ``` $ gotestsum . ✓ . (12.517s) === Skipped === SKIP: . Test_Client_Maintenance/Reindexer (0.00s) client_test.go:2532: Reindexer is disabled for further development DONE 448 tests, 1 skipped in 13.731s ``` It's a little subjective, but I'd consider the package being tested at the top significantly more complex than River. It hits the DB on ~every test, has about two orders of magnitude more database models than River (~100 models compared to 4), and 4x the LOCs just in package itself (it'd be >10x difference if you factored in all dependent packages as well): ``` $ find . -name '*.go' -maxdepth 1 | xargs wc -l 13180 total ``` ``` $ find ./server/api -name '*.go' -maxdepth 1 | xargs wc -l 57358 total ``` At first glance, it has 3x the number of test cases and runs twice as fast, but that's not quite right -- it's a lot more code so it's compile phase is much longer. Looking at only test run time, it actually has 3x the number of test cases and runs **6x faster** than River's test suite (2.155s vs 12.517s), and with ~zero intermittency problems. I'd have to do a little more legwork to prove it, but I think the main difference is the number of sleep statements that we've got everywhere. (With test DB manager the other major candidate.) With this sleep statement, the test case requires a minimum of 500 ms to run. And latency aside, it also opens the door to intermittency problems -- is 500 ms a good number for both CI and local? If so, what about 250 ms? If that's okay, how about 125 ms? We're kind of just picking an arbitrary number and hoping it's long enough. I find the "negative" sleeps (e.g. 
`WaitOrTimeout`) a little less objectionable because they should be ~instant unless there's a failure, but the "affirmative" sleeps that just bake in a fixed amount of time for every run should ideally be eradicated IMO.
river
github_2023
others
364
riverqueue
bgentry
@@ -12,7 +12,7 @@ UPDATE river_job SET metadata = '{}' WHERE metadata IS NULL; ALTER TABLE river_job ALTER COLUMN metadata SET NOT NULL; -- The 'pending' job state will be used for upcoming functionality: -ALTER TYPE river_job_state ADD VALUE 'pending' AFTER 'discarded'; +ALTER TYPE river_job_state ADD VALUE IF NOT EXISTS 'pending' AFTER 'discarded';
Normally we don’t want to use `IF NOT EXISTS` in these migrations because they indicate a bug or something has been manually manipulated. But, in this case I’m reminded that you can’t safely remove an enum value in Postgres once it’s been added, which [is why it was skipped in the down migration](https://github.com/riverqueue/river/blob/cdadb9c5ce943d2ee1628c30e9e2babebd89f8f6/rivermigrate/migration/004_pending_and_more.down.sql#L6). Maybe this is the best we can do in this case?
river
github_2023
go
237
riverqueue
bgentry
@@ -29,8 +29,17 @@ type testingT interface { Logf(format string, args ...any) } -// Options for RequireInserted or RequireManyInserted including expectations for -// various queuing properties that stem from InsertOpts. +// Options for RequireInserted functions including expectations for various +// queuing properties that stem from InsertOpts. +// +// When used with RequiredInserted or RequireInsertedMany, multiple properties +// set on this struct increase the specifity on a job to match, acting like an +// AND condition on each. So if multiple properties are set, a job must match +// all of them to be considered a successful match. +// +// When used with RequireNotInserted, multiple properties act like an OR instead +// of an AND. If an inserted job is found whose properties match any of the set +// opts properties, a test failure is triggered.
I don't think this behavior is what I would expect here. My intuition (based on how I've used queue assertions for other libraries/ecosystems) is that I'm allowed to be as specific as I desire here, and that the assertion will ensure that no jobs have been inserted which match all of the conditions I specified. If I want, I can easily make sure that no jobs have been inserted on a certain queue, or if I want to be more specific, I can make sure that no jobs have been inserted on that queue and with that job type. And in the affirmative case where I want to make sure a job _was_ inserted, it likewise matches _all_ of the criteria I specify, but ignores all those which I don't.
river
github_2023
go
237
riverqueue
bgentry
@@ -157,14 +166,117 @@ func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.Jo } if opts != nil { - if !compareJobToInsertOpts(t, jobRow, *opts, -1) { + if compareJobToInsertOpts(t, jobRow, opts, -1, false) == compareResCheckFailed { return nil, nil //nolint:nilnil } } return &river.Job[TArgs]{JobRow: jobRow, Args: actualArgs}, nil } +// RequireNotInserted is a test helper that verifies that a job of the given +// kind was not inserted for work, failing the test if one was. +// +// job := RequireNotInserted(ctx, t, riverpgxv5.New(dbPool), &Job1Args{}, nil) +// +// This variant takes a driver that wraps a database pool. See also +// RequireNotInsertedTx which takes a transaction. +// +// A RequireInsertedOpts struct can be provided as the last argument, and if it +// is, its properties (e.g. max attempts, priority, queue name) will act as +// requirements on a found row. If any fields are set, then the test will fail +// if a job is found that matches any of them (unlike options to RequireInserted +// which behave like an AND, these are an OR). So for example, if options +// specify `Priority: 3`, and a joke of the same kind was inserted, but it was +// `Priority: 2`, RequireNotInserted will not fail. If the inserted job was +// `Priority: 2` (therefore matching the job), RequireNotInserted does fail. +func RequireNotInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, tb testing.TB, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) {
See my above comment, I think the behavior on this might be backwards. But as far as the type thing, I would definitely avoid the aliasing option because it results in poorer usability. If we define these types right next to each other, it wouldn't be that hard to keep them lined up. Or alternatively if we can get away with just using the same type in both places, that's even better.
river
github_2023
go
237
riverqueue
bgentry
@@ -29,8 +30,18 @@ type testingT interface { Logf(format string, args ...any) } -// Options for RequireInserted or RequireManyInserted including expectations for -// various queuing properties that stem from InsertOpts. +// Options for RequireInserted functions including expectations for various +// queuing properties that stem from InsertOpts. +// +// Multiple properties set on this struct increase the specifity on a job to +// match, acting like an AND condition on each. +// +// In the case of RequireInserted or RequireInsertdMany, if multiple properties
```suggestion // In the case of RequireInserted or RequireInsertedMany, if multiple properties ```
river
github_2023
go
237
riverqueue
bgentry
@@ -291,22 +414,48 @@ func compareJobToInsertOpts(t testingT, jobRow *rivertype.JobRow, expectedOpts R return fmt.Sprintf(" (expected job slice index %d)", index) } - if expectedOpts.MaxAttempts != 0 && jobRow.MaxAttempts != expectedOpts.MaxAttempts { - failure(t, "Job with kind '%s'%s max attempts %d not equal to expected %d", - jobRow.Kind, positionStr(), jobRow.MaxAttempts, expectedOpts.MaxAttempts) - return false + var failures []string + + if expectedOpts.MaxAttempts != 0 { + if jobRow.MaxAttempts == expectedOpts.MaxAttempts { + if requireNotInserted { + failures = append(failures, fmt.Sprintf("max attempts equal to excluded %d", expectedOpts.MaxAttempts)) + } + } else { + if requireNotInserted { + return true // any one property doesn't match; assertion passes + } else { + failures = append(failures, fmt.Sprintf("max attempts %d not equal to expected %d", jobRow.MaxAttempts, expectedOpts.MaxAttempts)) + } + }
There's definitely a lot of duplication in these attr comparisons. Do you think it'd be worth extracting a generic helper for these comparable attrs (int, string)?
river
github_2023
others
237
riverqueue
bgentry
@@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 A new `river_queue` table is introduced in the v4 migration for this purpose. Upon startup, every producer in each River `Client` will make an `UPSERT` query to the database to either register the queue as being active, or if it already exists it will instead bump the timestamp to keep it active. This query will be run periodically in each producer as long as the `Client` is alive, even if the queue is paused. A separate query will delete/purge any queues which have not been active in awhile (currently fixed to 24 hours). `QueuePause` and `QueueResume` APIs have been introduced to `Client` pause and resume a single queue by name, or _all_ queues using the special `*` value. Each producer will watch for notifications on the relevant `LISTEN/NOTIFY` topic unless operating in poll-only mode, in which case they will periodically poll for changes to their queue record in the database. +- `RequireNotInserted` test helper (in addition to the existing `RequireInserted`) that verifies that a job with matching conditions was _not_ inserted. [PR #237](https://github.com/riverqueue/river/pull/237).
We missed the release on this so unfortunately this will need to get rebased and pushed to the unreleased section.
river
github_2023
go
324
riverqueue
brandur
@@ -0,0 +1,137 @@ +package river_test + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/slogutil" + "github.com/riverqueue/river/riverdriver/riverpgxv5" +) + +type ReportingArgs struct{} + +func (args ReportingArgs) Kind() string { return "Reporting" } + +type ReportingWorker struct { + river.WorkerDefaults[ReportingArgs] + jobWorkedCh chan<- string +} + +func (w *ReportingWorker) Work(ctx context.Context, job *river.Job[ReportingArgs]) error { + select { + case <-ctx.Done(): + return ctx.Err() + case w.jobWorkedCh <- job.Queue: + return nil + } +} + +// Example_queuePause demonstrates how to pause queues to prevent them from +// working new jobs, and later resume them. +func Example_queuePause() { + ctx := context.Background() + + dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_testdb_example")) + if err != nil { + panic(err) + } + defer dbPool.Close() + + // Required for the purpose of this test, but not necessary in real usage. 
+ if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { + panic(err) + } + + const ( + unreliableQueue = "unreliable_external_service" + reliableQueue = "reliable_jobs" + ) + + workers := river.NewWorkers() + jobWorkedCh := make(chan string) + river.AddWorker(workers, &ReportingWorker{jobWorkedCh: jobWorkedCh}) + + riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ + Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}), + Queues: map[string]river.QueueConfig{ + unreliableQueue: {MaxWorkers: 10}, + reliableQueue: {MaxWorkers: 10}, + }, + Workers: workers, + }) + if err != nil { + panic(err) + } + + if err := riverClient.Start(ctx); err != nil { + panic(err) + } + + fmt.Printf("Pausing %s queue\n", unreliableQueue) + if err := riverClient.QueuePause(ctx, unreliableQueue, nil); err != nil { + panic(err) + } + + // Jobs on the reliable queue will be worked immediately because that queue is not paused: + fmt.Printf("Inserting job into %s queue\n", reliableQueue) + if _, err = riverClient.Insert(ctx, ReportingArgs{}, &river.InsertOpts{Queue: reliableQueue}); err != nil { + panic(err) + } + receivedQueue := waitOrTimeout(jobWorkedCh) + fmt.Printf("Job worked on %s queue\n", receivedQueue) + if receivedQueue != reliableQueue { + panic("expected reliable queue, got " + receivedQueue) + } + + fmt.Printf("Inserting job into %s queue\n", unreliableQueue) + if _, err = riverClient.Insert(ctx, ReportingArgs{}, &river.InsertOpts{Queue: unreliableQueue}); err != nil { + panic(err) + } + waitExpectTimeout(jobWorkedCh)
Okay, so one thing with the example tests is that unlike the rest of the suite, they always run sequentially. So by adding this statement, we add exactly +1 second to the runtime of `go test .`, which is already extremely slow. Instead of this, what do you think about something like: * Pause the unreliable queue. * Insert the unreliable job first, then the reliable job. * Wait for the reliable job to be executed. Because it's got a larger ID than the unreliable job, we would have expected that the unreliable one would get worked first if it was going to be worked. * Unpause the unreliable queue. * Wait for one more completion. We then depend partly on the test output to verify that everything happened in the order we expected.
river
github_2023
go
324
riverqueue
brandur
@@ -0,0 +1,133 @@ +package river_test + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/slogutil" + "github.com/riverqueue/river/riverdriver/riverpgxv5" +) + +type ReportingArgs struct{} + +func (args ReportingArgs) Kind() string { return "Reporting" } + +type ReportingWorker struct { + river.WorkerDefaults[ReportingArgs] + jobWorkedCh chan<- string +} + +func (w *ReportingWorker) Work(ctx context.Context, job *river.Job[ReportingArgs]) error { + select { + case <-ctx.Done(): + return ctx.Err() + case w.jobWorkedCh <- job.Queue: + return nil + } +} + +// Example_queuePause demonstrates how to pause queues to prevent them from +// working new jobs, and later resume them. +func Example_queuePause() { + ctx := context.Background() + + dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_testdb_example")) + if err != nil { + panic(err) + } + defer dbPool.Close() + + // Required for the purpose of this test, but not necessary in real usage. + if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { + panic(err) + } + + const ( + unreliableQueue = "unreliable_external_service" + reliableQueue = "reliable_jobs" + ) + + workers := river.NewWorkers() + jobWorkedCh := make(chan string) + river.AddWorker(workers, &ReportingWorker{jobWorkedCh: jobWorkedCh}) + + riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ + Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}), + Queues: map[string]river.QueueConfig{ + unreliableQueue: {MaxWorkers: 10}, + reliableQueue: {MaxWorkers: 10}, + }, + Workers: workers, + }) + if err != nil { + panic(err) + } + + if err := riverClient.Start(ctx); err != nil { + panic(err) + } + + // Out of example scope, but used to wait until a queue is paused or unpaused. 
+ subscribeChan, subscribeCancel := riverClient.Subscribe(river.EventKindQueuePaused, river.EventKindQueueResumed) + defer subscribeCancel() + + fmt.Printf("Pausing %s queue\n", unreliableQueue) + if err := riverClient.QueuePause(ctx, unreliableQueue, nil); err != nil { + panic(err) + } + + // Wait for queue to be paused: + waitOrTimeout(subscribeChan) + + fmt.Println("Inserting one job each into unreliable and reliable queues") + if _, err = riverClient.Insert(ctx, ReportingArgs{}, &river.InsertOpts{Queue: unreliableQueue}); err != nil { + panic(err) + } + if _, err = riverClient.Insert(ctx, ReportingArgs{}, &river.InsertOpts{Queue: reliableQueue}); err != nil { + panic(err) + } + // The unreliable queue is paused so its job should get worked yet, while + // reliable queue is not paused so its job should get worked immediately: + receivedQueue := waitOrTimeout(jobWorkedCh) + fmt.Printf("Job worked on %s queue\n", receivedQueue) + if receivedQueue != reliableQueue { + panic("expected reliable queue, got " + receivedQueue) + } + + // Resume the unreliable queue so it can work the job: + fmt.Printf("Resuming %s queue\n", unreliableQueue) + if err := riverClient.QueueResume(ctx, unreliableQueue, nil); err != nil { + panic(err) + } + receivedQueue = waitOrTimeout(jobWorkedCh) + fmt.Printf("Job worked on %s queue\n", receivedQueue) + if receivedQueue != unreliableQueue { + panic("expected unreliable queue, got " + receivedQueue)
What do you think about changing this to a `fmt.Printf` and having the output of the example assert its correctness instead? When it comes to these examples, definitely the fewer LOCs helps to sharpen and clarify.
river
github_2023
go
327
riverqueue
brandur
@@ -920,20 +926,30 @@ func (c *Client[TTx]) distributeJob(job *rivertype.JobRow, stats *JobStatistics) } var event *Event - switch job.State { - case rivertype.JobStateCancelled: - event = &Event{Kind: EventKindJobCancelled, Job: job, JobStats: stats} - case rivertype.JobStateCompleted: - event = &Event{Kind: EventKindJobCompleted, Job: job, JobStats: stats} - case rivertype.JobStateScheduled: - event = &Event{Kind: EventKindJobSnoozed, Job: job, JobStats: stats} - case rivertype.JobStateAvailable, rivertype.JobStateDiscarded, rivertype.JobStateRetryable, rivertype.JobStateRunning: - event = &Event{Kind: EventKindJobFailed, Job: job, JobStats: stats} - case rivertype.JobStatePending: - panic("completion subscriber unexpectedly received job in pending state, river bug") - default: - // linter exhaustive rule prevents this from being reached - panic("unreachable state to distribute, river bug") + + if job != nil { + switch job.State { + case rivertype.JobStateCancelled: + event = &Event{Kind: EventKindJobCancelled, Job: job, JobStats: stats} + case rivertype.JobStateCompleted: + event = &Event{Kind: EventKindJobCompleted, Job: job, JobStats: stats} + case rivertype.JobStateScheduled: + event = &Event{Kind: EventKindJobSnoozed, Job: job, JobStats: stats} + case rivertype.JobStateAvailable, rivertype.JobStateDiscarded, rivertype.JobStateRetryable, rivertype.JobStateRunning: + event = &Event{Kind: EventKindJobFailed, Job: job, JobStats: stats} + case rivertype.JobStatePending: + panic("completion subscriber unexpectedly received job in pending state, river bug") + default: + // linter exhaustive rule prevents this from being reached + panic("unreachable state to distribute, river bug") + } + } else { + switch queue.PausedAt { + case nil: + event = &Event{Kind: EventKindQueueResumed, Queue: queue} + default: + event = &Event{Kind: EventKindQueuePaused, Queue: queue}
What do you think about just having the producer generate the event rather than having a separate field that's then reassembled into an event post-hoc? See: https://github.com/riverqueue/river/pull/328/files Feels a little bit cleaner, and it may help for assembling a full queue object.
river
github_2023
go
327
riverqueue
brandur
@@ -511,74 +509,73 @@ func Test_Client(t *testing.T) { config.Queues["alternate"] = QueueConfig{MaxWorkers: 10} client := newTestClient(t, bundle.dbPool, config) - jobStartedChan := make(chan int64) - - type JobArgs struct { - JobArgsReflectKind[JobArgs] - } - - AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { - jobStartedChan <- job.ID - return nil - })) - + subscribeChan := subscribe(t, client) startClient(ctx, t, client) - client.producersByQueueName[QueueDefault].testSignals.Init() - client.producersByQueueName["alternate"].testSignals.Init() - - insertRes1, err := client.Insert(ctx, &JobArgs{}, nil) + insertRes1, err := client.Insert(ctx, &noOpArgs{}, nil) require.NoError(t, err) - startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) - require.Equal(t, insertRes1.Job.ID, startedJobID) + event := riverinternaltest.WaitOrTimeout(t, subscribeChan) + require.Equal(t, EventKindJobCompleted, event.Kind) + require.Equal(t, insertRes1.Job.ID, event.Job.ID)
Just for the sake of keeping these tests shorter, what do you think of only subscribing to queue-based events here, then we can skip the checks on job events?
river
github_2023
go
327
riverqueue
brandur
@@ -511,74 +509,73 @@ func Test_Client(t *testing.T) { config.Queues["alternate"] = QueueConfig{MaxWorkers: 10} client := newTestClient(t, bundle.dbPool, config) - jobStartedChan := make(chan int64) - - type JobArgs struct { - JobArgsReflectKind[JobArgs] - } - - AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { - jobStartedChan <- job.ID - return nil - })) - + subscribeChan := subscribe(t, client) startClient(ctx, t, client) - client.producersByQueueName[QueueDefault].testSignals.Init() - client.producersByQueueName["alternate"].testSignals.Init() - - insertRes1, err := client.Insert(ctx, &JobArgs{}, nil) + insertRes1, err := client.Insert(ctx, &noOpArgs{}, nil) require.NoError(t, err) - startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) - require.Equal(t, insertRes1.Job.ID, startedJobID) + event := riverinternaltest.WaitOrTimeout(t, subscribeChan) + require.Equal(t, EventKindJobCompleted, event.Kind) + require.Equal(t, insertRes1.Job.ID, event.Job.ID) // Pause only the default queue: require.NoError(t, client.QueuePause(ctx, QueueDefault, nil)) - client.producersByQueueName[QueueDefault].testSignals.Paused.WaitOrTimeout() + event = riverinternaltest.WaitOrTimeout(t, subscribeChan) + require.Equal(t, EventKindQueuePaused, event.Kind) + require.Equal(t, QueueDefault, event.Queue.Name)
This might depend on whether we get the full queue object in the end, but IMO, feels cleaner to have a single assertion on the full event object. More readable, and produces a better error in case of failure too: ``` pauseEvent := riverinternaltest.WaitOrTimeout(t, subscribeChan) require.Equal(t, &Event{Kind: EventKindQueuePaused, Queue: QueueDefault}, pauseEvent) ```
river
github_2023
go
326
riverqueue
bgentry
@@ -214,9 +214,9 @@ type AttemptError struct { // subsequently remove the periodic job with `Remove()`. type PeriodicJobHandle int -// Queue is a configuration for a queue that is currently (or recently was) in +// QueueRow is a configuration for a queue that is currently (or recently was) in // use by a client. -type Queue struct { +type QueueRow struct {
I'm hesitant to do this. We had to call the job one `JobRow` because we wanted the generic typed variant to be `Job[T]`. However we have no such issue with the queue type and I doubt we will, so it seems unnecessary to clutter this one up with a `*Row` suffix.
river
github_2023
go
301
riverqueue
brandur
@@ -148,3 +150,13 @@ type AttemptError struct { // (returned by the use of `Client.PeriodicJobs().Add()`) which can be used to // subsequently remove the periodic job with `Remove()`. type PeriodicJobHandle int + +// Queue is a configuration for a queue that is currently (or recently was) in +// use by a client. +type Queue struct { + CreatedAt time.Time + Metadata []byte + Name string + PausedAt *time.Time + UpdatedAt time.Time
Just given this is the public facing struct, could we document the properties on this one?
river
github_2023
go
301
riverqueue
brandur
@@ -44,6 +45,9 @@ const ( PriorityDefault = rivercommon.PriorityDefault QueueDefault = rivercommon.QueueDefault QueueNumWorkersMax = 10_000 + + queueSettingsPollIntervalDefault = 2 * time.Second + queueSettingsReportIntervalDefault = 10 * time.Minute
A couple on this one: * What do you think about dropping "Settings" out of these names? It makes them a bit of a mouthful, and when I was reading their names, they didn't add anything to explaining what the constants were for (there's no way for the mind to map "settings" to polling for pause/resume state). * Could we push these down into the producer? They're not used anywhere else, and having their definition closer to their point of usage makes it easier to find what these values are supposed to be when you're trying to figure that out. As is, you need to hop up multiple layers of indirection to know.
river
github_2023
go
301
riverqueue
brandur
@@ -23,6 +23,9 @@ type InsertOpts struct { // field by River. Metadata []byte + // Pending indicates that the job should be inserted in the `pending` state.
Could you explain this more? I saw the note in the PR description, but a user of the Go API should be able to figure out what this is for by reading the documentation, and even the PR description didn't explain under what circumstances a normal caller might want to set a pending state.
river
github_2023
go
301
riverqueue
brandur
@@ -1309,10 +1335,23 @@ func (c *Client[TTx]) insert(ctx context.Context, exec riverdriver.Executor, arg return nil, err } - jobInsertRes, err := c.uniqueInserter.JobInsert(ctx, exec, params, uniqueOpts) + execTx, err := exec.Begin(ctx)
Should we stick just to `tx` as convention for transaction variables like this? You're using either `tx` or `execTx` for the same thing depending on the specific function (see the block directly below this one for example).
river
github_2023
go
301
riverqueue
brandur
@@ -1423,6 +1489,93 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdrive return insertParams, nil } +func (c *Client[TTx]) maybeNotifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, state rivertype.JobState, queue string) error { + if state != rivertype.JobStateAvailable { + return nil + } + return c.maybeNotifyInsertForQueues(ctx, execTx, []string{queue})
These helper functions are feeling a little on the light side ... only used in one place each, and just 2-3 lines. Creates another jump of indirection when they could probably just be merged into their caller.
river
github_2023
go
301
riverqueue
brandur
@@ -1423,6 +1489,93 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdrive return insertParams, nil } +func (c *Client[TTx]) maybeNotifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, state rivertype.JobState, queue string) error { + if state != rivertype.JobStateAvailable { + return nil + } + return c.maybeNotifyInsertForQueues(ctx, execTx, []string{queue}) +} + +func (c *Client[TTx]) maybeNotifyInsertMany(ctx context.Context, execTx riverdriver.ExecutorTx, insertParams []*riverdriver.JobInsertFastParams) error { + queues := sliceutil.Map(insertParams, func(params *riverdriver.JobInsertFastParams) string { return params.Queue }) + return c.maybeNotifyInsertForQueues(ctx, execTx, queues)
This function needs a check on `JobStateAvailable` like the above doesn't it?
river
github_2023
go
301
riverqueue
brandur
@@ -1423,6 +1489,93 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdrive return insertParams, nil } +func (c *Client[TTx]) maybeNotifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, state rivertype.JobState, queue string) error { + if state != rivertype.JobStateAvailable { + return nil + } + return c.maybeNotifyInsertForQueues(ctx, execTx, []string{queue}) +} + +func (c *Client[TTx]) maybeNotifyInsertMany(ctx context.Context, execTx riverdriver.ExecutorTx, insertParams []*riverdriver.JobInsertFastParams) error { + queues := sliceutil.Map(insertParams, func(params *riverdriver.JobInsertFastParams) string { return params.Queue }) + return c.maybeNotifyInsertForQueues(ctx, execTx, queues) +} + +// Notify the given queues that new jobs are available. The queues list will be +// deduplicated and each will be checked to see if it is due for an insert +// notification from this client. +func (c *Client[TTx]) maybeNotifyInsertForQueues(ctx context.Context, execTx riverdriver.ExecutorTx, queues []string) error { + if len(queues) < 1 { + return nil + } + + queueMap := make(map[string]struct{}) + queuesDeduped := make([]string, 0, len(queues)) + payloads := make([]string, 0, len(queues)) + + for _, queue := range queues { + if _, ok := queueMap[queue]; ok { + continue + } + + queueMap[queue] = struct{}{} + if c.insertNotifyLimiter.ShouldTrigger(queue) { + payloads = append(payloads, fmt.Sprintf("{\"queue\": %q}", queue)) + queuesDeduped = append(queuesDeduped, queue) + } + } + + if len(payloads) < 1 { + return nil + } + + return c.notifyInsert(ctx, execTx, queuesDeduped, payloads) +} + +func (c *Client[TTx]) notifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, queues, payloads []string) error {
IMO refactor this function out. Does nothing but call into the executor (a single function call), used in only one place, but creates another indirection hop.
river
github_2023
go
301
riverqueue
brandur
@@ -1423,6 +1489,93 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdrive return insertParams, nil } +func (c *Client[TTx]) maybeNotifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, state rivertype.JobState, queue string) error { + if state != rivertype.JobStateAvailable { + return nil + } + return c.maybeNotifyInsertForQueues(ctx, execTx, []string{queue}) +} + +func (c *Client[TTx]) maybeNotifyInsertMany(ctx context.Context, execTx riverdriver.ExecutorTx, insertParams []*riverdriver.JobInsertFastParams) error { + queues := sliceutil.Map(insertParams, func(params *riverdriver.JobInsertFastParams) string { return params.Queue }) + return c.maybeNotifyInsertForQueues(ctx, execTx, queues) +} + +// Notify the given queues that new jobs are available. The queues list will be +// deduplicated and each will be checked to see if it is due for an insert +// notification from this client. +func (c *Client[TTx]) maybeNotifyInsertForQueues(ctx context.Context, execTx riverdriver.ExecutorTx, queues []string) error { + if len(queues) < 1 { + return nil + } + + queueMap := make(map[string]struct{}) + queuesDeduped := make([]string, 0, len(queues)) + payloads := make([]string, 0, len(queues)) + + for _, queue := range queues { + if _, ok := queueMap[queue]; ok { + continue + } + + queueMap[queue] = struct{}{} + if c.insertNotifyLimiter.ShouldTrigger(queue) { + payloads = append(payloads, fmt.Sprintf("{\"queue\": %q}", queue)) + queuesDeduped = append(queuesDeduped, queue) + } + } + + if len(payloads) < 1 { + return nil + } + + return c.notifyInsert(ctx, execTx, queuesDeduped, payloads) +} + +func (c *Client[TTx]) notifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, queues, payloads []string) error { + err := execTx.NotifyMany(ctx, &riverdriver.NotifyManyParams{ + Topic: string(notifier.NotificationTopicInsert), + Payload: payloads, + }) + if err != nil { + c.baseService.Logger.ErrorContext( + ctx, + c.baseService.Name+": Failed 
to send job insert notification", + slog.String("queues", strings.Join(queues, ",")), + slog.String("err", err.Error()), + ) + return err + } + return nil +} + +// emit a notification about a queue being paused or resumed. +func (c *Client[TTx]) notifyQueuePauseOrResume(ctx context.Context, execTx riverdriver.ExecutorTx, action, queue string, opts *QueuePauseOpts) error { + type queueConfigChange struct {
Just a note that "queue config" is used in some places like this, while "queue settings" is used above. IMO neither are really that appropriate ... can we call it a "queue state change" or something like that?
river
github_2023
go
301
riverqueue
brandur
@@ -1528,6 +1681,83 @@ func (c *Client[TTx]) JobListTx(ctx context.Context, tx TTx, params *JobListPara // client, and can be used to add new ones or remove existing ones. func (c *Client[TTx]) PeriodicJobs() *PeriodicJobBundle { return c.periodicJobs } +// QueueGet returns the queue with the given name. If the queue has not recently +// been active or does not exist, returns ErrNotFound. +// +// The provided context is used for the underlying Postgres query and can be +// used to cancel the operation or apply a timeout. +func (c *Client[TTx]) QueueGet(ctx context.Context, name string) (*rivertype.Queue, error) { + return c.driver.GetExecutor().QueueGet(ctx, name) +} + +// QueueList returns a list of all queues that are currently active or were +// recently active. Limit and offset can be used to paginate the results. +// +// The provided context is used for the underlying Postgres query and can be +// used to cancel the operation or apply a timeout. +func (c *Client[TTx]) QueueList(ctx context.Context, limit, offset int) ([]*rivertype.Queue, error) {
Okay, sorry to pop a can of worms on this one. I know the intent of the offset pagination is that the set of queues will always be small, but limit/offset pagination is ~never appropriate, and this function's signature locks us into it without a breaking change. It also can't take a sort order (imagine you want to order by pause state first, _then_ name, or something like that). What do you think about changing this so that it looks like the other list function: ``` go func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) { ``` But for now just implement `QueueListParams` so that it has no properties and always causes all queues to be returned? (i.e. It's just a stand in for forward compatibility that we can use for future augmentation.) This would mean that we can't paginate in the UI for now, but IMO that's a reasonable trade off for now. Just like the logic for using limit/offset, ~all users will have a small number of queues, and even if they have a huge number of queues, we can probably render them all in the UI for the time being and just have it be a big page. Once/if this becomes a problem, we could readdress it.
river
github_2023
go
301
riverqueue
brandur
@@ -461,6 +458,117 @@ func Test_Client(t *testing.T) { require.Equal(t, `relation "river_job" does not exist`, pgErr.Message) }) + t.Run("PauseAndResume", func(t *testing.T) { + t.Parallel() + + config, bundle := setupConfig(t) + config.Queues["alternate"] = QueueConfig{MaxWorkers: 10} + client := newTestClient(t, bundle.dbPool, config) + + jobStartedChan := make(chan int64) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + jobStartedChan <- job.ID + return nil + })) + + startClient(ctx, t, client) + + client.producersByQueueName[QueueDefault].testSignals.Init() + client.producersByQueueName["alternate"].testSignals.Init() + + insertRes1, err := client.Insert(ctx, &JobArgs{}, nil) + require.NoError(t, err) + + startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertRes1.Job.ID, startedJobID) + + // Pause only the default queue: + require.NoError(t, client.QueuePause(ctx, QueueDefault, nil)) + client.producersByQueueName[QueueDefault].testSignals.Paused.WaitOrTimeout() + + insertRes2, err := client.Insert(ctx, &JobArgs{}, nil) + require.NoError(t, err) + + select { + case <-jobStartedChan: + t.Fatal("expected job 2 to not start on paused queue") + case <-time.After(500 * time.Millisecond): + } + + // alternate queue should still be running: + insertResbAlternate1, err := client.Insert(ctx, &JobArgs{}, &InsertOpts{Queue: "alternate"}) + require.NoError(t, err) + + startedJobID = riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertResbAlternate1.Job.ID, startedJobID) + + // Pause all queues: + require.NoError(t, client.QueuePause(ctx, "*", nil)) + client.producersByQueueName["alternate"].testSignals.Paused.WaitOrTimeout() + + insertResAlternate2, err := client.Insert(ctx, &JobArgs{}, &InsertOpts{Queue: "alternate"}) + require.NoError(t, err) + + select { + case <-jobStartedChan: + t.Fatal("expected 
alternate job 2 to not start on paused queue") + case <-time.After(500 * time.Millisecond): + } + + // Resume only the alternate queue: + require.NoError(t, client.QueueResume(ctx, "alternate", nil)) + client.producersByQueueName["alternate"].testSignals.Resumed.WaitOrTimeout() + + startedJobID = riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertResAlternate2.Job.ID, startedJobID) + + // Resume all queues: + require.NoError(t, client.QueueResume(ctx, "*", nil)) + client.producersByQueueName[QueueDefault].testSignals.Resumed.WaitOrTimeout() + + startedJobID = riverinternaltest.WaitOrTimeout(t, jobStartedChan) + require.Equal(t, insertRes2.Job.ID, startedJobID) + })
Could we get another test for the "simple" case that lets you run just a basic pause and unpause? It's often useful to be able to zero in on doing just a basic exercise of a feature to debug something high level. This test exercises everything in one case, and is very verbose so you need to read a lot to understand what it's trying to do.
river
github_2023
go
301
riverqueue
brandur
@@ -128,16 +138,47 @@ func (s *JobScheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, er ctx, cancelFunc := context.WithTimeout(ctx, 30*time.Second) defer cancelFunc() - numScheduled, err := s.exec.JobSchedule(ctx, &riverdriver.JobScheduleParams{ - InsertTopic: string(notifier.NotificationTopicInsert), - Max: s.config.Limit, - Now: s.TimeNowUTC(), + execTx, err := s.exec.Begin(ctx) + if err != nil { + return 0, fmt.Errorf("error starting transaction: %w", err) + } + defer execTx.Rollback(ctx) + + now := s.TimeNowUTC() + nowWithLookAhead := now.Add(s.config.Interval) + + results, err := s.exec.JobSchedule(ctx, &riverdriver.JobScheduleParams{
Can we use something more descriptive for this variable like `scheduledJobs`?
river
github_2023
go
301
riverqueue
brandur
@@ -0,0 +1,162 @@ +package maintenance + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/maintenance/startstop" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/util/timeutil" + "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" +) + +const ( + QueueCleanerIntervalDefault = time.Hour + QueueRetentionPeriodDefault = 24 * time.Hour
Add "cleaner" to the name of this one, and for the time being, maybe unexport both constants. Neither are needed outside the packages and IMO no reason to ever really make these configurable unless there's a really amazing reason to do so.
river
github_2023
go
301
riverqueue
brandur
@@ -0,0 +1,162 @@ +package maintenance + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/maintenance/startstop" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/util/timeutil" + "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" +) + +const ( + QueueCleanerIntervalDefault = time.Hour + QueueRetentionPeriodDefault = 24 * time.Hour +) + +// Test-only properties. +type QueueCleanerTestSignals struct { + DeletedBatch rivercommon.TestSignal[struct{}] // notifies when runOnce finishes a pass +} + +func (ts *QueueCleanerTestSignals) Init() { + ts.DeletedBatch.Init() +} + +type QueueCleanerConfig struct { + // Interval is the amount of time to wait between runs of the cleaner. + Interval time.Duration + // QueueRetentionPeriod is the amount of time to keep queues around before + // they're removed. + QueueRetentionPeriod time.Duration
Maybe drop this one down to just `RetentionPeriod`. The additional "Queue" stutters with what's implied by the struct's name.
river
github_2023
go
301
riverqueue
brandur
@@ -792,6 +795,88 @@ func ExerciseExecutorFull[TTx any](ctx context.Context, t *testing.T, driver riv require.Equal(t, rivertype.JobStateCompleted, job.State) require.Equal(t, []string{"tag"}, job.Tags) }) + + // TODO(bgentry): these are probably in the wrong file or location within + // the file and should go somewhere else? + t.Run("JobFinalizedAtConstraint", func(t *testing.T) {
RE TODO: If you feel it may still be wrong, should probably try to find a new convention for where to put it. Should probably find a way to resolve this TODO before merge since it's fairly trivial.
river
github_2023
go
301
riverqueue
brandur
@@ -36,6 +36,14 @@ type JobOpts struct { func Job(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts *JobOpts) *rivertype.JobRow { tb.Helper() + job, err := exec.JobInsertFull(ctx, JobBuild(tb, opts)) + require.NoError(tb, err) + return job +} + +func JobBuild(tb testing.TB, opts *JobOpts) *riverdriver.JobInsertFullParams {
I was thinking about doing something like this before, but I think we need to establish a different convention, because as is, the naming suggests that this is a factory function for a "job build" entity. Maybe something like `Job_Build` or `Job_InsertParams`?
river
github_2023
go
301
riverqueue
brandur
@@ -111,3 +117,26 @@ var seq int64 = 1 //nolint:gochecknoglobals func nextSeq() int { return int(atomic.AddInt64(&seq, 1)) } + +type QueueOpts struct { + Metadata []byte + Name *string + UpdatedAt *time.Time +} + +func Queue(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts *QueueOpts) *rivertype.Queue { + tb.Helper() + + metadata := opts.Metadata + if opts.Metadata == nil { + metadata = []byte("{}") + } + + queue, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ + Metadata: metadata, + Name: ptrutil.ValOrDefault(opts.Name, "default"),
Could you change this to? ``` go Name: ptrutil.ValOrDefaultFunc(opts.Name, func() string { return fmt.Sprintf("queue_%02d", nextSeq()) }), ``` The basic idea with these factory functions is you can call them many times with no (or very few) opts and still get a new valid object back. See migrations above for example, which _could_ always return migration version 1, but generates a sequence instead for caller convenience. This would also allow you to simplify your tests quite a bit, many of which have to override `Name` to work around this default.
river
github_2023
go
301
riverqueue
brandur
@@ -295,6 +332,51 @@ type insertPayload struct { Queue string `json:"queue"` } +func (p *producer) handleJobControlNotification(workCtx context.Context) func(notifier.NotificationTopic, string) {
Okay, so if I understand correctly, the "job control" topic is shared between (1) job cancel notifications, and (2) queue pause/resume notifications. It's not super clear to me what these have in common, except that they're both topics used by the producer. But job inserts are also topics shared by the producer, so by that logic, shouldn't they also share the same topic?
river
github_2023
go
301
riverqueue
brandur
@@ -283,6 +318,8 @@ type jobControlAction string const ( jobControlActionCancel jobControlAction = "cancel" + jobControlActionPause jobControlAction = "pause" + jobControlActionResume jobControlAction = "resume"
If all these keep sharing, we definitely want a rename of these at least, e.g.: `Cancel` -> `JobCancel` `Pause` -> `QueuePause` `Resume` -> `QueueResume`
river
github_2023
go
301
riverqueue
brandur
@@ -470,6 +577,84 @@ func (p *producer) handleWorkerDone(job *rivertype.JobRow) { p.jobResultCh <- job } +func (p *producer) pollForSettingChanges(ctx context.Context, lastPaused bool) { + ticker := time.NewTicker(p.config.QueueSettingsPollInterval) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + updatedQueue, err := p.fetchQueueSettings(ctx) + if err != nil { + p.Logger.ErrorContext(ctx, p.Name+": Error fetching queue settings", slog.String("err", err.Error())) + continue + } + shouldBePaused := (updatedQueue.PausedAt != nil) + if lastPaused != shouldBePaused { + action := jobControlActionPause + if !shouldBePaused { + action = jobControlActionResume + } + payload := &jobControlPayload{ + Action: action, + Queue: p.config.Queue, + } + p.Logger.InfoContext(ctx, p.Name+": Queue control state changed from polling", + slog.String("queue", p.config.Queue), + slog.String("action", string(action)), + slog.Bool("paused", shouldBePaused), + ) + + select { + case p.queueControlCh <- payload: + lastPaused = shouldBePaused + default: + p.Logger.WarnContext(ctx, p.Name+": Queue control notification dropped due to full buffer", slog.String("action", string(action))) + } + } + } + } +} + +func (p *producer) fetchQueueSettings(ctx context.Context) (*rivertype.Queue, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + return p.exec.QueueGet(ctx, p.config.Queue) +} + +func (p *producer) reportQueueStatusLoop(ctx context.Context) { + // TODO(bgentry): initial randmized sleep?
Can this be resolved? Maybe just use one of the cancellable sleep helpers with a small jitter even if we're not totally married to the exact timing. Feels better than leaving the TODO in.
river
github_2023
go
301
riverqueue
brandur
@@ -410,4 +438,126 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin startstoptest.Stress(ctx, t, producer) }) + + t.Run("QueuePausedBeforeStart", func(t *testing.T) { + t.Parallel() + + producer, bundle := setup(t) + AddWorker(bundle.workers, &noOpWorker{}) + + // TODO: maybe add a separate QueueInsertFull to driver to clean this up?
IMO: while there's some argument that jobs should have a fast path because we want them highly performant for benchmarks and the like, one extra insert parameter on queues is going to have ~zero practical performance impact. Maybe just add any desired ones to the normal insert path as optional parameters.
river
github_2023
go
301
riverqueue
brandur
@@ -0,0 +1,4 @@ +package river + +// QueuePauseOpts are optional settings for pausing or unpausing a queue.
I've been struggling with this myself just writing comments here, but for purposes of docs we should try to consolidate on one term. Either "resume" or "unpause", but try to stay consistent.
river
github_2023
others
301
riverqueue
brandur
@@ -1,5 +1,15 @@ -- name: PGAdvisoryXactLock :exec SELECT pg_advisory_xact_lock(@key); --- name: PGNotify :exec -SELECT pg_notify(@topic, @payload); \ No newline at end of file +-- name: PGNotifyMany :exec +WITH topic_to_notify AS ( + SELECT + concat(current_schema(), '.', @topic::text) AS topic, + unnest(@payload::text[]) AS payload +) + +SELECT pg_notify(
Minor, but any chance we could try to by convention omit the newlines between CTE cases? Every time I see a `SELECT` like this with a bare line above it, I read it as a standalone expression before realizing there's CTE(s) up there and I have to read upwards to get the whole context.
river
github_2023
others
301
riverqueue
brandur
@@ -290,11 +297,10 @@ river_job_scheduled AS ( WHERE river_job.id = jobs_to_schedule.id RETURNING * ) -SELECT count(*) -FROM ( - SELECT pg_notify(@insert_topic, json_build_object('queue', queue)::text) - FROM river_job_scheduled -) AS notifications_sent; +SELECT + queue, + scheduled_at
What do you think about having this return just the full job row instead? There's a minor performance implication, but keeps things more consistent and requires fewer bespoke types in the driver machinery.
river
github_2023
go
301
riverqueue
brandur
@@ -1646,6 +1729,256 @@ func ExerciseExecutorFull[TTx any](ctx context.Context, t *testing.T, driver riv require.FailNow(t, "Goroutine didn't finish in a timely manner") } } + + t.Run("QueueCreateOrSetUpdatedAt", func(t *testing.T) { + t.Run("InsertsANewQueueWithDefaultUpdatedAt", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + metadata := []byte(`{"foo": "bar"}`) + queue, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ + Metadata: metadata, + Name: "new-queue", + }) + require.NoError(t, err) + require.WithinDuration(t, time.Now(), queue.CreatedAt, 500*time.Millisecond) + require.Equal(t, metadata, queue.Metadata) + require.Equal(t, "new-queue", queue.Name) + require.Nil(t, queue.PausedAt) + require.WithinDuration(t, time.Now(), queue.UpdatedAt, 500*time.Millisecond) + }) + + t.Run("UpdatesTheUpdatedAtOfExistingQueue", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + metadata := []byte(`{"foo": "bar"}`) + tBefore := time.Now().UTC() + queueBefore, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ + Metadata: metadata, + Name: "updateable-queue", + UpdatedAt: &tBefore, + }) + require.NoError(t, err) + require.WithinDuration(t, tBefore, queueBefore.UpdatedAt, time.Millisecond) + + tAfter := tBefore.Add(2 * time.Second) + queueAfter, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ + Metadata: []byte(`{"other": "metadata"}`), + Name: "updateable-queue", + UpdatedAt: &tAfter, + }) + require.NoError(t, err) + + // unchanged: + require.Equal(t, queueBefore.CreatedAt, queueAfter.CreatedAt) + require.Equal(t, metadata, queueAfter.Metadata) + require.Equal(t, "updateable-queue", queueAfter.Name) + require.Nil(t, queueAfter.PausedAt) + + // Timestamp is bumped: + require.WithinDuration(t, tAfter, queueAfter.UpdatedAt, time.Millisecond) + }) + }) + + 
t.Run("QueueDeleteExpired", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now() + _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue1"), UpdatedAt: ptrutil.Ptr(now)}) + queue2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue2"), UpdatedAt: ptrutil.Ptr(now.Add(-25 * time.Hour))}) + queue3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue3"), UpdatedAt: ptrutil.Ptr(now.Add(-26 * time.Hour))}) + queue4 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue4"), UpdatedAt: ptrutil.Ptr(now.Add(-48 * time.Hour))}) + _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue5"), UpdatedAt: ptrutil.Ptr(now.Add(-23 * time.Hour))}) + + horizon := now.Add(-24 * time.Hour) + deletedQueueNames, err := exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon}) + require.NoError(t, err) + + // queue2 and queue3 should be deleted, with queue4 being skipped due to max of 2: + require.Equal(t, []string{queue2.Name, queue3.Name}, deletedQueueNames) + + // Try again, make sure queue4 gets deleted this time: + deletedQueueNames, err = exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon}) + require.NoError(t, err) + + require.Equal(t, []string{queue4.Name}, deletedQueueNames) + }) + + t.Run("QueueGet", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`), Name: ptrutil.Ptr("queue1")}) + + queueFetched, err := exec.QueueGet(ctx, queue.Name) + require.NoError(t, err) + + require.WithinDuration(t, queue.CreatedAt, queueFetched.CreatedAt, time.Millisecond) + require.Equal(t, queue.Metadata, queueFetched.Metadata) + require.Equal(t, queue.Name, 
queueFetched.Name) + require.Nil(t, queueFetched.PausedAt) + require.WithinDuration(t, queue.UpdatedAt, queueFetched.UpdatedAt, time.Millisecond) + + queueFetched, err = exec.QueueGet(ctx, "nonexistent-queue") + require.ErrorIs(t, err, rivertype.ErrNotFound) + require.Nil(t, queueFetched) + }) + + t.Run("QueueList", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + requireQueuesEqual := func(t *testing.T, target, actual *rivertype.Queue) { + t.Helper() + require.WithinDuration(t, target.CreatedAt, actual.CreatedAt, time.Millisecond) + require.Equal(t, target.Metadata, actual.Metadata) + require.Equal(t, target.Name, actual.Name) + if target.PausedAt == nil { + require.Nil(t, actual.PausedAt) + } else { + require.NotNil(t, actual.PausedAt) + require.WithinDuration(t, *target.PausedAt, *actual.PausedAt, time.Millisecond) + } + } + + queues, err := exec.QueueList(ctx, 10, 0) + require.NoError(t, err) + require.Empty(t, queues) + + // Make queue1, pause it, refetch: + queue1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`), Name: ptrutil.Ptr("queue1")}) + require.NoError(t, exec.QueuePause(ctx, queue1.Name)) + queue1, err = exec.QueueGet(ctx, queue1.Name) + require.NoError(t, err) + + queue2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue2")}) + queue3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue3")}) + + queues, err = exec.QueueList(ctx, 2, 0) + require.NoError(t, err) + + require.Len(t, queues, 2) + requireQueuesEqual(t, queue1, queues[0]) + requireQueuesEqual(t, queue2, queues[1]) + + queues, err = exec.QueueList(ctx, 2, 2) + require.NoError(t, err) + + require.Len(t, queues, 1) + requireQueuesEqual(t, queue3, queues[0]) + }) + + t.Run("QueuePause", func(t *testing.T) { + t.Parallel() + + t.Run("ExistingQueue", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + 
queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue1")}) + require.Nil(t, queue.PausedAt) + + require.NoError(t, exec.QueuePause(ctx, queue.Name)) + + queueFetched, err := exec.QueueGet(ctx, queue.Name) + require.NoError(t, err) + require.NotNil(t, queueFetched.PausedAt) + require.WithinDuration(t, time.Now(), *(queueFetched.PausedAt), 500*time.Millisecond) + }) + + t.Run("NonExistentQueue", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + err := exec.QueuePause(ctx, "queue1") + require.ErrorIs(t, err, rivertype.ErrNotFound) + }) + + t.Run("AllQueues", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + queue1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue1")}) + require.Nil(t, queue1.PausedAt) + queue2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Name: ptrutil.Ptr("queue2")}) + require.Nil(t, queue2.PausedAt) + + require.NoError(t, exec.QueuePause(ctx, "*"))
Could we use some kind of global constant for this, even if it's just for use inside River's internal code? I was trying to look for how wildcard pause/resume worked, and found this very awkward to search for. It's very short for one, there's no constant so you can't "find references", and "*" is a regex meaningful character of course, so searching for it is a little extra painful as you need to put in the right escapes.
river
github_2023
go
301
riverqueue
brandur
@@ -0,0 +1,162 @@ +package maintenance + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/maintenance/startstop" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/util/timeutil" + "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" +) + +const ( + QueueCleanerIntervalDefault = time.Hour + QueueRetentionPeriodDefault = 24 * time.Hour +) + +// Test-only properties. +type QueueCleanerTestSignals struct { + DeletedBatch rivercommon.TestSignal[struct{}] // notifies when runOnce finishes a pass +} + +func (ts *QueueCleanerTestSignals) Init() { + ts.DeletedBatch.Init() +} + +type QueueCleanerConfig struct { + // Interval is the amount of time to wait between runs of the cleaner. + Interval time.Duration + // QueueRetentionPeriod is the amount of time to keep queues around before + // they're removed. + QueueRetentionPeriod time.Duration +} + +func (c *QueueCleanerConfig) mustValidate() *QueueCleanerConfig { + if c.Interval <= 0 { + panic("QueueCleanerConfig.Interval must be above zero") + } + if c.QueueRetentionPeriod <= 0 { + panic("QueueCleanerConfig.QueueRetentionPeriod must be above zero") + } + + return c +} + +// QueueCleaner periodically removes queues from the river_queue table that have +// not been updated in a while, indicating that they are no longer active. 
+type QueueCleaner struct { + queueMaintainerServiceBase + startstop.BaseStartStop + + // exported for test purposes + Config *QueueCleanerConfig + TestSignals QueueCleanerTestSignals + + batchSize int // configurable for test purposes + exec riverdriver.Executor +} + +func NewQueueCleaner(archetype *baseservice.Archetype, config *QueueCleanerConfig, exec riverdriver.Executor) *QueueCleaner { + return baseservice.Init(archetype, &QueueCleaner{ + Config: (&QueueCleanerConfig{ + Interval: valutil.ValOrDefault(config.Interval, QueueCleanerIntervalDefault), + QueueRetentionPeriod: valutil.ValOrDefault(config.QueueRetentionPeriod, QueueRetentionPeriodDefault), + }).mustValidate(), + + batchSize: BatchSizeDefault, + exec: exec, + }) +} + +func (s *QueueCleaner) Start(ctx context.Context) error { + ctx, shouldStart, stopped := s.StartInit(ctx) + if !shouldStart { + return nil + } + + s.StaggerStart(ctx) + + go func() { + // This defer should come first so that it's last out, thereby avoiding + // races. + defer close(stopped) + + s.Logger.DebugContext(ctx, s.Name+logPrefixRunLoopStarted) + defer s.Logger.DebugContext(ctx, s.Name+logPrefixRunLoopStopped) + + ticker := timeutil.NewTickerWithInitialTick(ctx, s.Config.Interval) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + } + + res, err := s.runOnce(ctx) + if err != nil { + if !errors.Is(err, context.Canceled) { + s.Logger.ErrorContext(ctx, s.Name+": Error cleaning queues", slog.String("error", err.Error())) + } + continue + } + + s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully, + slog.Int("num_queues_deleted", len(res.QueuesDeleted)), + ) + } + }() + + return nil +} + +type queueCleanerRunOnceResult struct { + QueuesDeleted []string +} + +func (s *QueueCleaner) runOnce(ctx context.Context) (*queueCleanerRunOnceResult, error) { + res := &queueCleanerRunOnceResult{QueuesDeleted: make([]string, 0, 10)} + + for { + // Wrapped in a function so that defers run as expected. 
+ queuesDeleted, err := func() ([]string, error) { + ctx, cancelFunc := context.WithTimeout(ctx, 30*time.Second) + defer cancelFunc() + + queuesDeleted, err := s.exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{ + Max: s.batchSize, + UpdatedAtHorizon: time.Now().Add(-s.Config.QueueRetentionPeriod), + })
Am I understanding correctly that when you use the global pause/resume of "*", you can never pause all queues for more than 24 hours? Unlike a normal queue, River clients won't report on the global queue "*", so it won't have its `updated_at` field bumped unless the user pauses/resumes again. So after 24 hours it gets reaped here even if the user never resumed an initial pause.
river
github_2023
go
301
riverqueue
brandur
@@ -1423,6 +1486,101 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdrive return insertParams, nil } +func (c *Client[TTx]) maybeNotifyInsert(ctx context.Context, execTx riverdriver.ExecutorTx, state rivertype.JobState, queue string) error { + if state != rivertype.JobStateAvailable { + return nil + } + return c.maybeNotifyInsertForQueues(ctx, execTx, []string{queue}) +} + +func (c *Client[TTx]) maybeNotifyInsertMany(ctx context.Context, tx riverdriver.ExecutorTx, insertParams []*riverdriver.JobInsertFastParams) error { + queues := make([]string, 0, len(insertParams))
Small one, but thoughts on using a map instead of slice here? It probably doesn't matter for small numbers of insert many params, but for large ones (e.g. 1k or 10k like the benchmark does), you're allocating quite a large slice here unnecessarily since the cardinality of the queues in use is likely to be a tiny number like 1 or 2.
river
github_2023
others
301
riverqueue
brandur
@@ -7,6 +7,32 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +⚠️ Version 0.5.0 contains a new database migration, version 4. This migration is backward compatible with any River installation running the v3 migration. Be sure to run the v4 migration prior to deploying the code from this release. + +### Added + +- Add `pending` job state. This is currently unused, but will be used to build higher level functionality for staging jobs that are not yet ready to run (for some reason other than their scheduled time being in the future). Pending jobs will never be run or deleted and must first be moved to another state by external code. [PR #301](https://github.com/riverqueue/river/pull/301). +- Queue status tracking, pause and resume. [PR #301](https://github.com/riverqueue/river/pull/301). + + A useful operational lever is the ability to pause and resume a queue without shutting down clients. In addition to pause/resume being a feature request from #54, as part of the work on River's UI it's been useful to list out the active queues so that they can be displayed and manipulated.
Sorry for the super nit, but mind linking up 54 since it won't be in the rendered changelog. ```suggestion A useful operational lever is the ability to pause and resume a queue without shutting down clients. In addition to pause/resume being a feature request from [#54](https://github.com/riverqueue/river/pull/54), as part of the work on River's UI it's been useful to list out the active queues so that they can be displayed and manipulated. ```
river
github_2023
others
301
riverqueue
brandur
@@ -7,6 +7,32 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +⚠️ Version 0.5.0 contains a new database migration, version 4. This migration is backward compatible with any River installation running the v3 migration. Be sure to run the v4 migration prior to deploying the code from this release.
We may want to put together a little upgrade guide on the website because people's familiarity with the last time they migrated may have become long atrophied at this point (i.e. how to use the CLI, the fact that the CLI will need to be updated separately to know about the new migration, etc.).
river
github_2023
others
304
riverqueue
brandur
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed + +- JobList/JobListTx now support querying Jobs by a list of Job Kinds and States (breaking change). Also allows for filtering by specific timestamp values. Thank you Jos Kraaijeveld (@thatjos)! 🙏🏻 [PR #236](https://github.com/riverqueue/river/pull/236).
Been using this convention in the other PRs to indicate the breaking change: ```suggestion - **Breaking change:** JobList/JobListTx now support querying Jobs by a list of Job Kinds and States. Also allows for filtering by specific timestamp values. Thank you Jos Kraaijeveld (@thatjos)! 🙏🏻 [PR #236](https://github.com/riverqueue/river/pull/236). ```
river
github_2023
go
304
riverqueue
brandur
@@ -193,6 +190,23 @@ func (p *JobListParams) toDBParams() (*dblist.JobListParams, error) { return nil, errors.New("invalid sort order") } + if p.sortField == JobListOrderByFinalizedAt { + currentNonFinalizedStates := make([]rivertype.JobState, 0, len(p.states)) + for _, state := range p.states { + //nolint:exhaustive + switch state { + case JobStateCancelled, JobStateCompleted, JobStateDiscarded: + default: + currentNonFinalizedStates = append(currentNonFinalizedStates, state) + } + } + // This indicates the user overrode the States list with only non-finalized + // states prior to then requesting FinalizedAt ordering. + if len(currentNonFinalizedStates) == 0 { + return nil, errors.New("cannot order by finalized_at with non-finalized state filters")
Could we put the non-finalized states into the error message with a `%+v` or something? Just helps make the error quicker to resolve.
river
github_2023
others
308
riverqueue
bgentry
@@ -7,12 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.4.0] - 2024-04-20 + ### Changed +Version 0.4.0 comes with a number of small breaking changes which we've decided to release all as part of a single version. More breaking changes in one release is inconvenient, but we've tried to coordinate them in hopes that any future breaking changes will be non-existent or very rare. All changes will get picked up the Go compiler, and each one should be quite easy to fix. The changes don't apply to any of the most common core APIs, and likely many projects won't have to change any code.
Should we put a ⚠️ emoji or anything with this? It’s a rare scenario and we want it to be extra visible.
river
github_2023
go
307
riverqueue
bgentry
@@ -1600,26 +1617,60 @@ func Test_Client_JobList(t *testing.T) { job5 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) job6 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: &now}) - res, err := client.JobList(ctx, NewJobListParams().States(rivertype.JobStateAvailable).After(JobListCursorFromJob(job1, JobListOrderByTime))) + // + // JobListOrderByID
Do you think it's too much nesting to separate these into separate cases? It's definitely a lot of stuff for a single case IMO.
river
github_2023
go
236
riverqueue
brandur
@@ -1360,20 +1360,27 @@ func validateQueueName(queueName string) error { // if err != nil { // // handle error // } -func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) { +func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, *JobListCursor, error) {
Agreed that the code needed to get the next cursor is a little overly verbose at the moment, and furthermore you have to make a `len` check on jobs to make sure you're not indexing a 0-length slice. ``` go jobs, err := client.JobList(ctx, NewJobListParams().After(JobListCursorFromJob(jobs[len(jobs)-1]))) ``` If we were going to change this API though, I think I'd be tempted to make it more like a result struct, which is a bit more conventional, and a little more futureproof. e.g. ``` go type JobListResult struct { Jobs []*rivertype.JobRow NextCursor *JobListCursor ``` Thoughts?
river
github_2023
go
236
riverqueue
brandur
@@ -15,19 +15,36 @@ import ( // JobListCursor is used to specify a starting point for a paginated // job list query. type JobListCursor struct { - id int64 - kind string - queue string - time time.Time + id int64 + kind string + queue string + sortField JobListOrderByField + time time.Time } // JobListCursorFromJob creates a JobListCursor from a JobRow. -func JobListCursorFromJob(job *rivertype.JobRow) *JobListCursor { +func JobListCursorFromJob(job *rivertype.JobRow, sortField JobListOrderByField) *JobListCursor { + time := job.CreatedAt + switch sortField { + case JobListOrderByTime: + time = jobListTimeValue(job) + case JobListOrderByAttemptedAt: + if job.AttemptedAt != nil { + time = *job.AttemptedAt + } + case JobListOrderByFinalizedAt: + if job.FinalizedAt != nil { + time = *job.FinalizedAt + } + case JobListOrderByScheduledAt: + time = job.ScheduledAt + }
OOC, wasn't this `switch` in `jobListTimeValue` before (and isn't `jobListTimeValue` still in this file?)? Is moving it up just to remove a layer of indirection?
river
github_2023
go
236
riverqueue
bgentry
@@ -1351,6 +1351,13 @@ func validateQueueName(queueName string) error { return nil } +// JobListResult is the result of a job list operation. It contains a list of +// jobs and a cursor for fetching the next page of results. +type JobListResult struct { + Jobs []*rivertype.JobRow + Cursor *JobListCursor
Not sure it makes sense to output a single `Cursor` field here. There could be a cursor for _any_ of the entries in the `Jobs` list, so with this name it's not clear if it's the start cursor or end cursor. I doubt there's much use case for the start cursor, and people can always use the `JobListCursorFromJob` function to create one if they want. Maybe let's just name this `LastCursor` to be unambiguous?
river
github_2023
go
236
riverqueue
bgentry
@@ -90,12 +113,20 @@ const ( ) // JobListOrderByField specifies the field to sort by. -type JobListOrderByField int +type JobListOrderByField string const ( // JobListOrderByTime specifies that the sort should be by time. The specific - // time field used will vary by job state. - JobListOrderByTime JobListOrderByField = iota + // time field used will vary by the first specified job state. + JobListOrderByTime JobListOrderByField = "time" + // JobListOrderByCreatedAt specifies that the sort should be by created_at. + JobListOrderByCreatedAt JobListOrderByField = "created_at" + // JobListOrderByScheduledAt specifies that the sort should be by scheduled_at. + JobListOrderByScheduledAt JobListOrderByField = "scheduled_at" + // JobListOrderByAttemptedAt specifies that the sort should be by attempted_at. + JobListOrderByAttemptedAt JobListOrderByField = "attempted_at" + // JobListOrderByFinalizedAt specifies that the sort should be by finalized_at. + JobListOrderByFinalizedAt JobListOrderByField = "finalized_at"
Can we keep these sorted alphabetically?
river
github_2023
go
306
riverqueue
bgentry
@@ -111,17 +111,62 @@ type JobRow struct { Tags []string } -// JobState is the state of a job. Jobs start as `available` or `scheduled`, and -// if all goes well eventually transition to `completed` as they're worked. +// JobState is the state of a job. Jobs start their lifecycle as either +// JobStateAvailable or JobStateScheduled, and if all goes well, transition to +// JobStateCompleted after they're worked. type JobState string const ( + // JobStateAvailable is the state for jobs that are immediately eligible to + // be worked. JobStateAvailable JobState = "available" + + // JobStateCancelled is the state for jobs that have been manually cancelled + // by user request. + // + // Cancelled jobs are reaped by the job cleaner service after a configured + // amount of time (default 24 hours). JobStateCancelled JobState = "cancelled" + + // JobStateCompleted is the state for jobs that have successfully run to + // completion. + // + // Completed jobs are reaped by the job cleaner service after a configured + // amount of time (default 24 hours). JobStateCompleted JobState = "completed" + + // JobStateDiscarded is the state for jobs that have errored enough times + // that they're no longer eligible to be retried. Manual user invention + // is required for them to be tried again. + // + // Discarded jobs are reaped by the job cleaner service after a configured + // amount of time (default 7 days). JobStateDiscarded JobState = "discarded" + + // JobStateRetryable is the state for jobs that have errored, but will be + // retried. + // + // The job scheduler service changes them to JobStateAvailable when they're + // ready to be worked (their `scheduled_at` timestamp comes due). + // + // Jobs that will be retried very soon in the future may be changed to + // JobStateAvailable immediately instead of JobStateRetryable so that they + // don't have to wait for the job scheduler to run. 
JobStateRetryable JobState = "retryable" - JobStateRunning JobState = "running" + + // JobStateRunning are jobs which are actively running. + // + // If a worker dies unexpectedly while jobs were being worked, jobs will be
Might be worth clarifying the behavior here as this is specifically true when the client doesn’t have an opportunity to log the failure (program crash, hardware failure, or job that never returns from work).
river
github_2023
go
297
riverqueue
bgentry
@@ -0,0 +1,81 @@ +package rivertype_test + +import ( + "go/ast" + "go/parser" + "go/token" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/rivertype" +) + +func TestJobStates(t *testing.T) { + t.Parallel() + + jobStates := rivertype.JobStates() + + // One easy check that doesn't require the source file reading below. + require.Contains(t, jobStates, rivertype.JobStateAvailable) + + // Get all job state names from the corresponding source file and make sure + // they're included in JobStates. Helps check that we didn't add a new value + // but forgot to add it to the full list of constant values. + for _, nameAndValue := range allValuesForStringConstantType(t, "river_type.go", "JobState") {
Great idea!
river
github_2023
go
297
riverqueue
bgentry
@@ -0,0 +1,81 @@ +package rivertype_test + +import ( + "go/ast" + "go/parser" + "go/token" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/rivertype" +) + +func TestJobStates(t *testing.T) { + t.Parallel() + + jobStates := rivertype.JobStates() + + // One easy check that doesn't require the source file reading below. + require.Contains(t, jobStates, rivertype.JobStateAvailable) + + // Get all job state names from the corresponding source file and make sure + // they're included in JobStates. Helps check that we didn't add a new value + // but forgot to add it to the full list of constant values. + for _, nameAndValue := range allValuesForStringConstantType(t, "river_type.go", "JobState") { + t.Logf("Checking for job state: %s / %s", nameAndValue.Name, nameAndValue.Value) + require.Contains(t, jobStates, rivertype.JobState(nameAndValue.Value)) + } +} + +// stringConstantNameAndValue is a name and value for a string constant like +// `JobStateAvailable` + `available`. +type stringConstantNameAndValue struct{ Name, Value string } + +// allValuesForStringConstantType reads a Go source file and looks for all +// values for the named string constant. +func allValuesForStringConstantType(t *testing.T, srcFile, typeName string) []stringConstantNameAndValue { + t.Helper() + + fset := token.NewFileSet() + + src, err := os.ReadFile(srcFile) + require.NoError(t, err) + + f, err := parser.ParseFile(fset, srcFile, src, parser.ParseComments) + require.NoError(t, err) + + var valueNames []stringConstantNameAndValue + + for _, decl := range f.Decls { + if gen, ok := decl.(*ast.GenDecl); ok && gen.Tok == token.CONST { + for _, spec := range gen.Specs { + // Always ast.ValueSpec for token.CONST. 
+ valueSpec := spec.(*ast.ValueSpec) //nolint:forcetypeassert + + typeIdent, ok := valueSpec.Type.(*ast.Ident) + if !ok || typeIdent.Name != typeName { + continue + } + + for i, nameIdent := range valueSpec.Names { + // Force type assert because we expect one of our constants + // to be defined as a basic type literal like this. + basicLitExpr := valueSpec.Values[i].(*ast.BasicLit) //nolint:forcetypeassert + + valueNames = append(valueNames, stringConstantNameAndValue{ + Name: nameIdent.Name, + Value: basicLitExpr.Value[1 : len(basicLitExpr.Value)-1], // strip quote on either side + }) + } + } + } + } + + if len(valueNames) < 1 { + require.FailNow(t, "Not values found", "No values found for source file and constant type: %s / %s", srcFile, typeName)
typo? ```suggestion require.FailNow(t, "No values found", "No values found for source file and constant type: %s / %s", srcFile, typeName) ```
river
github_2023
go
281
riverqueue
bgentry
@@ -180,34 +180,47 @@ func runNewTestClient(ctx context.Context, t *testing.T, config *Config) *Client return client } -func Test_Client(t *testing.T) { +func Test_Client_Standard(t *testing.T) {
were you originally renaming this because you planned to have a separate poll-only test block? As of now it looks like you just added a single case in here and don't need to rename this.
river
github_2023
go
281
riverqueue
bgentry
@@ -13,13 +13,19 @@ import ( "github.com/riverqueue/river/internal/notifier" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/internal/util/valutil" "github.com/riverqueue/river/riverdriver" ) const ( - electInterval = 5 * time.Second - electInteralJitter = 1 * time.Second - electIntervalTTLPadding = 10 * time.Second + electIntervalDefault = 5 * time.Second + electIntervalJitterDefault = 1 * time.Second + electIntervalTTLPaddingDefault = 10 * time.Second + + // TODO: for now we only support a single instance per database/schema. + // If we want to provide isolation within a single database/schema, + // we'll need to add a client config for this. + instanceNameDefault = "default"
It goes away with app-level notifications anyway. Will be a small conflict but no worries.
river
github_2023
go
281
riverqueue
bgentry
@@ -210,7 +210,7 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error { fetchLimiter.Call() } // TODO(brandur): Get rid of this retry loop after refactor. - insertSub, err = notifier.ListenRetryLoop(fetchCtx, &p.BaseService, p.config.Notifier, notifier.NotificationTopicInsert, handleInsertNotification) + insertSub, err = p.config.Notifier.Listen(fetchCtx, notifier.NotificationTopicInsert, handleInsertNotification)
comment above still relevant?
river
github_2023
go
281
riverqueue
bgentry
@@ -241,7 +241,7 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error { ) } // TODO(brandur): Get rid of this retry loop after refactor. - jobControlSub, err = notifier.ListenRetryLoop(fetchCtx, &p.BaseService, p.config.Notifier, notifier.NotificationTopicJobControl, handleJobControlNotification) + jobControlSub, err = p.config.Notifier.Listen(fetchCtx, notifier.NotificationTopicJobControl, handleJobControlNotification)
comment above still relevant?
river
github_2023
go
288
riverqueue
bgentry
@@ -0,0 +1,189 @@ +package river + +import ( + "time" + + "github.com/riverqueue/river/internal/dbunique" + "github.com/riverqueue/river/internal/maintenance" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" +) + +// PeriodicSchedule is a schedule for a periodic job. Periodic jobs should +// generally have an interval of at least 1 minute, and never less than one +// second. +type PeriodicSchedule interface { + // Next returns the next time at which the job should be run given the + // current time. + Next(current time.Time) time.Time +} + +// PeriodicJobConstructor is a function that gets called each time the paired +// PeriodicSchedule is triggered. +// +// A constructor must never block. It may return nil to indicate that no job +// should be inserted. +type PeriodicJobConstructor func() (JobArgs, *InsertOpts) + +// PeriodicJob is a configuration for a periodic job. +type PeriodicJob struct { + constructorFunc PeriodicJobConstructor + opts *PeriodicJobOpts + scheduleFunc PeriodicSchedule +} + +// PeriodicJobOpts are options for a periodic job. +type PeriodicJobOpts struct { + // RunOnStart can be used to indicate that a periodic job should insert an + // initial job as a new scheduler is started. This can be used as a hedge + // for jobs with longer scheduled durations that may not get to expiry + // before a new scheduler is elected. + // + // RunOnStart also applies when a new periodic job is added dynamically with + // `PeriodicJobs().Add` or `PeriodicJobs().AddMany`. Jobs added this way + // with RunOnStart set to true are inserted once, then continue with their + // normal run schedule. + RunOnStart bool +} + +// NewPeriodicJob returns a new PeriodicJob given a schedule and a constructor +// function. +// +// The schedule returns a time until the next time the periodic job should run. 
+// The helper PeriodicInterval is available for jobs that should run on simple, +// fixed intervals (e.g. every 15 minutes), and a custom schedule or third party +// cron package can be used for more complex scheduling (see the cron example). +// The constructor function is invoked each time a periodic job's schedule +// elapses, returning job arguments to insert along with optional insertion +// options. +// +// The periodic job scheduler is approximate and doesn't guarantee strong +// durability. It's started by the elected leader in a River cluster, and each +// periodic job is assigned an initial run time when that occurs. New run times +// are scheduled each time a job's target run time is reached and a new job +// inserted. However, each scheduler only retains in-memory state, so anytime a +// process quits or a new leader is elected, the whole process starts over +// without regard for the state of the last scheduler. The RunOnStart option +// can be used as a hedge to make sure that jobs with long run durations are +// guaranteed to occasionally run. +func NewPeriodicJob(scheduleFunc PeriodicSchedule, constructorFunc PeriodicJobConstructor, opts *PeriodicJobOpts) *PeriodicJob { + return &PeriodicJob{ + constructorFunc: constructorFunc, + opts: opts, + scheduleFunc: scheduleFunc, + } +} + +type periodicIntervalSchedule struct { + interval time.Duration +} + +// PeriodicInterval returns a simple PeriodicSchedule that runs at the given +// interval. +func PeriodicInterval(interval time.Duration) PeriodicSchedule { + return &periodicIntervalSchedule{interval} +} + +func (s *periodicIntervalSchedule) Next(t time.Time) time.Time { + return t.Add(s.interval) +} + +// PeriodicJobBundle is a bundle of currently configured periodic jobs. It's +// made accessible through Client, where new periodic jobs can be configured, +// and only ones removed. 
+type PeriodicJobBundle struct { + periodicJobEnqueuer *maintenance.PeriodicJobEnqueuer +} + +func newPeriodicJobBundle(periodicJobEnqueuer *maintenance.PeriodicJobEnqueuer) *PeriodicJobBundle { + return &PeriodicJobBundle{ + periodicJobEnqueuer: periodicJobEnqueuer, + } +} + +// Adds a new periodic job to the client. The job is queued immediately if +// RunOnStart is enabled, and then scheduled normally. +// +// Returns a periodic job handle which can be used to subsequently remove the +// job if desired. +// +// Adding or removing periodic jobs has no effect unless this client is elected +// leader because only the leader enqueues periodic jobs. To make sure that a +// new periodic job is fully enabled or disabled, it should be added or removed +// from _every_ active River client across all processes. +func (b *PeriodicJobBundle) Add(periodicJob *PeriodicJob) rivertype.PeriodicJobHandle { + return b.periodicJobEnqueuer.Add(b.toInternal(periodicJob)) +} + +// AddMany adds many new periodic jobs to the client. The jobs are queued +// immediately if their RunOnStart is enabled, and then scheduled normally. +// +// Returns a periodic job handle which can be used to subsequently remove the +// job if desired. +// +// Adding or removing periodic jobs has no effect unless this client is elected +// leader because only the leader enqueues periodic jobs. To make sure that a +// new periodic job is fully enabled or disabled, it should be added or removed +// from _every_ active River client across all processes. +func (b *PeriodicJobBundle) AddMany(periodicJobs []*PeriodicJob) []rivertype.PeriodicJobHandle { + return b.periodicJobEnqueuer.AddMany(sliceutil.Map(periodicJobs, b.toInternal)) +} + +// Clear clears all periodic jobs, cancelling all scheduled runs. +// +// Adding or removing periodic jobs has no effect unless this client is elected +// leader because only the leader enqueues periodic jobs. 
To make sure that a +// new periodic job is fully enabled or disabled, it should be added or removed +// from _every_ active River client across all processes. +func (b *PeriodicJobBundle) Clear() { + b.periodicJobEnqueuer.Clear() +} + +// Remove removes a periodic job, cancelling all scheduled runs. +// +// Requires the use of the periodic job handle that was returned when the job +// was added. +// +// Adding or removing periodic jobs has no effect unless this client is elected +// leader because only the leader enqueues periodic jobs. To make sure that a +// new periodic job is fully enabled or disabled, it should be added or removed +// from _every_ active River client across all processes. +func (b *PeriodicJobBundle) Remove(periodicJobHandle rivertype.PeriodicJobHandle) { + b.periodicJobEnqueuer.Remove(periodicJobHandle) +} + +// Remove removes many periodic jobs, cancelling all scheduled runs. +// +// Requires the use of the periodic job handles that were returned when the jobs +// were added. +// +// Adding or removing periodic jobs has no effect unless this client is elected +// leader because only the leader enqueues periodic jobs. To make sure that a +// new periodic job is fully enabled or disabled, it should be added or removed +// from _every_ active River client across all processes. +func (b *PeriodicJobBundle) RemoveManyAll(periodicJobHandles []rivertype.PeriodicJobHandle) {
Any particular reason for the `All` suffix? I feel like `RemoveMany` would suffice here.
river
github_2023
go
288
riverqueue
bgentry
@@ -264,39 +259,226 @@ func TestPeriodicJobEnqueuer(t *testing.T) { svc, _ := setup(t) - svc.periodicJobs = []*PeriodicJob{ - {ScheduleFunc: periodicIntervalSchedule(time.Microsecond), ConstructorFunc: jobConstructorFunc("periodic_job_1us", false)}, - } + svc.Add(&PeriodicJob{ScheduleFunc: periodicIntervalSchedule(time.Microsecond), ConstructorFunc: jobConstructorFunc("periodic_job_1us", false)}) // make a longer list of jobs so the loop has to run for longer for i := 1; i < 100; i++ { - svc.periodicJobs = append(svc.periodicJobs, - &PeriodicJob{ - ScheduleFunc: periodicIntervalSchedule(time.Duration(i) * time.Hour), - ConstructorFunc: jobConstructorFunc(fmt.Sprintf("periodic_job_%dh", i), false), - }, - ) + svc.Add(&PeriodicJob{ + ScheduleFunc: periodicIntervalSchedule(time.Duration(i) * time.Hour), + ConstructorFunc: jobConstructorFunc(fmt.Sprintf("periodic_job_%dh", i), false), + }) } - require.NoError(t, svc.Start(ctx)) + startService(t, svc) svc.TestSignals.EnteredLoop.WaitOrTimeout() - periodicJobs := make([]*PeriodicJob, len(svc.periodicJobs)) - copy(periodicJobs, svc.periodicJobs) - for i := 0; i < 100; i++ { svc.TestSignals.InsertedJobs.WaitOrTimeout() } }) + t.Run("ConfigurableViaConstructor", func(t *testing.T) { + t.Parallel() + + _, bundle := setup(t) + + svc := NewPeriodicJobEnqueuer( + riverinternaltest.BaseServiceArchetype(t), + &PeriodicJobEnqueuerConfig{ + PeriodicJobs: []*PeriodicJob{ + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false), RunOnStart: true}, + {ScheduleFunc: periodicIntervalSchedule(1500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_1500ms", false), RunOnStart: true}, + }, + }, bundle.exec) + svc.StaggerStartupDisable(true) + svc.TestSignals.Init() + + startService(t, svc) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_1500ms", 
1) + }) + + t.Run("AddAfterStart", func(t *testing.T) { + t.Parallel() + + svc, bundle := setup(t) + + startService(t, svc) + + svc.Add( + &PeriodicJob{ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, + ) + svc.Add( + &PeriodicJob{ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_start", false), RunOnStart: true}, + ) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 2) + }) + + t.Run("AddManyAfterStart", func(t *testing.T) { + t.Parallel() + + svc, bundle := setup(t) + + startService(t, svc) + + svc.AddMany([]*PeriodicJob{ + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_start", false), RunOnStart: true}, + }) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 2) + }) + + t.Run("ClearAfterStart", func(t *testing.T) { + t.Parallel() + + svc, bundle := setup(t) + + startService(t, svc) + + handles := svc.AddMany([]*PeriodicJob{ + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: 
jobConstructorFunc("periodic_job_500ms_start", false), RunOnStart: true}, + }) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + + svc.Clear() + + handleAfterClear := svc.Add( + &PeriodicJob{ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_new", false)}, + ) + + // Handles are not reused. + require.NotEqual(t, handles[0], handleAfterClear) + require.NotEqual(t, handles[1], handleAfterClear) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) // same as before + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) // same as before + requireNJobs(t, bundle.exec, "periodic_job_500ms_new", 1) // new row + }) + + t.Run("RemoveAfterStart", func(t *testing.T) { + t.Parallel() + + svc, bundle := setup(t) + + startService(t, svc) + + handles := svc.AddMany([]*PeriodicJob{ + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_start", false), RunOnStart: true}, + }) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + + svc.Remove(handles[1]) + + // Each is one because the second job was removed before it was worked + // again. 
+ svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + }) + + t.Run("RemoveManyAfterStart", func(t *testing.T) { + t.Parallel() + + svc, bundle := setup(t) + + startService(t, svc) + + handles := svc.AddMany([]*PeriodicJob{ + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_other", false)}, + {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms_start", false), RunOnStart: true}, + }) + + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_other", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + + svc.RemoveMany([]rivertype.PeriodicJobHandle{handles[1], handles[2]}) + + // Each is one because the second job was removed before it was worked + // again. + svc.TestSignals.InsertedJobs.WaitOrTimeout() + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms_other", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1) + }) + + // To suss out any race conditions in the add/remove/clear/run loop code, + // and interactions between them. 
+ t.Run("AddRemoveStress", func(t *testing.T) { + t.Parallel() + + svc, _ := setup(t) + + var wg sync.WaitGroup + + randomSleep := func() time.Duration { + return time.Duration(randutil.IntBetween(svc.Rand, 1, 5)) * time.Millisecond + } + + for i := 0; i < 10; i++ { + wg.Add(1) + + jobBaseName := fmt.Sprintf("periodic_job_1ms_%02d", i) + + go func() { + defer wg.Done() + + for j := 0; j < 50; j++ { + handle := svc.Add(&PeriodicJob{ScheduleFunc: periodicIntervalSchedule(time.Millisecond), ConstructorFunc: jobConstructorFunc(jobBaseName, false)}) + randomSleep() + + svc.Add(&PeriodicJob{ScheduleFunc: periodicIntervalSchedule(time.Millisecond), ConstructorFunc: jobConstructorFunc(jobBaseName+"_second", false)}) + randomSleep() + + svc.Remove(handle) + randomSleep() + + svc.Clear() + randomSleep() + } + }() + } + + wg.Wait() + }) + t.Run("NoJobsConfigured", func(t *testing.T) { t.Parallel() svc, _ := setup(t) - svc.periodicJobs = []*PeriodicJob{} + // TODO: How about just don't configure the jobs in the first place.
missed TODO?
river
github_2023
go
284
riverqueue
bgentry
@@ -168,7 +168,11 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { continue } - periodicJob.nextRunAt = periodicJob.ScheduleFunc(now) + // Although we may have inserted a new job a little + // assertively due to the margin applied above, try to stay
```suggestion // preemptively due to the margin applied above, try to stay ```
river
github_2023
go
280
riverqueue
bgentry
@@ -44,12 +44,24 @@ func TestReindexer(t *testing.T) { svc := NewReindexer(archetype, &ReindexerConfig{ ScheduleFunc: fromNow(500 * time.Millisecond), }, bundle.exec) + svc.StaggerStartupDisable(true) svc.TestSignals.Init() t.Cleanup(svc.Stop) return svc, bundle } + runImmediatelyThanOnceAnHour := func() func(time.Time) time.Time {
```suggestion runImmediatelyThenOnceAnHour := func() func(time.Time) time.Time { ```
river
github_2023
go
280
riverqueue
bgentry
@@ -44,12 +44,24 @@ func TestReindexer(t *testing.T) { svc := NewReindexer(archetype, &ReindexerConfig{ ScheduleFunc: fromNow(500 * time.Millisecond), }, bundle.exec) + svc.StaggerStartupDisable(true) svc.TestSignals.Init() t.Cleanup(svc.Stop) return svc, bundle } + runImmediatelyThanOnceAnHour := func() func(time.Time) time.Time { + alreadyRan := false + return func(t time.Time) time.Time { + if alreadyRan { + return t.Add(time.Hour) + } + alreadyRan = true + return t.Add(time.Millisecond) + }
does `TickerWithInitialTick` not work here?
river
github_2023
go
280
riverqueue
bgentry
@@ -48,14 +41,6 @@ type Archetype struct { TimeNowUTC func() time.Time } -// WithSleepDisabled disables sleep in services that are using `BaseService`'s -// `CancellableSleep` functions and returns the archetype for convenience. Use -// of this is only appropriate in tests. -func (a *Archetype) WithSleepDisabled() *Archetype {
It does seem like a bit of a step back to spread out the problematic behavior into more places, but whatever you think is the best option here.