repo_name stringlengths 1 62 | dataset stringclasses 1 value | lang stringclasses 11 values | pr_id int64 1 20.1k | owner stringlengths 2 34 | reviewer stringlengths 2 39 | diff_hunk stringlengths 15 262k | code_review_comment stringlengths 1 99.6k |
|---|---|---|---|---|---|---|---|
river | github_2023 | go | 266 | riverqueue | bgentry | @@ -1,26 +1,403 @@
+// testdbman is a command-line tool for managing the test databases used by
+// parallel tests and the sample applications.
package main
import (
+ "context"
+ "errors"
+ "flag"
"fmt"
+ "io"
"os"
+ "runtime"
+ "slices"
+ "strings"
+ "time"
- "github.com/spf13/cobra"
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgxpool"
+
+ "github.com/riverqueue/river/internal/util/maputil"
+ "github.com/riverqueue/river/riverdriver/riverpgxv5"
+ "github.com/riverqueue/river/rivermigrate"
)
-// testdbman is a command-line tool for managing the test databases used by
-// parallel tests and the sample applications.
+func main() {
+ commandBundle := NewCommandBundle(
+ "testdbman",
+ "testdbman manages test databases",
+ `
+A small program to create and manage test databases. River currently requires a
+number of different of test databases loaded with its schema for it to be able
+to run the full test suite in parallel.
-var rootCmd = &cobra.Command{ //nolint:gochecknoglobals
- Use: "testdbman",
- Short: "testdbman manages test databases",
- Run: func(cmd *cobra.Command, args []string) {
- _ = cmd.Usage()
- },
-}
+Run "testdbman create" to raise all required test databases and prepare for a
+test run.
+ `,
+ )
-func main() {
- if err := rootCmd.Execute(); err != nil {
- fmt.Printf("failed: %s\n", err)
+ // create
+ {
+ commandBundle.AddCommand(
+ "create",
+ "Create test databases",
+ `
+Creates the test databases used by parallel tests and the sample applications.
+Each is migrated with River's current schema.
+
+The sample application DB is named river_testdb, while the DBs for parallel
+tests are named river_testdb_0, river_testdb_1, etc. up to the larger of 4 or
+runtime.NumCPU() (a choice that comes from pgx's default connection pool size).
+`,
+ createTestDatabases,
+ )
+ }
+
+ // drop
+ {
+ commandBundle.AddCommand(
+ "drop",
+ "Drop test databases",
+ `
+Drops all test databases. Any test database matching the base name
+(river_testdb) or the base name with an underscore followed by any other token
+(river_testdb_example, river_testdb_0, river_testdb_1, etc.) will be dropped.
+`,
+ dropTestDatabases,
+ )
+ }
+
+ // reset
+ {
+ commandBundle.AddCommand(
+ "reset",
+ "Drop and recreate test databases",
+ `
+Reset the test databases, dropping the existing database(s) if they exist, and
+recreating them with the most up to date schema. Equivalent to running "drop"
+followed by "create".
+`,
+ resetTestDatabases,
+ )
+ }
+
+ ctx := context.Background()
+
+ if err := commandBundle.Exec(ctx, os.Args); err != nil {
+ fmt.Fprintf(os.Stderr, "failed: "+err.Error()+"\n")
os.Exit(1)
}
}
+
+//
+// Commands
+//
+
+const managementDatabaseURL = "postgres:///postgres"
+
+//
+// Helpers
+//
+
+func createTestDatabases(ctx context.Context, out io.Writer) error {
+ mgmtConn, err := pgx.Connect(ctx, managementDatabaseURL)
+ if err != nil {
+ return fmt.Errorf("error opening management connection: %w", err)
+ }
+ defer mgmtConn.Close(ctx)
+
+ createDBAndMigrate := func(dbName string) error {
+ if _, err := mgmtConn.Exec(ctx, "CREATE DATABASE "+dbName); err != nil {
+ return fmt.Errorf("error crating database %q: %w", dbName, err)
+ }
+ fmt.Fprintf(out, "created: %-20s", dbName)
+
+ // Defer printing a newline, which will be either added to the end of a
+ // successful invocation of this command (after the string "[and
+ // migrated]" has been printed to the current line), or printed before
+ // returning an error so that in either case output looks right.
+ defer fmt.Fprintf(out, "\n")
+
+ dbURL := "postgres:///" + dbName
+
+ dbPool, err := pgxpool.New(ctx, dbURL)
+ if err != nil {
+ return fmt.Errorf("error creating connection pool to %q: %w", dbURL, err)
+ }
+ defer dbPool.Close()
+
+ migrator := rivermigrate.New(riverpgxv5.New(dbPool), nil)
+ if _, err = migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{}); err != nil {
+ return err
+ }
+ fmt.Fprintf(out, " [and migrated]")
+
+ return nil
+ }
+
+ dbNames := generateTestDBNames(max(4, runtime.NumCPU()))
+
+ for _, dbName := range dbNames {
+ if err := createDBAndMigrate(dbName); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func generateTestDBNames(numDBs int) []string {
+ dbNames := []string{
+ "river_testdb",
+ "river_testdb_example",
+ }
+
+ // This is the same default as pgxpool's maximum number of connections
+ // when not specified -- either 4 or the number of CPUs, whichever is
+ // greater. If changing this number, also change the similar value in
+ // `riverinternaltest` where it's duplicated. | This comment is probably better placed above line 138 where the `numDBs` is calculated |
river | github_2023 | go | 272 | riverqueue | bgentry | @@ -575,146 +571,154 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client
// jobs, but will also cancel the context for any currently-running jobs. If
// using StopAndCancel, there's no need to also call Stop.
func (c *Client[TTx]) Start(ctx context.Context) error {
- if !c.config.willExecuteJobs() {
- return errors.New("client Queues and Workers must be configured for a client to start working")
- }
- if c.config.Workers != nil && len(c.config.Workers.workersMap) < 1 {
- return errors.New("at least one Worker must be added to the Workers bundle")
+ fetchCtx, shouldStart, stopped := c.baseStartStop.StartInit(ctx)
+ if !shouldStart {
+ return nil
}
- // Before doing anything else, make an initial connection to the database to
- // verify that it appears healthy. Many of the subcomponents below start up
- // in a goroutine and in case of initial failure, only produce a log line,
- // so even in the case of a fundamental failure like the database not being
- // available, the client appears to have started even though it's completely
- // non-functional. Here we try to make an initial assessment of health and
- // return quickly in case of an apparent problem.
- _, err := c.driver.GetExecutor().Exec(ctx, "SELECT 1")
- if err != nil {
- return fmt.Errorf("error making initial connection to database: %w", err)
- }
+ c.stopped = stopped
- // In case of error, stop any services that might have started. This
- // is safe because even services that were never started will still
- // tolerate being stopped.
- stopServicesOnError := func() {
- startstop.StopAllParallel(c.services)
- c.monitor.Stop()
+ stopProducers := func() {
+ startstop.StopAllParallel(sliceutil.Map(
+ maputil.Values(c.producersByQueueName),
+ func(p *producer) startstop.Service { return p }),
+ )
}
- // Monitor should be the first subprocess to start, and the last to stop.
- // It's not part of the waitgroup because we need to wait for everything else
- // to shut down prior to closing the monitor.
- //
- // Unlike other services, it's given a background context so that it doesn't
- // cancel on normal stops.
- if err := c.monitor.Start(context.Background()); err != nil { //nolint:contextcheck
- return err
- }
+ var workCtx context.Context
- if c.completer != nil {
- // The completer is part of the services list below, but although it can
- // stop gracefully along with all the other services, it needs to be
- // started with a context that's _not_ fetchCtx. This ensures that even
- // when fetch is cancelled on shutdown, the completer is still given a
- // separate opportunity to start stopping only after the producers have
- // finished up and returned.
- if err := c.completer.Start(ctx); err != nil {
- stopServicesOnError()
- return err
+ // Startup code. Wrapped in a closure so it doesn't have to remember to
+ // close the stopped channel if returning with an error.
+ if err := func() error {
+ if !c.config.willExecuteJobs() {
+ return errors.New("client Queues and Workers must be configured for a client to start working")
+ }
+ if c.config.Workers != nil && len(c.config.Workers.workersMap) < 1 {
+ return errors.New("at least one Worker must be added to the Workers bundle")
}
- // Receives job complete notifications from the completer and
- // distributes them to any subscriptions.
- c.completer.Subscribe(c.distributeJobCompleterCallback)
- }
-
- // We use separate contexts for fetching and working to allow for a graceful
- // stop. However, both inherit from the provided context so if it is
- // cancelled a more aggressive stop will be initiated.
- fetchCtx, fetchWorkCancel := context.WithCancelCause(ctx)
- c.fetchWorkCancel = fetchWorkCancel
- workCtx, workCancel := context.WithCancelCause(withClient[TTx](ctx, c))
- c.workCancel = workCancel
-
- for _, service := range c.services {
- // TODO(brandur): Reevaluate the use of fetchNewWorkCtx here. It's
- // currently necessary to speed up shutdown so that all services start
- // shutting down before having to wait for the producers to finish, but
- // as stopping becomes more normalized (hopefully by making the client
- // itself a start/stop service), we can likely accomplish that in a
- // cleaner way.
- if err := service.Start(fetchCtx); err != nil {
- stopServicesOnError()
- if errors.Is(context.Cause(ctx), rivercommon.ErrShutdown) {
- return nil
- }
- return err
+ // Before doing anything else, make an initial connection to the database to
+ // verify that it appears healthy. Many of the subcomponents below start up
+ // in a goroutine and in case of initial failure, only produce a log line,
+ // so even in the case of a fundamental failure like the database not being
+ // available, the client appears to have started even though it's completely
+ // non-functional. Here we try to make an initial assessment of health and
+ // return quickly in case of an apparent problem.
+ _, err := c.driver.GetExecutor().Exec(fetchCtx, "SELECT 1")
+ if err != nil {
+ return fmt.Errorf("error making initial connection to database: %w", err)
}
- }
- for _, producer := range c.producersByQueueName {
- producer := producer
+ // In case of error, stop any services that might have started. This
+ // is safe because even services that were never started will still
+ // tolerate being stopped.
+ stopServicesOnError := func() {
+ startstop.StopAllParallel(c.services)
+ c.monitor.Stop()
+ }
- if err := producer.StartWorkContext(fetchCtx, workCtx); err != nil {
+ // Monitor should be the first subprocess to start, and the last to stop.
+ // It's not part of the waitgroup because we need to wait for everything else
+ // to shut down prior to closing the monitor.
+ //
+ // Unlike other services, it's given a background context so that it doesn't
+ // cancel on normal stops.
+ if err := c.monitor.Start(context.Background()); err != nil { //nolint:contextcheck
return err
}
- }
- go func() {
- <-fetchCtx.Done()
- c.signalStopComplete(ctx)
- }()
+ if c.completer != nil {
+ // The completer is part of the services list below, but although it can
+ // stop gracefully along with all the other services, it needs to be
+ // started with a context that's _not_ fetchCtx. This ensures that even
+ // when fetch is cancelled on shutdown, the completer is still given a
+ // separate opportunity to start stopping only after the producers have
+ // finished up and returned.
+ if err := c.completer.Start(ctx); err != nil {
+ stopServicesOnError()
+ return err
+ }
- c.baseService.Logger.InfoContext(workCtx, "River client successfully started", slog.String("client_id", c.ID()))
+ // Receives job complete notifications from the completer and
+ // distributes them to any subscriptions.
+ c.completer.Subscribe(c.distributeJobCompleterCallback)
+ }
- return nil
-}
+ // We use separate contexts for fetching and working to allow for a graceful
+ // stop. Both inherit from the provided context, so if it's cancelled, a
+ // more aggressive stop will be initiated.
+ workCtx, c.workCancel = context.WithCancelCause(withClient[TTx](ctx, c))
-// ctx is used only for logging, not for lifecycle.
-func (c *Client[TTx]) signalStopComplete(ctx context.Context) {
- for _, producer := range c.producersByQueueName {
- producer.Stop()
- }
+ for _, service := range c.services {
+ if err := service.Start(fetchCtx); err != nil {
+ stopServicesOnError()
+ return err
+ }
+ }
- // Stop all mainline services where stop order isn't important.
- startstop.StopAllParallel(append(
- // This list of services contains the completer, which should always
- // stop after the producers so that any remaining work that was enqueued
- // will have a chance to have its state completed as it finishes.
- //
- // TODO: there's a risk here that the completer is stuck on a job that
- // won't complete. We probably need a timeout or way to move on in those
- // cases.
- c.services,
+ for _, producer := range c.producersByQueueName {
+ producer := producer
- // Will only be started if this client was leader, but can tolerate a stop
- // without having been started.
- c.queueMaintainer,
- ))
+ if err := producer.StartWorkContext(fetchCtx, workCtx); err != nil {
+ stopProducers()
+ stopServicesOnError()
+ return err
+ }
+ }
- c.baseService.Logger.InfoContext(ctx, c.baseService.Name+": All services stopped")
+ return nil
+ }(); err != nil {
+ defer close(stopped)
+ if errors.Is(context.Cause(fetchCtx), startstop.ErrStop) {
+ return rivercommon.ErrShutdown
+ }
+ return err
+ }
- // As of now, the Adapter doesn't have any async behavior, so we don't need
- // to wait for it to stop. Once all executors and completers are done, we
- // know that nothing else is happening that's from us.
+ go func() {
+ defer close(stopped) | It might be more readable and easily debuggable if some of these closures were split into internal client methods. Thinking in particular of the potential for a trace to have anonymous goroutines rather than named methods.
Not a huge concern but it might be worth trying to shuffle some of this around to improve the readability of `Start()`? Particularly here and in the large startup closure above. |
river | github_2023 | go | 272 | riverqueue | bgentry | @@ -2564,8 +2574,8 @@ func Test_Client_InsertTriggersImmediateWork(t *testing.T) {
ctx := context.Background()
require := require.New(t)
- ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
- t.Cleanup(cancel)
+ // ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ // t.Cleanup(cancel) | Mistakenly commented? |
river | github_2023 | go | 272 | riverqueue | bgentry | @@ -79,27 +100,63 @@ func (s *BaseStartStop) StartInit(ctx context.Context) (context.Context, bool, c
s.started = true
s.stopped = make(chan struct{})
- ctx, s.cancelFunc = context.WithCancel(ctx)
+ ctx, s.cancelFunc = context.WithCancelCause(ctx)
return ctx, true, s.stopped
}
// Stop is an automatically provided implementation for the maintenance Service
// interface's Stop.
func (s *BaseStartStop) Stop() {
+ shouldStop, stopped, finalizeStop := s.StopInit()
+ if !shouldStop {
+ return
+ }
+
+ <-stopped
+ finalizeStop(true)
+}
+
+// StopInit provides a way to build a more customized Stop implementation. It
+// should be avoided unless there'a an exception reason not to because Stop | ```suggestion
// should be avoided unless there's an exceptional reason not to because Stop
``` |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -69,7 +69,7 @@ func jobStatisticsFromInternal(stats *jobstats.JobStatistics) *JobStatistics {
// The maximum size of the subscribe channel. Events that would overflow it will
// be dropped.
-const subscribeChanSize = 100
+const subscribeChanSize = 50_000 | I was wondering earlier if this would be an issue with the way you were now measuring. It’s unlikely someone would hit this in practice but I suppose it’s still a bit of a wart with the current fixed size buffered channel for subscriptions. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +64,290 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
+
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C():
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
}
- }()
+ }
+ }()
+
+ return nil
+}
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Use a single `finalized_at` value for the whole batch. Not the greatest
+ // thing maybe, but makes things much easier.
+ var sampleFinalizedAt time.Time
+ for _, setState := range setStateBatch {
+ sampleFinalizedAt = *setState.Params.FinalizedAt
+ break
+ } | Unfortunately I think this change is a pretty substantial degradation. With this PR we're now debouncing up to 100ms, meaning jobs could have up to 100ms of randomly distributed padding added to their `finalized_at`, making it so `attempted_at - finalized_at` is no longer representative of their duration (at least not for reasonably fast jobs).
I'm sure it will be a bit less efficient, but I think we'd be better off keeping a `finalized_at` for each job as reported at the time it finished executing. Then your `JobSetCompleteIfRunningManyParams` can be a list of two arrays: the IDs, and the corresponding timestamps for each.
I also wanted to take this moment to raise another question. As I've progressed on the UI, I don't really love the limitations of trying to deduce job statistics from the handful of available timestamps (`inserted_at`, `attempted_at`, `finalized_at`, and those in `errors`). It's too complicated and leaves odd undefined gaps. I can paper over some of those at the UI level, but it has held me back from some stuff I'd like to ship like a more reliable execution timeline.
I've started wondering whether it'd be better if we tracked some sort of job stats object that we could keep as an array on the `JobRow` and as a jsonb array in Postgres. Possibly it could be merged together with `errors` so there's a single large jsonb ala `executions` or `attempts`. It would allow for some nice built-in stats even without hooking up an external monitoring system to consume all job stats in real time. I'm not sure how this will impact performance in the happy path though, because you'd then always be setting/updating a jsonb TOAST value as well as setting a timestamp.
Thoughts on both these fronts? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -656,18 +677,21 @@ func (c *Client[TTx]) signalStopComplete(ctx context.Context) {
producer.Stop()
}
- // Stop all mainline services where stop order isn't important. Contains the
- // elector and notifier, amongst others.
- startstop.StopAllParallel(c.services)
-
- // Once the producers have all finished, we know that completers have at least
- // enqueued any remaining work. Wait for the completer to finish.
- //
- // TODO: there's a risk here that the completer is stuck on a job that won't
- // complete. We probably need a timeout or way to move on in those cases.
- c.completer.Wait()
+ // Stop all mainline services where stop order isn't important.
+ startstop.StopAllParallel(append(
+ // This list of services contains the completer, which should always
+ // stop after the producers so that any remaining work that was enqueued
+ // will have a chance to have its state completed as it finishes.
+ //
+ // TODO: there's a risk here that the completer is stuck on a job that
+ // won't complete. We probably need a timeout or way to move on in those
+ // cases.
+ c.services,
- c.queueMaintainer.Stop()
+ // Will only be started if this client was leader, but can tolerate a stop
+ // without having been started.
+ c.queueMaintainer, | oh yeah, there's probably no reason for this not to stop earlier. Well, maybe it should wait until after the elector has given up any leadership it may hold? 🤔 I guess it's fine if not, worst case they skip a cycle that's about to happen but somebody else will pick it up, right? Is that true of all maintenance services? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -762,7 +785,41 @@ func (c *Client[TTx]) Stopped() <-chan struct{} {
// versions. If new event kinds are added, callers will have to explicitly add
// them to their requested list and ensure they can be handled correctly.
func (c *Client[TTx]) Subscribe(kinds ...EventKind) (<-chan *Event, func()) {
- for _, kind := range kinds {
+ return c.SubscribeConfig(&SubscribeConfig{Kinds: kinds})
+}
+
+// The default maximum size of the subscribe channel. Events that would overflow
+// it will be dropped.
+const subscribeChanSizeDefault = 1_000
+
+// SubscribeConfig is more thorough subscription configuration used for
+// Client.SubscribeConfig.
+type SubscribeConfig struct {
+ // ChanSize is the size of the buffered channel that will be created for the
+ // subscription. Incoming events that overall this number because a listener
+ // isn't reading from the channel in a timely manner will be dropped.
+ //
+ // Defaults to 1000.
+ ChanSize int
+
+ // Kinds are the kinds of events that the subscription will receive.
+ // Requiring that kinds are specified explicitly allows for forward
+ // compatibility in case new kinds of events are added in future versions.
+ // If new event kinds are added, callers will have to explicitly add them to
+ // their requested list and esnure they can be handled correctly.
+ Kinds []EventKind
+}
+
+// Special internal variant that lets us inject an overridden size.
+func (c *Client[TTx]) SubscribeConfig(config *SubscribeConfig) (<-chan *Event, func()) {
+ if config.ChanSize < 0 {
+ panic("SubscribeConfig.ChanSize must be greater or equal to 1")
+ } | I guess we don't care about max? if you want to be dumb go right ahead...? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -132,13 +145,30 @@ func (b *Benchmarker[TTx]) Run(ctx context.Context) error {
case <-shutdown:
return
- case <-subscribeChan:
- numJobsLeft.Add(-1)
- numJobsWorked := numJobsWorked.Add(1)
+ case event := <-subscribeChan:
+ if event == nil { // Closed channel.
+ b.logger.InfoContext(ctx, "Subscription channel closed")
+ return
+ }
+
+ switch {
+ case event.Kind == river.EventKindJobCancelled:
+ fallthrough
+
+ case event.Kind == river.EventKindJobCompleted:
+ fallthrough
+
+ // Only count a job as complete if it failed for the last time.
+ // We don't expect benchmark jobs to ever fail, so this extra
+ // attention to detail is here, but shouldn't be needed. | should we count and log these for debug purposes? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ }) | Hmm, is this an appropriate context to be passing down and respecting the deadline on? Isn't this `ctx` the job's execution context, which may be cancelled (including by a remote cancellation attempt)?
I feel like the only place it would make sense to be used is in the inline completer. The async one should probably be started with a Client-derived context that is in no way tied to the job's execution. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil } | This context is unused, but do you think it should maybe in some way be a parent to any completion attempts in here? Actually, I'm second-guessing myself in realtime. Even in the case of an aggressive shutdown, we still don't want to shut down until the completer has finished its work. So IMO these completion attempts should not inherit any deadlines whatsoever from parent contexts. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion | ```suggestion
WaitingAt time.Time // when job was submitted for completion
``` |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C():
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
}
- }()
+ }
+ }()
+
+ return nil
+}
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Insert a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ insertSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) {
+ return withRetries(ctx, &c.BaseService, func(ctx context.Context) ([]*rivertype.JobRow, error) {
+ return c.exec.JobSetCompleteIfRunningMany(ctx, &riverdriver.JobSetCompleteIfRunningManyParams{
+ ID: batchID,
+ FinalizedAt: batchFinalizedAt,
+ })
+ })
+ }
+
+ // This could be written more simply using multiple `sliceutil.Map`s, but
+ // it's done this way to allocate as few new slices as necessary.
+ mapIDsAndFinalizedAt := func(setStateBatch map[int64]*batchCompleterSetState) ([]int64, []time.Time) {
+ var (
+ batchIDs = make([]int64, len(setStateBatch))
+ batchFinalizedAt = make([]time.Time, len(setStateBatch))
+ i int
+ )
+ for _, setState := range setStateBatch {
+ batchIDs[i] = setState.Params.ID
+ batchFinalizedAt[i] = *setState.Params.FinalizedAt
+ i++
+ }
+ return batchIDs, batchFinalizedAt
+ }
+
+ // Tease apart enormous batches into sub-batches.
+ //
+ // All the code below is concerned with doing that, with a fast loop that
+ // doesn't allocate any additional memory in case the entire batch is
+ // smaller than the sub-batch maximum size (which will be the common case).
+ const oneOperationMax = 2_000 | Is it preferable to wait until the end of the timer window once we have enough jobs to fill a batch, or should we dispatch it ~immediately when we hit this threshold and reset the timer?
Doesn't have to be part of this PR, but might be worth refactoring if you think it could be straightforward to do so. IMO if we already have enough jobs to fill a batch, may as well get rid of them ASAP and avoid just stacking up more jobs until we hit the timer.
Maybe it ends up being harder to manage multiple in-flight batches and isn't worth it as a result. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C(): | Doesn't this `DebouncedChan` type fire on the leading edge? So that means we will always submit a batch of 1 before then waiting to collect additional jobs? Doesn't seem ideal if so. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C():
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
}
- }()
+ }
+ }()
+
+ return nil
+}
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Insert a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ insertSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) { | Slightly misnamed IMO because it's not _inserting_ anything. Maybe submit, update, or handle sub batch? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C():
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
}
- }()
+ }
+ }()
+
+ return nil
+}
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Insert a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ insertSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) {
+ return withRetries(ctx, &c.BaseService, func(ctx context.Context) ([]*rivertype.JobRow, error) {
+ return c.exec.JobSetCompleteIfRunningMany(ctx, &riverdriver.JobSetCompleteIfRunningManyParams{
+ ID: batchID,
+ FinalizedAt: batchFinalizedAt,
+ })
+ })
+ }
+
+ // This could be written more simply using multiple `sliceutil.Map`s, but
+ // it's done this way to allocate as few new slices as necessary.
+ mapIDsAndFinalizedAt := func(setStateBatch map[int64]*batchCompleterSetState) ([]int64, []time.Time) {
+ var (
+ batchIDs = make([]int64, len(setStateBatch))
+ batchFinalizedAt = make([]time.Time, len(setStateBatch))
+ i int
+ )
+ for _, setState := range setStateBatch {
+ batchIDs[i] = setState.Params.ID
+ batchFinalizedAt[i] = *setState.Params.FinalizedAt
+ i++
+ }
+ return batchIDs, batchFinalizedAt
+ }
+
+ // Tease apart enormous batches into sub-batches.
+ //
+ // All the code below is concerned with doing that, with a fast loop that
+ // doesn't allocate any additional memory in case the entire batch is
+ // smaller than the sub-batch maximum size (which will be the common case).
+ const oneOperationMax = 2_000
+
+ var (
+ batchID, batchFinalizedAt = mapIDsAndFinalizedAt(setStateBatch)
+ jobRows []*rivertype.JobRow
+ )
+ if len(setStateBatch) > oneOperationMax {
+ jobRows = make([]*rivertype.JobRow, 0, len(setStateBatch))
+ for i := 0; i < len(setStateBatch); i += oneOperationMax {
+ endIndex := min(i+oneOperationMax, len(batchID)) // beginning of next sub-batch or end of slice
+ jobRowsSubBatch, err := insertSubBatch(batchID[i:endIndex], batchFinalizedAt[i:endIndex])
+ if err != nil {
+ return err
+ } | so if a single sub batch fails after 4 attempts, we immediately return an error and give up on all remaining jobs in the batch? That feels a bit risky 🤔
Also is there any kind of backpressure to stop the client from fetching more jobs if the completer is filling up and can't complete all the jobs it already has lined up? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,125 +63,299 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
+}
+
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+ WaitingAt time.Time // went job was submitted for completion
+}
+
+// BatchCompleter uses a debounced channel to accumulate incoming completions
+// and every so often complete many of them as a single efficient batch. To
+// minimize the amount of driver surface area we need, the batching is only
+// performed for jobs being changed to a `completed` state, which we expect to
+// the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ debounceChan *chanutil.DebouncedChan
+ exec PartialExecutor
+ ready chan struct{}
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.Mutex
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ exec: exec,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ c.ready = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ c.debounceChan = chanutil.NewDebouncedChan(stopCtx, 100*time.Millisecond)
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ close(c.ready)
+
+ for {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ return
+ case <-c.debounceChan.C():
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
}
- }()
+ }
+ }()
+
+ return nil
+}
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Insert a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ insertSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) {
+ return withRetries(ctx, &c.BaseService, func(ctx context.Context) ([]*rivertype.JobRow, error) {
+ return c.exec.JobSetCompleteIfRunningMany(ctx, &riverdriver.JobSetCompleteIfRunningManyParams{
+ ID: batchID,
+ FinalizedAt: batchFinalizedAt,
+ })
+ })
+ }
+
+ // This could be written more simply using multiple `sliceutil.Map`s, but
+ // it's done this way to allocate as few new slices as necessary.
+ mapIDsAndFinalizedAt := func(setStateBatch map[int64]*batchCompleterSetState) ([]int64, []time.Time) {
+ var (
+ batchIDs = make([]int64, len(setStateBatch))
+ batchFinalizedAt = make([]time.Time, len(setStateBatch))
+ i int
+ )
+ for _, setState := range setStateBatch {
+ batchIDs[i] = setState.Params.ID
+ batchFinalizedAt[i] = *setState.Params.FinalizedAt
+ i++
+ }
+ return batchIDs, batchFinalizedAt
+ }
+
+ // Tease apart enormous batches into sub-batches.
+ //
+ // All the code below is concerned with doing that, with a fast loop that
+ // doesn't allocate any additional memory in case the entire batch is
+ // smaller than the sub-batch maximum size (which will be the common case).
+ const oneOperationMax = 2_000
+
+ var (
+ batchID, batchFinalizedAt = mapIDsAndFinalizedAt(setStateBatch)
+ jobRows []*rivertype.JobRow
+ )
+ if len(setStateBatch) > oneOperationMax {
+ jobRows = make([]*rivertype.JobRow, 0, len(setStateBatch))
+ for i := 0; i < len(setStateBatch); i += oneOperationMax {
+ endIndex := min(i+oneOperationMax, len(batchID)) // beginning of next sub-batch or end of slice
+ jobRowsSubBatch, err := insertSubBatch(batchID[i:endIndex], batchFinalizedAt[i:endIndex])
+ if err != nil {
+ return err
+ }
+ jobRows = append(jobRows, jobRowsSubBatch...)
+ }
+ } else {
+ var err error
+ jobRows, err = insertSubBatch(batchID, batchFinalizedAt)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, jobRow := range jobRows {
+ setState := setStateBatch[jobRow.ID]
+ setState.Stats.CompleteDuration = c.TimeNowUTC().Sub(setState.WaitingAt)
+ c.sendJobToSubscription(jobRow, setState.Stats)
+ }
+
return nil
}
-func (c *AsyncJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+func (c *BatchCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Send completions other than setting to `complete` to an async completer.
+ // We consider this okay because these are expected to be much more rare, so
+ // only optimizing `complete` will yield huge speed gains.
+ if params.State != rivertype.JobStateCompleted {
+ return c.asyncCompleter.JobSetStateIfRunning(ctx, stats, params)
+ }
- c.subscribeFunc = subscribeFunc
+ // Wait until the completer is started and ready to start processing
+ // batches. Alternatively, we could remove this and allow batches to start
+ // accumulating even if the service isn't started, but that could introduce
+ // some danger of the service never being started and therefore accumulating
+ // forever without completing jobs.
+ <-c.ready | Can we instead make sure the completer is started & good to go before we even start working jobs? Seems better than having this channel receive on every single completion IMO. And the slight increase in startup time also seems worth it to be as sure as possible that jobs are ready to be worked successfully. |
river | github_2023 | others | 258 | riverqueue | bgentry | @@ -23,10 +24,9 @@ require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
- github.com/oklog/ulid/v2 v2.1.0 // indirect
+ github.com/lmittmann/tint v1.0.4 // indirect | Looks solid! I think I'm gonna pull this into my riverdemo app too. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -612,19 +612,40 @@ func (c *Client[TTx]) Start(ctx context.Context) error {
return err
}
- // Receives job complete notifications from the completer and distributes
- // them to any subscriptions.
- c.completer.Subscribe(c.distributeJobCompleterCallback)
+ if c.completer != nil {
+ // The completer is part of the services list below, but although it can
+ // stop gracefully along with all the other services, it needs to be
+ // started with a context that's _not_ fetchCtx. This ensures that even
+ // when fetch is cancelled on shutdown, the completer is still given a
+ // separate opportunity to start stopping only after the producers have
+ // finished up and returned.
+ if err := c.completer.Start(ctx); err != nil {
+ stopServicesOnError()
+ return err
+ }
- for _, service := range c.services {
- if err := service.Start(fetchCtx); err != nil {
- // In case of error, stop any services that might have started. This
- // is safe because even services that were never started will still
- // tolerate being stopped.
- startstop.StopAllParallel(c.services)
+ // Receives job complete notifications from the completer and
+ // distributes them to any subscriptions.
+ c.completer.Subscribe(c.distributeJobCompleterCallback)
+ }
- c.monitor.Stop()
+ // We use separate contexts for fetching and working to allow for a graceful
+ // stop. However, both inherit from the provided context so if it is
+ // cancelled a more aggressive stop will be initiated.
+ fetchCtx, fetchWorkCancel := context.WithCancelCause(ctx)
+ c.fetchWorkCancel = fetchWorkCancel
+ workCtx, workCancel := context.WithCancelCause(withClient[TTx](ctx, c))
+ c.workCancel = workCancel
+ for _, service := range c.services {
+ // TODO(brandur): Reevaluate the use of fetchNewWorkCtx here. It's | Missed the rename in this comment:
```suggestion
// TODO(brandur): Reevaluate the use of fetchCtx here. It's
``` |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -94,17 +96,17 @@ func (b *Benchmarker[TTx]) Run(ctx context.Context) error {
// values against the wall, they perform quite well. Much better than
// the client's default values at any rate.
FetchCooldown: 2 * time.Millisecond,
- FetchPollInterval: 5 * time.Millisecond,
+ FetchPollInterval: 20 * time.Millisecond, | did you find that this gave you better throughput at 20ms vs 5ms? |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,170 +60,468 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
+}
+
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
- })
-}
-
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+}
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+// BatchCompleter accumulates incoming completions, and instead of completing
+// them immediately, every so often complete many of them as a single efficient
+// batch. To minimize the amount of driver surface area we need, the batching is
+// only performed for jobs being changed to a `completed` state, which we expect
+// to the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ completionMaxSize int // configurable for testing purposes; max jobs to complete in single database operation
+ maxBacklog int // configurable for testing purposes; max backlog allowed before no more completions accepted
+ exec PartialExecutor
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.RWMutex
+ started chan struct{}
+ waitOnBacklogChan chan struct{}
+ waitOnBacklogWaiting bool
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ const (
+ completionMaxSize = 5_000
+ maxBacklog = 20_000
+ )
+
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ completionMaxSize: completionMaxSize,
+ exec: exec,
+ maxBacklog: maxBacklog,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ c.started = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
+
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+
+ close(c.started)
+
+ backlogSize := func() int {
+ c.setStateParamsMu.RLock()
+ defer c.setStateParamsMu.RUnlock()
+ return len(c.setStateParams)
+ }
+
+ for numTicks := 0; ; numTicks++ {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+ return
+
+ case <-ticker.C:
}
- }()
+ // The ticker fires quite often to make sure that given a huge glut
+ // of jobs, we don't accidentally build up too much of a backlog by
+ // waiting too long. However, don't start a complete operation until
+ // we reach a minimum threshold unless we're on a tick that's a
+ // multiple of 5. So, jobs will be completed every 250ms even if the
+ // threshold hasn't been met.
+ const batchCompleterStartThreshold = 100
+ if backlogSize() < min(c.maxBacklog, batchCompleterStartThreshold) && numTicks != 0 && numTicks%5 != 0 {
+ continue
+ }
+
+ for {
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ // New jobs to complete may have come in while working the batch
+ // above. If enough have to bring us above the minimum complete
+ // threshold, do another again. Otherwise, break and listen for
+ // a new tick. | bit of awkward phrasing in here, probably worth tweaking |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,170 +60,468 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
+}
+
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
- })
-}
-
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+}
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+// BatchCompleter accumulates incoming completions, and instead of completing
+// them immediately, every so often complete many of them as a single efficient
+// batch. To minimize the amount of driver surface area we need, the batching is
+// only performed for jobs being changed to a `completed` state, which we expect
+// to the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ completionMaxSize int // configurable for testing purposes; max jobs to complete in single database operation
+ maxBacklog int // configurable for testing purposes; max backlog allowed before no more completions accepted
+ exec PartialExecutor
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.RWMutex
+ started chan struct{}
+ waitOnBacklogChan chan struct{}
+ waitOnBacklogWaiting bool
+}
+
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+ const (
+ completionMaxSize = 5_000
+ maxBacklog = 20_000
+ )
+
+ return baseservice.Init(archetype, &BatchCompleter{
+ asyncCompleter: NewAsyncCompleter(archetype, exec),
+ completionMaxSize: completionMaxSize,
+ exec: exec,
+ maxBacklog: maxBacklog,
+ setStateParams: make(map[int64]*batchCompleterSetState),
+ })
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ c.started = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
+
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+
+ close(c.started)
+
+ backlogSize := func() int {
+ c.setStateParamsMu.RLock()
+ defer c.setStateParamsMu.RUnlock()
+ return len(c.setStateParams)
+ }
+
+ for numTicks := 0; ; numTicks++ {
+ select {
+ case <-stopCtx.Done():
+ // Try to insert last batch before leaving. Note we use the
+ // original context so operations aren't immediately cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+ return
+
+ case <-ticker.C:
}
- }()
+ // The ticker fires quite often to make sure that given a huge glut
+ // of jobs, we don't accidentally build up too much of a backlog by
+ // waiting too long. However, don't start a complete operation until
+ // we reach a minimum threshold unless we're on a tick that's a
+ // multiple of 5. So, jobs will be completed every 250ms even if the
+ // threshold hasn't been met.
+ const batchCompleterStartThreshold = 100
+ if backlogSize() < min(c.maxBacklog, batchCompleterStartThreshold) && numTicks != 0 && numTicks%5 != 0 {
+ continue
+ }
+
+ for {
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+ // New jobs to complete may have come in while working the batch
+ // above. If enough have to bring us above the minimum complete
+ // threshold, do another again. Otherwise, break and listen for
+ // a new tick.
+ if backlogSize() < batchCompleterStartThreshold {
+ break
+ }
+ }
+ }
+ }()
+
+ return nil
+}
+
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Complete a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ completeSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) {
+ start := time.Now()
+ defer func() {
+ c.Logger.DebugContext(ctx, c.Name+": Completed sub-batch of job(s)", "duration", time.Since(start), "num_jobs", len(batchID))
+ }()
+
+ return withRetries(ctx, &c.BaseService, func(ctx context.Context) ([]*rivertype.JobRow, error) {
+ return c.exec.JobSetCompleteIfRunningMany(ctx, &riverdriver.JobSetCompleteIfRunningManyParams{
+ ID: batchID,
+ FinalizedAt: batchFinalizedAt,
+ })
+ })
+ }
+
+ // This could be written more simply using multiple `sliceutil.Map`s, but
+ // it's done this way to allocate as few new slices as necessary.
+ mapIDsAndFinalizedAt := func(setStateBatch map[int64]*batchCompleterSetState) ([]int64, []time.Time) {
+ var (
+ batchIDs = make([]int64, len(setStateBatch))
+ batchFinalizedAt = make([]time.Time, len(setStateBatch))
+ i int
+ )
+ for _, setState := range setStateBatch {
+ batchIDs[i] = setState.Params.ID
+ batchFinalizedAt[i] = *setState.Params.FinalizedAt
+ i++
+ }
+ return batchIDs, batchFinalizedAt
+ }
+
+ // Tease apart enormous batches into sub-batches.
+ //
+ // All the code below is concerned with doing that, with a fast loop that
+ // doesn't allocate any additional memory in case the entire batch is
+ // smaller than the sub-batch maximum size (which will be the common case).
+ var (
+ batchID, batchFinalizedAt = mapIDsAndFinalizedAt(setStateBatch)
+ jobRows []*rivertype.JobRow
+ )
+ if len(setStateBatch) > c.completionMaxSize {
+ jobRows = make([]*rivertype.JobRow, 0, len(setStateBatch))
+ for i := 0; i < len(setStateBatch); i += c.completionMaxSize {
+ endIndex := min(i+c.completionMaxSize, len(batchID)) // beginning of next sub-batch or end of slice
+ jobRowsSubBatch, err := completeSubBatch(batchID[i:endIndex], batchFinalizedAt[i:endIndex])
+ if err != nil {
+ return err
+ }
+ jobRows = append(jobRows, jobRowsSubBatch...)
+ }
+ } else {
+ var err error
+ jobRows, err = completeSubBatch(batchID, batchFinalizedAt)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, jobRow := range jobRows {
+ setState := setStateBatch[jobRow.ID]
+ setState.Stats.CompleteDuration = c.TimeNowUTC().Sub(*setState.Params.FinalizedAt)
+ c.sendJobToSubscription(jobRow, setState.Stats)
+ }
+
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ if c.waitOnBacklogWaiting && len(c.setStateParams) < c.maxBacklog {
+ c.Logger.DebugContext(ctx, c.Name+": Disabling waitOnBacklog; ready to complete more jobs")
+ close(c.waitOnBacklogChan)
+ c.waitOnBacklogWaiting = false
+ }
+ }()
+
return nil
}
-func (c *AsyncJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+func (c *BatchCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Send completions other than setting to `complete` to an async completer.
+ // We consider this okay because these are expected to be much more rare, so
+ // only optimizing `complete` will yield huge speed gains.
+ if params.State != rivertype.JobStateCompleted {
+ return c.asyncCompleter.JobSetStateIfRunning(ctx, stats, params)
+ }
- c.subscribeFunc = subscribeFunc
+ // If we've built up too much of a backlog because the completer's fallen
+ // behind, block completions until the complete loop's had a chance to catch
+ // up.
+ c.waitOrInitBacklogChannel(ctx)
+
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ c.setStateParams[params.ID] = &batchCompleterSetState{params, stats}
+
+ return nil
}
-func (c *AsyncJobCompleter) Wait() {
- // TODO: handle error?
- _ = c.eg.Wait()
+func (c *BatchCompleter) Stop() {
+ c.BaseStartStop.Stop()
+ c.asyncCompleter.Stop()
}
-// As configued, total time from initial attempt is ~7 seconds (1 + 2 + 4) (not
-// including jitter). I put in a basic retry algorithm to hold us over, but we
-// may want to rethink these numbers and strategy.
-const numRetries = 3
+func (c *BatchCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
+ c.withSubscribe.Subscribe(subscribeFunc)
+ c.asyncCompleter.Subscribe(subscribeFunc)
+}
-func withRetries(c *baseservice.BaseService, f func(ctx context.Context) (*rivertype.JobRow, error)) (*rivertype.JobRow, error) { //nolint:varnamelen
- retrySecondsWithoutJitter := func(attempt int) float64 {
- // Uses a different algorithm (2 ** N) compared to retry policies (4 **
- // N) so we can get more retries sooner: 1, 2, 4, 8
- return math.Pow(2, float64(attempt))
+func (c *BatchCompleter) WaitStarted() <-chan struct{} {
+ return c.started
+}
+
+func (c *BatchCompleter) waitOrInitBacklogChannel(ctx context.Context) {
+ c.setStateParamsMu.RLock()
+ var (
+ backlogSize = len(c.setStateParams)
+ waitChan = c.waitOnBacklogChan
+ waiting = c.waitOnBacklogWaiting
+ )
+ c.setStateParamsMu.RUnlock()
+
+ if waiting {
+ <-waitChan
+ return
}
- retrySeconds := func(attempt int) float64 {
- retrySeconds := retrySecondsWithoutJitter(attempt)
+ // Not at max backlog. A little raciness is allowed here: multiple
+ // goroutines may have acquired the read lock above and seen a size under
+ // limit, but with all allowed to continue it could put the backlog over its
+ // maximum. The backlog will only be nominally over because generally max
+ // backlog >> max workers, so consider this okay.
+ if backlogSize < c.maxBacklog {
+ return
+ }
- // Jitter number of seconds +/- 10%.
- retrySeconds += retrySeconds * (c.Rand.Float64()*0.2 - 0.1)
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
- return retrySeconds
+ // Check once more if another process has already started waiting (it's
+ // possible for multiple to race between the acquiring the lock above). If
+ // so, we fall through and allow this insertion to happen, even though it
+ // might bring the batch slightly over limit, because arranging the locks
+ // otherwise would get complicated.
+ if c.waitOnBacklogWaiting {
+ return
}
- tryOnce := func() (*rivertype.JobRow, error) {
- ctx := context.Background()
+ // Tell all future insertions to start waiting. This one is allowed to fall
+ // through and succeed even though it may bring the batch a little over
+ // limit.
+ c.waitOnBacklogChan = make(chan struct{})
+ c.waitOnBacklogWaiting = true
+ c.Logger.WarnContext(ctx, c.Name+": Hit maximum backlog; completions will wait until below threshold", "max_backlog", c.maxBacklog)
+}
- ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
- defer cancel()
+// As configued, total time from initial attempt is ~7 seconds (1 + 2 + 4) (not
+// including jitter).
+const numRetries = 3 | We may want to bump this a bit. 7 seconds could be just a short connectivity blip, so it seems a bit extreme to potentially allow bulk data loss in that situation.
Is there harm from increasing it? Of course the client won't be able to keep working new jobs if it hits the max backlog because we're stuck retrying, but it probably won't be able to do that anyway in a scenario that causes these failures. |
river | github_2023 | go | 258 | riverqueue | bgentry | @@ -56,170 +60,468 @@ type InlineJobCompleter struct {
wg sync.WaitGroup
}
-func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter {
- return baseservice.Init(archetype, &InlineJobCompleter{
+func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineCompleter {
+ return baseservice.Init(archetype, &InlineCompleter{
exec: exec,
})
}
-func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
- })
-}
-
-func (c *InlineJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
-
- c.subscribeFunc = subscribeFunc
-}
-
-func (c *InlineJobCompleter) Wait() {
- c.wg.Wait()
-}
-
-func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
+func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
c.wg.Add(1)
defer c.wg.Done()
start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+ return nil
+}
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
- }
- }()
+func (c *InlineCompleter) Start(ctx context.Context) error { return nil }
- return nil
+func (c *InlineCompleter) Stop() {
+ c.wg.Wait()
}
-type AsyncJobCompleter struct {
+// A default concurrency of 100 seems to perform better a much smaller number
+// like 10, but it's quite dependent on environment (10 and 100 bench almost
+// identically on MBA when it's on battery power). This number should represent
+// our best known default for most use cases, but don't consider its choice to
+// be particularly well informed at this point.
+const asyncCompleterDefaultConcurrency = 100
+
+type AsyncCompleter struct {
baseservice.BaseService
+ withSubscribe
- concurrency uint32
- exec PartialExecutor
- eg *errgroup.Group
- subscribeFunc func(update CompleterJobUpdated)
- subscribeFuncMu sync.Mutex
+ concurrency int
+ errGroup *errgroup.Group
+ exec PartialExecutor
}
-func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter {
- eg := &errgroup.Group{}
- // TODO: int concurrency may feel more natural than uint32
- eg.SetLimit(int(concurrency))
+func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *AsyncCompleter {
+ return newAsyncCompleterWithConcurrency(archetype, exec, asyncCompleterDefaultConcurrency)
+}
+
+func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec PartialExecutor, concurrency int) *AsyncCompleter {
+ errGroup := &errgroup.Group{}
+ errGroup.SetLimit(concurrency)
- return baseservice.Init(archetype, &AsyncJobCompleter{
+ return baseservice.Init(archetype, &AsyncCompleter{
exec: exec,
concurrency: concurrency,
- eg: eg,
- })
-}
-
-func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
- return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) {
- return c.exec.JobSetStateIfRunning(ctx, params)
+ errGroup: errGroup,
})
}
-func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error {
- c.eg.Go(func() error {
- start := c.TimeNowUTC()
+func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+ // Start clock outside of goroutine so that the time spent blocking waiting
+ // for an errgroup slot is accurately measured.
+ start := c.TimeNowUTC()
- job, err := withRetries(&c.BaseService, f)
+ c.errGroup.Go(func() error {
+ job, err := withRetries(ctx, &c.BaseService, func(ctx context.Context) (*rivertype.JobRow, error) {
+ return c.exec.JobSetStateIfRunning(ctx, params)
+ })
if err != nil {
return err
}
stats.CompleteDuration = c.TimeNowUTC().Sub(start)
+ c.sendJobToSubscription(job, stats)
+
+ return nil
+ })
+ return nil
+}
+
+func (c *AsyncCompleter) Start(ctx context.Context) error { return nil }
+
+func (c *AsyncCompleter) Stop() {
+ if err := c.errGroup.Wait(); err != nil {
+ c.Logger.Error("Error waiting on async completer: %s", err)
+ }
+}
+
+type batchCompleterSetState struct {
+ Params *riverdriver.JobSetStateIfRunningParams
+ Stats *jobstats.JobStatistics
+}
- func() {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+// BatchCompleter accumulates incoming completions, and instead of completing
+// them immediately, every so often complete many of them as a single efficient
+// batch. To minimize the amount of driver surface area we need, the batching is
+// only performed for jobs being changed to a `completed` state, which we expect
+// to the vast common case under normal operation. The completer embeds an
+// AsyncCompleter to perform other non-`completed` state completions.
+type BatchCompleter struct {
+ baseservice.BaseService
+ startstop.BaseStartStop
+ withSubscribe
+
+ asyncCompleter *AsyncCompleter // used for non-complete completions
+ completionMaxSize int // configurable for testing purposes; max jobs to complete in single database operation
+ maxBacklog int // configurable for testing purposes; max backlog allowed before no more completions accepted
+ exec PartialExecutor
+ setStateParams map[int64]*batchCompleterSetState
+ setStateParamsMu sync.RWMutex
+ started chan struct{}
+ waitOnBacklogChan chan struct{}
+ waitOnBacklogWaiting bool
+}
+
+// NewBatchCompleter initializes a BatchCompleter against the given executor
+// with default batching limits.
+func NewBatchCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *BatchCompleter {
+	const (
+		completionMaxSize = 5_000  // max jobs completed in a single database operation
+		maxBacklog        = 20_000 // max accumulated completions before new ones block
+	)
+
+	return baseservice.Init(archetype, &BatchCompleter{
+		asyncCompleter:    NewAsyncCompleter(archetype, exec), // handles non-`completed` state completions
+		completionMaxSize: completionMaxSize,
+		exec:              exec,
+		maxBacklog:        maxBacklog,
+		setStateParams:    make(map[int64]*batchCompleterSetState),
+	})
+}
+
+func (c *BatchCompleter) Start(ctx context.Context) error {
+ stopCtx, shouldStart, stopped := c.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
- if c.subscribeFunc != nil {
- c.subscribeFunc(CompleterJobUpdated{Job: job, JobStats: stats})
+ c.started = make(chan struct{})
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ c.Logger.InfoContext(ctx, c.Name+": Run loop started")
+ defer c.Logger.InfoContext(ctx, c.Name+": Run loop stopped")
+
+ ticker := time.NewTicker(50 * time.Millisecond)
+ defer ticker.Stop()
+
+ close(c.started)
+
+ backlogSize := func() int {
+ c.setStateParamsMu.RLock()
+ defer c.setStateParamsMu.RUnlock()
+ return len(c.setStateParams)
+ }
+
+ for numTicks := 0; ; numTicks++ {
+ select {
+ case <-stopCtx.Done():
+				// Try to complete the last batch before leaving. Note we use
+				// the original context so operations aren't immediately
+				// cancelled.
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+ return
+
+ case <-ticker.C:
}
- }()
+ // The ticker fires quite often to make sure that given a huge glut
+ // of jobs, we don't accidentally build up too much of a backlog by
+ // waiting too long. However, don't start a complete operation until
+ // we reach a minimum threshold unless we're on a tick that's a
+ // multiple of 5. So, jobs will be completed every 250ms even if the
+ // threshold hasn't been met.
+ const batchCompleterStartThreshold = 100
+ if backlogSize() < min(c.maxBacklog, batchCompleterStartThreshold) && numTicks != 0 && numTicks%5 != 0 {
+ continue
+ }
+
+ for {
+ if err := c.handleBatch(ctx); err != nil {
+ c.Logger.Error(c.Name+": Error completing batch", "err", err)
+ }
+
+			// New jobs to complete may have come in while working the batch
+			// above. If enough have accumulated to bring us back above the
+			// minimum completion threshold, run another batch immediately.
+			// Otherwise, break and wait for a new tick.
+ if backlogSize() < batchCompleterStartThreshold {
+ break
+ }
+ }
+ }
+ }()
+
+ return nil
+}
+
+func (c *BatchCompleter) handleBatch(ctx context.Context) error {
+ var setStateBatch map[int64]*batchCompleterSetState
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ setStateBatch = c.setStateParams
+
+ // Don't bother resetting the map if there's nothing to process,
+ // allowing the completer to idle efficiently.
+ if len(setStateBatch) > 0 {
+ c.setStateParams = make(map[int64]*batchCompleterSetState)
+ } else {
+ // Set nil to avoid a data race below in case the map is set as a
+ // new job comes in.
+ setStateBatch = nil
+ }
+ }()
+
+ if len(setStateBatch) < 1 {
return nil
- })
+ }
+
+ // Complete a sub-batch with retries. Also helps reduce visual noise and
+ // increase readability of loop below.
+ completeSubBatch := func(batchID []int64, batchFinalizedAt []time.Time) ([]*rivertype.JobRow, error) {
+ start := time.Now()
+ defer func() {
+ c.Logger.DebugContext(ctx, c.Name+": Completed sub-batch of job(s)", "duration", time.Since(start), "num_jobs", len(batchID))
+ }()
+
+ return withRetries(ctx, &c.BaseService, func(ctx context.Context) ([]*rivertype.JobRow, error) {
+ return c.exec.JobSetCompleteIfRunningMany(ctx, &riverdriver.JobSetCompleteIfRunningManyParams{
+ ID: batchID,
+ FinalizedAt: batchFinalizedAt,
+ })
+ })
+ }
+
+ // This could be written more simply using multiple `sliceutil.Map`s, but
+ // it's done this way to allocate as few new slices as necessary.
+ mapIDsAndFinalizedAt := func(setStateBatch map[int64]*batchCompleterSetState) ([]int64, []time.Time) {
+ var (
+ batchIDs = make([]int64, len(setStateBatch))
+ batchFinalizedAt = make([]time.Time, len(setStateBatch))
+ i int
+ )
+ for _, setState := range setStateBatch {
+ batchIDs[i] = setState.Params.ID
+ batchFinalizedAt[i] = *setState.Params.FinalizedAt
+ i++
+ }
+ return batchIDs, batchFinalizedAt
+ }
+
+ // Tease apart enormous batches into sub-batches.
+ //
+ // All the code below is concerned with doing that, with a fast loop that
+ // doesn't allocate any additional memory in case the entire batch is
+ // smaller than the sub-batch maximum size (which will be the common case).
+ var (
+ batchID, batchFinalizedAt = mapIDsAndFinalizedAt(setStateBatch)
+ jobRows []*rivertype.JobRow
+ )
+ if len(setStateBatch) > c.completionMaxSize {
+ jobRows = make([]*rivertype.JobRow, 0, len(setStateBatch))
+ for i := 0; i < len(setStateBatch); i += c.completionMaxSize {
+ endIndex := min(i+c.completionMaxSize, len(batchID)) // beginning of next sub-batch or end of slice
+ jobRowsSubBatch, err := completeSubBatch(batchID[i:endIndex], batchFinalizedAt[i:endIndex])
+ if err != nil {
+ return err
+ }
+ jobRows = append(jobRows, jobRowsSubBatch...)
+ }
+ } else {
+ var err error
+ jobRows, err = completeSubBatch(batchID, batchFinalizedAt)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, jobRow := range jobRows {
+ setState := setStateBatch[jobRow.ID]
+ setState.Stats.CompleteDuration = c.TimeNowUTC().Sub(*setState.Params.FinalizedAt)
+ c.sendJobToSubscription(jobRow, setState.Stats)
+ }
+
+ func() {
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
+
+ if c.waitOnBacklogWaiting && len(c.setStateParams) < c.maxBacklog {
+ c.Logger.DebugContext(ctx, c.Name+": Disabling waitOnBacklog; ready to complete more jobs")
+ close(c.waitOnBacklogChan)
+ c.waitOnBacklogWaiting = false
+ }
+ }()
+
return nil
}
-func (c *AsyncJobCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
- c.subscribeFuncMu.Lock()
- defer c.subscribeFuncMu.Unlock()
+// JobSetStateIfRunning records a job state change to be persisted. Jobs moving
+// to `completed` are accumulated for batch persistence; all other states are
+// delegated to the embedded async completer.
+func (c *BatchCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error {
+	// Send completions other than setting to `complete` to an async completer.
+	// We consider this okay because these are expected to be much more rare, so
+	// only optimizing `complete` will yield huge speed gains.
+	if params.State != rivertype.JobStateCompleted {
+		return c.asyncCompleter.JobSetStateIfRunning(ctx, stats, params)
+	}
-	c.subscribeFunc = subscribeFunc
+	// If we've built up too much of a backlog because the completer's fallen
+	// behind, block completions until the complete loop's had a chance to catch
+	// up.
+	c.waitOrInitBacklogChannel(ctx)
+
+	c.setStateParamsMu.Lock()
+	defer c.setStateParamsMu.Unlock()
+
+	// Keyed by job ID, so a duplicate completion for the same job overwrites
+	// the earlier entry rather than completing twice.
+	c.setStateParams[params.ID] = &batchCompleterSetState{params, stats}
+
+	return nil
 }
-func (c *AsyncJobCompleter) Wait() {
- // TODO: handle error?
- _ = c.eg.Wait()
+// Stop shuts down the batching run loop, then drains the embedded async
+// completer used for non-`completed` state changes.
+func (c *BatchCompleter) Stop() {
+	c.BaseStartStop.Stop()
+	c.asyncCompleter.Stop()
+}
-// As configued, total time from initial attempt is ~7 seconds (1 + 2 + 4) (not
-// including jitter). I put in a basic retry algorithm to hold us over, but we
-// may want to rethink these numbers and strategy.
-const numRetries = 3
+// Subscribe registers a callback for job update events. It's installed on both
+// this completer and the embedded async completer so updates are delivered
+// regardless of which path a job's completion took.
+func (c *BatchCompleter) Subscribe(subscribeFunc func(update CompleterJobUpdated)) {
+	c.withSubscribe.Subscribe(subscribeFunc)
+	c.asyncCompleter.Subscribe(subscribeFunc)
+}
-func withRetries(c *baseservice.BaseService, f func(ctx context.Context) (*rivertype.JobRow, error)) (*rivertype.JobRow, error) { //nolint:varnamelen
- retrySecondsWithoutJitter := func(attempt int) float64 {
- // Uses a different algorithm (2 ** N) compared to retry policies (4 **
- // N) so we can get more retries sooner: 1, 2, 4, 8
- return math.Pow(2, float64(attempt))
+// WaitStarted returns a channel that's closed once the completer's run loop is
+// up and ticking (closed in Start's goroutine).
+func (c *BatchCompleter) WaitStarted() <-chan struct{} {
+	return c.started
+}
+
+func (c *BatchCompleter) waitOrInitBacklogChannel(ctx context.Context) {
+ c.setStateParamsMu.RLock()
+ var (
+ backlogSize = len(c.setStateParams)
+ waitChan = c.waitOnBacklogChan
+ waiting = c.waitOnBacklogWaiting
+ )
+ c.setStateParamsMu.RUnlock()
+
+ if waiting {
+ <-waitChan
+ return
}
- retrySeconds := func(attempt int) float64 {
- retrySeconds := retrySecondsWithoutJitter(attempt)
+ // Not at max backlog. A little raciness is allowed here: multiple
+ // goroutines may have acquired the read lock above and seen a size under
+ // limit, but with all allowed to continue it could put the backlog over its
+ // maximum. The backlog will only be nominally over because generally max
+ // backlog >> max workers, so consider this okay.
+ if backlogSize < c.maxBacklog {
+ return
+ }
- // Jitter number of seconds +/- 10%.
- retrySeconds += retrySeconds * (c.Rand.Float64()*0.2 - 0.1)
+ c.setStateParamsMu.Lock()
+ defer c.setStateParamsMu.Unlock()
- return retrySeconds
+	// Check once more whether another goroutine has already started waiting
+	// (it's possible for multiple to race between acquiring the lock above).
+	// If so, we fall through and allow this insertion to happen, even though
+	// it might bring the batch slightly over limit, because arranging the
+	// locks otherwise would get complicated.
+ if c.waitOnBacklogWaiting {
+ return
}
- tryOnce := func() (*rivertype.JobRow, error) {
- ctx := context.Background()
+ // Tell all future insertions to start waiting. This one is allowed to fall
+ // through and succeed even though it may bring the batch a little over
+ // limit.
+ c.waitOnBacklogChan = make(chan struct{})
+ c.waitOnBacklogWaiting = true
+ c.Logger.WarnContext(ctx, c.Name+": Hit maximum backlog; completions will wait until below threshold", "max_backlog", c.maxBacklog)
+}
- ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
- defer cancel()
+// As configured, total time from initial attempt is ~7 seconds (1 + 2 + 4) (not
+// including jitter).
+const numRetries = 3
- return f(ctx)
- }
+func withRetries[T any](logCtx context.Context, baseService *baseservice.BaseService, retryFunc func(ctx context.Context) (T, error)) (T, error) {
+ uncancelledCtx := context.Background()
- var lastErr error
+ var (
+ defaultVal T
+ lastErr error
+ )
- // TODO: Added a basic retry algorithm based on the top-level retry policies
- // for now, but we may want to reevaluate this somewhat.
- for attempt := 1; attempt < numRetries+1; attempt++ {
- job, err := tryOnce()
+ for attempt := 1; attempt <= numRetries; attempt++ {
+ const timeout = 10 * time.Second
+
+ // I've found that we want at least ten seconds for a large batch,
+ // although it usually doesn't need that long.
+ ctx, cancel := context.WithTimeout(uncancelledCtx, timeout)
+ defer cancel()
+
+ retVal, err := retryFunc(ctx) //nolint:contextcheck
if err != nil {
+ // A cancelled context will never succeed, return immediately.
+ if errors.Is(err, context.Canceled) {
+ return defaultVal, err
+ }
+
+ // A closed pool will never succeed, return immediately.
+ if errors.Is(err, puddle.ErrClosedPool) { | This is somewhat pgx-specific, isn't it? Do we need a driver error type for this? |
river | github_2023 | go | 264 | riverqueue | bgentry | @@ -644,17 +644,30 @@ func (c *Client[TTx]) Start(ctx context.Context) error {
}()
}
- c.runProducers(fetchNewWorkCtx, workCtx)
- go c.signalStopComplete(workCtx)
+ for _, producer := range c.producersByQueueName {
+ producer := producer
+
+ if err := producer.StartWorkContext(fetchNewWorkCtx, workCtx); err != nil {
+ return err
+ }
+ }
+
+ go c.signalStopComplete(fetchNewWorkCtx)
c.baseService.Logger.InfoContext(workCtx, "River client successfully started", slog.String("client_id", c.ID()))
return nil
}
// ctx is used only for logging, not for lifecycle.
-func (c *Client[TTx]) signalStopComplete(ctx context.Context) {
- // Wait for producers and elector to exit:
+func (c *Client[TTx]) signalStopComplete(fetchCtx context.Context) {
+ <-fetchCtx.Done()
+
+ for _, producer := range c.producersByQueueName {
+ producer.Stop()
+ }
+
+ // Wait for notifier and elector to exit: | this is just for the elector now, isn't it? |
river | github_2023 | go | 264 | riverqueue | bgentry | @@ -385,18 +445,13 @@ func (p *producer) startNewExecutors(workCtx context.Context, jobs []*rivertype.
p.addActiveJob(job.ID, executor)
go executor.Execute(jobCtx)
- // TODO:
- // Errors can be recorded synchronously before the Executor slot is considered
- // available.
- //
- // Successful jobs can be sent to the completer for async acking, IF they
- // aren't already completed by the user. Do we need an internal field +
- // convenience method to make that part work? | thanks, this is quite outdated 😆 |
river | github_2023 | go | 264 | riverqueue | bgentry | @@ -136,63 +135,111 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) {
case <-ctx.Done():
t.Error("timed out waiting for last job to run")
}
- wg.Wait()
}
-func Test_Producer_Run(t *testing.T) {
+func TestProducer_PollOnly(t *testing.T) {
t.Parallel()
- ctx := context.Background()
-
- type testBundle struct {
- completer jobcompleter.JobCompleter
- exec riverdriver.Executor
- jobUpdates chan jobcompleter.CompleterJobUpdated
- workers *Workers
- }
-
- setup := func(t *testing.T) (*producer, *testBundle) {
+ testProducer(t, func(ctx context.Context, t *testing.T) *producer {
t.Helper()
- dbPool := riverinternaltest.TestDB(ctx, t)
- driver := riverpgxv5.New(dbPool)
- exec := driver.GetExecutor()
- listener := driver.GetListener()
+ var (
+ archetype = riverinternaltest.BaseServiceArchetype(t)
+ driver = riverpgxv5.New(nil)
+ tx = riverinternaltest.TestTx(ctx, t)
+ )
- archetype := riverinternaltest.BaseServiceArchetype(t)
+ // Wrap with a shared transaction because the producer fetching jobs may
+ // conflict with jobs being inserted in tests cases. | ```suggestion
// conflict with jobs being inserted in test cases.
``` |
river | github_2023 | go | 263 | riverqueue | bgentry | @@ -62,206 +89,294 @@ type Elector struct {
// NewElector returns an Elector using the given adapter. The name should correspond
// to the name of the database + schema combo and should be shared across all Clients
// running with that combination. The id should be unique to the Client.
-func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, name, id string, interval, ttlPadding time.Duration, logger *slog.Logger) (*Elector, error) {
- // TODO: validate name + id length/format, interval, etc
+func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, instanceName, clientID string) *Elector {
return baseservice.Init(archetype, &Elector{
- exec: exec,
- id: id,
- interval: interval,
- name: name,
- notifier: notifier,
- logger: logger.WithGroup("elector"),
+ exec: exec,
+ clientID: clientID,
+ electInterval: electInterval,
+ electIntervalJitter: electInteralJitter,
+ instanceName: instanceName,
+ notifier: notifier,
// TTL is at least the relect run interval used by clients to try and
// gain leadership or reelect themselves as leader, plus a little
// padding to account to give the leader a little breathing room in its
// reelection loop.
- ttl: interval + ttlPadding,
- }), nil
+ ttl: electInterval + electIntervalTTLPadding,
+ })
}
-func (e *Elector) Run(ctx context.Context) {
- // Before the elector returns, run a delete with NOTIFY to give up any
- // leadership that we have. If we do that here, we guarantee that any locks we
- // have will be released (even if they were acquired in gainLeadership but we
- // didn't wait for the response)
- //
- // This doesn't use ctx because it runs *after* the ctx is done.
- defer e.giveUpLeadership() //nolint:contextcheck
+func (e *Elector) Start(ctx context.Context) error {
+ ctx, shouldStart, stopped := e.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
// We'll send to this channel anytime a leader resigns on the key with `name`
- leadershipNotificationChan := make(chan struct{})
-
- handleNotification := func(topic notifier.NotificationTopic, payload string) {
- if topic != notifier.NotificationTopicLeadership {
- // This should not happen unless the notifier is broken.
- e.logger.Error("received unexpected notification", "topic", topic, "payload", payload)
- return
- }
- notification := pgNotification{}
- if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
- e.logger.Error("unable to unmarshal leadership notification", "err", err)
- return
- }
+ e.leadershipNotificationChan = make(chan struct{})
+
+ var sub *notifier.Subscription
+ if e.notifier == nil {
+ e.Logger.Info(e.Name+": No notifier configured; starting in poll mode", "client_id", e.clientID)
+ } else {
+ handleNotification := func(topic notifier.NotificationTopic, payload string) {
+ if topic != notifier.NotificationTopicLeadership {
+ // This should not happen unless the notifier is broken.
+ e.Logger.Error(e.Name+": Received unexpected notification", "client_id", e.clientID, "topic", topic, "payload", payload)
+ return
+ }
- if notification.Action != "resigned" || notification.Name != e.name {
- // We only care about resignations on because we use them to preempt the
- // election attempt backoff. And we only care about our own key name.
- return
- }
+ notification := pgNotification{}
+ if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
+ e.Logger.Error(e.Name+": Unable to unmarshal leadership notification", "client_id", e.clientID, "err", err)
+ return
+ }
- select {
- case <-ctx.Done():
- return
- case leadershipNotificationChan <- struct{}{}:
- }
- }
+ e.Logger.Info(e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.clientID)
- sub, err := notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
- if err != nil { //nolint:staticcheck
- // TODO(brandur): Propagate this after refactor.
- }
- if sub != nil {
- defer sub.Unlisten(ctx)
- }
+ if notification.Action != "resigned" || notification.Name != e.instanceName {
+ // We only care about resignations on because we use them to preempt the
+ // election attempt backoff. And we only care about our own key name. | typo here:
> We only care about resignations on because |
river | github_2023 | go | 263 | riverqueue | bgentry | @@ -62,206 +89,294 @@ type Elector struct {
// NewElector returns an Elector using the given adapter. The name should correspond
// to the name of the database + schema combo and should be shared across all Clients
// running with that combination. The id should be unique to the Client.
-func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, name, id string, interval, ttlPadding time.Duration, logger *slog.Logger) (*Elector, error) {
- // TODO: validate name + id length/format, interval, etc
+func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, instanceName, clientID string) *Elector {
return baseservice.Init(archetype, &Elector{
- exec: exec,
- id: id,
- interval: interval,
- name: name,
- notifier: notifier,
- logger: logger.WithGroup("elector"),
+ exec: exec,
+ clientID: clientID,
+ electInterval: electInterval,
+ electIntervalJitter: electInteralJitter,
+ instanceName: instanceName,
+ notifier: notifier,
// TTL is at least the relect run interval used by clients to try and
// gain leadership or reelect themselves as leader, plus a little
// padding to account to give the leader a little breathing room in its
// reelection loop.
- ttl: interval + ttlPadding,
- }), nil
+ ttl: electInterval + electIntervalTTLPadding,
+ })
}
-func (e *Elector) Run(ctx context.Context) {
- // Before the elector returns, run a delete with NOTIFY to give up any
- // leadership that we have. If we do that here, we guarantee that any locks we
- // have will be released (even if they were acquired in gainLeadership but we
- // didn't wait for the response)
- //
- // This doesn't use ctx because it runs *after* the ctx is done.
- defer e.giveUpLeadership() //nolint:contextcheck
+func (e *Elector) Start(ctx context.Context) error {
+ ctx, shouldStart, stopped := e.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
// We'll send to this channel anytime a leader resigns on the key with `name`
- leadershipNotificationChan := make(chan struct{})
-
- handleNotification := func(topic notifier.NotificationTopic, payload string) {
- if topic != notifier.NotificationTopicLeadership {
- // This should not happen unless the notifier is broken.
- e.logger.Error("received unexpected notification", "topic", topic, "payload", payload)
- return
- }
- notification := pgNotification{}
- if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
- e.logger.Error("unable to unmarshal leadership notification", "err", err)
- return
- }
+ e.leadershipNotificationChan = make(chan struct{})
+
+ var sub *notifier.Subscription
+ if e.notifier == nil {
+ e.Logger.Info(e.Name+": No notifier configured; starting in poll mode", "client_id", e.clientID)
+ } else {
+ handleNotification := func(topic notifier.NotificationTopic, payload string) {
+ if topic != notifier.NotificationTopicLeadership {
+ // This should not happen unless the notifier is broken.
+ e.Logger.Error(e.Name+": Received unexpected notification", "client_id", e.clientID, "topic", topic, "payload", payload)
+ return
+ }
- if notification.Action != "resigned" || notification.Name != e.name {
- // We only care about resignations on because we use them to preempt the
- // election attempt backoff. And we only care about our own key name.
- return
- }
+ notification := pgNotification{}
+ if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
+ e.Logger.Error(e.Name+": Unable to unmarshal leadership notification", "client_id", e.clientID, "err", err)
+ return
+ }
- select {
- case <-ctx.Done():
- return
- case leadershipNotificationChan <- struct{}{}:
- }
- }
+ e.Logger.Info(e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.clientID)
- sub, err := notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
- if err != nil { //nolint:staticcheck
- // TODO(brandur): Propagate this after refactor.
- }
- if sub != nil {
- defer sub.Unlisten(ctx)
- }
+ if notification.Action != "resigned" || notification.Name != e.instanceName {
+ // We only care about resignations on because we use them to preempt the
+ // election attempt backoff. And we only care about our own key name.
+ return
+ }
- for {
- if success := e.gainLeadership(ctx, leadershipNotificationChan); !success {
select {
case <-ctx.Done():
return
- default:
- // TODO: proper backoff
- e.logger.Error("gainLeadership returned unexpectedly, waiting to try again")
- time.Sleep(time.Second)
- continue
+ case e.leadershipNotificationChan <- struct{}{}:
}
}
- // notify all subscribers that we're the leader
- e.notifySubscribers(true)
+ e.Logger.Info(e.Name+": Listening for leadership changes", "client_id", e.clientID, "topic", notifier.NotificationTopicLeadership)
- err := e.keepLeadership(ctx, leadershipNotificationChan)
- e.notifySubscribers(false)
- if err != nil {
- select {
- case <-ctx.Done():
+ var err error
+ sub, err = notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
+ if err != nil { //nolint:staticcheck
+ // TODO(brandur): Propagate this after refactor.
+ }
+ }
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ e.Logger.InfoContext(ctx, e.Name+": Run loop started")
+ defer e.Logger.InfoContext(ctx, e.Name+": Run loop stopped")
+
+ if sub != nil {
+ defer sub.Unlisten(ctx)
+ }
+
+ for {
+ if err := e.attemptGainLeadershipLoop(ctx); err != nil {
+ // Function above only returns an error if context was cancelled
+ // or overall context is done.
+ if !errors.Is(err, context.Canceled) && ctx.Err() == nil {
+ panic(err)
+ }
return
- default:
- // TODO: backoff
- e.logger.Error("error keeping leadership", "err", err)
- continue
+ }
+
+ e.Logger.Info(e.Name+": Gained leadership", "client_id", e.clientID)
+ e.testSignals.GainedLeadership.Signal(struct{}{})
+
+ err := e.keepLeadershipLoop(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+
+ if !errors.Is(err, errLostLeadership) {
+ e.Logger.Error(e.Name+": Error keeping leadership", "client_id", e.clientID, "err", err)
+ }
}
}
- }
+ }()
+
+ return nil
}
-func (e *Elector) gainLeadership(ctx context.Context, leadershipNotificationChan <-chan struct{}) bool {
+func (e *Elector) attemptGainLeadershipLoop(ctx context.Context) error {
+ var numErrors int
+
for {
- success, err := e.attemptElect(ctx)
- if err != nil && !errors.Is(err, context.Canceled) {
- e.logger.Error("error attempting to elect", "err", err)
+ e.Logger.Info(e.Name+": Attempting to gain leadership", "client_id", e.clientID)
+
+ elected, err := attemptElectOrReelect(ctx, e.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: e.clientID,
+ Name: e.instanceName,
+ TTL: e.ttl,
+ })
+ if err != nil {
+ if errors.Is(err, context.Canceled) || ctx.Err() != nil {
+ return err
+ }
+
+ numErrors++
+ e.Logger.Error(e.Name+": Error attempting to elect", "client_id", e.clientID, "err", err, "num_errors", numErrors)
+ e.CancellableSleepExponentialBackoff(ctx, numErrors-1, baseservice.MaxAttemptsBeforeResetDefault)
+ continue
}
- if success {
- return true
+ if elected {
+ return nil
}
+ numErrors = 0
+
+ e.Logger.Info(e.Name+": Leadership bid was unsuccessful (not an error)", "client_id", e.clientID) | `Info` level might be a bit noisy for this running every 5 seconds on every worker and it being a completely benign/expected scenario, maybe `Debug`?
```suggestion
e.Logger.Debug(e.Name+": Leadership bid was unsuccessful (not an error)", "client_id", e.clientID)
``` |
river | github_2023 | go | 263 | riverqueue | bgentry | @@ -62,206 +89,294 @@ type Elector struct {
// NewElector returns an Elector using the given adapter. The name should correspond
// to the name of the database + schema combo and should be shared across all Clients
// running with that combination. The id should be unique to the Client.
-func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, name, id string, interval, ttlPadding time.Duration, logger *slog.Logger) (*Elector, error) {
- // TODO: validate name + id length/format, interval, etc
+func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, instanceName, clientID string) *Elector {
return baseservice.Init(archetype, &Elector{
- exec: exec,
- id: id,
- interval: interval,
- name: name,
- notifier: notifier,
- logger: logger.WithGroup("elector"),
+ exec: exec,
+ clientID: clientID,
+ electInterval: electInterval,
+ electIntervalJitter: electInteralJitter,
+ instanceName: instanceName,
+ notifier: notifier,
// TTL is at least the relect run interval used by clients to try and
// gain leadership or reelect themselves as leader, plus a little
// padding to account to give the leader a little breathing room in its
// reelection loop.
- ttl: interval + ttlPadding,
- }), nil
+ ttl: electInterval + electIntervalTTLPadding,
+ })
}
-func (e *Elector) Run(ctx context.Context) {
- // Before the elector returns, run a delete with NOTIFY to give up any
- // leadership that we have. If we do that here, we guarantee that any locks we
- // have will be released (even if they were acquired in gainLeadership but we
- // didn't wait for the response)
- //
- // This doesn't use ctx because it runs *after* the ctx is done.
- defer e.giveUpLeadership() //nolint:contextcheck
+func (e *Elector) Start(ctx context.Context) error {
+ ctx, shouldStart, stopped := e.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
// We'll send to this channel anytime a leader resigns on the key with `name`
- leadershipNotificationChan := make(chan struct{})
-
- handleNotification := func(topic notifier.NotificationTopic, payload string) {
- if topic != notifier.NotificationTopicLeadership {
- // This should not happen unless the notifier is broken.
- e.logger.Error("received unexpected notification", "topic", topic, "payload", payload)
- return
- }
- notification := pgNotification{}
- if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
- e.logger.Error("unable to unmarshal leadership notification", "err", err)
- return
- }
+ e.leadershipNotificationChan = make(chan struct{})
+
+ var sub *notifier.Subscription
+ if e.notifier == nil {
+ e.Logger.Info(e.Name+": No notifier configured; starting in poll mode", "client_id", e.clientID)
+ } else {
+ handleNotification := func(topic notifier.NotificationTopic, payload string) {
+ if topic != notifier.NotificationTopicLeadership {
+ // This should not happen unless the notifier is broken.
+ e.Logger.Error(e.Name+": Received unexpected notification", "client_id", e.clientID, "topic", topic, "payload", payload)
+ return
+ }
- if notification.Action != "resigned" || notification.Name != e.name {
- // We only care about resignations on because we use them to preempt the
- // election attempt backoff. And we only care about our own key name.
- return
- }
+ notification := pgNotification{}
+ if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
+ e.Logger.Error(e.Name+": Unable to unmarshal leadership notification", "client_id", e.clientID, "err", err)
+ return
+ }
- select {
- case <-ctx.Done():
- return
- case leadershipNotificationChan <- struct{}{}:
- }
- }
+ e.Logger.Info(e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.clientID)
- sub, err := notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
- if err != nil { //nolint:staticcheck
- // TODO(brandur): Propagate this after refactor.
- }
- if sub != nil {
- defer sub.Unlisten(ctx)
- }
+ if notification.Action != "resigned" || notification.Name != e.instanceName {
+ // We only care about resignations on because we use them to preempt the
+ // election attempt backoff. And we only care about our own key name.
+ return
+ }
- for {
- if success := e.gainLeadership(ctx, leadershipNotificationChan); !success {
select {
case <-ctx.Done():
return
- default:
- // TODO: proper backoff
- e.logger.Error("gainLeadership returned unexpectedly, waiting to try again")
- time.Sleep(time.Second)
- continue
+ case e.leadershipNotificationChan <- struct{}{}:
}
}
- // notify all subscribers that we're the leader
- e.notifySubscribers(true)
+ e.Logger.Info(e.Name+": Listening for leadership changes", "client_id", e.clientID, "topic", notifier.NotificationTopicLeadership)
- err := e.keepLeadership(ctx, leadershipNotificationChan)
- e.notifySubscribers(false)
- if err != nil {
- select {
- case <-ctx.Done():
+ var err error
+ sub, err = notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
+ if err != nil { //nolint:staticcheck
+ // TODO(brandur): Propagate this after refactor.
+ }
+ }
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ e.Logger.InfoContext(ctx, e.Name+": Run loop started")
+ defer e.Logger.InfoContext(ctx, e.Name+": Run loop stopped")
+
+ if sub != nil {
+ defer sub.Unlisten(ctx)
+ }
+
+ for {
+ if err := e.attemptGainLeadershipLoop(ctx); err != nil {
+ // Function above only returns an error if context was cancelled
+ // or overall context is done.
+ if !errors.Is(err, context.Canceled) && ctx.Err() == nil {
+ panic(err)
+ }
return
- default:
- // TODO: backoff
- e.logger.Error("error keeping leadership", "err", err)
- continue
+ }
+
+ e.Logger.Info(e.Name+": Gained leadership", "client_id", e.clientID)
+ e.testSignals.GainedLeadership.Signal(struct{}{})
+
+ err := e.keepLeadershipLoop(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+
+ if !errors.Is(err, errLostLeadership) {
+ e.Logger.Error(e.Name+": Error keeping leadership", "client_id", e.clientID, "err", err)
+ }
}
}
- }
+ }()
+
+ return nil
}
-func (e *Elector) gainLeadership(ctx context.Context, leadershipNotificationChan <-chan struct{}) bool {
+func (e *Elector) attemptGainLeadershipLoop(ctx context.Context) error {
+ var numErrors int
+
for {
- success, err := e.attemptElect(ctx)
- if err != nil && !errors.Is(err, context.Canceled) {
- e.logger.Error("error attempting to elect", "err", err)
+ e.Logger.Info(e.Name+": Attempting to gain leadership", "client_id", e.clientID)
+
+ elected, err := attemptElectOrReelect(ctx, e.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: e.clientID,
+ Name: e.instanceName,
+ TTL: e.ttl,
+ })
+ if err != nil {
+ if errors.Is(err, context.Canceled) || ctx.Err() != nil {
+ return err
+ }
+
+ numErrors++
+ e.Logger.Error(e.Name+": Error attempting to elect", "client_id", e.clientID, "err", err, "num_errors", numErrors)
+ e.CancellableSleepExponentialBackoff(ctx, numErrors-1, baseservice.MaxAttemptsBeforeResetDefault)
+ continue
}
- if success {
- return true
+ if elected {
+ return nil
}
+ numErrors = 0
+
+ e.Logger.Info(e.Name+": Leadership bid was unsuccessful (not an error)", "client_id", e.clientID)
+ e.testSignals.DeniedLeadership.Signal(struct{}{})
+
select {
- case <-ctx.Done():
- return false
- case <-time.After(e.interval):
- // TODO: This could potentially leak memory / timers if we're seeing a ton
- // of resignations. May want to make this reusable & cancel it when retrying?
- case <-leadershipNotificationChan:
- // Somebody just resigned, try to win the next election immediately.
+ case <-e.CancellableSleepRandomBetweenC(ctx, e.electInterval, e.electInterval+e.electIntervalJitter): | there's a potential leaking of timers here, but only if resignations are happening frequently |
river | github_2023 | go | 263 | riverqueue | bgentry | @@ -62,206 +89,294 @@ type Elector struct {
// NewElector returns an Elector using the given adapter. The name should correspond
// to the name of the database + schema combo and should be shared across all Clients
// running with that combination. The id should be unique to the Client.
-func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, name, id string, interval, ttlPadding time.Duration, logger *slog.Logger) (*Elector, error) {
- // TODO: validate name + id length/format, interval, etc
+func NewElector(archetype *baseservice.Archetype, exec riverdriver.Executor, notifier *notifier.Notifier, instanceName, clientID string) *Elector {
return baseservice.Init(archetype, &Elector{
- exec: exec,
- id: id,
- interval: interval,
- name: name,
- notifier: notifier,
- logger: logger.WithGroup("elector"),
+ exec: exec,
+ clientID: clientID,
+ electInterval: electInterval,
+ electIntervalJitter: electInteralJitter,
+ instanceName: instanceName,
+ notifier: notifier,
// TTL is at least the relect run interval used by clients to try and
// gain leadership or reelect themselves as leader, plus a little
// padding to account to give the leader a little breathing room in its
// reelection loop.
- ttl: interval + ttlPadding,
- }), nil
+ ttl: electInterval + electIntervalTTLPadding,
+ })
}
-func (e *Elector) Run(ctx context.Context) {
- // Before the elector returns, run a delete with NOTIFY to give up any
- // leadership that we have. If we do that here, we guarantee that any locks we
- // have will be released (even if they were acquired in gainLeadership but we
- // didn't wait for the response)
- //
- // This doesn't use ctx because it runs *after* the ctx is done.
- defer e.giveUpLeadership() //nolint:contextcheck
+func (e *Elector) Start(ctx context.Context) error {
+ ctx, shouldStart, stopped := e.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
// We'll send to this channel anytime a leader resigns on the key with `name`
- leadershipNotificationChan := make(chan struct{})
-
- handleNotification := func(topic notifier.NotificationTopic, payload string) {
- if topic != notifier.NotificationTopicLeadership {
- // This should not happen unless the notifier is broken.
- e.logger.Error("received unexpected notification", "topic", topic, "payload", payload)
- return
- }
- notification := pgNotification{}
- if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
- e.logger.Error("unable to unmarshal leadership notification", "err", err)
- return
- }
+ e.leadershipNotificationChan = make(chan struct{})
+
+ var sub *notifier.Subscription
+ if e.notifier == nil {
+ e.Logger.Info(e.Name+": No notifier configured; starting in poll mode", "client_id", e.clientID)
+ } else {
+ handleNotification := func(topic notifier.NotificationTopic, payload string) {
+ if topic != notifier.NotificationTopicLeadership {
+ // This should not happen unless the notifier is broken.
+ e.Logger.Error(e.Name+": Received unexpected notification", "client_id", e.clientID, "topic", topic, "payload", payload)
+ return
+ }
- if notification.Action != "resigned" || notification.Name != e.name {
- // We only care about resignations on because we use them to preempt the
- // election attempt backoff. And we only care about our own key name.
- return
- }
+ notification := pgNotification{}
+ if err := json.Unmarshal([]byte(payload), ¬ification); err != nil {
+ e.Logger.Error(e.Name+": Unable to unmarshal leadership notification", "client_id", e.clientID, "err", err)
+ return
+ }
- select {
- case <-ctx.Done():
- return
- case leadershipNotificationChan <- struct{}{}:
- }
- }
+ e.Logger.Info(e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.clientID)
- sub, err := notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
- if err != nil { //nolint:staticcheck
- // TODO(brandur): Propagate this after refactor.
- }
- if sub != nil {
- defer sub.Unlisten(ctx)
- }
+ if notification.Action != "resigned" || notification.Name != e.instanceName {
+ // We only care about resignations on because we use them to preempt the
+ // election attempt backoff. And we only care about our own key name.
+ return
+ }
- for {
- if success := e.gainLeadership(ctx, leadershipNotificationChan); !success {
select {
case <-ctx.Done():
return
- default:
- // TODO: proper backoff
- e.logger.Error("gainLeadership returned unexpectedly, waiting to try again")
- time.Sleep(time.Second)
- continue
+ case e.leadershipNotificationChan <- struct{}{}:
}
}
- // notify all subscribers that we're the leader
- e.notifySubscribers(true)
+ e.Logger.Info(e.Name+": Listening for leadership changes", "client_id", e.clientID, "topic", notifier.NotificationTopicLeadership)
- err := e.keepLeadership(ctx, leadershipNotificationChan)
- e.notifySubscribers(false)
- if err != nil {
- select {
- case <-ctx.Done():
+ var err error
+ sub, err = notifier.ListenRetryLoop(ctx, &e.BaseService, e.notifier, notifier.NotificationTopicLeadership, handleNotification)
+ if err != nil { //nolint:staticcheck
+ // TODO(brandur): Propagate this after refactor.
+ }
+ }
+
+ go func() {
+ // This defer should come first so that it's last out, thereby avoiding
+ // races.
+ defer close(stopped)
+
+ e.Logger.InfoContext(ctx, e.Name+": Run loop started")
+ defer e.Logger.InfoContext(ctx, e.Name+": Run loop stopped")
+
+ if sub != nil {
+ defer sub.Unlisten(ctx)
+ }
+
+ for {
+ if err := e.attemptGainLeadershipLoop(ctx); err != nil {
+ // Function above only returns an error if context was cancelled
+ // or overall context is done.
+ if !errors.Is(err, context.Canceled) && ctx.Err() == nil {
+ panic(err)
+ }
return
- default:
- // TODO: backoff
- e.logger.Error("error keeping leadership", "err", err)
- continue
+ }
+
+ e.Logger.Info(e.Name+": Gained leadership", "client_id", e.clientID)
+ e.testSignals.GainedLeadership.Signal(struct{}{})
+
+ err := e.keepLeadershipLoop(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+
+ if !errors.Is(err, errLostLeadership) {
+ e.Logger.Error(e.Name+": Error keeping leadership", "client_id", e.clientID, "err", err) | Is this inverted? The message seems to align with when the node _lost_ its leadership |
river | github_2023 | go | 263 | riverqueue | bgentry | @@ -442,6 +442,8 @@ func (n *Notifier) Listen(ctx context.Context, topic NotificationTopic, notifyFu
}
n.subscriptions[topic] = append(existingSubs, sub)
+ n.Logger.InfoContext(ctx, n.Name+": Added subscription", "new_num_subscriptions", len(n.subscriptions[topic]), "topic", topic) | This one also feels like it should maybe be a `Debug` level given it's indicative of healthy operation and pretty low level, thoughts? |
river | github_2023 | go | 254 | riverqueue | bgentry | @@ -0,0 +1,424 @@
+package riverbench
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os"
+ "os/signal"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/riverqueue/river"
+ "github.com/riverqueue/river/riverdriver"
+)
+
+type Benchmarker[TTx any] struct {
+ driver riverdriver.Driver[TTx]
+ duration time.Duration
+ logger *slog.Logger
+ name string
+ numTotalJobs int
+}
+
+func NewBenchmarker[TTx any](driver riverdriver.Driver[TTx], logger *slog.Logger, duration time.Duration, numTotalJobs int) *Benchmarker[TTx] {
+ return &Benchmarker[TTx]{
+ driver: driver,
+ duration: duration,
+ logger: logger,
+ name: "Benchmarker",
+ numTotalJobs: numTotalJobs,
+ }
+}
+
+// Run starts the benchmarking loop. Stops upon receiving SIGINT/SIGTERM, or
+// when reaching maximum configured run duration.
+func (b *Benchmarker[TTx]) Run(ctx context.Context) error {
+ var (
+ numJobsInserted atomic.Int64
+ numJobsLeft atomic.Int64
+ numJobsWorked atomic.Int64
+ shutdown = make(chan struct{})
+ shutdownClosed bool
+ )
+
+ // Prevents double-close on shutdown channel.
+ closeShutdown := func() {
+ if !shutdownClosed {
+ b.logger.InfoContext(ctx, "Closing shutdown channel")
+ close(shutdown)
+ }
+ shutdownClosed = true
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Installing signals allows us to try and stop the client cleanly, and also
+ // to produce a final summary log line for th whole bench run (by default,
+ // Go will terminate programs abruptly and not even defers will run).
+ go func() {
+ signalChan := make(chan os.Signal, 1)
+ signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
+
+ select {
+ case <-ctx.Done():
+ case <-signalChan:
+ closeShutdown()
+
+ // Wait again since the client may take an absurd amount of time to
+ // shut down. If we receive another signal in the intervening
+ // period, cancel context, thereby forcing a hard shut down.
+ select {
+ case <-ctx.Done():
+ case <-signalChan:
+ fmt.Printf("second signal received; canceling context\n")
+ cancel()
+ }
+ }
+ }()
+
+ if err := b.resetJobsTable(ctx); err != nil {
+ return err
+ }
+
+ workers := river.NewWorkers()
+ river.AddWorker(workers, &BenchmarkWorker{})
+
+ client, err := river.NewClient(b.driver, &river.Config{
+ Logger: slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelWarn})),
+ Queues: map[string]river.QueueConfig{
+ river.QueueDefault: {MaxWorkers: river.QueueNumWorkersMax}, | aaaaWe'd probably want to make this `MaxWorkers` param an argument to the bench command. Setting it to 10k is probably not realistic, and it would probably be useful to play around with different input values to find the sweet spot on throughput.
The fetch cooldown is another attr that's probably worth setting up as an input variable to this. A queue that's optimized for throughput is going to have a much shorter & more aggressive fetch cooldown than most users would typically want (because it would have the tradeoff of constantly hitting the DB to fetch more jobs).
I would suggest setting these each to a specific default that doesn't necessarily map to River's own defaults, because those defaults are likely going to be what's better for a large number of smaller users than the small percentage of users chasing max throughput. But if your purpose is actually to gauge throughput, you'll want some different values. |
river | github_2023 | others | 254 | riverqueue | bgentry | @@ -22,9 +23,10 @@ require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
- github.com/riverqueue/river/riverdriver v0.0.17 // indirect
+ github.com/oklog/ulid/v2 v2.1.0 // indirect | seems like this is outdated diff? |
river | github_2023 | go | 253 | riverqueue | bgentry | @@ -114,8 +114,18 @@ func (e *Elector) Run(ctx context.Context) {
}
}
- subscription := e.notifier.Listen(notifier.NotificationTopicLeadership, handleNotification)
- defer subscription.Unlisten()
+ sub, err := e.notifier.Listen(ctx, notifier.NotificationTopicLeadership, handleNotification)
+ if err != nil {
+ if !errors.Is(err, context.Canceled) {
+ // TODO(brandur): We shouldn't be silently swallowing major start up
+ // problems like this. Unfortunately, Run doesn't return an error, so
+ // fixing this will be a big refactor, so don't do it right now.
+ e.logger.Error(e.name+": Error listening for leadership changes", "err", err, "topic", notifier.NotificationTopicJobControl) | I think the challenge is that it's probably desirable to have the client fail to start if any of its critical subprocesses fail to start correctly at initial boot, however if those subprocesses fail at any other point they should enter into a graceful exponential backoff loop to try and get healthy again.
This is the reason to propagate more of the client's internal status in some fashion (ala the client monitor concept, inspired by LaunchDarkly's client) so that the user can decide for themselves when the failures have gone on for too long and warrant a shutdown or new client. |
river | github_2023 | go | 253 | riverqueue | bgentry | @@ -24,189 +28,327 @@ const (
type NotifyFunc func(topic NotificationTopic, payload string)
type Subscription struct {
- creationTime time.Time
- topic NotificationTopic
notifyFunc NotifyFunc
-
- unlistenOnce *sync.Once
notifier *Notifier
+ topic NotificationTopic
+ unlistenOnce sync.Once
}
-func (s *Subscription) Unlisten() {
+func (s *Subscription) Unlisten(ctx context.Context) {
s.unlistenOnce.Do(func() {
- s.notifier.unlisten(s)
+ // Unlisten uses background context in case of cancellation.
+ if err := s.notifier.unlisten(context.Background(), s); err != nil { //nolint:contextcheck
+ s.notifier.Logger.ErrorContext(ctx, s.notifier.Name+": Error unlistening on topic", "err", err, "topic", s.topic)
+ }
})
}
-type subscriptionChange struct {
- isNewTopic bool
- topic NotificationTopic
+// Test-only properties.
+type notifierTestSignals struct {
+ BackoffError rivercommon.TestSignal[error] // non-cancellation error received by main run loop
+ ListeningBegin rivercommon.TestSignal[struct{}] // notifier has entered a listen loop
+ ListeningEnd rivercommon.TestSignal[struct{}] // notifier has left a listen loop
+}
+
+func (ts *notifierTestSignals) Init() {
+ ts.BackoffError.Init()
+ ts.ListeningBegin.Init()
+ ts.ListeningEnd.Init()
}
type Notifier struct {
baseservice.BaseService
-
- listener riverdriver.Listener
- notificationBuf chan *riverdriver.Notification
- statusChangeFunc func(componentstatus.Status)
- logger *slog.Logger
-
- mu sync.Mutex
- isConnActive bool
- subs map[NotificationTopic][]*Subscription
- subChangeCh chan *subscriptionChange
+ startstop.BaseStartStop
+
+ listener riverdriver.Listener
+ notificationBuf chan *riverdriver.Notification
+ statusChangeFunc func(componentstatus.Status)
+ testSignals notifierTestSignals
+ waitInterruptChan chan func()
+
+ mu sync.RWMutex
+ isConnected bool
+ isStarted bool
+ isWaiting bool
+ subscriptions map[NotificationTopic][]*Subscription
+ waitCancel context.CancelFunc
}
-func New(archetype *baseservice.Archetype, listener riverdriver.Listener, statusChangeFunc func(componentstatus.Status), logger *slog.Logger) *Notifier {
+func New(archetype *baseservice.Archetype, listener riverdriver.Listener, statusChangeFunc func(componentstatus.Status)) *Notifier {
notifier := baseservice.Init(archetype, &Notifier{
- listener: listener,
- notificationBuf: make(chan *riverdriver.Notification, 1000),
- statusChangeFunc: statusChangeFunc,
- logger: logger.WithGroup("notifier"),
+ listener: listener,
+ notificationBuf: make(chan *riverdriver.Notification, 1000),
+ statusChangeFunc: statusChangeFunc,
+ waitInterruptChan: make(chan func(), 10),
- subs: make(map[NotificationTopic][]*Subscription),
- subChangeCh: make(chan *subscriptionChange, 1000),
+ subscriptions: make(map[NotificationTopic][]*Subscription),
})
return notifier
}
-func (n *Notifier) Run(ctx context.Context) {
- n.statusChangeFunc(componentstatus.Initializing)
- var wg sync.WaitGroup
+func (n *Notifier) Start(ctx context.Context) error {
+ ctx, shouldStart, stopped := n.StartInit(ctx)
+ if !shouldStart {
+ return nil
+ }
+
+ // The loop below will connect/close on every iteration, but do one initial
+ // connect so the notifier fails fast in case of an obvious problem.
+ if err := n.listenerConnect(ctx, false); err != nil {
+ close(stopped)
+ if errors.Is(err, context.Canceled) {
+ return nil
+ }
+ return err
+ }
- wg.Add(1)
go func() {
- defer wg.Done()
- n.deliverNotifications(ctx)
- }()
+ defer close(stopped)
- for {
- n.getConnAndRun(ctx)
+ n.Logger.InfoContext(ctx, n.Name+": Notifier started")
+ defer n.Logger.InfoContext(ctx, n.Name+": Notifier stopped")
- select {
- case <-ctx.Done():
- wg.Wait()
- n.logger.Info(n.Name + ": Notifier stopped")
- n.statusChangeFunc(componentstatus.Stopped)
- return
- default:
- // TODO: exponential backoff
+ n.withLock(func() { n.isStarted = true })
+ defer n.withLock(func() { n.isStarted = false })
+
+ defer n.listenerClose(ctx, false)
+
+ n.statusChangeFunc(componentstatus.Initializing)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ n.deliverNotifications(ctx)
+ }()
+
+ for attempt := 0; ; attempt++ { | doesn't `attempt` need to get reset at some point once a connection is successfully established? Otherwise it will continually increment every time there's a conn failure or reconnect, meaning you could have a very long backoff retry if the program has been running for a long time even if there hasn't been an issue for awhile. |
river | github_2023 | go | 253 | riverqueue | bgentry | @@ -24,189 +28,327 @@ const (
type NotifyFunc func(topic NotificationTopic, payload string)
type Subscription struct {
- creationTime time.Time
- topic NotificationTopic
notifyFunc NotifyFunc
-
- unlistenOnce *sync.Once
notifier *Notifier
+ topic NotificationTopic
+ unlistenOnce sync.Once
}
-func (s *Subscription) Unlisten() {
+func (s *Subscription) Unlisten(ctx context.Context) {
s.unlistenOnce.Do(func() {
- s.notifier.unlisten(s)
+ // Unlisten uses background context in case of cancellation.
+ if err := s.notifier.unlisten(context.Background(), s); err != nil { //nolint:contextcheck | Is it safe to use a background context with no timeout here? I'm guessing not, because this underlying code:
https://github.com/riverqueue/river/blob/35ee45ee57e15b91eb1f722c4bf6ff69c003779d/riverdriver/riverpgxv5/river_pgx_v5_driver.go#L524-L530
Could hang forever if the context never times out and there isn't some other mechanism to force it to give up.
I get that you don't want to inherit from `ctx` because it may/will already be cancelled at the time this runs, but it probably needs at least _some_ timeout to make sure it doesn't hang forever. |
river | github_2023 | go | 253 | riverqueue | bgentry | @@ -220,122 +362,175 @@ func (n *Notifier) runOnce(ctx context.Context) error {
// * Wait for notifications
// * Ping conn if 5 seconds have elapsed between notifications to keep it alive
- // * Manage listens/unlistens on conn
+ // * Manage listens/unlistens on conn (waitInterruptChan)
// * If any errors are encountered, return them so we can kill the conn and start over
select {
case <-ctx.Done():
- return <-errCh
+ return <-errChan
+
case <-needPingCtx.Done():
- if err := drainErrCh(); err != nil {
+ if err := drainErrChan(); err != nil {
return err
}
// Ping the conn to see if it's still alive
if err := n.listener.Ping(ctx); err != nil {
return err
}
- case err := <-errCh:
+
+ case err := <-errChan:
if errors.Is(err, context.Canceled) {
return nil
}
if err != nil {
- n.logger.Error("error from notification wait", "err", err)
+ n.Logger.ErrorContext(ctx, n.Name+": Error from notification wait", "err", err)
return err
}
- case subChange := <-n.subChangeCh:
- if err := drainErrCh(); err != nil {
- return err
- }
- // Apply the subscription change
- if subChange.isNewTopic {
- return n.listener.Listen(ctx, string(subChange.topic))
- } else {
- return n.listener.Unlisten(ctx, string(subChange.topic))
- }
}
+
return nil
}
-func (n *Notifier) setConnActive() []NotificationTopic {
- n.mu.Lock()
- defer n.mu.Unlock()
- n.isConnActive = true
-
- topics := make([]NotificationTopic, 0, len(n.subs))
- for topic := range n.subs {
- topics = append(topics, topic)
+// Sends an interrupt operation to the main loop, waits on the result, and
+// returns an error if there was one.
+//
+// MUST be called with the `n.mu` mutex already locked.
+func (n *Notifier) sendInterruptAndReceiveResult(operation func() error) error {
+ errChan := make(chan error)
+ n.waitInterruptChan <- func() {
+ errChan <- operation()
}
- return topics
-}
-func (n *Notifier) setConnInactive() {
- n.mu.Lock()
- defer n.mu.Unlock()
- n.isConnActive = false
+ n.waitCancel()
- // drain any pending changes from subChangeCh, because they'll be reflected
- // automatically when the conn restarts.
- for {
- select {
- case <-n.subChangeCh:
- default:
- return
- }
+ // Notably, these unlock then lock again, the reverse of what you'd normally
+ // expect in a mutex pattern. This is because this function is only expected
+ // to be called with the mutex already locked, but we need to unlock it to
+ // give the main loop a chance to run interrupt operations.
+ n.mu.Unlock()
+ defer n.mu.Lock()
+
+ select {
+ case err := <-errChan:
+ return err
+ case <-time.After(5 * time.Second):
+ return errors.New("timed out waiting for interrupt operation")
}
}
-func (n *Notifier) Listen(topic NotificationTopic, notifyFunc NotifyFunc) *Subscription {
+func (n *Notifier) Listen(ctx context.Context, topic NotificationTopic, notifyFunc NotifyFunc) (*Subscription, error) {
n.mu.Lock()
defer n.mu.Unlock()
sub := &Subscription{
- creationTime: time.Now(),
- topic: topic,
- notifyFunc: notifyFunc,
- unlistenOnce: &sync.Once{},
- notifier: n,
+ notifyFunc: notifyFunc,
+ topic: topic,
+ notifier: n,
}
- isNewTopic := false
-
- subs := n.subs[topic]
- if subs == nil {
- isNewTopic = true
- subs = make([]*Subscription, 0, 10)
+ existingSubs, existingTopic := n.subscriptions[topic]
+ if !existingTopic {
+ existingSubs = make([]*Subscription, 0, 10)
}
- subs = append(subs, sub)
- n.subs[topic] = subs
-
- if isNewTopic && n.isConnActive {
- // send to chan
- n.subChangeCh <- &subscriptionChange{
- topic: topic,
- isNewTopic: isNewTopic,
+ n.subscriptions[topic] = append(existingSubs, sub)
+
+ // We add the new subscription to the subscription list optimistically, and
+ // it needs to be done this way in case of a restart after an interrupt
+ // below has been run, but after a return to this function (say we were to
+ // add the new sub at the end of this function, it would not be picked
+ // during the restart). But in case of an error subscribing, remove the sub.
+ //
+ // By the time this function is run (i.e. after an interrupt), a lock on
+ // `n.mu` has been reacquired, and modifying subscription state is safe.
+ removeSub := func() {
+ n.subscriptions[sub.topic] = slices.DeleteFunc(n.subscriptions[sub.topic], func(s *Subscription) bool {
+ return s == sub
+ })
+
+ if len(n.subscriptions[sub.topic]) <= 1 {
+ delete(n.subscriptions, sub.topic)
}
}
- return sub
-}
+ if !existingTopic {
+ // If already waiting, send an interrupt to the wait function to run a
+ // listen operation. If not, connect and listen directly, returning any
+ // errors as feedback to the caller.
+ if n.isWaiting {
+ if err := n.sendInterruptAndReceiveResult(func() error { return n.listenerListen(ctx, topic) }); err != nil {
+ removeSub()
+ return nil, err
+ }
+ } else {
+ var justConnected bool
+
+ if !n.isConnected {
+ if err := n.listenerConnect(ctx, true); err != nil {
+ removeSub() | I don't think this behavior aligns with how the users of the notifier are designed today. I believe they all expect that once they call `Listen(topic)`, the notifier will do its best to make sure that `topic` is listened to until the subscriber calls `Unlisten` on the subscription. This is the case regardless of what errors/reconnects/etc happen on the notifier in the interim.
What's here seems fundamentally different. The listen attempt can _fail inline_, in which case that topic will not be listened to at all unless the caller tries to `Listen()` again. Because those subscribers are just swallowing the errors as of now (due to them being designed to consume the previous API which did not surface such errors), they will just carry on operating but without a functional listener. That doesn't seem like a desirable outcome. |
river | github_2023 | go | 246 | riverqueue | bgentry | @@ -475,12 +475,14 @@ func (l *Listener) Close(ctx context.Context) error {
return nil
}
- if err := l.conn.Conn().Close(ctx); err != nil {
- return err
- } | Man, I noticed [this change](https://github.com/riverqueue/river/pull/212/files#diff-445bd06e41eab3a8a89576b7cda31b0e834bbefa4fa969af11c1ce17766b1cceL149) when I reviewed the mega-PR, should have said something 😞 |
river | github_2023 | go | 250 | riverqueue | bgentry | @@ -475,14 +475,10 @@ func (l *Listener) Close(ctx context.Context) error {
return nil
}
- err := l.conn.Conn().Close(ctx)
-
- // Regardless of the error state returned above, always release and unset
- // the listener's local connection. | I'm not actually sure this is a good idea and I don't think we want to return the conn to the pool. If there are `LISTEN` subscriptions active which weren't properly `UNLISTEN`'d, the conn may still receive additional notifications that could interfere with other operations on it.
For pgbouncer compatibility reasons, we will probably also end up going a direction where the listener conn is created from a standalone config or at least a separate pool from the main river db pool.
Or maybe you can elaborate more on why you think this is a necessary change? |
river | github_2023 | go | 250 | riverqueue | bgentry | @@ -34,10 +39,144 @@ func TestNew(t *testing.T) {
})
}
+func TestListener_Close(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ t.Run("NoOpWithoutConn", func(t *testing.T) {
+ t.Parallel()
+
+ listener := &Listener{dbPool: testPool(ctx, t, nil)}
+ require.Nil(t, listener.conn)
+ require.NoError(t, listener.Close(ctx))
+ })
+
+ t.Run("ReleasesAndUnsetsConn", func(t *testing.T) {
+ t.Parallel()
+
+ config := testPoolConfig()
+
+ releaseInvoked := make(chan struct{})
+
+ // pgx calls AfterRelease in a goroutine, which is why we communicate with a channel.
+ config.AfterRelease = func(c *pgx.Conn) bool {
+ close(releaseInvoked)
+ return true
+ }
+
+ listener := &Listener{dbPool: testPool(ctx, t, config)}
+
+ require.NoError(t, listener.Connect(ctx))
+ require.NotNil(t, listener.conn)
+
+ require.NoError(t, listener.Close(ctx))
+
+ // Connection has been released.
+ select {
+ case <-releaseInvoked:
+ case <-time.After(3 * time.Second):
+ require.FailNow(t, "Timed out waiting for connection to be released")
+ }
+
+ // Despite error, internal connection still unset.
+ require.Nil(t, listener.conn)
+ })
+
+ t.Run("UnsetsConnEvenOnError", func(t *testing.T) {
+ t.Parallel()
+
+ var connStub *connStub
+
+ config := testPoolConfig()
+ config.ConnConfig.DialFunc = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ conn, err := (&net.Dialer{KeepAlive: 5 * time.Minute}).Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ connStub = newConnStub(conn)
+ return connStub, nil
+ }
+
+ listener := &Listener{dbPool: testPool(ctx, t, config)}
+
+ require.NoError(t, listener.Connect(ctx))
+ require.NotNil(t, listener.conn)
+
+ conn := listener.conn.Conn()
+
+ expectedErr := errors.New("conn close error")
+ connStub.closeFunc = func() error {
+ t.Logf("Close invoked; returning error")
+ return expectedErr
+ }
+
+ // Error isn't passed through because we invoke pgxpool.Release.
+ require.NoError(t, listener.Close(ctx))
+
+ // Despite error, internal connection still unset.
+ require.Nil(t, listener.conn)
+
+ // Make sure that our stubbing above worked by closing the connection we
+ // retained and verifying it's the error we set.
+ require.ErrorIs(t, conn.Close(ctx), expectedErr)
+ })
+}
+
func TestInterpretError(t *testing.T) {
t.Parallel()
require.EqualError(t, interpretError(errors.New("an error")), "an error")
require.ErrorIs(t, interpretError(pgx.ErrNoRows), rivertype.ErrNotFound)
require.NoError(t, interpretError(nil))
}
+
+// connStub implements net.Conn and allows us to stub particular functions like
+// Close that are otherwise nigh impossible to test.
+type connStub struct {
+ net.Conn
+
+ closeFunc func() error
+}
+
+func newConnStub(conn net.Conn) *connStub {
+ return &connStub{
+ Conn: conn,
+
+ closeFunc: conn.Close,
+ }
+}
+
+func (c *connStub) Close() error {
+ return c.closeFunc()
+}
+
+func testPoolConfig() *pgxpool.Config { | not more packages 😩 |
river | github_2023 | others | 229 | riverqueue | bgentry | @@ -22,13 +22,6 @@ sql:
rename:
river_job_state: "JobState"
- river_job_state_available: "JobStateAvailable"
- river_job_state_cancelled: "JobStateCancelled"
- river_job_state_completed: "JobStateCompleted"
- river_job_state_discarded: "JobStateDiscarded"
- river_job_state_retryable: "JobStateRetryable"
- river_job_state_running: "JobStateRunning"
- river_job_state_scheduled: "JobStateScheduled" | one less thing to have to fix when I get that pending state added soon. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -117,7 +133,178 @@ type ExecutorTx interface {
Rollback(ctx context.Context) error
}
+// Listner listens for notifications. In Postgres, this is a database connection | ```suggestion
// Listener listens for notifications. In Postgres, this is a database connection
``` |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -332,7 +331,7 @@ var (
// ErrNotFound is returned when a query by ID does not match any existing
// rows. For example, attempting to cancel a job that doesn't exist will
// return this error.
- ErrNotFound = errors.New("not found")
+ ErrNotFound = rivertype.ErrNotFound | Got thrown off at first trying to figure out how you were still returning the top level `ErrNotFound` but now I see it. I think this makes sense—if it's the same actual error value across packages then it will always be considered equal and `errors.Is()` checks will succeed, right?
Only downside is the extra layer of indirection figuring out what the aliased value actually is, but I don't think that matters too much in the case of an error like this. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -1052,44 +1040,28 @@ func (c *Client[TTx]) JobCancel(ctx context.Context, jobID int64) (*rivertype.Jo
// Returns the up-to-date JobRow for the specified jobID if it exists. Returns
// ErrNotFound if the job doesn't exist.
func (c *Client[TTx]) JobCancelTx(ctx context.Context, tx TTx, jobID int64) (*rivertype.JobRow, error) {
- job, err := c.adapter.JobCancelTx(ctx, c.driver.UnwrapTx(tx), jobID)
- if errors.Is(err, riverdriver.ErrNoRows) {
- return nil, ErrNotFound
- }
- if err != nil {
- return nil, err
- }
+ return c.jobCancel(ctx, c.driver.UnwrapExecutor(tx), jobID) | I love that these are refactored to share all their logic, thank you! |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -38,10 +44,13 @@ func New(dbPool *pgxpool.Pool) *Driver {
return &Driver{dbPool: dbPool, queries: dbsqlc.New()}
}
-func (d *Driver) GetDBPool() *pgxpool.Pool { return d.dbPool }
-func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, dbsqlc.New()} }
-func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.Executor { return &Executor{tx, dbsqlc.New()} }
-func (d *Driver) UnwrapTx(tx pgx.Tx) pgx.Tx { return tx }
+func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, dbsqlc.New()} }
+func (d *Driver) GetListener() riverdriver.Listener { return &Listener{dbPool: d.dbPool} }
+func (d *Driver) HasPool() bool { return d.dbPool != nil }
+
+func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.ExecutorTx {
+ return &ExecutorTx{Executor: Executor{tx, dbsqlc.New()}, tx: tx} | I'm surprised golangci-lint doesn't complain about initializing structs without named fields. Do you think we should stop doing that or are you not worried about it? |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -117,7 +133,178 @@ type ExecutorTx interface {
Rollback(ctx context.Context) error
}
+// Listner listens for notifications. In Postgres, this is a database connection
+// where `LISTEN` has been run.
+//
+// API is not stable. DO NOT IMPLEMENT.
+type Listener interface {
+ Close(ctx context.Context) error
+ Connect(ctx context.Context) error
+ Listen(ctx context.Context, topic string) error
+ Ping(ctx context.Context) error
+ Unlisten(ctx context.Context, topic string) error
+ WaitForNotification(ctx context.Context) (*Notification, error)
+}
+
+type Notification struct {
+ Payload string
+ Topic string
+}
+
+type JobCancelParams struct {
+ ID int64
+ CancelAttemptedAt time.Time
+ JobControlTopic string
+}
+
+type JobDeleteBeforeParams struct {
+ CancelledFinalizedAtHorizon time.Time
+ CompletedFinalizedAtHorizon time.Time
+ DiscardedFinalizedAtHorizon time.Time
+ Max int
+}
+
+type JobGetAvailableParams struct {
+ AttemptedBy string
+ Max int
+ Queue string
+}
+
+type JobGetByKindAndUniquePropertiesParams struct {
+ Kind string
+ ByArgs bool
+ Args []byte
+ ByCreatedAt bool
+ CreatedAtStart time.Time
+ CreatedAtEnd time.Time
+ ByQueue bool
+ Queue string
+ ByState bool
+ State []string
+}
+
+type JobGetStuckParams struct {
+ Max int
+ StuckHorizon time.Time
+}
+
+type JobInsertParams struct {
+ Attempt int
+ AttemptedAt *time.Time
+ EncodedArgs []byte
+ Errors [][]byte
+ FinalizedAt *time.Time
+ Kind string
+ MaxAttempts int
+ Metadata []byte
+ Priority int
+ Queue string
+ ScheduledAt *time.Time
+ State rivertype.JobState
+ Tags []string
+} | I assume the reason you've exposed additional fields here that aren't meant to be available on normal inserts (`Finalized`, `Attempt`, etc) is to facilitate testing without a bunch of specialized test insert helpers?
I'm a bit concerned seeing all the bulk inserts now referencing these fields which were previously meant to rely on default values, but are now being explicitly set as part of i.e. a `COPY` or even on a normal insert.
Want to clarify a bit on why you went this route? I'm a bit afraid of subtle bugs being introduced by the tweaked insert commands for example, though there could also be other places where there might be subtly changed behavior. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -1143,10 +1099,10 @@ func (c *Client[TTx]) ID() string {
return c.config.ID
}
-func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*dbadapter.JobInsertParams, error) {
+func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { | haven't seen a triple return value in awhile 😆 |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -2577,25 +2604,29 @@ func Test_Client_JobCompletion(t *testing.T) {
t.Parallel()
require := require.New(t)
- var dbPool *pgxpool.Pool
now := time.Now().UTC()
- config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error {
- _, err := queries.JobSetState(ctx, dbPool, dbsqlc.JobSetStateParams{
+
+ client, bundle := setup(t, newTestConfig(t, nil))
+
+ type JobArgs struct {
+ JobArgsReflectKind[JobArgs]
+ }
+
+ AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error {
+ _, err := client.driver.GetExecutor().JobUpdate(ctx, &riverdriver.JobUpdateParams{
ID: job.ID,
- ErrorDoUpdate: true,
- Error: []byte("{\"error\": \"oops\"}"),
+ ErrorsDoUpdate: true,
+ Errors: [][]byte{[]byte("{\"error\": \"oops\"}")}, | Hmm, I don't love that we have to re-transmit the entire error list over the wire every time we append one. For jobs that have been run many times this could start to be a lot of extra overhead.
Was this necessitated by limitations in the underlying drivers or something? |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -3246,16 +3289,15 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) {
t.Run("Defaults", func(t *testing.T) {
t.Parallel()
- insertParams, err := insertParamsFromArgsAndOptions(noOpArgs{}, nil)
+ insertParams, _, err := insertParamsFromArgsAndOptions(noOpArgs{}, nil)
require.NoError(t, err)
require.Equal(t, `{"name":""}`, string(insertParams.EncodedArgs))
require.Equal(t, (noOpArgs{}).Kind(), insertParams.Kind)
require.Equal(t, rivercommon.MaxAttemptsDefault, insertParams.MaxAttempts)
require.Equal(t, rivercommon.PriorityDefault, insertParams.Priority)
require.Equal(t, QueueDefault, insertParams.Queue)
- require.Equal(t, time.Time{}, insertParams.ScheduledAt)
- require.Equal(t, []string(nil), insertParams.Tags)
- require.False(t, insertParams.Unique) | did we add coverage for the returned unique opts somewhere else? |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -1,44 +0,0 @@
-package dbsqlc
-
-import (
- "github.com/riverqueue/river/internal/util/sliceutil"
- "github.com/riverqueue/river/rivertype"
-)
-
-func JobRowFromInternal(internal *RiverJob) *rivertype.JobRow {
- return &rivertype.JobRow{
- ID: internal.ID,
- Attempt: max(int(internal.Attempt), 0),
- AttemptedAt: internal.AttemptedAt,
- AttemptedBy: internal.AttemptedBy,
- CreatedAt: internal.CreatedAt,
- EncodedArgs: internal.Args,
- Errors: sliceutil.Map(internal.Errors, func(e AttemptError) rivertype.AttemptError { return AttemptErrorFromInternal(&e) }),
- FinalizedAt: internal.FinalizedAt,
- Kind: internal.Kind,
- MaxAttempts: max(int(internal.MaxAttempts), 0),
- Metadata: internal.Metadata,
- Priority: max(int(internal.Priority), 0),
- Queue: internal.Queue,
- ScheduledAt: internal.ScheduledAt.UTC(), // TODO(brandur): Very weird this is the only place a UTC conversion happens. | Do we have any follow-ups to track from the loss of this note? Not sure how deeply we ever looked into ensuring there were no TZ related issues. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -293,3 +309,39 @@ func (e *Elector) notifySubscribers(isLeader bool) {
}
}
}
+
+const deadlineTimeout = 5 * time.Second
+
+// LeaderAttemptElect attempts to elect a leader for the given name. The
+// bool alreadyElected indicates whether this is a potential reelection of
+// an already-elected leader. If the election is successful because there is
+// no leader or the previous leader expired, the provided leaderID will be
+// set as the new leader with a TTL of ttl.
+//
+// Returns whether this leader was successfully elected or an error if one
+// occurred.
+func LeaderAttemptElect(ctx context.Context, exec riverdriver.Executor, alreadyElected bool, params *riverdriver.LeaderElectParams) (bool, error) { | Should this be unexported? Can't see it used externally afaict. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -165,7 +166,11 @@ func (e *Elector) gainLeadership(ctx context.Context, leadershipNotificationChan
}
func (e *Elector) attemptElect(ctx context.Context) (bool, error) {
- elected, err := e.adapter.LeadershipAttemptElect(ctx, false, e.name, e.id, e.interval)
+ elected, err := LeaderAttemptElect(ctx, e.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: e.id,
+ Name: e.name,
+ TTL: e.interval,
+ }) | Welp, I missed this in #217, but we should use the TTL padding here too 🤦♂️ |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -0,0 +1,114 @@
+package leadership
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/internal/riverinternaltest"
+ "github.com/riverqueue/river/internal/riverinternaltest/testfactory"
+ "github.com/riverqueue/river/internal/util/ptrutil"
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/riverdriver/riverpgxv5"
+)
+
+func TestLeaderAttemptElect(t *testing.T) {
+ t.Parallel()
+
+ const (
+ clientID = "client-id"
+ leaderInstanceName = "default"
+ leaderTTL = 10 * time.Second
+ )
+
+ ctx := context.Background()
+
+ type testBundle struct {
+ exec riverdriver.Executor
+ }
+
+ setup := func(t *testing.T) *testBundle {
+ t.Helper()
+
+ driver := riverpgxv5.New(nil)
+
+ return &testBundle{
+ exec: driver.UnwrapExecutor(riverinternaltest.TestTx(ctx, t)),
+ }
+ }
+
+ t.Run("ElectsLeader", func(t *testing.T) {
+ t.Parallel()
+
+ bundle := setup(t)
+
+ elected, err := LeaderAttemptElect(ctx, bundle.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: clientID,
+ Name: leaderInstanceName,
+ TTL: leaderTTL,
+ })
+ require.NoError(t, err)
+ require.True(t, elected) // won election
+
+ leader, err := bundle.exec.LeaderGetElectedLeader(ctx, leaderInstanceName)
+ require.NoError(t, err)
+ require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond)
+ require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond)
+ })
+
+ t.Run("ReelectsSameLeader", func(t *testing.T) {
+ t.Parallel()
+
+ bundle := setup(t)
+
+ leader := testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{
+ LeaderID: ptrutil.Ptr(clientID),
+ Name: ptrutil.Ptr(leaderInstanceName),
+ })
+
+ // Re-elect the same leader. Use a larger TTL to see if time is updated,
+ // because we are in a test transaction and the time is frozen at the start of
+ // the transaction. | On the topic of test coverage here, I'm realizing that all these tests are within a transaction and we won't be able to properly test the DB queries that use `now()` as a result. We'll either need to avoid using `now()` and pass in the time, or have some higher level integration-type tests which operate on a full DB in order to really validate this. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -127,25 +118,25 @@ func (s *Scheduler) Start(ctx context.Context) error { //nolint:dupl
}
type schedulerRunOnceResult struct {
- NumCompletedJobsScheduled int64
+ NumCompletedJobsScheduled int
}
func (s *Scheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, error) {
res := &schedulerRunOnceResult{}
for {
// Wrapped in a function so that defers run as expected.
- numScheduled, err := func() (int64, error) {
+ numScheduled, err := func() (int, error) { | oh come on, you're not gonna schedule 2.1 billion jobs in one query?? 😢 |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -220,6 +223,15 @@ func (p *JobListParams) First(count int) *JobListParams {
return result
}
+// Kinds returns an updated filter set that will only return jobs of the given
+// kinds.
+func (p *JobListParams) Kinds(kinds ...string) *JobListParams {
+ result := p.copy()
+ result.kinds = make([]string, 0, len(kinds))
+ copy(result.kinds, kinds)
+ return result
+} | A bit hard to tell, but I think this addition might be missing test coverage? |
river | github_2023 | others | 212 | riverqueue | bgentry | @@ -1,14 +1,7 @@
module github.com/riverqueue/river/riverdriver
-go 1.21
+go 1.21.4
-require github.com/jackc/pgx/v5 v5.5.0
+replace github.com/riverqueue/river/rivertype => ../rivertype | Is this meant to stay in here? |
river | github_2023 | others | 212 | riverqueue | bgentry | @@ -14,11 +20,40 @@ sql:
emit_methods_with_db_argument: true
emit_result_struct_pointers: true
+ rename:
+ river_job_state: "JobState"
+ river_job_state_available: "JobStateAvailable"
+ river_job_state_cancelled: "JobStateCancelled"
+ river_job_state_completed: "JobStateCompleted"
+ river_job_state_discarded: "JobStateDiscarded"
+ river_job_state_retryable: "JobStateRetryable"
+ river_job_state_running: "JobStateRunning"
+ river_job_state_scheduled: "JobStateScheduled"
+ ttl: "TTL"
+
overrides:
+ - db_type: "pg_catalog.interval"
+ go_type: "time.Duration"
+
- db_type: "timestamptz"
go_type: "time.Time"
+
- db_type: "timestamptz"
go_type:
type: "time.Time"
pointer: true
nullable: true
+
+ # specific columns
+
+ # This one is necessary because `args` is nullable (this seems to have
+ # been an oversight, but one we're determined isn't worth correcting
+ # for now), and the `database/sql` variant of sqlc will give it a
+ # crazy type by default, so here we give it something more reasonable. | To be fixed 🔜 |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -0,0 +1,96 @@
+// Package testfactory provides low level helpers for inserting records directly
+// into the database.
+package testfactory
+
+import (
+ "context"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/internal/rivercommon"
+ "github.com/riverqueue/river/internal/util/ptrutil"
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/rivertype"
+)
+
+type JobOpts struct {
+ EncodedArgs []byte
+ FinalizedAt *time.Time
+ Metadata []byte
+ Priority *int
+ Queue *string
+ ScheduledAt *time.Time
+ State *rivertype.JobState
+}
+
+func Job(ctx context.Context, t *testing.T, exec riverdriver.Executor, opts *JobOpts) *rivertype.JobRow { | we should have done this a long time ago 😆 |
river | github_2023 | others | 212 | riverqueue | bgentry | @@ -0,0 +1,315 @@
+CREATE TYPE river_job_state AS ENUM(
+ 'available',
+ 'cancelled',
+ 'completed',
+ 'discarded',
+ 'retryable',
+ 'running',
+ 'scheduled'
+);
+
+CREATE TABLE river_job(
+ id bigserial PRIMARY KEY,
+ args jsonb,
+ attempt smallint NOT NULL DEFAULT 0,
+ attempted_at timestamptz,
+ attempted_by text[],
+ created_at timestamptz NOT NULL DEFAULT NOW(),
+ errors jsonb[],
+ finalized_at timestamptz,
+ kind text NOT NULL,
+ max_attempts smallint NOT NULL,
+ metadata jsonb NOT NULL DEFAULT '{}' ::jsonb,
+ priority smallint NOT NULL DEFAULT 1,
+ queue text NOT NULL DEFAULT 'default' ::text,
+ state river_job_state NOT NULL DEFAULT 'available' ::river_job_state,
+ scheduled_at timestamptz NOT NULL DEFAULT NOW(),
+ tags varchar(255)[] NOT NULL DEFAULT '{}' ::varchar(255)[],
+ CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL),
+ CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4),
+ CONSTRAINT queue_length CHECK (char_length(queue) > 0 AND char_length(queue) < 128),
+ CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128)
+);
+
+-- name: JobCancel :one
+WITH locked_job AS (
+ SELECT
+ id, queue, state, finalized_at
+ FROM river_job
+ WHERE river_job.id = @id
+ FOR UPDATE
+),
+notification AS (
+ SELECT
+ id,
+ pg_notify(@job_control_topic, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text)
+ FROM
+ locked_job
+ WHERE
+ state NOT IN ('cancelled', 'completed', 'discarded')
+ AND finalized_at IS NULL
+),
+updated_job AS (
+ UPDATE river_job
+ SET
+ -- If the job is actively running, we want to let its current client and
+ -- producer handle the cancellation. Otherwise, immediately cancel it.
+ state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END,
+ finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END,
+ -- Mark the job as cancelled by query so that the rescuer knows not to
+ -- rescue it, even if it gets stuck in the running state:
+ metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], @cancel_attempted_at::jsonb, true)
+ FROM notification
+ WHERE river_job.id = notification.id
+ RETURNING river_job.*
+)
+SELECT *
+FROM river_job
+WHERE id = @id::bigint
+ AND id NOT IN (SELECT id FROM updated_job)
+UNION
+SELECT *
+FROM updated_job;
+
+-- name: JobDeleteBefore :one
+WITH deleted_jobs AS (
+ DELETE FROM river_job
+ WHERE id IN (
+ SELECT id
+ FROM river_job
+ WHERE
+ (state = 'cancelled' AND finalized_at < @cancelled_finalized_at_horizon::timestamptz) OR
+ (state = 'completed' AND finalized_at < @completed_finalized_at_horizon::timestamptz) OR
+ (state = 'discarded' AND finalized_at < @discarded_finalized_at_horizon::timestamptz)
+ ORDER BY id
+ LIMIT @max::bigint
+ )
+ RETURNING *
+)
+SELECT count(*)
+FROM deleted_jobs;
+
+-- name: JobGetAvailable :many
+WITH locked_jobs AS (
+ SELECT
+ *
+ FROM
+ river_job
+ WHERE
+ state = 'available'::river_job_state
+ AND queue = @queue::text
+ AND scheduled_at <= now()
+ ORDER BY
+ priority ASC,
+ scheduled_at ASC,
+ id ASC
+ LIMIT @max::integer
+ FOR UPDATE
+ SKIP LOCKED
+)
+UPDATE
+ river_job
+SET
+ state = 'running'::river_job_state,
+ attempt = river_job.attempt + 1,
+ attempted_at = now(),
+ attempted_by = array_append(river_job.attempted_by, @attempted_by::text)
+FROM
+ locked_jobs
+WHERE
+ river_job.id = locked_jobs.id
+RETURNING
+ river_job.*;
+
+-- name: JobGetByKindMany :many
+SELECT *
+FROM river_job
+WHERE kind = any(@kind::text[])
+ORDER BY id;
+
+-- name: JobGetByKindAndUniqueProperties :one | Missed the alphabetization on these two |
river | github_2023 | others | 212 | riverqueue | bgentry | @@ -0,0 +1,315 @@
+CREATE TYPE river_job_state AS ENUM(
+ 'available',
+ 'cancelled',
+ 'completed',
+ 'discarded',
+ 'retryable',
+ 'running',
+ 'scheduled'
+);
+
+CREATE TABLE river_job(
+ id bigserial PRIMARY KEY,
+ args jsonb,
+ attempt smallint NOT NULL DEFAULT 0,
+ attempted_at timestamptz,
+ attempted_by text[],
+ created_at timestamptz NOT NULL DEFAULT NOW(),
+ errors jsonb[],
+ finalized_at timestamptz,
+ kind text NOT NULL,
+ max_attempts smallint NOT NULL,
+ metadata jsonb NOT NULL DEFAULT '{}' ::jsonb,
+ priority smallint NOT NULL DEFAULT 1,
+ queue text NOT NULL DEFAULT 'default' ::text,
+ state river_job_state NOT NULL DEFAULT 'available' ::river_job_state,
+ scheduled_at timestamptz NOT NULL DEFAULT NOW(),
+ tags varchar(255)[] NOT NULL DEFAULT '{}' ::varchar(255)[],
+ CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL),
+ CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4),
+ CONSTRAINT queue_length CHECK (char_length(queue) > 0 AND char_length(queue) < 128),
+ CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128)
+);
+
+-- name: JobCancel :one
+WITH locked_job AS (
+ SELECT
+ id, queue, state, finalized_at
+ FROM river_job
+ WHERE river_job.id = @id
+ FOR UPDATE
+),
+notification AS (
+ SELECT
+ id,
+ pg_notify(@job_control_topic, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text)
+ FROM
+ locked_job
+ WHERE
+ state NOT IN ('cancelled', 'completed', 'discarded')
+ AND finalized_at IS NULL
+),
+updated_job AS (
+ UPDATE river_job
+ SET
+ -- If the job is actively running, we want to let its current client and
+ -- producer handle the cancellation. Otherwise, immediately cancel it.
+ state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END,
+ finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END,
+ -- Mark the job as cancelled by query so that the rescuer knows not to
+ -- rescue it, even if it gets stuck in the running state:
+ metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], @cancel_attempted_at::jsonb, true)
+ FROM notification
+ WHERE river_job.id = notification.id
+ RETURNING river_job.*
+)
+SELECT *
+FROM river_job
+WHERE id = @id::bigint
+ AND id NOT IN (SELECT id FROM updated_job)
+UNION
+SELECT *
+FROM updated_job;
+
+-- name: JobDeleteBefore :one
+WITH deleted_jobs AS (
+ DELETE FROM river_job
+ WHERE id IN (
+ SELECT id
+ FROM river_job
+ WHERE
+ (state = 'cancelled' AND finalized_at < @cancelled_finalized_at_horizon::timestamptz) OR
+ (state = 'completed' AND finalized_at < @completed_finalized_at_horizon::timestamptz) OR
+ (state = 'discarded' AND finalized_at < @discarded_finalized_at_horizon::timestamptz)
+ ORDER BY id
+ LIMIT @max::bigint
+ )
+ RETURNING *
+)
+SELECT count(*)
+FROM deleted_jobs;
+
+-- name: JobGetAvailable :many
+WITH locked_jobs AS (
+ SELECT
+ *
+ FROM
+ river_job
+ WHERE
+ state = 'available'::river_job_state
+ AND queue = @queue::text
+ AND scheduled_at <= now()
+ ORDER BY
+ priority ASC,
+ scheduled_at ASC,
+ id ASC
+ LIMIT @max::integer
+ FOR UPDATE
+ SKIP LOCKED
+)
+UPDATE
+ river_job
+SET
+ state = 'running'::river_job_state,
+ attempt = river_job.attempt + 1,
+ attempted_at = now(),
+ attempted_by = array_append(river_job.attempted_by, @attempted_by::text)
+FROM
+ locked_jobs
+WHERE
+ river_job.id = locked_jobs.id
+RETURNING
+ river_job.*;
+
+-- name: JobGetByKindMany :many
+SELECT *
+FROM river_job
+WHERE kind = any(@kind::text[])
+ORDER BY id;
+
+-- name: JobGetByKindAndUniqueProperties :one
+SELECT *
+FROM river_job
+WHERE kind = @kind
+ AND CASE WHEN @by_args::boolean THEN args = @args ELSE true END
+ AND CASE WHEN @by_created_at::boolean THEN tstzrange(@created_at_start::timestamptz, @created_at_end::timestamptz, '[)') @> created_at ELSE true END
+ AND CASE WHEN @by_queue::boolean THEN queue = @queue ELSE true END
+ AND CASE WHEN @by_state::boolean THEN state::text = any(@state::text[]) ELSE true END;
+
+-- name: JobGetByID :one
+SELECT *
+FROM river_job
+WHERE id = @id
+LIMIT 1;
+
+-- name: JobGetByIDMany :many
+SELECT *
+FROM river_job
+WHERE id = any(@id::bigint[])
+ORDER BY id;
+
+-- name: JobGetStuck :many
+SELECT *
+FROM river_job
+WHERE state = 'running'::river_job_state
+ AND attempted_at < @stuck_horizon::timestamptz
+LIMIT @max;
+
+-- name: JobInsert :one
+INSERT INTO river_job(
+ args,
+ attempt,
+ attempted_at,
+ created_at,
+ errors,
+ finalized_at, | Per earlier conversation, I think there's a good argument for minimizing these two insert queries so that they only have fields which are intended to be set during initial insert. We can save the fields like `errors`, `finalized_at`, `attempt`, and `attempted_at` for a separate insert function only used by internal tests.
This would keep this queries shorter and eliminate some unnecessary data being sent over the wire on every insert. Then we can add a separate insert query that lets you set _any_ field (for testing) but which isn't actually used by any application code. |
river | github_2023 | others | 212 | riverqueue | bgentry | @@ -0,0 +1,30 @@
+-- name: JobInsertMany :copyfrom
+INSERT INTO river_job(
+ args,
+ attempt,
+ attempted_at,
+ errors,
+ finalized_at, | Likewise on eliminating unnecessary fields from this `JobInsertMany` query. The good thing is the extra fields will _only_ need to be specified on the all-fields test helper insert query, whereas `JobInsert` and `JobInsertMany` will have shorter lists of fields. Plus we will only need a non-copy (one-at-a-time) API for the test helper because we won't care about efficiently inserting many jobs at once in tests. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -55,22 +53,13 @@ func JobCompleteTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs JobArgs](ctx
return nil, errors.New("job must be running")
}
- var (
- driver TDriver
- queries = &dbsqlc.Queries{}
- )
-
- internal, err := queries.JobSetState(ctx, driver.UnwrapTx(tx), dbsqlc.JobSetStateParams{
- ID: job.ID,
- FinalizedAtDoUpdate: true,
- FinalizedAt: ptrutil.Ptr(time.Now()),
- State: dbsqlc.JobStateCompleted,
- })
+ var driver TDriver
+ jobRow, err := driver.UnwrapExecutor(tx).JobSetStateIfRunning(ctx, riverdriver.JobSetStateCompleted(job.ID, time.Now())) | This is a small behavioral change due to the differences in the underlying queries, but I don't think it's a big issue. You really shouldn't be using this to set a job as complete if it's not actually running. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -99,34 +447,133 @@ func (t *ExecutorTx) Rollback(ctx context.Context) error {
return t.tx.Rollback(ctx)
}
-func interpretError(err error) error {
- if errors.Is(err, pgx.ErrNoRows) {
- return riverdriver.ErrNoRows
+type Listener struct {
+ conn *pgxpool.Conn
+ dbPool *pgxpool.Pool
+ mu sync.RWMutex
+}
+
+func (l *Listener) Close(ctx context.Context) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if l.conn == nil {
+ return nil
+ }
+
+ if err := l.conn.Conn().Close(ctx); err != nil {
+ return err
+ }
+ l.conn.Release()
+ l.conn = nil
+ return nil
+}
+
+func (l *Listener) Connect(ctx context.Context) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if l.conn != nil {
+ return errors.New("connection already established")
+ }
+
+ conn, err := l.dbPool.Acquire(ctx)
+ if err != nil {
+ return err
}
+
+ l.conn = conn
+ return nil
+}
+
+func (l *Listener) Listen(ctx context.Context, topic string) error {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ _, err := l.conn.Exec(ctx, "LISTEN "+topic)
return err
}
-func mapMigrations(migrations []*dbsqlc.RiverMigration) []*riverdriver.Migration {
- if migrations == nil {
- return nil
+func (l *Listener) Ping(ctx context.Context) error {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ return l.conn.Ping(ctx)
+}
+
+func (l *Listener) Unlisten(ctx context.Context, topic string) error {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ _, err := l.conn.Exec(ctx, "UNLISTEN "+topic)
+ return err
+}
+
+func (l *Listener) WaitForNotification(ctx context.Context) (*riverdriver.Notification, error) {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ notification, err := l.conn.Conn().WaitForNotification(ctx)
+ if err != nil {
+ return nil, err
}
- return mapSlice(migrations, func(m *dbsqlc.RiverMigration) *riverdriver.Migration {
- return &riverdriver.Migration{
- ID: int(m.ID),
- CreatedAt: m.CreatedAt,
- Version: int(m.Version),
- }
- })
+ return &riverdriver.Notification{
+ Topic: notification.Channel,
+ Payload: notification.Payload,
+ }, nil
}
-// mapSlice manipulates a slice and transforms it to a slice of another type.
-func mapSlice[T any, R any](collection []T, mapFunc func(T) R) []R {
- result := make([]R, len(collection))
+func attemptErrorFromInternal(e *dbsqlc.AttemptError) rivertype.AttemptError {
+ return rivertype.AttemptError{
+ At: e.At,
+ Attempt: int(e.Attempt),
+ Error: e.Error,
+ Trace: e.Trace,
+ }
+}
- for i, item := range collection {
- result[i] = mapFunc(item)
+func interpretError(err error) error {
+ if errors.Is(err, pgx.ErrNoRows) {
+ return rivertype.ErrNotFound
+ }
+ return err
+}
+
+func jobRowFromInternal(internal *dbsqlc.RiverJob) *rivertype.JobRow {
+ return &rivertype.JobRow{
+ ID: internal.ID, | May as well fully sort this struct declaration now, same for `migrationFromInternal` below |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -0,0 +1,1118 @@
+package riverdrivertest
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "slices"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/internal/notifier"
+ "github.com/riverqueue/river/internal/rivercommon"
+ "github.com/riverqueue/river/internal/riverinternaltest/testfactory" //nolint:depguard
+ "github.com/riverqueue/river/internal/util/ptrutil"
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/rivertype"
+)
+
+type testBundle struct{}
+
+func setupExecutor[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) (riverdriver.Executor, *testBundle) {
+ t.Helper()
+
+ tx := beginTx(ctx, t)
+ return driver.UnwrapExecutor(tx), &testBundle{}
+}
+
+// ExerciseExecutorFull exercises a driver that's expected to provide full
+// functionality.
+func ExerciseExecutorFull[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) {
+ t.Helper()
+
+ const clientID = "test-client-id"
+
+ makeInsertParams := func() *riverdriver.JobInsertParams {
+ return &riverdriver.JobInsertParams{
+ EncodedArgs: []byte(`{}`),
+ Kind: "fake_job",
+ MaxAttempts: rivercommon.MaxAttemptsDefault,
+ Metadata: []byte(`{}`),
+ Priority: rivercommon.PriorityDefault,
+ Queue: rivercommon.QueueDefault,
+ ScheduledAt: nil,
+ State: rivertype.JobStateAvailable,
+ }
+ }
+
+ // Expect no pool. We'll be using transactions only throughout these tests.
+ require.False(t, driver.HasPool())
+
+ // Encompasses all minimal functionality.
+ ExerciseExecutorMigrationOnly[TTx](ctx, t, driver, beginTx)
+
+ t.Run("Begin", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ execTx, err := exec.Begin(ctx)
+ require.NoError(t, err)
+ t.Cleanup(func() { _ = execTx.Rollback(ctx) })
+
+ // Job visible in subtransaction, but not parent.
+ job := testfactory.Job(ctx, t, execTx, &testfactory.JobOpts{})
+
+ _, err = execTx.JobGetByID(ctx, job.ID)
+ require.NoError(t, err)
+
+ require.NoError(t, execTx.Rollback(ctx))
+
+ _, err = exec.JobGetByID(ctx, job.ID)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ })
+
+ t.Run("JobCancel", func(t *testing.T) {
+ t.Parallel()
+
+ for _, startingState := range []rivertype.JobState{
+ rivertype.JobStateAvailable,
+ rivertype.JobStateRetryable,
+ rivertype.JobStateScheduled,
+ } {
+ startingState := startingState
+
+ t.Run(fmt.Sprintf("CancelsJobIn%sState", startingState), func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+ nowStr := now.Format(time.RFC3339Nano)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ State: &startingState,
+ })
+ require.Equal(t, startingState, job.State)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: now,
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, jobAfter)
+
+ require.Equal(t, rivertype.JobStateCancelled, jobAfter.State)
+ require.WithinDuration(t, time.Now(), *jobAfter.FinalizedAt, 2*time.Second)
+ require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata))
+ })
+ }
+
+ t.Run("RunningJobIsNotImmediatelyCancelled", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+ nowStr := now.Format(time.RFC3339Nano)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ State: ptrutil.Ptr(rivertype.JobStateRunning),
+ })
+ require.Equal(t, rivertype.JobStateRunning, job.State)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: now,
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, jobAfter)
+ require.Equal(t, rivertype.JobStateRunning, jobAfter.State)
+ require.Nil(t, jobAfter.FinalizedAt)
+ require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata))
+ })
+
+ for _, startingState := range []rivertype.JobState{
+ rivertype.JobStateCancelled,
+ rivertype.JobStateCompleted,
+ rivertype.JobStateDiscarded,
+ } {
+ startingState := startingState
+
+ t.Run(fmt.Sprintf("DoesNotAlterFinalizedJobIn%sState", startingState), func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ FinalizedAt: ptrutil.Ptr(time.Now()),
+ State: &startingState,
+ })
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: time.Now(),
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.Equal(t, startingState, jobAfter.State)
+ require.WithinDuration(t, *job.FinalizedAt, *jobAfter.FinalizedAt, time.Microsecond)
+ require.JSONEq(t, `{}`, string(jobAfter.Metadata))
+ })
+ }
+
+ t.Run("ReturnsErrNotFoundIfJobDoesNotExist", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: 1234567890,
+ CancelAttemptedAt: time.Now(),
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ require.Nil(t, jobAfter)
+ })
+ })
+
+ t.Run("JobDeleteBefore", func(t *testing.T) {
+ t.Parallel()
+ })
+
+ t.Run("JobGetAvailable", func(t *testing.T) {
+ t.Parallel()
+
+ t.Run("Success", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1)
+
+ jobRow := jobRows[0]
+ require.Equal(t, []string{clientID}, jobRow.AttemptedBy)
+ })
+
+ t.Run("ConstrainedToLimit", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ // Two rows inserted but only one found because of the added limit.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 1,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1)
+ })
+
+ t.Run("ConstrainedToQueue", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ Queue: ptrutil.Ptr("other-queue"),
+ })
+
+ // Job is in a non-default queue so it's not found.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Empty(t, jobRows)
+ })
+
+ t.Run("ConstrainedToScheduledAtBeforeNow", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ ScheduledAt: ptrutil.Ptr(time.Now().Add(1 * time.Minute)),
+ })
+
+ // Job is scheduled a while from now so it's not found.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Empty(t, jobRows)
+ })
+
+ t.Run("Prioritized", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ // Insert jobs with decreasing priority numbers (3, 2, 1) which means increasing priority.
+ for i := 3; i > 0; i-- {
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ Priority: &i,
+ })
+ }
+
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 2,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 2, "expected to fetch exactly 2 jobs")
+
+ // Because the jobs are ordered within the fetch query's CTE but *not* within
+ // the final query, the final result list may not actually be sorted. This is
+ // fine, because we've already ensured that we've fetched the jobs we wanted
+ // to fetch via that ORDER BY. For testing we'll need to sort the list after
+ // fetch to easily assert that the expected jobs are in it.
+ sort.Slice(jobRows, func(i, j int) bool { return jobRows[i].Priority < jobRows[j].Priority })
+
+ require.Equal(t, 1, jobRows[0].Priority, "expected first job to have priority 1")
+ require.Equal(t, 2, jobRows[1].Priority, "expected second job to have priority 2")
+
+ // Should fetch the one remaining job on the next attempt:
+ jobRows, err = exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 1,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1, "expected to fetch exactly 1 job")
+ require.Equal(t, 3, jobRows[0].Priority, "expected final job to have priority 3")
+ })
+ })
+
+ t.Run("JobGetByID", func(t *testing.T) {
+ t.Parallel()
+
+ t.Run("FetchesAnExistingJob", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ job, err := exec.JobGetByID(ctx, job.ID)
+ require.NoError(t, err)
+ require.NotNil(t, job)
+
+ require.Equal(t, job.ID, job.ID)
+ require.Equal(t, rivertype.JobStateAvailable, job.State)
+ require.WithinDuration(t, now, job.CreatedAt, 100*time.Millisecond)
+ require.WithinDuration(t, now, job.ScheduledAt, 100*time.Millisecond)
+ })
+
+ t.Run("ReturnsErrNoRowsIfJobDoesntExist", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ job, err := exec.JobGetByID(ctx, 99999)
+ require.Error(t, err)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ require.Nil(t, job)
+ })
+ })
+
+ t.Run("JobGetByIDMany", func(t *testing.T) {
+ t.Parallel()
+ })
+
+ t.Run("JobGetByKindAndUniqueProperties", func(t *testing.T) {
+ t.Parallel()
+ })
+
+ t.Run("JobGetByKindMany", func(t *testing.T) {
+ t.Parallel()
+ })
+
+ t.Run("JobGetStuck", func(t *testing.T) {
+ t.Parallel()
+ })
+
+ t.Run("JobInsert", func(t *testing.T) {
+ t.Parallel()
+ }) | Are these still meant to be empty? same comment for the other empty ones in here |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -1355,57 +1360,70 @@ func Test_Client_JobGet(t *testing.T) {
func Test_Client_JobList(t *testing.T) {
t.Parallel()
- var (
- ctx = context.Background()
- queries = dbsqlc.New()
- )
-
- type insertJobParams struct {
- AttemptedAt *time.Time
- FinalizedAt *time.Time
- Kind string
- Metadata []byte
- Queue string
- ScheduledAt *time.Time
- State dbsqlc.JobState
- }
+ ctx := context.Background()
- insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob {
- job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{
- Attempt: 1,
- AttemptedAt: params.AttemptedAt,
- FinalizedAt: params.FinalizedAt,
- Kind: valutil.FirstNonZero(params.Kind, "test_kind"),
- MaxAttempts: rivercommon.MaxAttemptsDefault,
- Metadata: params.Metadata,
- Priority: rivercommon.PriorityDefault,
- Queue: QueueDefault,
- ScheduledAt: params.ScheduledAt,
- State: valutil.FirstNonZero(params.State, dbsqlc.JobStateAvailable),
- })
- require.NoError(t, err)
- return job
+ type testBundle struct {
+ exec riverdriver.Executor
}
- type testBundle struct{}
-
setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) {
t.Helper()
+ dbPool := riverinternaltest.TestDB(ctx, t)
config := newTestConfig(t, nil)
- client := newTestClient(ctx, t, config)
+ client := newTestClient(t, dbPool, config)
- return client, &testBundle{}
+ return client, &testBundle{
+ exec: client.driver.GetExecutor(),
+ }
}
+ t.Run("FiltersByKind", func(t *testing.T) { //nolint:dupl | heh, this linter can get pretty annoying in tests. |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -1355,57 +1360,70 @@ func Test_Client_JobGet(t *testing.T) {
func Test_Client_JobList(t *testing.T) {
t.Parallel()
- var (
- ctx = context.Background()
- queries = dbsqlc.New()
- )
-
- type insertJobParams struct {
- AttemptedAt *time.Time
- FinalizedAt *time.Time
- Kind string
- Metadata []byte
- Queue string
- ScheduledAt *time.Time
- State dbsqlc.JobState
- }
+ ctx := context.Background()
- insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob {
- job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{
- Attempt: 1,
- AttemptedAt: params.AttemptedAt,
- FinalizedAt: params.FinalizedAt,
- Kind: valutil.FirstNonZero(params.Kind, "test_kind"),
- MaxAttempts: rivercommon.MaxAttemptsDefault,
- Metadata: params.Metadata,
- Priority: rivercommon.PriorityDefault,
- Queue: QueueDefault,
- ScheduledAt: params.ScheduledAt,
- State: valutil.FirstNonZero(params.State, dbsqlc.JobStateAvailable),
- })
- require.NoError(t, err)
- return job
+ type testBundle struct {
+ exec riverdriver.Executor
}
- type testBundle struct{}
-
setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) {
t.Helper()
+ dbPool := riverinternaltest.TestDB(ctx, t)
config := newTestConfig(t, nil)
- client := newTestClient(ctx, t, config)
+ client := newTestClient(t, dbPool, config)
- return client, &testBundle{}
+ return client, &testBundle{
+ exec: client.driver.GetExecutor(),
+ }
}
+ t.Run("FiltersByKind", func(t *testing.T) { //nolint:dupl
+ t.Parallel()
+
+ client, bundle := setup(t)
+
+ job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1")})
+ job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1")})
+ job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_2")})
+
+ jobs, err := client.JobList(ctx, NewJobListParams().Kinds("test_kind_1"))
+ require.NoError(t, err)
+ // jobs ordered by ScheduledAt ASC by default
+ require.Equal(t, []int64{job1.ID, job2.ID}, sliceutil.Map(jobs, func(job *rivertype.JobRow) int64 { return job.ID }))
+
+ jobs, err = client.JobList(ctx, NewJobListParams().Kinds("test_kind_2"))
+ require.NoError(t, err)
+ require.Equal(t, []int64{job3.ID}, sliceutil.Map(jobs, func(job *rivertype.JobRow) int64 { return job.ID }))
+ })
+
+ t.Run("FiltersByQueue", func(t *testing.T) { //nolint:dupl | Thanks for adding this and fixing the issue :pray: |
river | github_2023 | go | 212 | riverqueue | bgentry | @@ -0,0 +1,1731 @@
+package riverdrivertest
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "slices"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/internal/notifier"
+ "github.com/riverqueue/river/internal/rivercommon"
+ "github.com/riverqueue/river/internal/riverinternaltest/testfactory" //nolint:depguard
+ "github.com/riverqueue/river/internal/util/ptrutil"
+ "github.com/riverqueue/river/internal/util/sliceutil"
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/rivertype"
+)
+
+type testBundle struct{}
+
+func setupExecutor[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) (riverdriver.Executor, *testBundle) {
+ t.Helper()
+
+ tx := beginTx(ctx, t)
+ return driver.UnwrapExecutor(tx), &testBundle{}
+}
+
+// ExerciseExecutorFull exercises a driver that's expected to provide full
+// functionality.
+func ExerciseExecutorFull[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) {
+ t.Helper()
+
+ const clientID = "test-client-id"
+
+ // Expect no pool. We'll be using transactions only throughout these tests.
+ require.False(t, driver.HasPool())
+
+ // Encompasses all minimal functionality.
+ ExerciseExecutorMigrationOnly[TTx](ctx, t, driver, beginTx)
+
+ t.Run("Begin", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ execTx, err := exec.Begin(ctx)
+ require.NoError(t, err)
+ t.Cleanup(func() { _ = execTx.Rollback(ctx) })
+
+ // Job visible in subtransaction, but not parent.
+ job := testfactory.Job(ctx, t, execTx, &testfactory.JobOpts{})
+
+ _, err = execTx.JobGetByID(ctx, job.ID)
+ require.NoError(t, err)
+
+ require.NoError(t, execTx.Rollback(ctx))
+
+ _, err = exec.JobGetByID(ctx, job.ID)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ })
+
+ t.Run("JobCancel", func(t *testing.T) {
+ t.Parallel()
+
+ for _, startingState := range []rivertype.JobState{
+ rivertype.JobStateAvailable,
+ rivertype.JobStateRetryable,
+ rivertype.JobStateScheduled,
+ } {
+ startingState := startingState
+
+ t.Run(fmt.Sprintf("CancelsJobIn%sState", startingState), func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+ nowStr := now.Format(time.RFC3339Nano)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ State: &startingState,
+ })
+ require.Equal(t, startingState, job.State)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: now,
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, jobAfter)
+
+ require.Equal(t, rivertype.JobStateCancelled, jobAfter.State)
+ require.WithinDuration(t, time.Now(), *jobAfter.FinalizedAt, 2*time.Second)
+ require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata))
+ })
+ }
+
+ t.Run("RunningJobIsNotImmediatelyCancelled", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+ nowStr := now.Format(time.RFC3339Nano)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ State: ptrutil.Ptr(rivertype.JobStateRunning),
+ })
+ require.Equal(t, rivertype.JobStateRunning, job.State)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: now,
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, jobAfter)
+ require.Equal(t, rivertype.JobStateRunning, jobAfter.State)
+ require.Nil(t, jobAfter.FinalizedAt)
+ require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata))
+ })
+
+ for _, startingState := range []rivertype.JobState{
+ rivertype.JobStateCancelled,
+ rivertype.JobStateCompleted,
+ rivertype.JobStateDiscarded,
+ } {
+ startingState := startingState
+
+ t.Run(fmt.Sprintf("DoesNotAlterFinalizedJobIn%sState", startingState), func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ FinalizedAt: ptrutil.Ptr(time.Now()),
+ State: &startingState,
+ })
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: job.ID,
+ CancelAttemptedAt: time.Now(),
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.NoError(t, err)
+ require.Equal(t, startingState, jobAfter.State)
+ require.WithinDuration(t, *job.FinalizedAt, *jobAfter.FinalizedAt, time.Microsecond)
+ require.JSONEq(t, `{}`, string(jobAfter.Metadata))
+ })
+ }
+
+ t.Run("ReturnsErrNotFoundIfJobDoesNotExist", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{
+ ID: 1234567890,
+ CancelAttemptedAt: time.Now(),
+ JobControlTopic: string(notifier.NotificationTopicJobControl),
+ })
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ require.Nil(t, jobAfter)
+ })
+ })
+
+ t.Run("JobDeleteBefore", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ var (
+ horizon = time.Now()
+ beforeHorizon = horizon.Add(-1 * time.Minute)
+ afterHorizon = horizon.Add(1 * time.Minute)
+ )
+
+ deletedJob1 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{FinalizedAt: &beforeHorizon, State: ptrutil.Ptr(rivertype.JobStateCancelled)})
+ deletedJob2 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{FinalizedAt: &beforeHorizon, State: ptrutil.Ptr(rivertype.JobStateCompleted)})
+ deletedJob3 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{FinalizedAt: &beforeHorizon, State: ptrutil.Ptr(rivertype.JobStateDiscarded)})
+
+ // Not deleted because not appropriate state.
+ notDeletedJob1 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable)})
+ notDeletedJob2 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)})
+
+ // Not deleted because after the delete horizon.
+ notDeletedJob3 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{FinalizedAt: &afterHorizon, State: ptrutil.Ptr(rivertype.JobStateCancelled)})
+
+ // Max two deleted on the first pass.
+ numDeleted, err := exec.JobDeleteBefore(ctx, &riverdriver.JobDeleteBeforeParams{
+ CancelledFinalizedAtHorizon: horizon,
+ CompletedFinalizedAtHorizon: horizon,
+ DiscardedFinalizedAtHorizon: horizon,
+ Max: 2,
+ })
+ require.NoError(t, err)
+ require.Equal(t, 2, numDeleted)
+
+ // And one more pass gets the last one.
+ numDeleted, err = exec.JobDeleteBefore(ctx, &riverdriver.JobDeleteBeforeParams{
+ CancelledFinalizedAtHorizon: horizon,
+ CompletedFinalizedAtHorizon: horizon,
+ DiscardedFinalizedAtHorizon: horizon,
+ Max: 2,
+ })
+ require.NoError(t, err)
+ require.Equal(t, 1, numDeleted)
+
+ // All deleted.
+ _, err = exec.JobGetByID(ctx, deletedJob1.ID)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ _, err = exec.JobGetByID(ctx, deletedJob2.ID)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+ _, err = exec.JobGetByID(ctx, deletedJob3.ID)
+ require.ErrorIs(t, err, rivertype.ErrNotFound)
+
+ // Not deleted
+ _, err = exec.JobGetByID(ctx, notDeletedJob1.ID)
+ require.NoError(t, err)
+ _, err = exec.JobGetByID(ctx, notDeletedJob2.ID)
+ require.NoError(t, err)
+ _, err = exec.JobGetByID(ctx, notDeletedJob3.ID)
+ require.NoError(t, err)
+ })
+
+ t.Run("JobGetAvailable", func(t *testing.T) {
+ t.Parallel()
+
+ t.Run("Success", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1)
+
+ jobRow := jobRows[0]
+ require.Equal(t, []string{clientID}, jobRow.AttemptedBy)
+ })
+
+ t.Run("ConstrainedToLimit", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ // Two rows inserted but only one found because of the added limit.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 1,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1)
+ })
+
+ t.Run("ConstrainedToQueue", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ Queue: ptrutil.Ptr("other-queue"),
+ })
+
+ // Job is in a non-default queue so it's not found.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Empty(t, jobRows)
+ })
+
+ t.Run("ConstrainedToScheduledAtBeforeNow", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ ScheduledAt: ptrutil.Ptr(time.Now().Add(1 * time.Minute)),
+ })
+
+ // Job is scheduled a while from now so it's not found.
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 100,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Empty(t, jobRows)
+ })
+
+ t.Run("Prioritized", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ // Insert jobs with decreasing priority numbers (3, 2, 1) which means increasing priority.
+ for i := 3; i > 0; i-- {
+ _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
+ Priority: &i,
+ })
+ }
+
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 2,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.Len(t, jobRows, 2, "expected to fetch exactly 2 jobs")
+
+ // Because the jobs are ordered within the fetch query's CTE but *not* within
+ // the final query, the final result list may not actually be sorted. This is
+ // fine, because we've already ensured that we've fetched the jobs we wanted
+ // to fetch via that ORDER BY. For testing we'll need to sort the list after
+ // fetch to easily assert that the expected jobs are in it.
+ sort.Slice(jobRows, func(i, j int) bool { return jobRows[i].Priority < jobRows[j].Priority })
+
+ require.Equal(t, 1, jobRows[0].Priority, "expected first job to have priority 1")
+ require.Equal(t, 2, jobRows[1].Priority, "expected second job to have priority 2")
+
+ // Should fetch the one remaining job on the next attempt:
+ jobRows, err = exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
+ AttemptedBy: clientID,
+ Max: 1,
+ Queue: rivercommon.QueueDefault,
+ })
+ require.NoError(t, err)
+ require.NoError(t, err)
+ require.Len(t, jobRows, 1, "expected to fetch exactly 1 job")
+ require.Equal(t, 3, jobRows[0].Priority, "expected final job to have priority 3")
+ })
+ })
+
+ t.Run("JobGetByID", func(t *testing.T) {
+ t.Parallel()
+
+ t.Run("FetchesAnExistingJob", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ now := time.Now().UTC()
+
+ job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{})
+
+ fetchedJob, err := exec.JobGetByID(ctx, job.ID)
+ require.NoError(t, err)
+ require.NotNil(t, fetchedJob)
+
+ require.Equal(t, job.ID, fetchedJob.ID)
+ require.Equal(t, rivertype.JobStateAvailable, fetchedJob.State)
+ require.WithinDuration(t, now, fetchedJob.CreatedAt, 100*time.Millisecond)
+ require.WithinDuration(t, now, fetchedJob.ScheduledAt, 100*time.Millisecond)
+ })
+
+ t.Run("ReturnsErrNotFoundIfJobDoesNotExist", func(t *testing.T) {
+ t.Parallel()
+
+ exec, _ := setupExecutor(ctx, t, driver, beginTx)
+
+ job, err := exec.JobGetByID(ctx, 99999) | realized from some of our other tests that we can just use 0 here and have no possibility of a flaky test ever
```suggestion
job, err := exec.JobGetByID(ctx, 0)
``` |
river | github_2023 | go | 222 | riverqueue | bgentry | @@ -79,25 +83,52 @@ func (d *DebouncedChan) nonBlockingSendOnC() {
}
}
-func (d *DebouncedChan) waitForTimer() {
+// Waits for the timer to be fired, and loops as long as Call invocations comes | ```suggestion
// Waits for the timer to be fired, and loops as long as Call invocations come
``` |
river | github_2023 | go | 221 | riverqueue | bgentry | @@ -77,3 +80,63 @@ func TestDebouncedChan_OnlyBuffersOneEvent(t *testing.T) {
case <-time.After(20 * time.Millisecond):
}
}
+
+func TestDebouncedChan_ContinuousOperation(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ const (
+ cooldown = 17 * time.Millisecond
+ increment = 1 * time.Millisecond
+ testTime = 150 * time.Millisecond
+ )
+
+ var (
+ debouncedChan = NewDebouncedChan(ctx, cooldown)
+ goroutineDone = make(chan struct{})
+ numSignals int
+ )
+
+ go func() {
+ defer close(goroutineDone)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-debouncedChan.C():
+ numSignals++
+ }
+ }
+ }()
+
+ for tm := increment; tm <= testTime; tm += increment {
+ time.Sleep(increment)
+ debouncedChan.Call()
+ }
+
+ cancel()
+
+ select {
+ case <-goroutineDone:
+ case <-time.After(3 * time.Second):
+ require.FailNow(t, "Timed out waiting for goroutine to finish")
+ }
+
+ // Expect number of signals equal to number of cooldown periods that fit
+ // into our total test time, multiplied by two, because the debounced chan
+ // fires at the beginning and end of a bounce period. +1 for the last period
+ // that fires on the leading edge, but which we don't give time for the
+ // timer to fully expire. (We've chosen numbers so that test time doesn't
+ // divide by cooldown perfectly.) | Nope, doesn't sound intentional at all. I think the purpose was for it to be a leading edge debounce algorithm (fire immediately, then do not fire again until (a) another signal comes in, and (b) at least `cooldown` has elapsed). |
river | github_2023 | go | 221 | riverqueue | bgentry | @@ -11,45 +11,66 @@ import (
// subsequent calls are delayed until the cooldown period has elapsed and are
// also coalesced into a single call.
type DebouncedChan struct {
- done <-chan struct{}
- c chan struct{}
- cooldown time.Duration
-
- mu sync.Mutex
- timer *time.Timer
- sendWhenTimerExpired bool
+ c chan struct{}
+ cooldown time.Duration
+ ctxDone <-chan struct{}
+ mu sync.Mutex
+ sendOnTimerExpired bool
+ timer *time.Timer
+ timerDone bool | Do you want to preserve the traditional mutex organization where its declaration precedes all the things that it protects? I think it happens to actually be alphabetical here, but maybe we should keep the line gap and add a comment about `// protects the following` or w/e |
river | github_2023 | go | 221 | riverqueue | bgentry | @@ -11,45 +11,66 @@ import (
// subsequent calls are delayed until the cooldown period has elapsed and are
// also coalesced into a single call.
type DebouncedChan struct {
- done <-chan struct{}
- c chan struct{}
- cooldown time.Duration
-
- mu sync.Mutex
- timer *time.Timer
- sendWhenTimerExpired bool
+ c chan struct{}
+ cooldown time.Duration
+ ctxDone <-chan struct{}
+ mu sync.Mutex
+ sendOnTimerExpired bool
+ timer *time.Timer
+ timerDone bool
}
// NewDebouncedChan returns a new DebouncedChan which sends on the channel no
// more often than the cooldown period.
func NewDebouncedChan(ctx context.Context, cooldown time.Duration) *DebouncedChan {
return &DebouncedChan{
- done: ctx.Done(),
+ ctxDone: ctx.Done(),
c: make(chan struct{}, 1),
cooldown: cooldown,
}
}
+// C is the debounced channel. Multiple invocations to Call during the cooldown
+// period will deduplicate to a single emission on this channel on the period's
+// leading edge, and one more on the trailing edge. | Yeah, definitely doesn't sound like what I intended here. I wanted it to be leading edge, but if no other calls come in after the initial one nothing should happen, it should just be ready to fire again immediately on the next one.
However if a 2nd call comes in immediately after the 1st one, my thought was that we should still fire immediately a 2nd time at the end of the cooldown. And then repeat, i.e. only fire a 3rd time if yet another call comes in before `cooldown` completes (or whenever it comes in at some later time after `cooldown`).
Hopefully that makes sense? Could easily diagram it if that helps. |
river | github_2023 | others | 220 | riverqueue | bgentry | @@ -7,12 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
-## [0.0.21] - 2024-02-19
+## [0.0.22] - 2024-02-19
### Fixed
- Brings in another leadership election fix similar to #217 in which a TTL equal to the elector's run interval plus a configured TTL padding is also used for the initial attempt to gain leadership (#217 brought it in for reelection only). [PR #219](https://github.com/riverqueue/river/pull/219).
+## [0.0.21] - 2024-02-19 | I swear, we're gonna need a CI check for this 😆 |
river | github_2023 | go | 206 | riverqueue | brandur | @@ -101,6 +101,15 @@ type Config struct {
// Defaults to 1 second.
FetchPollInterval time.Duration
+ // ID is the unique identifier for this client. If not set, a random ULID will | What do you think about saying "random identifier" instead of "random ULID" here?
I'm kind of thinking that we shouldn't commit to the ULID algorithm specifically — it'd be nice to get rid of that dependency, and also I think a more human friendly combination of say host name plus start time might be a better default for operations anyway. Not something we need to do right now, but if we changed it, it'd a minor alteration of the contract above. |
river | github_2023 | others | 201 | riverqueue | bgentry | @@ -7,9 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [0.0.20] - 2024-02-14
+
### Fixed
-- Fix a leadership re-election query bug that would cause past leaders to think they were continuing to win elections. [PR #199].
+- Fix a leadership re-election query bug that would cause past leaders to think they were continuing to win elections. [PR #199](https://github.com/riverqueue/river/pull/199). | Doh; thanks |
river | github_2023 | others | 192 | riverqueue | brandur | @@ -18,9 +18,10 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- postgres-version: [16, 15, 14]
go-version:
+ - "1.22"
- "1.21"
+ postgres-version: [16, 15, 14] | WFM. |
river | github_2023 | others | 192 | riverqueue | brandur | @@ -2,6 +2,8 @@ module github.com/riverqueue/river/cmd/river
go 1.21.4 | I think it's okay. One thing I've noticed is that to pick up a toolchain from a version, you need to have the patch version in there. i.e. `go 1.22` wont mind 1.22, but `go 1.22.0` will. |
river | github_2023 | others | 192 | riverqueue | brandur | @@ -2,6 +2,8 @@ module github.com/riverqueue/river/cmd/river
go 1.21.4
+toolchain go1.21.6 | Can we take `toolchain` out? Go should be able to pick it up from the version above. |
river | github_2023 | go | 192 | riverqueue | brandur | @@ -12,7 +12,7 @@ import (
const sampleGoMod = `module github.com/riverqueue/river
-go 1.21.4 | Won't matter as far as the test is concerned. |
river | github_2023 | go | 186 | riverqueue | brandur | @@ -1310,6 +1308,50 @@ func Test_Client_InsertManyTx(t *testing.T) {
})
}
+func Test_Client_JobGet(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ type testBundle struct{}
+
+ setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) {
+ t.Helper()
+
+ config := newTestConfig(t, nil)
+ client := newTestClient(ctx, t, config)
+
+ return client, &testBundle{}
+ }
+
+ t.Run("FetchesAnExistingJob", func(t *testing.T) {
+ t.Parallel()
+
+ client, _ := setup(t)
+
+ newJob, err := client.Insert(ctx, noOpArgs{}, nil)
+ require.NoError(t, err)
+
+ job, err := client.JobGet(ctx, newJob.ID)
+ require.NoError(t, err)
+ require.NotNil(t, job)
+
+ require.Equal(t, newJob.ID, job.ID)
+ require.Equal(t, newJob.State, job.State)
+ })
+
+ t.Run("ReturnsErrNotFoundIfJobDoesNotExist", func(t *testing.T) {
+ t.Parallel()
+
+ client, _ := setup(t)
+
+ job, err := client.JobGet(ctx, 999999) | Hah, very unlikely, but small chance this fails locally because our test DB sequences don't get reset between runs. Probably will never happen, but I guess zero might be a slighter safer alternative. |
river | github_2023 | others | 184 | riverqueue | brandur | @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+### Changed
+
+- Validate queue name on job insertion. Allow queue names with hyphen separators in addition to underscore. [PR #184](https://github.com/riverqueue/rive/pull/184). | Typo here: "/rive/" |
river | github_2023 | go | 179 | riverqueue | bgentry | @@ -135,18 +136,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) {
conn, err := n.establishConn(ctx)
if err != nil {
- if errors.Is(err, context.Canceled) { | Hmm, hard to tell without digging into the code, but maybe it's possible pgx doesn't surface this source error correctly? `establishConn` is a simple method that just returns the result of `pgx.ConnectConfig` |
river | github_2023 | others | 117 | riverqueue | brandur | @@ -242,7 +242,7 @@ jobs:
- name: Setup sqlc
uses: sqlc-dev/setup-sqlc@v4
with:
- sqlc-version: "1.22.0"
+ sqlc-version: "1.24.0" | Maybe do a rebase here since I think the sqlc upgrade went through in a separate PR.
It might make sense to do these separately anyway, just so they go through quickly (small change so easy to review + merge) and don't end up interfering in multiple places.
FWIW, I don't usually upgrade to every new sqlc version unless there's something good in there. Historically for us anyway, it hasn't made a whole lot of difference whether you're using the latest or not. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -1188,3 +1188,76 @@ func validateQueueName(queueName string) error {
}
return nil
}
+
+// JobList returns a paginated list of jobs matching the provided filters. The
+// provided context is used for the underlying Postgres query and can be used to
+// cancel the operation or apply a timeout.
+//
+// params := river.JobListParams{}.WithLimit(10).State(river.JobStateCompleted)
+// jobRows, err := client.JobList(ctx, params)
+// if err != nil {
+// // handle error
+// }
+func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) {
+ if params == nil {
+ params = NewJobListParams()
+ }
+ // TODO(bgentry): confirm with Brandur, do we want to error in this scenario?
+ // If so this error is not worded appropriately for these methods.
+ if c.driver.GetDBPool() == nil {
+ return nil, errInsertNoDriverDBPool
+ } | So this error actually predates the `database/sql` driver slight. What it's trying to protect against is a case where you initiated a driver with a `nil` pool, but are trying to call a function that needs a pool. You might do this for example in a test case where you're only using test transactions:
https://riverqueue.com/docs/testing#test-transactions
But yeah, we can make the error more general to apply to non-insert functions like here. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -1188,3 +1188,76 @@ func validateQueueName(queueName string) error {
}
return nil
}
+
+// JobList returns a paginated list of jobs matching the provided filters. The
+// provided context is used for the underlying Postgres query and can be used to
+// cancel the operation or apply a timeout.
+//
+// params := river.JobListParams{}.WithLimit(10).State(river.JobStateCompleted)
+// jobRows, err := client.JobList(ctx, params)
+// if err != nil {
+// // handle error
+// }
+func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) {
+ if params == nil {
+ params = NewJobListParams()
+ }
+ // TODO(bgentry): confirm with Brandur, do we want to error in this scenario?
+ // If so this error is not worded appropriately for these methods.
+ if c.driver.GetDBPool() == nil {
+ return nil, errInsertNoDriverDBPool
+ }
+
+ dbParams, err := params.toDBParams()
+ if err != nil {
+ return nil, err
+ }
+
+ internalJobs, err := c.adapter.JobList(ctx, *dbParams)
+ if err != nil {
+ return nil, err
+ }
+ return dbsqlc.JobRowsFromInternal(internalJobs), nil
+}
+
+// JobListTx inserts many jobs at once using Postgres' `COPY FROM` mechanism, | This comment/example is still the original one for `InsertManyTx`. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -71,6 +72,29 @@ type JobInsertResult struct {
UniqueSkippedAsDuplicate bool
}
+type SortOrder int
+
+const (
+ SortOrderUnspecified SortOrder = iota
+ SortOrderAsc
+ SortOrderDesc
+)
+
+type JobListOrderBy struct {
+ Expr string
+ Order SortOrder
+}
+
+type JobListParams struct {
+ Conditions string | Just checking here: conditions are meant to be strictly stay an internal construct right? Just wondering if we have to do anything about making sure that SQL injections aren't possible. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -0,0 +1,271 @@
+package river
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/riverqueue/river/internal/dbadapter"
+ "github.com/riverqueue/river/rivertype"
+)
+
+// JobListPaginationCursor is used to specify a starting point for a paginated
+// job list query.
+type JobListPaginationCursor struct {
+ id int64
+ kind string
+ queue string
+ time time.Time
+}
+
+// JobListPaginationCursorFromJob creates a JobListPaginationCursor from a JobRow.
+func JobListPaginationCursorFromJob(job *rivertype.JobRow) *JobListPaginationCursor {
+ return &JobListPaginationCursor{
+ id: job.ID,
+ kind: job.Kind,
+ queue: job.Queue,
+ time: jobListTimeValue(job),
+ }
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler to decode the cursor from
+// a previously marshaled string.
+func (c *JobListPaginationCursor) UnmarshalText(text []byte) error { | Just want to make sure I'm following: what's the purpose of this function? Is the idea here that it allows the cursor to be saved to JSON or otherwise persisted? Is that expected to be a feature that people want? |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -0,0 +1,271 @@
+package river
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/riverqueue/river/internal/dbadapter"
+ "github.com/riverqueue/river/rivertype"
+)
+
+// JobListPaginationCursor is used to specify a starting point for a paginated
+// job list query.
+type JobListPaginationCursor struct { | Shed color, but IMO `JobListCursor` might be an adequate name for this that's a little less of a mouthful. IMO use of "cursor" implies pagination. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -0,0 +1,118 @@
+package dbsqlc | So I think my main (big) nit is mostly that this should live somewhere besides `dbsqlc` — I think that it's a little too confusing that this code _looks_ like it's generated, but it's not generated, and that `dbsqlc` shares a mix of generated and non-generated code. IMO, we should put it somewhere else even if it looks almost exactly like `dbsqlc` code. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -1197,3 +1197,58 @@ func validateQueueName(queueName string) error {
}
return nil
}
+
+// JobList returns a paginated list of jobs matching the provided filters. The
+// provided context is used for the underlying Postgres query and can be used to
+// cancel the operation or apply a timeout.
+//
+// params := river.JobListParams{}.WithLimit(10).State(river.JobStateCompleted) | I don't think that this syntax is valid is it? Go would want parenthesis around it at minimum like:
```
(river.JobListParams{}).WithLimit(10)...
```
It might be best to use the `NewJobListParams()` to construct it since we have that anyway. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -1197,3 +1197,58 @@ func validateQueueName(queueName string) error {
}
return nil
}
+
+// JobList returns a paginated list of jobs matching the provided filters. The
+// provided context is used for the underlying Postgres query and can be used to
+// cancel the operation or apply a timeout.
+//
+// params := river.JobListParams{}.WithLimit(10).State(river.JobStateCompleted)
+// jobRows, err := client.JobList(ctx, params)
+// if err != nil {
+// // handle error
+// }
+func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) {
+ if params == nil {
+ params = NewJobListParams()
+ }
+ if c.driver.GetDBPool() == nil {
+ return nil, errNoDriverDBPool
+ }
+
+ dbParams, err := params.toDBParams()
+ if err != nil {
+ return nil, err
+ }
+
+ internalJobs, err := c.adapter.JobList(ctx, *dbParams)
+ if err != nil {
+ return nil, err
+ }
+ return dbsqlc.JobRowsFromInternal(internalJobs), nil
+}
+
+// JobListTx returns a paginated list of jobs matching the provided filters. The
+// provided context is used for the underlying Postgres query and can be used to
+// cancel the operation or apply a timeout.
+//
+// params := river.JobListParams{}.WithLimit(10).State(river.JobStateCompleted) | Same comment here. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -0,0 +1,130 @@
+package db | Ugh sorry for the churn, but could we call this something a little less generic so that tab complete works better? Maybe `dblist` until such a time that we want to put something else besides listing in the package. |
river | github_2023 | go | 117 | riverqueue | brandur | @@ -0,0 +1,288 @@
+package river
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/riverqueue/river/internal/dbadapter"
+ "github.com/riverqueue/river/rivertype"
+)
+
+// JobListCursor is used to specify a starting point for a paginated
+// job list query.
+type JobListCursor struct {
+ id int64
+ kind string
+ queue string
+ time time.Time
+}
+
+// JobListCursorFromJob creates a JobListCursor from a JobRow.
+func JobListCursorFromJob(job *rivertype.JobRow) *JobListCursor {
+ return &JobListCursor{
+ id: job.ID,
+ kind: job.Kind,
+ queue: job.Queue,
+ time: jobListTimeValue(job),
+ }
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler to decode the cursor from
+// a previously marshaled string.
+func (c *JobListCursor) UnmarshalText(text []byte) error {
+ dst := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
+ n, err := base64.StdEncoding.Decode(dst, text)
+ if err != nil {
+ return err
+ }
+ dst = dst[:n]
+
+ wrapperValue := jobListPaginationCursorJSON{}
+ if err := json.Unmarshal(dst, &wrapperValue); err != nil {
+ return err
+ }
+ *c = JobListCursor{
+ id: wrapperValue.ID,
+ kind: wrapperValue.Kind,
+ queue: wrapperValue.Queue,
+ time: wrapperValue.Time,
+ }
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler to encode the cursor as an
+// opaque string.
+func (c JobListCursor) MarshalText() ([]byte, error) {
+ wrapperValue := jobListPaginationCursorJSON{
+ ID: c.id,
+ Kind: c.kind,
+ Queue: c.queue,
+ Time: c.time,
+ }
+ data, err := json.Marshal(wrapperValue)
+ if err != nil {
+ return nil, err
+ }
+ dst := make([]byte, base64.URLEncoding.EncodedLen(len(data)))
+ base64.URLEncoding.Encode(dst, data)
+ return dst, nil
+}
+
+type jobListPaginationCursorJSON struct {
+ ID int64 `json:"id"`
+ Kind string `json:"kind"`
+ Queue string `json:"queue"`
+ Time time.Time `json:"time"`
+}
+
+// SortOrder specifies the direction of a sort.
+type SortOrder int
+
+const (
+ // SortOrderAsc specifies that the sort should in ascending order.
+ SortOrderAsc SortOrder = iota
+ // SortOrderDesc specifies that the sort should in descending order.
+ SortOrderDesc
+)
+
+// JobListOrderByField specifies the field to sort by.
+type JobListOrderByField int
+
+const (
+ // JobListOrderByTime specifies that the sort should be by time. The specific
+ // time field used will vary by job state.
+ JobListOrderByTime JobListOrderByField = iota | Are you sure about this one? Wouldn't it be better just to have an option for each timestamp field that you'd specify explicitly for what to sort on?
Currently, you have to read source code to understand what this will order by, and even if it were to be documented, it might be faster/easier to have users explicitly specify the timestamp they want. |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -935,6 +935,46 @@ func (c *Client[TTx]) runProducers(fetchNewWorkCtx, workCtx context.Context) {
}
}
+// Cancel cancels the job with the given ID. If possible, the job is cancelled
+// immediately and will not be retried. The provided context is used for the
+// underlying Postgres update and can be used to cancel the operation or apply a
+// timeout.
+//
+// If the job is still in the queue (available, scheduled, or retryable), it is
+// immediately marked as cancelled and will not be retried.
+//
+// If the job is already finalized (cancelled, completed, or discarded), no
+// changes are made.
+//
+// If the job is currently running, it is not immediately cancelled, but is
+// instead marked for cancellation. The client running the job will also be
+// notified (via LISTEN/NOTIFY) to cancel the running job's context. Although
+// the job's context will be cancelled, since Go does not provide a mechanism to
+// interrupt a running goroutine the job will continue running until it returns.
+// As always, it is important for workers to respect context cancellation and
+// return promptly when the job context is done.
+//
+// Once the cancellation signal is received by the client running the job, any
+// error returned by that job will result in it being cancelled permanently and
+// not retried. However if the job returns no error, it will be completed as
+// usual.
+//
+// In the event the running job finishes executing _before_ the cancellation
+// signal is received, the job will be completed or retried as usual without
+// regard to the cancellation. TODO: fix this w/ smarter completion query that
+// uses metadata value? | IMO: if the job was about to be completed, just let that happen without the cancellation superseding it. However, if a job that was cancelled were to be set as retryable with the possibility of running again, that definitely seems wrong and should be fixed. |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -274,6 +274,94 @@ func Test_Client(t *testing.T) {
require.WithinDuration(t, time.Now().Add(15*time.Minute), updatedJob.ScheduledAt, 2*time.Second)
})
+ t.Run("CancelRunningJob", func(t *testing.T) {
+ t.Parallel()
+
+ client, bundle := setup(t)
+
+ jobStartedChan := make(chan int64)
+
+ type JobArgs struct {
+ JobArgsReflectKind[JobArgs]
+ }
+
+ AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error {
+ jobStartedChan <- job.ID
+ <-ctx.Done()
+ return ctx.Err()
+ }))
+
+ startClient(ctx, t, client)
+
+ insertedJob, err := client.Insert(ctx, &JobArgs{}, nil)
+ require.NoError(t, err)
+
+ startedJobID := riverinternaltest.WaitOrTimeout(t, jobStartedChan)
+ require.Equal(t, insertedJob.ID, startedJobID)
+
+ // Cancel the job:
+ cancelled, err := client.Cancel(ctx, insertedJob.ID)
+ require.NoError(t, err)
+ require.True(t, cancelled)
+
+ event := riverinternaltest.WaitOrTimeout(t, bundle.subscribeChan)
+ require.Equal(t, EventKindJobCancelled, event.Kind)
+ require.Equal(t, JobStateCancelled, event.Job.State)
+ require.WithinDuration(t, time.Now(), *event.Job.FinalizedAt, 2*time.Second)
+
+ updatedJob, err := bundle.queries.JobGetByID(ctx, client.driver.GetDBPool(), insertedJob.ID)
+ require.NoError(t, err)
+ require.Equal(t, dbsqlc.JobStateCancelled, updatedJob.State)
+ require.WithinDuration(t, time.Now(), *updatedJob.FinalizedAt, 2*time.Second)
+ })
+
+ t.Run("CancelScheduledJob", func(t *testing.T) {
+ t.Parallel()
+
+ client, bundle := setup(t)
+
+ jobStartedChan := make(chan int64)
+
+ type JobArgs struct {
+ JobArgsReflectKind[JobArgs]
+ }
+
+ AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error {
+ jobStartedChan <- job.ID
+ <-ctx.Done()
+ return ctx.Err()
+ }))
+
+ startClient(ctx, t, client)
+
+ insertedJob, err := client.Insert(ctx, &JobArgs{}, &InsertOpts{ScheduledAt: time.Now().Add(5 * time.Minute)})
+ require.NoError(t, err)
+
+ // Cancel the job:
+ cancelled, err := client.Cancel(ctx, insertedJob.ID)
+ require.NoError(t, err)
+ require.True(t, cancelled)
+
+ updatedJob, err := bundle.queries.JobGetByID(ctx, client.driver.GetDBPool(), insertedJob.ID)
+ require.NoError(t, err)
+ require.Equal(t, dbsqlc.JobStateCancelled, updatedJob.State)
+ require.WithinDuration(t, time.Now(), *updatedJob.FinalizedAt, 2*time.Second)
+ })
+
+ t.Run("CancelNonExistentJob", func(t *testing.T) {
+ t.Parallel()
+
+ client, _ := setup(t)
+ startClient(ctx, t, client)
+
+ // Cancel an unknown job ID:
+ cancelled, err := client.Cancel(ctx, 0)
+ // TODO(bgentry): should we try to make this return an error even though the
+ // query was successfully a no-op? or is it fine that it returns false, nil? | Hmm, I definitely would've leaned toward returning an error on a job that doesn't exist at all. The trouble is that it's quite easy in Go to pass the wrong value since all types are only "moderately strong".
e.g. You might be holding an `int64` that you think is a job ID, but is actually the primary ID of a different entity in your DB, pass that in expecting a cancellation, but never being the wiser that the wrong thing is happening because you're expecting a cancellation to be somewhat of an edge case anyway that's often a no-op. |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -146,10 +146,15 @@ func (s *Rescuer) Start(ctx context.Context) error {
}
type rescuerRunOnceResult struct {
+ NumJobsCancelled int64
NumJobsDiscarded int64
NumJobsRetried int64
}
+type metadataWithCancelledByQuery struct {
+ CancelledByQuery bool `json:"cancelled_by_query"` | What do you mean exactly by "cancelled by query"? Does that just mean that it was cancelled and cancellation happens by a query so that it was cancelled by a query? |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -150,12 +155,47 @@ func (p *producer) Run(fetchCtx, workCtx context.Context, statusFunc producerSta
// TODO: fetcher should have some jitter in it to avoid stampeding issues.
fetchLimiter := chanutil.NewDebouncedChan(fetchCtx, p.config.FetchCooldown)
+ handleJobControlNotification := func(topic notifier.NotificationTopic, payload string) {
+ var decoded jobControlPayload
+ if err := json.Unmarshal([]byte(payload), &decoded); err != nil {
+ p.Logger.ErrorContext(workCtx, p.Name+": Failed to unmarshal job control notification payload", slog.String("err", err.Error()))
+ return
+ }
+ if string(decoded.Action) == string(jobControlActionCancel) && decoded.Queue == p.config.QueueName && decoded.JobID > 0 {
+ select {
+ case p.cancelCh <- decoded.JobID:
+ default:
+ p.Logger.WarnContext(workCtx, p.Name+": Job cancel notification dropped due to full buffer", slog.Int64("job_id", decoded.JobID))
+ }
+ return
+ }
+ p.Logger.DebugContext(workCtx, p.Name+": Received job control notification with unknown action or other queue",
+ slog.String("action", string(decoded.Action)),
+ slog.Int64("job_id", decoded.JobID),
+ slog.String("queue", decoded.Queue),
+ )
+ }
+ sub := p.config.Notifier.Listen(notifier.NotificationTopicJobControl, handleJobControlNotification)
+ defer sub.Unlisten()
+
p.fetchAndRunLoop(fetchCtx, workCtx, fetchLimiter, statusFunc)
statusFunc(p.config.QueueName, componentstatus.ShuttingDown)
p.executorShutdownLoop()
statusFunc(p.config.QueueName, componentstatus.Stopped)
}
+type jobControlAction string
+
+const (
+ jobControlActionCancel jobControlAction = "cancel" | OOC, are there any other actions you were expecting to come in? |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -935,6 +935,90 @@ func (c *Client[TTx]) runProducers(fetchNewWorkCtx, workCtx context.Context) {
}
}
+// Cancel cancels the job with the given ID. If possible, the job is cancelled
+// immediately and will not be retried. The provided context is used for the
+// underlying Postgres update and can be used to cancel the operation or apply a
+// timeout.
+//
+// If the job is still in the queue (available, scheduled, or retryable), it is
+// immediately marked as cancelled and will not be retried.
+//
+// If the job is already finalized (cancelled, completed, or discarded), no
+// changes are made.
+//
+// If the job is currently running, it is not immediately cancelled, but is
+// instead marked for cancellation. The client running the job will also be
+// notified (via LISTEN/NOTIFY) to cancel the running job's context. Although
+// the job's context will be cancelled, since Go does not provide a mechanism to
+// interrupt a running goroutine the job will continue running until it returns.
+// As always, it is important for workers to respect context cancellation and
+// return promptly when the job context is done.
+//
+// Once the cancellation signal is received by the client running the job, any
+// error returned by that job will result in it being cancelled permanently and
+// not retried. However if the job returns no error, it will be completed as
+// usual.
+//
+// In the event the running job finishes executing _before_ the cancellation
+// signal is received, the job will be completed or retried as usual without
+// regard to the cancellation. TODO: fix this w/ smarter completion query that
+// uses metadata value?
+//
+// Returns true if the job was cancelled or cancellation was initiated, and
+// false if this was a no-op because the job was already finalized.
+func (c *Client[TTx]) Cancel(ctx context.Context, jobID int64) (bool, error) { | I kinda would've leaned toward having this return a job row representing the updated job for purposes of logging and additional checking. Thoughts?
A side benefit would be that it'd also allow you to disambiguate between the different edge conditions too. i.e. If the job was completed before it was cancelled, you could check its final state to see that. |
river | github_2023 | go | 141 | riverqueue | brandur | @@ -274,6 +274,130 @@ func Test_Client(t *testing.T) {
require.WithinDuration(t, time.Now().Add(15*time.Minute), updatedJob.ScheduledAt, 2*time.Second)
})
+ // This helper is used to test cancelling a job both _in_ a transaction and
+ // _outside of_ a transaction. The exact same test logic applies to each case,
+ // the only difference is a different cancelFunc provided by the specific
+ // subtest.
+ cancelRunningJobTestHelper := func(t *testing.T, cancelFunc func(ctx context.Context, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error)) { //nolint:thelper
+ client, bundle := setup(t)
+
+ jobStartedChan := make(chan int64)
+
+ type JobArgs struct {
+ JobArgsReflectKind[JobArgs]
+ }
+
+ AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error {
+ jobStartedChan <- job.ID
+ <-ctx.Done()
+ return ctx.Err()
+ }))
+
+ statusUpdateCh := client.monitor.RegisterUpdates()
+ startClient(ctx, t, client)
+ waitForClientHealthy(ctx, t, statusUpdateCh) | > It makes me wonder if there's a more systemic issue here; should we be waiting for some components to come up before returning from `Start()`? Or should our `startClient` test helper always be waiting for the client to become healthy before returning?
Yeah, good question. I kind of suspect that ... yes, a `Start` that returned only when things were really healthy and ready to go would overall be better. Shorter term, I like the idea of a test helper that waits for everything to be fully healthy, like you've done here, but we may want to make it even more widespread with an easy way to get at it from `riverinternaltest`. |
river | github_2023 | go | 145 | riverqueue | brandur | @@ -0,0 +1,31 @@
+package river
+
+import (
+ "context"
+ "testing"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/stretchr/testify/require"
+)
+
+func TestClientFromContext(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ client := &Client[pgx.Tx]{}
+ ctx = withClient(ctx, client)
+
+ require.Equal(t, client, ClientFromContext[pgx.Tx](ctx))
+
+ result, err := ClientFromContextSafely[pgx.Tx](ctx)
+ require.NoError(t, err)
+ require.Equal(t, client, result)
+
+ require.Panics(t, func() { | What do you think about switching to the use of `require.PanicsWithError` here? Just makes sure we for sure got the right panic and didn't trigger some other unexpected one by accident. |
river | github_2023 | go | 140 | riverqueue | brandur | @@ -135,7 +137,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) {
if errors.Is(err, context.Canceled) {
return
}
- log.Printf("error establishing connection from pool: %v", err)
+ slog.Error("error establishing connection from pool", "err", err) | I think this invocation and subsequent ones should be using the logger instance set above right? |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.