https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/collection/collection.go#L691-L710
func (c *readonlyCollection) WatchOneF(key string, f func(e *watch.Event) error) error {
	watcher, err := watch.NewWatcher(c.ctx, c.etcdClient, c.prefix, c.Path(key), c.template)
	if err != nil {
		return err
	}
	defer watcher.Close()
	for {
		select {
		case e := <-watcher.Watch():
			if err := f(e); err != nil {
				if err == errutil.ErrBreak {
					return nil
				}
				return err
			}
		case <-c.ctx.Done():
			return c.ctx.Err()
		}
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L251-L265
func (c *controller) enqueueKey(ctx string, obj interface{}) {
	switch o := obj.(type) {
	case *prowjobv1.ProwJob:
		ns := o.Spec.Namespace
		if ns == "" {
			ns = o.Namespace
		}
		c.workqueue.AddRateLimited(toKey(ctx, ns, o.Name))
	case *pipelinev1alpha1.PipelineRun:
		c.workqueue.AddRateLimited(toKey(ctx, o.Namespace, o.Name))
	default:
		logrus.Warnf("cannot enqueue unknown type %T: %v", o, obj)
		return
	}
}
https://github.com/jinzhu/now/blob/8ec929ed50c3ac25ce77ba4486e1f277c552c591/now.go#L199-L203
func (now *Now) Between(begin, end string) bool {
	beginTime := now.MustParse(begin)
	endTime := now.MustParse(end)
	return now.After(beginTime) && now.Before(endTime)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/storage_volumes.go#L156-L186
func (c *Cluster) StorageVolumeNextSnapshot(name string, typ int) int {
	base := name + shared.SnapshotDelimiter + "snap"
	length := len(base)
	q := fmt.Sprintf("SELECT name FROM storage_volumes WHERE type=? AND snapshot=? AND SUBSTR(name,1,?)=?")
	var numstr string
	inargs := []interface{}{typ, true, length, base}
	outfmt := []interface{}{numstr}
	results, err := queryScan(c.db, q, inargs, outfmt)
	if err != nil {
		return 0
	}

	max := 0

	for _, r := range results {
		numstr = r[0].(string)
		if len(numstr) <= length {
			continue
		}
		substr := numstr[length:]
		var num int
		count, err := fmt.Sscanf(substr, "%d", &num)
		if err != nil || count != 1 {
			continue
		}
		if num >= max {
			max = num + 1
		}
	}

	return max
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L3451-L3455
func (v EventExecutionContextCreated) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime32(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/robots/issue-creator/sources/triage-filer.go#L460-L472
func (c *Cluster) Labels() []string {
	labels := []string{"kind/flake"}

	topTests := make([]string, len(c.Tests))
	for i, test := range c.topTestsFailed(len(c.Tests)) {
		topTests[i] = test.Name
	}
	for sig := range c.filer.creator.TestsSIGs(topTests) {
		labels = append(labels, "sig/"+sig)
	}

	return labels
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L5925-L5927
func (api *API) MultiCloudImageSettingLocator(href string) *MultiCloudImageSettingLocator {
	return &MultiCloudImageSettingLocator{Href(href), api}
}
https://github.com/ianschenck/envflag/blob/9111d830d133f952887a936367fb0211c3134f0d/envflag.go#L130-L132
func String(name string, value string, usage string) *string {
	return EnvironmentFlags.String(name, value, usage)
}
https://github.com/giantswarm/certctl/blob/2a6615f61499cd09a8d5ced9a5fade322d2de254/service/vault-factory/vault_factory.go#L30-L44
func New(config Config) (spec.VaultFactory, error) {
	newVaultFactory := &vaultFactory{
		Config: config,
	}

	// Dependencies.
	if newVaultFactory.Address == "" {
		return nil, microerror.Maskf(invalidConfigError, "Vault address must not be empty")
	}
	if newVaultFactory.AdminToken == "" {
		return nil, microerror.Maskf(invalidConfigError, "Vault admin token must not be empty")
	}

	return newVaultFactory, nil
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dimg/ftgc.go#L98-L101
func (gc *GraphicContext) ClearRect(x1, y1, x2, y2 int) {
	imageColor := image.NewUniform(gc.Current.FillColor)
	draw.Draw(gc.img, image.Rect(x1, y1, x2, y2), imageColor, image.ZP, draw.Over)
}
https://github.com/mgutz/str/blob/968bf66e3da857419e4f6e71b2d5c9ae95682dc4/funcsPZ.go#L275-L381
func ToArgv(s string) []string {
	const (
		InArg = iota
		InArgQuote
		OutOfArg
	)
	currentState := OutOfArg
	currentQuoteChar := "\x00" // to distinguish between ' and " quotations
	// this allows to use "foo'bar"
	currentArg := ""
	argv := []string{}

	isQuote := func(c string) bool {
		return c == `"` || c == `'`
	}

	isEscape := func(c string) bool {
		return c == `\`
	}

	isWhitespace := func(c string) bool {
		return c == " " || c == "\t"
	}

	L := len(s)
	for i := 0; i < L; i++ {
		c := s[i : i+1]

		//fmt.Printf("c %s state %v arg %s argv %v i %d\n", c, currentState, currentArg, args, i)
		if isQuote(c) {
			switch currentState {
			case OutOfArg:
				currentArg = ""
				fallthrough
			case InArg:
				currentState = InArgQuote
				currentQuoteChar = c

			case InArgQuote:
				if c == currentQuoteChar {
					currentState = InArg
				} else {
					currentArg += c
				}
			}

		} else if isWhitespace(c) {
			switch currentState {
			case InArg:
				argv = append(argv, currentArg)
				currentState = OutOfArg
			case InArgQuote:
				currentArg += c
			case OutOfArg:
				// nothing
			}

		} else if isEscape(c) {
			switch currentState {
			case OutOfArg:
				currentArg = ""
				currentState = InArg
				fallthrough
			case InArg:
				fallthrough
			case InArgQuote:
				if i == L-1 {
					if runtime.GOOS == "windows" {
						// just add \ to end for windows
						currentArg += c
					} else {
						panic("Escape character at end string")
					}
				} else {
					if runtime.GOOS == "windows" {
						peek := s[i+1 : i+2]
						if peek != `"` {
							currentArg += c
						}
					} else {
						i++
						c = s[i : i+1]
						currentArg += c
					}
				}
			}
		} else {
			switch currentState {
			case InArg, InArgQuote:
				currentArg += c

			case OutOfArg:
				currentArg = ""
				currentArg += c
				currentState = InArg
			}
		}
	}

	if currentState == InArg {
		argv = append(argv, currentArg)
	} else if currentState == InArgQuote {
		panic("Starting quote has no ending quote.")
	}

	return argv
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/event.go#L80-L101
func eventInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	uuid := r.URL.Query().Get(":uuid")
	if !bson.IsObjectIdHex(uuid) {
		msg := fmt.Sprintf("uuid parameter is not ObjectId: %s", uuid)
		return &errors.HTTP{Code: http.StatusBadRequest, Message: msg}
	}
	objID := bson.ObjectIdHex(uuid)
	e, err := event.GetByID(objID)
	if err != nil {
		return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	scheme, err := permission.SafeGet(e.Allowed.Scheme)
	if err != nil {
		return err
	}
	allowed := permission.Check(t, scheme, e.Allowed.Contexts...)
	if !allowed {
		return permission.ErrUnauthorized
	}
	w.Header().Add("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(e)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pps.go#L505-L531
func (c APIClient) CreatePipeline(
	name string,
	image string,
	cmd []string,
	stdin []string,
	parallelismSpec *pps.ParallelismSpec,
	input *pps.Input,
	outputBranch string,
	update bool,
) error {
	_, err := c.PpsAPIClient.CreatePipeline(
		c.Ctx(),
		&pps.CreatePipelineRequest{
			Pipeline: NewPipeline(name),
			Transform: &pps.Transform{
				Image: image,
				Cmd:   cmd,
				Stdin: stdin,
			},
			ParallelismSpec: parallelismSpec,
			Input:           input,
			OutputBranch:    outputBranch,
			Update:          update,
		},
	)
	return grpcutil.ScrubGRPC(err)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L143-L150
func (rn *RawNode) Propose(data []byte) error {
	return rn.raft.Step(pb.Message{
		Type: pb.MsgProp,
		From: rn.raft.id,
		Entries: []pb.Entry{
			{Data: data},
		}})
}
https://github.com/t3rm1n4l/nitro/blob/937fe99f63a01a8bea7661c49e2f3f8af6541d7c/nitro.go#L551-L557
func (s *Snapshot) Open() bool {
	if atomic.LoadInt32(&s.refCount) == 0 {
		return false
	}
	atomic.AddInt32(&s.refCount, 1)
	return true
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/webhook.go#L56-L71
func webhookInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	webhookName := r.URL.Query().Get(":name")
	webhook, err := servicemanager.Webhook.Find(webhookName)
	if err != nil {
		if err == eventTypes.ErrWebhookNotFound {
			w.WriteHeader(http.StatusNotFound)
		}
		return err
	}
	ctx := permission.Context(permTypes.CtxTeam, webhook.TeamOwner)
	if !permission.Check(t, permission.PermWebhookRead, ctx) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(webhook)
}
https://github.com/op/go-logging/blob/970db520ece77730c7e4724c61121037378659d9/logger.go#L174-L177
func (l *Logger) Fatal(args ...interface{}) {
	l.log(CRITICAL, nil, args...)
	os.Exit(1)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/animation/easyjson.go#L988-L992
func (v GetPlaybackRateParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoAnimation10(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/datastore/query.go#L127-L131
func (q *Query) EventualConsistency() *Query {
	q = q.clone()
	q.eventual = true
	return q
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/ready_wait.go#L21-L30
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
	select {
	case <-ready:
		return nil
	case <-rpcCtx.Done():
		return rpcCtx.Err()
	case <-clientCtx.Done():
		return clientCtx.Err()
	}
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dsvg/converters.go#L157-L175
func getPrec(num float64, better bool) int {
	max := 3
	eps := 0.0005
	if better {
		max = 6
		eps = 0.0000005
	}
	prec := 0
	for math.Mod(num, 1) > eps {
		num *= 10
		eps *= 10
		prec++
	}
	if max < prec {
		return max
	}
	return prec
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/projects.go#L50-L52
func (c *ClusterTx) ProjectHasProfiles(name string) (bool, error) {
	return projectHasProfiles(c.tx, name)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domdebugger/easyjson.go#L544-L548
func (v *RemoveEventListenerBreakpointParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomdebugger6(&r, v)
	return r.Error()
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pps/server/api_server.go#L901-L1062
func (a *apiServer) listDatum(pachClient *client.APIClient, job *pps.Job, page, pageSize int64) (response *pps.ListDatumResponse, retErr error) {
	if err := checkLoggedIn(pachClient); err != nil {
		return nil, err
	}
	response = &pps.ListDatumResponse{}
	ctx := pachClient.Ctx()
	pfsClient := pachClient.PfsAPIClient

	// get information about 'job'
	jobInfo, err := a.InspectJob(ctx, &pps.InspectJobRequest{
		Job: &pps.Job{
			ID: job.ID,
		},
	})
	if err != nil {
		return nil, err
	}

	// authorize ListDatum (must have READER access to all inputs)
	if err := a.authorizePipelineOp(pachClient,
		pipelineOpListDatum,
		jobInfo.Input,
		jobInfo.Pipeline.Name,
	); err != nil {
		return nil, err
	}

	// helper functions for pagination
	getTotalPages := func(totalSize int) int64 {
		return (int64(totalSize) + pageSize - 1) / pageSize // == ceil(totalSize/pageSize)
	}
	getPageBounds := func(totalSize int) (int, int, error) {
		start := int(page * pageSize)
		end := int((page + 1) * pageSize)
		switch {
		case totalSize <= start:
			return 0, 0, io.EOF
		case totalSize <= end:
			return start, totalSize, nil
		case end < totalSize:
			return start, end, nil
		}
		return 0, 0, goerr.New("getPageBounds: unreachable code")
	}

	df, err := workerpkg.NewDatumFactory(pachClient, jobInfo.Input)
	if err != nil {
		return nil, err
	}

	// If there's no stats commit (job not finished), compute datums using jobInfo
	if jobInfo.StatsCommit == nil {
		start := 0
		end := df.Len()
		if pageSize > 0 {
			var err error
			start, end, err = getPageBounds(df.Len())
			if err != nil {
				return nil, err
			}
			response.Page = page
			response.TotalPages = getTotalPages(df.Len())
		}
		var datumInfos []*pps.DatumInfo
		for i := start; i < end; i++ {
			datum := df.Datum(i) // flattened slice of *worker.Input to job
			id := workerpkg.HashDatum(jobInfo.Pipeline.Name, jobInfo.Salt, datum)
			datumInfo := &pps.DatumInfo{
				Datum: &pps.Datum{
					ID:  id,
					Job: jobInfo.Job,
				},
				State: pps.DatumState_STARTING,
			}
			for _, input := range datum {
				datumInfo.Data = append(datumInfo.Data, input.FileInfo)
			}
			datumInfos = append(datumInfos, datumInfo)
		}
		response.DatumInfos = datumInfos
		return response, nil
	}

	// There is a stats commit -- job is finished
	// List the files under / in the stats branch to get all the datums
	file := &pfs.File{
		Commit: jobInfo.StatsCommit,
		Path:   "/",
	}

	var datumFileInfos []*pfs.FileInfo
	fs, err := pfsClient.ListFileStream(ctx, &pfs.ListFileRequest{File: file, Full: true})
	if err != nil {
		return nil, grpcutil.ScrubGRPC(err)
	}
	// Omit files at the top level that correspond to aggregate job stats
	blacklist := map[string]bool{
		"stats": true,
		"logs":  true,
		"pfs":   true,
	}
	pathToDatumHash := func(path string) (string, error) {
		_, datumHash := filepath.Split(path)
		if _, ok := blacklist[datumHash]; ok {
			return "", fmt.Errorf("value %v is not a datum hash", datumHash)
		}
		return datumHash, nil
	}
	for {
		f, err := fs.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, grpcutil.ScrubGRPC(err)
		}
		if _, err := pathToDatumHash(f.File.Path); err != nil {
			// not a datum
			continue
		}
		datumFileInfos = append(datumFileInfos, f)
	}

	var egGetDatums errgroup.Group
	limiter := limit.New(200)
	datumInfos := make([]*pps.DatumInfo, len(datumFileInfos))
	for index, fileInfo := range datumFileInfos {
		fileInfo := fileInfo
		index := index
		egGetDatums.Go(func() error {
			limiter.Acquire()
			defer limiter.Release()
			datumHash, err := pathToDatumHash(fileInfo.File.Path)
			if err != nil {
				// not a datum
				return nil
			}
			datum, err := a.getDatum(pachClient, jobInfo.StatsCommit.Repo.Name, jobInfo.StatsCommit, job.ID, datumHash, df)
			if err != nil {
				return err
			}
			datumInfos[index] = datum
			return nil
		})
	}
	if err = egGetDatums.Wait(); err != nil {
		return nil, err
	}

	// Sort results (failed first)
	sort.Slice(datumInfos, func(i, j int) bool {
		return datumInfos[i].State < datumInfos[j].State
	})

	if pageSize > 0 {
		response.Page = page
		response.TotalPages = getTotalPages(len(datumInfos))
		start, end, err := getPageBounds(len(datumInfos))
		if err != nil {
			return nil, err
		}
		datumInfos = datumInfos[start:end]
	}

	response.DatumInfos = datumInfos
	return response, nil
}
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/russian/stem.go#L11-L28
func Stem(word string, stemStopwWords bool) string {
	word = strings.ToLower(strings.TrimSpace(word))
	w := snowballword.New(word)

	// Return small words and stop words
	if len(w.RS) <= 2 || (stemStopwWords == false && isStopWord(word)) {
		return word
	}

	preprocess(w)
	step1(w)
	step2(w)
	step3(w)
	step4(w)

	return w.String()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domdebugger/domdebugger.go#L174-L176
func (p *RemoveXHRBreakpointParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandRemoveXHRBreakpoint, p, nil)
}
https://github.com/iron-io/functions_go/blob/91b84f5bbb17095bf1c7028ec6e70a3dc06a5893/client/apps/get_apps_parameters.go#L70-L73
func (o *GetAppsParams) WithTimeout(timeout time.Duration) *GetAppsParams {
	o.SetTimeout(timeout)
	return o
}
https://github.com/olorin/nagiosplugin/blob/893f9702af4ea1e2dc4cd6528cbdcdb3dc7ca064/range.go#L85-L87
func (r *Range) CheckInt(val int) bool {
	return r.Check(float64(val))
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/git/localgit/localgit.go#L151-L154
func (lg *LocalGit) RevParse(org, repo, commitlike string) (string, error) {
	rdir := filepath.Join(lg.Dir, org, repo)
	return runCmdOutput(lg.Git, rdir, "rev-parse", commitlike)
}
https://github.com/pandemicsyn/oort/blob/fca1d3baddc1d944387cc8bbe8b21f911ec9091b/oort/oort.go#L108-L112
func (o *Server) GetListenAddr() string {
	o.RLock()
	defer o.RUnlock()
	return o.ring.LocalNode().Address(2)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/util/sys.go#L43-L85
func GetIdmapSet() *idmap.IdmapSet {
	idmapSet, err := idmap.DefaultIdmapSet("", "")
	if err != nil {
		logger.Warn("Error reading default uid/gid map", log.Ctx{"err": err.Error()})
		logger.Warnf("Only privileged containers will be able to run")
		idmapSet = nil
	} else {
		kernelIdmapSet, err := idmap.CurrentIdmapSet()
		if err == nil {
			logger.Infof("Kernel uid/gid map:")
			for _, lxcmap := range kernelIdmapSet.ToLxcString() {
				logger.Infof(fmt.Sprintf(" - %s", lxcmap))
			}
		}

		if len(idmapSet.Idmap) == 0 {
			logger.Warnf("No available uid/gid map could be found")
			logger.Warnf("Only privileged containers will be able to run")
			idmapSet = nil
		} else {
			logger.Infof("Configured LXD uid/gid map:")
			for _, lxcmap := range idmapSet.Idmap {
				suffix := ""

				if lxcmap.Usable() != nil {
					suffix = " (unusable)"
				}

				for _, lxcEntry := range lxcmap.ToLxcString() {
					logger.Infof(" - %s%s", lxcEntry, suffix)
				}
			}

			err = idmapSet.Usable()
			if err != nil {
				logger.Warnf("One or more uid/gid map entry isn't usable (typically due to nesting)")
				logger.Warnf("Only privileged containers will be able to run")
				idmapSet = nil
			}
		}
	}

	return idmapSet
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/auth/server/api_server.go#L793-L809
func (a *apiServer) expiredClusterAdminCheck(ctx context.Context, username string) error {
	state, err := a.getEnterpriseTokenState()
	if err != nil {
		return fmt.Errorf("error confirming Pachyderm Enterprise token: %v", err)
	}

	isAdmin, err := a.isAdmin(ctx, username)
	if err != nil {
		return err
	}

	if state != enterpriseclient.State_ACTIVE && !isAdmin {
		return errors.New("Pachyderm Enterprise is not active in this " +
			"cluster (until Pachyderm Enterprise is re-activated or Pachyderm " +
			"auth is deactivated, only cluster admins can perform any operations)")
	}
	return nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/overlay/overlay.go#L137-L139
func (p *HighlightFrameParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandHighlightFrame, p, nil)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/backend/batch_tx.go#L167-L192
func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		if t.backend.lg != nil {
			t.backend.lg.Fatal(
				"failed to find a bucket",
				zap.String("bucket-name", string(bucketName)),
			)
		} else {
			plog.Fatalf("bucket %s does not exist", bucketName)
		}
	}
	err := bucket.Delete(key)
	if err != nil {
		if t.backend.lg != nil {
			t.backend.lg.Fatal(
				"failed to delete a key",
				zap.String("bucket-name", string(bucketName)),
				zap.Error(err),
			)
		} else {
			plog.Fatalf("cannot delete key from bucket (%v)", err)
		}
	}
	t.pending++
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L3149-L3153
func (v *GetScriptSourceReturns) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDebugger33(&r, v)
	return r.Error()
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/flagutil/kubernetes.go#L43-L55
func (o *KubernetesOptions) Validate(dryRun bool) error {
	if dryRun && o.DeckURI == "" {
		return errors.New("a dry-run was requested but required flag -deck-url was unset")
	}

	if o.DeckURI != "" {
		if _, err := url.ParseRequestURI(o.DeckURI); err != nil {
			return fmt.Errorf("invalid -deck-url URI: %q", o.DeckURI)
		}
	}

	return nil
}
https://github.com/go-opencv/go-opencv/blob/a4fe8ec027ccc9eb8b7d0797db7c76e61083f1db/opencv/cxcore.go#L820-L822
func (seq *Seq) Pop(element unsafe.Pointer) {
	C.cvSeqPop((*C.struct_CvSeq)(seq), element)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L406-L410
func (v *StopScreencastParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage3(&r, v)
	return r.Error()
}
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/http/response.go#L90-L110
func (w *tchanResponseWriter) writeHeaders() {
	// TODO(prashant): Allow creating write buffers that let you grow the buffer underneath.
	wb := typed.NewWriteBufferWithSize(10000)
	wb.WriteUint16(uint16(w.statusCode))
	writeVarintString(wb, http.StatusText(w.statusCode))
	writeHeaders(wb, w.headers)

	arg2Writer, err := w.response.Arg2Writer()
	if err != nil {
		w.err = err
		return
	}

	if _, w.err = wb.FlushTo(arg2Writer); w.err != nil {
		return
	}
	if w.err = arg2Writer.Close(); w.err != nil {
		return
	}

	w.arg3Writer, w.err = w.response.Arg3Writer()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/overlay/easyjson.go#L1519-L1523
func (v HideHighlightParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoOverlay14(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/cluster/membership.go#L505-L606
func Promote(state *state.State, gateway *Gateway, nodes []db.RaftNode) error {
	logger.Info("Promote node to database node")

	// Sanity check that this is not already a database node
	if gateway.IsDatabaseNode() {
		return fmt.Errorf("this node is already a database node")
	}

	// Figure out our own address.
	address := ""
	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		address, err = tx.NodeAddress()
		if err != nil {
			return errors.Wrap(err, "failed to fetch the address of this node")
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Sanity check that we actually have an address.
	if address == "" {
		return fmt.Errorf("node is not exposed on the network")
	}

	// Figure out our raft node ID, and an existing target raft node that
	// we'll contact to add ourselves as member.
	id := ""
	target := ""
	for _, node := range nodes {
		if node.Address == address {
			id = strconv.Itoa(int(node.ID))
		} else {
			target = node.Address
		}
	}

	// Sanity check that our address was actually included in the given
	// list of raft nodes.
	if id == "" {
		return fmt.Errorf("this node is not included in the given list of database nodes")
	}

	// Replace our local list of raft nodes with the given one (which
	// includes ourselves). This will make the gateway start a raft node
	// when restarted.
	err = state.Node.Transaction(func(tx *db.NodeTx) error {
		err = tx.RaftNodesReplace(nodes)
		if err != nil {
			return errors.Wrap(err, "failed to set raft nodes")
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Lock regular access to the cluster database since we don't want any
	// other database code to run while we're reconfiguring raft.
	err = state.Cluster.EnterExclusive()
	if err != nil {
		return errors.Wrap(err, "failed to acquire cluster database lock")
	}

	// Wipe all existing raft data, for good measure (perhaps they were
	// somehow leftover).
	err = os.RemoveAll(state.OS.GlobalDatabaseDir())
	if err != nil {
		return errors.Wrap(err, "failed to remove existing raft data")
	}

	// Re-initialize the gateway. This will create a new raft factory an
	// dqlite driver instance, which will be exposed over gRPC by the
	// gateway handlers.
	err = gateway.init()
	if err != nil {
		return errors.Wrap(err, "failed to re-initialize gRPC SQL gateway")
	}

	logger.Info(
		"Joining dqlite raft cluster",
		log15.Ctx{"id": id, "address": address, "target": target})
	changer := gateway.raft.MembershipChanger()
	err = changer.Join(raft.ServerID(id), raft.ServerAddress(target), 5*time.Second)
	if err != nil {
		return err
	}

	// Unlock regular access to our cluster database, and make sure our
	// gateway still works correctly.
	err = state.Cluster.ExitExclusive(func(tx *db.ClusterTx) error {
		_, err := tx.Nodes()
		return err
	})
	if err != nil {
		return errors.Wrap(err, "cluster database initialization failed")
	}

	return nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/message.go#L87-L95
func (m *Message) AddEmbedded(name string, r io.Reader) error {
	m.Attachments = append(m.Attachments, Attachment{
		Name:     name,
		Reader:   r,
		Embedded: true,
	})
	return nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/easyjson.go#L214-L218
func (v *TakeCoverageDeltaReturns) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoCss1(&r, v)
	return r.Error()
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L8090-L8092
func (api *API) RecurringVolumeAttachmentLocator(href string) *RecurringVolumeAttachmentLocator {
	return &RecurringVolumeAttachmentLocator{Href(href), api}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/robots/issue-creator/creator/creator.go#L182-L215
func (c *IssueCreator) CreateAndSync() {
	var err error
	if err = c.initialize(); err != nil {
		glog.Fatalf("Error initializing IssueCreator: %v.", err)
	}
	glog.Info("IssueCreator initialization complete.")

	for srcName, src := range sources {
		glog.Infof("Generating issues from source: %s.", srcName)
		var issues []Issue
		if issues, err = src.Issues(c); err != nil {
			glog.Errorf("Error generating issues. Source: %s Msg: %v.", srcName, err)
			continue
		}

		// Note: We assume that no issues made by this bot with ID's matching issues generated by
		// sources will be created while this code is creating issues. If this is a possibility then
		// this loop should be updated to fetch recently changed issues from github after every issue
		// sync that results in an issue being created.
		glog.Infof("Syncing issues from source: %s.", srcName)
		created := 0
		for _, issue := range issues {
			if c.sync(issue) {
				created++
			}
		}
		glog.Infof(
			"Created issues for %d of the %d issues synced from source: %s.",
			created,
			len(issues),
			srcName,
		)
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L902-L929
func (vlog *valueLog) sync(fid uint32) error {
	if vlog.opt.SyncWrites {
		return nil
	}

	vlog.filesLock.RLock()
	maxFid := atomic.LoadUint32(&vlog.maxFid)
	// During replay it is possible to get sync call with fid less than maxFid.
	// Because older file has already been synced, we can return from here.
	if fid < maxFid || len(vlog.filesMap) == 0 {
		vlog.filesLock.RUnlock()
		return nil
	}

	curlf := vlog.filesMap[maxFid]
	// Sometimes it is possible that vlog.maxFid has been increased but file creation
	// with same id is still in progress and this function is called. In those cases
	// entry for the file might not be present in vlog.filesMap.
	if curlf == nil {
		vlog.filesLock.RUnlock()
		return nil
	}
	curlf.lock.RLock()
	vlog.filesLock.RUnlock()

	err := curlf.sync()
	curlf.lock.RUnlock()
	return err
}
https://github.com/skyrings/skyring-common/blob/d1c0bb1cbd5ed8438be1385c85c4f494608cde1e/dbprovider/mongodb/mailnotifier.go#L42-L51
func (m MongoDb) SaveMailNotifier(ctxt string, notifier models.MailNotifier) error {
	c := m.Connect(models.COLL_NAME_MAIL_NOTIFIER)
	defer m.Close(c)
	_, err := c.Upsert(bson.M{}, bson.M{"$set": notifier})
	if err != nil {
		logger.Get().Error("%s-Error Updating the mail notifier info for: %s Error: %v", ctxt, notifier.MailId, err)
		return errors.New(fmt.Sprintf("Error Updating the mail notifier info for: %s Error: %v", notifier.MailId, err))
	}
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L109-L114
func KeyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}
https://github.com/moul/sapin/blob/03dc419f50a637fd6fd353ffa8f4c93b4f3a80a6/sapin.go#L51-L99
func (s *Sapin) compute() {
	if s.output != "" {
		return
	}

	// size of the last line of the last floor
	maxSize := s.GetMaxSize()

	// each floor in the floors
	for floor := 0; floor < s.Size; floor++ {
		// each line in the lines of the floor
		for line := 0; line < floor+4; line++ {
			// size of the current line
			lineSize := s.GetLineSize(floor, line)

			// pad left with spaces
			for i := (maxSize-lineSize)/2 - 1; i > 0; i-- {
				s.putchar(" ")
			}

			// draw the body
			for i := 0; i < lineSize; i++ {
				s.putchar("*")
			}

			// new line
			s.putchar("\n")
		}
	}

	// the trunc
	for i := 0; i < s.Size; i++ {
		lineSize := s.Size + (s.Size+1)%2

		// pad left with spaces
		for i := (maxSize-lineSize)/2 - 1; i > 0; i-- {
			s.putchar(" ")
		}

		// draw the body
		for i := 0; i < lineSize; i++ {
			s.putchar("|")
		}

		// new line
		s.putchar("\n")
	}
}
https://github.com/urandom/handler/blob/61508044a5569d1609521d81e81f6737567fd104/auth/jwt.go#L85-L89
func Issuer(issuer string) TokenOpt {
	return TokenOpt{func(o *options) {
		o.issuer = issuer
	}}
}
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/fbuf.go#L94-L96
func (b *Float64RingBuf) ReadWithoutAdvance(p []float64) (n int, err error) {
	return b.readAndMaybeAdvance(p, false)
}
https://github.com/pivotal-pez/pezdispenser/blob/768e2777520868857916b66cfd4cfb7149383ca5/service/available_inventory.go#L21-L28
func GetAvailableInventory(taskCollection integrations.Collection) (inventory map[string]skurepo.SkuBuilder) {
	inventory = skurepo.GetRegistry()
	onceLoadInventoryPoller.Do(func() {
		startTaskPollingForRegisteredSkus(taskCollection)
	})
	return
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pps/server/api_server.go#L592-L677
func (a *apiServer) listJob(pachClient *client.APIClient, pipeline *pps.Pipeline, outputCommit *pfs.Commit, inputCommits []*pfs.Commit, f func(*pps.JobInfo) error) error {
	authIsActive := true
	me, err := pachClient.WhoAmI(pachClient.Ctx(), &auth.WhoAmIRequest{})
	if auth.IsErrNotActivated(err) {
		authIsActive = false
	} else if err != nil {
		return err
	}
	if authIsActive && pipeline != nil {
		// If 'pipeline is set, check that caller has access to the pipeline's
		// output repo; currently, that's all that's required for ListJob.
		//
		// If 'pipeline' isn't set, then we don't return an error (otherwise, a
		// caller without access to a single pipeline's output repo couldn't run
		// `pachctl list job` at all) and instead silently skip jobs where the user
		// doesn't have access to the job's output repo.
		resp, err := pachClient.Authorize(pachClient.Ctx(), &auth.AuthorizeRequest{
			Repo:  pipeline.Name,
			Scope: auth.Scope_READER,
		})
		if err != nil {
			return err
		}
		if !resp.Authorized {
			return &auth.ErrNotAuthorized{
				Subject:  me.Username,
				Repo:     pipeline.Name,
				Required: auth.Scope_READER,
			}
		}
	}
	if outputCommit != nil {
		outputCommit, err = a.resolveCommit(pachClient, outputCommit)
		if err != nil {
			return err
		}
	}
	for i, inputCommit := range inputCommits {
		inputCommits[i], err = a.resolveCommit(pachClient, inputCommit)
		if err != nil {
			return err
		}
	}
	jobs := a.jobs.ReadOnly(pachClient.Ctx())
	jobPtr := &pps.EtcdJobInfo{}
	_f := func(key string) error {
		jobInfo, err := a.jobInfoFromPtr(pachClient, jobPtr, len(inputCommits) > 0)
		if err != nil {
			if isNotFoundErr(err) {
				// This can happen if a user deletes an upstream commit and thereby
				// deletes this job's output commit, but doesn't delete the etcdJobInfo.
				// In this case, the job is effectively deleted, but isn't removed from
				// etcd yet.
				return nil
			} else if auth.IsErrNotAuthorized(err) {
				return nil // skip job--see note under 'authIsActive && pipeline != nil'
			}
			return err
		}
		if len(inputCommits) > 0 {
			found := make([]bool, len(inputCommits))
			pps.VisitInput(jobInfo.Input, func(in *pps.Input) {
				if in.Pfs != nil {
					for i, inputCommit := range inputCommits {
						if in.Pfs.Commit == inputCommit.ID {
							found[i] = true
						}
					}
				}
			})
			for _, found := range found {
				if !found {
					return nil
				}
			}
		}
		return f(jobInfo)
	}
	if pipeline != nil {
		return jobs.GetByIndex(ppsdb.JobsPipelineIndex, pipeline, jobPtr, col.DefaultOptions, _f)
	} else if outputCommit != nil {
		return jobs.GetByIndex(ppsdb.JobsOutputIndex, outputCommit, jobPtr, col.DefaultOptions, _f)
	} else {
		return jobs.List(jobPtr, col.DefaultOptions, _f)
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L444-L449
func getBuildPath(spec *prowapi.ProwJobSpec) string {
	jenkinsJobName := getJobName(spec)
	jenkinsPath := fmt.Sprintf("/job/%s/build", jenkinsJobName)

	return jenkinsPath
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/heapprofiler/easyjson.go#L956-L960
func (v GetSamplingProfileParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoHeapprofiler10(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L3220-L3224
func (v *GetScriptSourceParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDebugger34(&r, v)
	return r.Error()
}
https://github.com/ChrisTrenkamp/goxpath/blob/c385f95c6022e7756e91beac5f5510872f7dcb7d/goxpath.go#L111-L117
func ParseExec(xpstr string, t tree.Node, opts ...FuncOpts) (tree.Result, error) {
	xp, err := Parse(xpstr)
	if err != nil {
		return nil, err
	}
	return xp.Exec(t, opts...)
}
https://github.com/iron-io/functions_go/blob/91b84f5bbb17095bf1c7028ec6e70a3dc06a5893/client/tasks/get_tasks_parameters.go#L92-L95
func (o *GetTasksParams) WithHTTPClient(client *http.Client) *GetTasksParams {
	o.SetHTTPClient(client)
	return o
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/cluster/events.go#L115-L126
func eventsConnect(address string, cert *shared.CertInfo) (*lxd.EventListener, error) {
	client, err := Connect(address, cert, true)
	if err != nil {
		return nil, err
	}

	// Set the project to the special wildcard in order to get notified
	// about all events across all projects.
	client = client.UseProject("*")

	return client.GetEvents()
}
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L1941-L1950
func (u OperationBody) GetCreatePassiveOfferOp() (result CreatePassiveOfferOp, ok bool) {
	armName, _ := u.ArmForSwitch(int32(u.Type))

	if armName == "CreatePassiveOfferOp" {
		result = *u.CreatePassiveOfferOp
		ok = true
	}

	return
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/emulation/emulation.go#L535-L538
func (p SetVirtualTimePolicyParams) WithInitialVirtualTime(initialVirtualTime *cdp.TimeSinceEpoch) *SetVirtualTimePolicyParams {
	p.InitialVirtualTime = initialVirtualTime
	return &p
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L880-L884
func (v *SetFontSizesParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage9(&r, v)
	return r.Error()
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L2750-L2759
func (c *Client) DeleteProjectCard(projectCardID int) error {
	c.log("DeleteProjectCard", projectCardID)
	_, err := c.request(&request{
		method: http.MethodDelete,
		// allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/
		accept:    "application/vnd.github.symmetra-preview+json",
		path:      fmt.Sprintf("/projects/columns/cards/:%d", projectCardID),
		exitCodes: []int{204},
	}, nil)
	return err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/check.go#L119-L133
func NewCheckPerfCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "perf [options]",
		Short: "Check the performance of the etcd cluster",
		Run:   newCheckPerfCommand,
	}

	// TODO: support customized configuration
	cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
	cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.")
	cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
	cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")

	return cmd
}
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/rule/rule.go#L458-L465
func (l *Load) Symbols() []string {
	syms := make([]string, 0, len(l.symbols))
	for sym := range l.symbols {
		syms = append(syms, sym)
	}
	sort.Strings(syms)
	return syms
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/accessibility/accessibility.go#L92-L95
func (p GetPartialAXTreeParams) WithFetchRelatives(fetchRelatives bool) *GetPartialAXTreeParams {
	p.FetchRelatives = fetchRelatives
	return &p
}
https://github.com/btcsuite/btclog/blob/84c8d2346e9fc8c7b947e243b9c24e6df9fd206a/log.go#L399-L404
func (l *slog) Warn(args ...interface{}) {
	lvl := l.Level()
	if lvl <= LevelWarn {
		l.b.print("WRN", l.tag, args...)
	}
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pps/server/githook/githook.go#L59-L82
func RunGitHookServer(address string, etcdAddress string, etcdPrefix string) error {
	c, err := client.NewFromAddress(address)
	if err != nil {
		return err
	}
	etcdClient, err := etcd.New(etcd.Config{
		Endpoints:   []string{etcdAddress},
		DialOptions: client.DefaultDialOptions(),
	})
	if err != nil {
		return err
	}
	hook, err := github.New()
	if err != nil {
		return err
	}
	s := &gitHookServer{
		hook,
		c,
		etcdClient,
		ppsdb.Pipelines(etcdClient, etcdPrefix),
	}
	return http.ListenAndServe(fmt.Sprintf(":%d", GitHookPort), s)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/deck/main.go#L280-L428
func prodOnlyMain(cfg config.Getter, o options, mux *http.ServeMux) *http.ServeMux {
	prowJobClient, err := o.kubernetes.ProwJobClient(cfg().ProwJobNamespace, false)
	if err != nil {
		logrus.WithError(err).Fatal("Error getting ProwJob client for infrastructure cluster.")
	}
	buildClusterClients, err := o.kubernetes.BuildClusterClients(cfg().PodNamespace, false)
	if err != nil {
		logrus.WithError(err).Fatal("Error getting Kubernetes client.")
	}

	podLogClients := map[string]jobs.PodLogClient{}
	for clusterContext, client := range buildClusterClients {
		podLogClients[clusterContext] = &podLogClient{client: client}
	}

	ja := jobs.NewJobAgent(&filteringProwJobLister{
		client:      prowJobClient,
		hiddenRepos: sets.NewString(cfg().Deck.HiddenRepos...),
		hiddenOnly:  o.hiddenOnly,
		showHidden:  o.showHidden,
	}, podLogClients, cfg)
	ja.Start()

	// setup prod only handlers
	mux.Handle("/data.js", gziphandler.GzipHandler(handleData(ja)))
	mux.Handle("/prowjobs.js", gziphandler.GzipHandler(handleProwJobs(ja)))
	mux.Handle("/badge.svg", gziphandler.GzipHandler(handleBadge(ja)))
	mux.Handle("/log", gziphandler.GzipHandler(handleLog(ja)))
	mux.Handle("/rerun", gziphandler.GzipHandler(handleRerun(prowJobClient)))

	if o.spyglass {
		initSpyglass(cfg, o, mux, ja)
	}

	if o.hookURL != "" {
		mux.Handle("/plugin-help.js", gziphandler.GzipHandler(handlePluginHelp(newHelpAgent(o.hookURL))))
	}

	if o.tideURL != "" {
		ta := &tideAgent{
			log:  logrus.WithField("agent", "tide"),
			path: o.tideURL,
			updatePeriod: func() time.Duration {
				return cfg().Deck.TideUpdatePeriod
			},
			hiddenRepos: cfg().Deck.HiddenRepos,
			hiddenOnly:  o.hiddenOnly,
			showHidden:  o.showHidden,
		}
		ta.start()
		mux.Handle("/tide.js", gziphandler.GzipHandler(handleTidePools(cfg, ta)))
		mux.Handle("/tide-history.js", gziphandler.GzipHandler(handleTideHistory(ta)))
	}

	// Enable Git OAuth feature if oauthURL is provided.
	if o.oauthURL != "" {
		githubOAuthConfigRaw, err := loadToken(o.githubOAuthConfigFile)
		if err != nil {
			logrus.WithError(err).Fatal("Could not read github oauth config file.")
		}

		cookieSecretRaw, err := loadToken(o.cookieSecretFile)
		if err != nil {
			logrus.WithError(err).Fatal("Could not read cookie secret file.")
		}

		var githubOAuthConfig config.GitHubOAuthConfig
		if err := yaml.Unmarshal(githubOAuthConfigRaw, &githubOAuthConfig); err != nil {
			logrus.WithError(err).Fatal("Error unmarshalling github oauth config")
		}
		if !isValidatedGitOAuthConfig(&githubOAuthConfig) {
			logrus.Fatal("Error invalid github oauth config")
		}

		decodedSecret, err := base64.StdEncoding.DecodeString(string(cookieSecretRaw))
		if err != nil {
			logrus.WithError(err).Fatal("Error decoding cookie secret")
		}
		if len(decodedSecret) == 0 {
			logrus.Fatal("Cookie secret should not be empty")
		}
		cookie := sessions.NewCookieStore(decodedSecret)
		githubOAuthConfig.InitGitHubOAuthConfig(cookie)

		goa := githuboauth.NewAgent(&githubOAuthConfig, logrus.WithField("client", "githuboauth"))
		oauthClient := &oauth2.Config{
			ClientID:     githubOAuthConfig.ClientID,
			ClientSecret: githubOAuthConfig.ClientSecret,
			RedirectURL:  githubOAuthConfig.RedirectURL,
			Scopes:       githubOAuthConfig.Scopes,
			Endpoint:     github.Endpoint,
		}

		repoSet := make(map[string]bool)
		for r := range cfg().Presubmits {
			repoSet[r] = true
		}
		for _, q := range cfg().Tide.Queries {
			for _, v := range q.Repos {
				repoSet[v] = true
			}
		}
		var repos []string
		for k, v := range repoSet {
			if v {
				repos = append(repos, k)
			}
		}

		prStatusAgent := prstatus.NewDashboardAgent(
			repos,
			&githubOAuthConfig,
			logrus.WithField("client", "pr-status"))

		mux.Handle("/pr-data.js", handleNotCached(
			prStatusAgent.HandlePrStatus(prStatusAgent)))
		// Handles login request.
		mux.Handle("/github-login", goa.HandleLogin(oauthClient))
		// Handles redirect from GitHub OAuth server.
		mux.Handle("/github-login/redirect", goa.HandleRedirect(oauthClient, githuboauth.NewGitHubClientGetter()))
	}

	// optionally inject http->https redirect handler when behind loadbalancer
	if o.redirectHTTPTo != "" {
		redirectMux := http.NewServeMux()
		redirectMux.Handle("/", func(oldMux *http.ServeMux, host string) http.HandlerFunc {
			return func(w http.ResponseWriter, r *http.Request) {
				if r.Header.Get("x-forwarded-proto") == "http" {
					redirectURL, err := url.Parse(r.URL.String())
					if err != nil {
						logrus.Errorf("Failed to parse URL: %s.", r.URL.String())
						http.Error(w, "Failed to perform https redirect.", http.StatusInternalServerError)
						return
					}
					redirectURL.Scheme = "https"
					redirectURL.Host = host
					http.Redirect(w, r, redirectURL.String(), http.StatusMovedPermanently)
				} else {
					oldMux.ServeHTTP(w, r)
				}
			}
		}(mux, o.redirectHTTPTo))
		mux = redirectMux
	}

	return mux
}
https://github.com/libp2p/go-libp2p-pubsub/blob/9db3dbdde90f44d1c420192c5cefd60682fbdbb9/pubsub.go#L280-L421
func (p *PubSub) processLoop(ctx context.Context) {
	defer func() {
		// Clean up go routines.
		for _, ch := range p.peers {
			close(ch)
		}
		p.peers = nil
		p.topics = nil
	}()

	for {
		select {
		case pid := <-p.newPeers:
			if _, ok := p.peers[pid]; ok {
				log.Warning("already have connection to peer: ", pid)
				continue
			}

			if p.blacklist.Contains(pid) {
				log.Warning("ignoring connection from blacklisted peer: ", pid)
				continue
			}

			messages := make(chan *RPC, 32)
			messages <- p.getHelloPacket()
			go p.handleNewPeer(ctx, pid, messages)
			p.peers[pid] = messages

		case s := <-p.newPeerStream:
			pid := s.Conn().RemotePeer()

			ch, ok := p.peers[pid]
			if !ok {
				log.Warning("new stream for unknown peer: ", pid)
				s.Reset()
				continue
			}

			if p.blacklist.Contains(pid) {
				log.Warning("closing stream for blacklisted peer: ", pid)
				close(ch)
				s.Reset()
				continue
			}

			p.rt.AddPeer(pid, s.Protocol())

		case pid := <-p.newPeerError:
			delete(p.peers, pid)

		case pid := <-p.peerDead:
			ch, ok := p.peers[pid]
			if !ok {
				continue
			}

			close(ch)

			if p.host.Network().Connectedness(pid) == inet.Connected {
				// still connected, must be a duplicate connection being closed.
				// we respawn the writer as we need to ensure there is a stream active
				log.Warning("peer declared dead but still connected; respawning writer: ", pid)
				messages := make(chan *RPC, 32)
				messages <- p.getHelloPacket()
				go p.handleNewPeer(ctx, pid, messages)
				p.peers[pid] = messages
				continue
			}

			delete(p.peers, pid)
			for _, t := range p.topics {
				delete(t, pid)
			}

			p.rt.RemovePeer(pid)

		case treq := <-p.getTopics:
			var out []string
			for t := range p.myTopics {
				out = append(out, t)
			}
			treq.resp <- out

		case sub := <-p.cancelCh:
			p.handleRemoveSubscription(sub)

		case sub := <-p.addSub:
			p.handleAddSubscription(sub)

		case preq := <-p.getPeers:
			tmap, ok := p.topics[preq.topic]
			if preq.topic != "" && !ok {
				preq.resp <- nil
				continue
			}
			var peers []peer.ID
			for p := range p.peers {
				if preq.topic != "" {
					_, ok := tmap[p]
					if !ok {
						continue
					}
				}
				peers = append(peers, p)
			}
			preq.resp <- peers

		case rpc := <-p.incoming:
			p.handleIncomingRPC(rpc)

		case msg := <-p.publish:
			vals := p.getValidators(msg)
			p.pushMsg(vals, p.host.ID(), msg)

		case req := <-p.sendMsg:
			p.publishMessage(req.from, req.msg.Message)

		case req := <-p.addVal:
			p.addValidator(req)

		case req := <-p.rmVal:
			p.rmValidator(req)

		case thunk := <-p.eval:
			thunk()

		case pid := <-p.blacklistPeer:
			log.Infof("Blacklisting peer %s", pid)
			p.blacklist.Add(pid)

			ch, ok := p.peers[pid]
			if ok {
				close(ch)
				delete(p.peers, pid)
				for _, t := range p.topics {
					delete(t, pid)
				}
				p.rt.RemovePeer(pid)
			}

		case <-ctx.Done():
			log.Info("pubsub processloop shutting down")
			return
		}
	}
}
https://github.com/segmentio/nsq-go/blob/ff4eef968f46eb580d9dba4f637c5dfb1e5b2208/command.go#L33-L96
func ReadCommand(r *bufio.Reader) (cmd Command, err error) {
	var line string

	if line, err = r.ReadString('\n'); err != nil {
		err = errors.Wrap(err, "reading command")
		return
	}

	if n := len(line); n == 0 || line[n-1] != '\n' {
		err = errors.New("missing newline at the end of a command")
		return
	} else {
		line = line[:n-1]
	}

	if line == "IDENTIFY" {
		return readIdentify(r)
	}

	if strings.HasPrefix(line, "SUB ") {
		return readSub(line[4:])
	}

	if strings.HasPrefix(line, "PUB ") {
		return readPub(line[4:], r)
	}

	if strings.HasPrefix(line, "MPUB ") {
		return readMPub(line[5:], r)
	}

	if strings.HasPrefix(line, "RDY ") {
		return readRdy(line[4:])
	}

	if strings.HasPrefix(line, "FIN ") {
		return readFin(line[4:])
	}

	if strings.HasPrefix(line, "REQ ") {
		return readReq(line[4:])
	}

	if strings.HasPrefix(line, "TOUCH ") {
		return readTouch(line[6:])
	}

	if line == "AUTH" {
		return readAuth(r)
	}

	if line == "CLS" {
		cmd = Cls{}
		return
	}

	if line == "NOP" {
		cmd = Nop{}
		return
	}

	err = errors.New("unknown command " + line)
	return
}
https://github.com/rlmcpherson/s3gof3r/blob/864ae0bf7cf2e20c0002b7ea17f4d84fec1abc14/putter.go#L177-L191
func (p *putter) retryPutPart(part *part) {
	defer p.wg.Done()
	var err error
	for i := 0; i < p.c.NTry; i++ {
		err = p.putPart(part)
		if err == nil {
			p.sp.give <- part.b
			part.b = nil
			return
		}
		logger.debugPrintf("Error on attempt %d: Retrying part: %d, Error: %s", i, part.PartNumber, err)
		time.Sleep(time.Duration(math.Exp2(float64(i))) * 100 * time.Millisecond) // exponential back-off
	}
	p.err = err
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/provision/provision.go#L189-L193
func (u *Unit) Available() bool {
	return u.Status == StatusStarted ||
		u.Status == StatusStarting ||
		u.Status == StatusError
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/easyjson.go#L6784-L6788
func (v *EventRequestIntercepted) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoNetwork52(&r, v)
	return r.Error()
}
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/pbuf.go#L54-L56
func (b *PointerRingBuf) ReadPtrs(p []interface{}) (n int, err error) {
	return b.readAndMaybeAdvance(p, true)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/exec/exec.go#L438-L450
func (c *Cmd) Wait() error {
	if c.Process == nil {
		return errors.New("exec: not started")
	}
	if c.finished {
		return errors.New("exec: Wait was already called")
	}
	c.finished = true

	state, err := c.Process.Wait()
	return c.WaitIO(state, err)
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/manifest/manifest.go#L221-L234
func FromBlob(manblob []byte, mt string) (Manifest, error) {
	switch NormalizedMIMEType(mt) {
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
		return Schema1FromManifest(manblob)
	case imgspecv1.MediaTypeImageManifest:
		return OCI1FromManifest(manblob)
	case DockerV2Schema2MediaType:
		return Schema2FromManifest(manblob)
	case DockerV2ListMediaType:
		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
	default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
	}
}
https://github.com/TheThingsNetwork/go-utils/blob/aa2a11bd59104d2a8609328c2b2b55da61826470/errors/registry.go#L71-L77
func From(in error) Error {
	if err, ok := in.(Error); ok {
		return err
	}
	return FromGRPC(in)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L975-L1210
func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
	mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Lease_LeaseLeases_1(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})

	return nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/grift/options.go#L20-L22
func (opts Options) Last(n name.Ident) bool {
	return opts.Parts[len(opts.Parts)-1].String() == n.String()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/exec_watch_command.go#L48-L121
func execWatchCommandFunc(c *cli.Context, ki client.KeysAPI) {
	args := c.Args()
	argslen := len(args)

	if argslen < 2 {
		handleError(c, ExitBadArgs, errors.New("key and command to exec required"))
	}

	var (
		key     string
		cmdArgs []string
	)

	foundSep := false
	for i := range args {
		if args[i] == "--" && i != 0 {
			foundSep = true
			break
		}
	}

	if foundSep {
		key = args[0]
		cmdArgs = args[2:]
	} else {
		// If no flag is parsed, the order of key and cmdArgs will be switched and
		// args will not contain `--`.
		key = args[argslen-1]
		cmdArgs = args[:argslen-1]
	}

	index := 0
	if c.Int("after-index") != 0 {
		index = c.Int("after-index")
	}

	recursive := c.Bool("recursive")

	sigch := make(chan os.Signal, 1)
	signal.Notify(sigch, os.Interrupt)

	go func() {
		<-sigch
		os.Exit(0)
	}()

	w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})

	for {
		resp, err := w.Next(context.TODO())
		if err != nil {
			handleError(c, ExitServerError, err)
		}
		if resp.Node.Dir {
			fmt.Fprintf(os.Stderr, "Ignored dir %s change\n", resp.Node.Key)
			continue
		}

		cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
		cmd.Env = environResponse(resp, os.Environ())

		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr

		go func() {
			err := cmd.Start()
			if err != nil {
				// Use Fprint so a "%" in the error text is not interpreted as a format verb.
				fmt.Fprint(os.Stderr, err.Error())
				os.Exit(1)
			}
			cmd.Wait()
		}()
	}
}
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/axe/pool.go#L27-L41
func (p *Pool) Add(task *Task) {
	// check existence
	if p.tasks[task.Name] != nil {
		panic(fmt.Sprintf(`axe: task with name "%s" already exists`, task.Name))
	}

	// save task
	p.tasks[task.Name] = task

	// add task to queue
	task.Queue.tasks = append(task.Queue.tasks, task.Name)

	// save queue
	p.queues[task.Queue] = true
}
https://github.com/HiLittleCat/core/blob/ae2101184ecd36354d3fcff0ea69d67d3fdbe156/context.go#L139-L146
func (ctx *Context) ResStatus(code int) (int, error) {
	if ctx.written {
		return 0, errors.New("Context.ResStatus: response has already been written")
	}
	ctx.written = true
	ctx.ResponseWriter.WriteHeader(code)
	return fmt.Fprint(ctx.ResponseWriter, http.StatusText(code))
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/errors.go#L48-L56
func (e ErrorHandlers) Get(status int) ErrorHandler {
	if eh, ok := e[status]; ok {
		return eh
	}
	if eh, ok := e[0]; ok {
		return eh
	}
	return defaultErrorHandler
}
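A minimal sketch of the fallback lookup above, using a simplified, hypothetical ErrorHandler signature (Buffalo's real handler also receives a Context): an exact status match wins, then a catch-all registered under key 0, then the package default.

package main

import (
	"errors"
	"fmt"
)

// Simplified, hypothetical handler signature for illustration only.
type ErrorHandler func(status int, err error) error

type ErrorHandlers map[int]ErrorHandler

var defaultErrorHandler ErrorHandler = func(status int, err error) error {
	fmt.Printf("default handler: %d: %v\n", status, err)
	return nil
}

// Get mirrors the lookup order above: exact status, then key 0, then the default.
func (e ErrorHandlers) Get(status int) ErrorHandler {
	if eh, ok := e[status]; ok {
		return eh
	}
	if eh, ok := e[0]; ok {
		return eh
	}
	return defaultErrorHandler
}

func main() {
	handlers := ErrorHandlers{
		404: func(status int, err error) error {
			fmt.Println("custom 404 page")
			return nil
		},
	}
	handlers.Get(404)(404, errors.New("not found")) // custom 404 page
	handlers.Get(500)(500, errors.New("boom"))      // default handler: 500: boom
}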
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/serviceenv/service_env.go#L63-L68
func InitPachOnlyEnv(config *Configuration) *ServiceEnv {
	env := &ServiceEnv{Configuration: config}
	env.pachAddress = net.JoinHostPort("127.0.0.1", fmt.Sprintf("%d", env.PeerPort))
	env.pachEg.Go(env.initPachClient)
	return env // env is not ready yet
}
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/resolve/index.go#L236-L246
func (r FindResult) IsSelfImport(from label.Label) bool {
	if from.Equal(r.Label) {
		return true
	}
	for _, e := range r.Embeds {
		if from.Equal(e) {
			return true
		}
	}
	return false
}
https://github.com/blacksails/cgp/blob/570ac705cf2d7a9235d911d00b6f976ab3386c2f/account.go#L20-L27
func (a Account) RealName() (string, error) {
	var d dictionary
	err := a.Domain.cgp.request(getAccountSettings{Account: a.Email()}, &d)
	if err != nil {
		return "", err
	}
	return d.toMap()["RealName"], nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/profiler/easyjson.go#L739-L743
func (v StopPreciseCoverageParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoProfiler8(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/client/lxd_networks.go#L12-L33
func (r *ProtocolLXD) GetNetworkNames() ([]string, error) {
	if !r.HasExtension("network") {
		return nil, fmt.Errorf("The server is missing the required \"network\" API extension")
	}

	urls := []string{}

	// Fetch the raw value
	_, err := r.queryStruct("GET", "/networks", nil, "", &urls)
	if err != nil {
		return nil, err
	}

	// Parse it
	names := []string{}
	for _, url := range urls {
		fields := strings.Split(url, "/networks/")
		names = append(names, fields[len(fields)-1])
	}

	return names, nil
}
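A short usage sketch, assuming a local LXD daemon; the unix socket path is an example and differs between deb and snap installs.

package main

import (
	"fmt"
	"log"

	lxd "github.com/lxc/lxd/client"
)

func main() {
	// Socket path is an assumption; adjust for your installation.
	c, err := lxd.ConnectLXDUnix("/var/lib/lxd/unix.socket", nil)
	if err != nil {
		log.Fatal(err)
	}

	names, err := c.GetNetworkNames()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}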
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/signature/signature.go#L124-L186
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
	var critical, optional json.RawMessage
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"critical": &critical,
		"optional": &optional,
	}); err != nil {
		return err
	}

	var creatorID string
	var timestamp float64
	var gotCreatorID, gotTimestamp = false, false
	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
		switch key {
		case "creator":
			gotCreatorID = true
			return &creatorID
		case "timestamp":
			gotTimestamp = true
			return &timestamp
		default:
			var ignore interface{}
			return &ignore
		}
	}); err != nil {
		return err
	}
	if gotCreatorID {
		s.UntrustedCreatorID = &creatorID
	}
	if gotTimestamp {
		intTimestamp := int64(timestamp)
		if float64(intTimestamp) != timestamp {
			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
		}
		s.UntrustedTimestamp = &intTimestamp
	}

	var t string
	var image, identity json.RawMessage
	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
		"type":     &t,
		"image":    &image,
		"identity": &identity,
	}); err != nil {
		return err
	}
	if t != signatureType {
		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
	}

	var digestString string
	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
		"docker-manifest-digest": &digestString,
	}); err != nil {
		return err
	}
	s.UntrustedDockerManifestDigest = digest.Digest(digestString)

	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
		"docker-reference": &s.UntrustedDockerReference,
	})
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/controller.go#L166-L230
func (c *Controller) Sync() error {
	pjs, err := c.prowJobClient.List(metav1.ListOptions{LabelSelector: c.selector})
	if err != nil {
		return fmt.Errorf("error listing prow jobs: %v", err)
	}
	// Share what we have for gathering metrics.
	c.pjLock.Lock()
	c.pjs = pjs.Items
	c.pjLock.Unlock()

	// TODO: Replace the following filtering with a field selector once CRDs support field selectors.
	// https://github.com/kubernetes/kubernetes/issues/53459
	var jenkinsJobs []prowapi.ProwJob
	for _, pj := range pjs.Items {
		if pj.Spec.Agent == prowapi.JenkinsAgent {
			jenkinsJobs = append(jenkinsJobs, pj)
		}
	}
	jbs, err := c.jc.ListBuilds(getJenkinsJobs(jenkinsJobs))
	if err != nil {
		return fmt.Errorf("error listing jenkins builds: %v", err)
	}

	var syncErrs []error
	if err := c.terminateDupes(jenkinsJobs, jbs); err != nil {
		syncErrs = append(syncErrs, err)
	}

	pendingCh, triggeredCh := pjutil.PartitionActive(jenkinsJobs)
	errCh := make(chan error, len(jenkinsJobs))
	reportCh := make(chan prowapi.ProwJob, len(jenkinsJobs))

	// Reinstantiate on every resync of the controller instead of trying
	// to keep this in sync with the state of the world.
	c.pendingJobs = make(map[string]int)
	// Sync pending jobs first so we can determine what is the maximum
	// number of new jobs we can trigger when syncing the non-pendings.
	maxSyncRoutines := c.config().MaxGoroutines
	c.log.Debugf("Handling %d pending prowjobs", len(pendingCh))
	syncProwJobs(c.log, c.syncPendingJob, maxSyncRoutines, pendingCh, reportCh, errCh, jbs)
	c.log.Debugf("Handling %d triggered prowjobs", len(triggeredCh))
	syncProwJobs(c.log, c.syncTriggeredJob, maxSyncRoutines, triggeredCh, reportCh, errCh, jbs)

	close(errCh)
	close(reportCh)

	for err := range errCh {
		syncErrs = append(syncErrs, err)
	}

	var reportErrs []error
	reportTemplate := c.config().ReportTemplate
	reportTypes := c.config().GitHubReporter.JobTypesToReport
	for report := range reportCh {
		if err := reportlib.Report(c.ghc, reportTemplate, report, reportTypes); err != nil {
			reportErrs = append(reportErrs, err)
			c.log.WithFields(pjutil.ProwJobFields(&report)).WithError(err).Warn("Failed to report ProwJob status")
		}
	}

	if len(syncErrs) == 0 && len(reportErrs) == 0 {
		return nil
	}
	return fmt.Errorf("errors syncing: %v, errors reporting: %v", syncErrs, reportErrs)
}
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/module/module.go#L89-L99
func Start(c context.Context, module, version string) error {
	req := &pb.StartModuleRequest{}
	if module != "" {
		req.Module = &module
	}
	if version != "" {
		req.Version = &version
	}
	res := &pb.StartModuleResponse{}
	return internal.Call(c, "modules", "StartModule", req, res)
}
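A hedged usage sketch: inside an App Engine handler, a context derived from the request is passed to Start. The module and version names ("backend", "v1") are placeholders and must match the app's deployed modules.

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/module"
)

func startBackend(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// "backend" and "v1" are assumed names for illustration only.
	if err := module.Start(ctx, "backend", "v1"); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "started backend v1")
}

func main() {
	http.HandleFunc("/start", startBackend)
	appengine.Main()
}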
https://github.com/janos/web/blob/0fb0203103deb84424510a8d5166ac00700f2b0e/recovery/recovery.go#L49-L51
func WithPanicResponseHandler(h http.Handler) Option {
	return func(o *Handler) { o.panicResponseHandler = h }
}
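The snippet above is a functional option. The sketch below is a hypothetical reconstruction of how such an option is typically consumed; the New constructor and the Handler fields are assumptions for illustration, not the package's actual API.

package main

import (
	"fmt"
	"net/http"
)

// Hypothetical mirror of the package's option machinery, for illustration only.
type Handler struct {
	panicResponseHandler http.Handler
}

type Option func(*Handler)

func WithPanicResponseHandler(h http.Handler) Option {
	return func(o *Handler) { o.panicResponseHandler = h }
}

// New applies each option to a zero-value Handler.
func New(options ...Option) *Handler {
	h := &Handler{}
	for _, option := range options {
		option(h)
	}
	return h
}

func main() {
	fallback := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "internal server error", http.StatusInternalServerError)
	})
	h := New(WithPanicResponseHandler(fallback))
	fmt.Println(h.panicResponseHandler != nil) // true
}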
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/emulation/emulation.go#L343-L345
func (p *SetEmitTouchEventsForMouseParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandSetEmitTouchEventsForMouse, p, nil)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/session.go#L38-L72
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
	for _, opt := range opts {
		opt(ops)
	}

	id := ops.leaseID
	if id == v3.NoLease {
		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
		if err != nil {
			return nil, err
		}
		id = v3.LeaseID(resp.ID)
	}

	ctx, cancel := context.WithCancel(ops.ctx)
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		cancel()
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
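A usage sketch of the session in the usual lock/election flow, assuming a reachable etcd endpoint (the address below is an example).

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // example endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// NewSession grants a lease and keeps it alive in the background.
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close() // revokes the lease and stops the keep-alive loop

	// A session is typically handed to a mutex or election.
	m := concurrency.NewMutex(s, "/locks/example")
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	log.Println("lock acquired")
	if err := m.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}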
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/app.go#L433-L532
func updateApp(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {
	var ia inputApp
	err = ParseInput(r, &ia)
	if err != nil {
		return err
	}
	imageReset, _ := strconv.ParseBool(InputValue(r, "imageReset"))
	updateData := app.App{
		TeamOwner:      ia.TeamOwner,
		Plan:           appTypes.Plan{Name: ia.Plan},
		Pool:           ia.Pool,
		Description:    ia.Description,
		Router:         ia.Router,
		Tags:           ia.Tags,
		Platform:       InputValue(r, "platform"),
		UpdatePlatform: imageReset,
		RouterOpts:     ia.RouterOpts,
	}
	tags, _ := InputValues(r, "tag")
	updateData.Tags = append(updateData.Tags, tags...) // for compatibility
	appName := r.URL.Query().Get(":appname")
	a, err := getAppFromContext(appName, r)
	if err != nil {
		return err
	}
	var wantedPerms []*permission.PermissionScheme
	if updateData.Router != "" || len(updateData.RouterOpts) > 0 {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: "updating router was deprecated, please add the wanted router and remove the old one"}
	}
	if updateData.Description != "" {
		wantedPerms = append(wantedPerms, permission.PermAppUpdateDescription)
	}
	if len(updateData.Tags) > 0 {
		wantedPerms = append(wantedPerms, permission.PermAppUpdateTags)
	}
	if updateData.Plan.Name != "" {
		wantedPerms = append(wantedPerms, permission.PermAppUpdatePlan)
	}
	if updateData.Pool != "" {
		wantedPerms = append(wantedPerms, permission.PermAppUpdatePool)
	}
	if updateData.TeamOwner != "" {
		wantedPerms = append(wantedPerms, permission.PermAppUpdateTeamowner)
	}
	if updateData.Platform != "" {
		repo, _ := image.SplitImageName(updateData.Platform)
		platform, errPlat := servicemanager.Platform.FindByName(repo)
		if errPlat != nil {
			return errPlat
		}
		if platform.Disabled {
			canUsePlat := permission.Check(t, permission.PermPlatformUpdate) ||
				permission.Check(t, permission.PermPlatformCreate)
			if !canUsePlat {
				return &errors.HTTP{Code: http.StatusBadRequest, Message: appTypes.ErrInvalidPlatform.Error()}
			}
		}
		wantedPerms = append(wantedPerms, permission.PermAppUpdatePlatform)
		updateData.UpdatePlatform = true
	}
	if updateData.UpdatePlatform {
		wantedPerms = append(wantedPerms, permission.PermAppUpdateImageReset)
	}
	if len(wantedPerms) == 0 {
		msg := "Neither the description, tags, plan, pool, team owner or platform were set. You must define at least one."
		return &errors.HTTP{Code: http.StatusBadRequest, Message: msg}
	}
	for _, perm := range wantedPerms {
		allowed := permission.Check(t, perm,
			contextsForApp(&a)...,
		)
		if !allowed {
			return permission.ErrUnauthorized
		}
	}
	evt, err := event.New(&event.Opts{
		Target:     appTarget(appName),
		Kind:       permission.PermAppUpdate,
		Owner:      t,
		CustomData: event.FormToCustomData(InputFields(r)),
		Allowed:    event.Allowed(permission.PermAppReadEvents, contextsForApp(&a)...),
	})
	if err != nil {
		return err
	}
	defer func() { evt.Done(err) }()
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, "")
	defer keepAliveWriter.Stop()
	w.Header().Set("Content-Type", "application/x-json-stream")
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	evt.SetLogWriter(writer)
	err = a.Update(updateData, evt)
	if err == appTypes.ErrPlanNotFound {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	if _, ok := err.(*router.ErrRouterNotFound); ok {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	return err
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L4805-L4809
func (v *FocusParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDom54(&r, v)
	return r.Error()
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/image/docker_schema2.go#L52-L58
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
	return &manifestSchema2{
		src:        src,
		configBlob: configBlob,
		m:          manifest.Schema2FromComponents(config, layers),
	}
}
https://github.com/kubicorn/kubicorn/blob/c4a4b80994b4333709c0f8164faabd801866b986/profiles/azure/ubuntu.go#L25-L132
func NewUbuntuCluster(name string) *cluster.Cluster {
	controlPlaneProviderConfig := &cluster.ControlPlaneProviderConfig{
		Cloud:    cluster.CloudAzure,
		Location: "eastus",
		SSH: &cluster.SSH{
			PublicKeyPath: "~/.ssh/id_rsa.pub",
			User:          "root",
		},
		KubernetesAPI: &cluster.KubernetesAPI{
			Port: "443",
		},
		Values: &cluster.Values{
			ItemMap: map[string]string{
				"INJECTEDTOKEN": kubeadm.GetRandomToken(),
			},
		},
	}

	machineSetsProviderConfigs := []*cluster.MachineProviderConfig{
		{
			ServerPool: &cluster.ServerPool{
				Type:             cluster.ServerPoolTypeMaster,
				Name:             fmt.Sprintf("%s-master", name),
				MaxCount:         1,
				Image:            "UbuntuServer",
				Size:             "Standard_DS3_v2",
				BootstrapScripts: []string{},
				Firewalls: []*cluster.Firewall{
					{
						Name: fmt.Sprintf("%s-master", name),
						IngressRules: []*cluster.IngressRule{
							{
								IngressToPort:   "22",
								IngressSource:   "0.0.0.0/0",
								IngressProtocol: "tcp",
							},
							{
								IngressToPort:   "443",
								IngressSource:   "0.0.0.0/0",
								IngressProtocol: "tcp",
							},
							{
								IngressToPort:   "1194",
								IngressSource:   "0.0.0.0/0",
								IngressProtocol: "udp",
							},
						},
						EgressRules: []*cluster.EgressRule{
							{
								EgressToPort:      "all", // By default all egress from VM
								EgressDestination: "0.0.0.0/0",
								EgressProtocol:    "tcp",
							},
							{
								EgressToPort:      "all", // By default all egress from VM
								EgressDestination: "0.0.0.0/0",
								EgressProtocol:    "udp",
							},
						},
					},
				},
			},
		},
		{
			ServerPool: &cluster.ServerPool{
				Type:             cluster.ServerPoolTypeNode,
				Name:             fmt.Sprintf("%s-node", name),
				MaxCount:         1,
				Image:            "UbuntuServer",
				Size:             "Standard_DS3_v2",
				BootstrapScripts: []string{},
				Firewalls: []*cluster.Firewall{
					{
						Name: fmt.Sprintf("%s-node", name),
						IngressRules: []*cluster.IngressRule{
							{
								IngressToPort:   "22",
								IngressSource:   "0.0.0.0/0",
								IngressProtocol: "tcp",
							},
							{
								IngressToPort:   "1194",
								IngressSource:   "0.0.0.0/0",
								IngressProtocol: "udp",
							},
						},
						EgressRules: []*cluster.EgressRule{
							{
								EgressToPort:      "all", // By default all egress from VM
								EgressDestination: "0.0.0.0/0",
								EgressProtocol:    "tcp",
							},
							{
								EgressToPort:      "all", // By default all egress from VM
								EgressDestination: "0.0.0.0/0",
								EgressProtocol:    "udp",
							},
						},
					},
				},
			},
		},
	}

	c := cluster.NewCluster(name)
	c.SetProviderConfig(controlPlaneProviderConfig)
	c.NewMachineSetsFromProviderConfigs(machineSetsProviderConfigs)
	return c
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/samples/gopher2/gopher2.go#L20-L32
func Main(gc draw2d.GraphicContext, ext string) (string, error) {
	gc.SetStrokeColor(image.Black)
	gc.SetFillColor(image.White)
	gc.Save()
	// Draw a (partial) gopher
	gc.Translate(-60, 65)
	gc.Rotate(-30 * (math.Pi / 180.0))
	Draw(gc, 48, 48, 240, 72)
	gc.Restore()
	// Return the output filename
	return samples.Output("gopher2", ext), nil
}
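A standalone sketch of driving a GraphicContext outside the samples package, using the draw2dimg backend and the same Save / transform / Restore pattern; canvas size and output path are arbitrary choices for illustration.

package main

import (
	"image"
	"image/color"
	"log"
	"math"

	"github.com/llgcode/draw2d/draw2dimg"
)

func main() {
	// Arbitrary canvas size for this sketch.
	img := image.NewRGBA(image.Rect(0, 0, 297, 210))
	gc := draw2dimg.NewGraphicContext(img)

	gc.SetStrokeColor(color.Black)
	gc.SetFillColor(color.White)
	gc.SetLineWidth(2)

	// Apply a transform, draw, then restore the previous state.
	gc.Save()
	gc.Translate(100, 100)
	gc.Rotate(-30 * (math.Pi / 180.0))
	gc.MoveTo(0, 0)
	gc.LineTo(80, 0)
	gc.Stroke()
	gc.Restore()

	if err := draw2dimg.SaveToPngFile("out.png", img); err != nil {
		log.Fatal(err)
	}
}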