_id
stringlengths
86
170
text
stringlengths
54
39.3k
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/workload/workload.go#L101-L117
func (w *worker) createRepo(c *client.APIClient) error { repoName := w.randString(10) if err := c.CreateRepo(repoName); err != nil { return err } w.repos = append(w.repos, &pfs.Repo{Name: repoName}) // Start the first commit in the repo (no parent). This is critical to // advanceCommit(), which will try to finish a commit the first time it's // called, and therefore must have an open commit to finish. commit, err := c.StartCommit(repoName, "") if err != nil { return err } w.started = append(w.started, commit) return nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/emulation/emulation.go#L406-L408
// Do executes the Emulation.setGeolocationOverride CDP command with the
// receiver's parameters; the command returns no payload.
func (p *SetGeolocationOverrideParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandSetGeolocationOverride, p, nil)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/obj/obj.go#L286-L288
// NewAmazonClient creates an Amazon (S3) object-store Client for the given
// region and bucket, delegating to the unexported constructor. distribution
// presumably names a CloudFront distribution and reversed tweaks key layout
// — confirm against newAmazonClient.
func NewAmazonClient(region, bucket string, creds *AmazonCreds, distribution string, reversed ...bool) (Client, error) {
	return newAmazonClient(region, bucket, creds, distribution, reversed...)
}
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/rbuf.go#L411-L435
// Nextpos returns the readable position that follows from in the ring
// buffer. It returns -2 when from is outside [0, f.N), and -1 when there is
// no following readable position (buffer empty, from is the last readable
// slot, or from falls outside the readable spans).
func (f *FixedSizeRingBuf) Nextpos(from int) int {
	if from >= f.N || from < 0 {
		return -2
	}
	if f.Readable == 0 {
		return -1
	}
	last := f.Last()
	if from == last {
		return -1
	}
	// The readable region consists of at most two spans; LegalPos returns
	// their boundaries (the second span covers wrap-around data).
	a0, a1, b0, b1 := f.LegalPos()
	switch {
	case from >= a0 && from < a1:
		return from + 1
	case from == a1:
		// End of the first span: continue at the start of the second.
		return b0 // can be -1
	case from >= b0 && from < b1:
		return from + 1
	case from == b1:
		return -1
	}
	return -1
}
https://github.com/mrd0ll4r/tbotapi/blob/edc257282178bb5cebbfcc41260ec04c1ec7ac19/sendable.go#L108-L117
func (oc *OutgoingChatAction) Send() error { resp := &baseResponse{} _, err := oc.api.c.postJSON(sendChatAction, resp, oc) if err != nil { return err } return check(resp) }
https://github.com/ant0ine/go-json-rest/blob/ebb33769ae013bd5f518a8bac348c310dea768b8/rest/cors.go#L53-L135
func (mw *CorsMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { // precompute as much as possible at init time mw.allowedMethods = map[string]bool{} normedMethods := []string{} for _, allowedMethod := range mw.AllowedMethods { normed := strings.ToUpper(allowedMethod) mw.allowedMethods[normed] = true normedMethods = append(normedMethods, normed) } mw.allowedMethodsCsv = strings.Join(normedMethods, ",") mw.allowedHeaders = map[string]bool{} normedHeaders := []string{} for _, allowedHeader := range mw.AllowedHeaders { normed := http.CanonicalHeaderKey(allowedHeader) mw.allowedHeaders[normed] = true normedHeaders = append(normedHeaders, normed) } mw.allowedHeadersCsv = strings.Join(normedHeaders, ",") return func(writer ResponseWriter, request *Request) { corsInfo := request.GetCorsInfo() // non CORS requests if !corsInfo.IsCors { if mw.RejectNonCorsRequests { Error(writer, "Non CORS request", http.StatusForbidden) return } // continue, execute the wrapped middleware handler(writer, request) return } // Validate the Origin if mw.OriginValidator(corsInfo.Origin, request) == false { Error(writer, "Invalid Origin", http.StatusForbidden) return } if corsInfo.IsPreflight { // check the request methods if mw.allowedMethods[corsInfo.AccessControlRequestMethod] == false { Error(writer, "Invalid Preflight Request", http.StatusForbidden) return } // check the request headers for _, requestedHeader := range corsInfo.AccessControlRequestHeaders { if mw.allowedHeaders[requestedHeader] == false { Error(writer, "Invalid Preflight Request", http.StatusForbidden) return } } writer.Header().Set("Access-Control-Allow-Methods", mw.allowedMethodsCsv) writer.Header().Set("Access-Control-Allow-Headers", mw.allowedHeadersCsv) writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) if mw.AccessControlAllowCredentials == true { writer.Header().Set("Access-Control-Allow-Credentials", "true") } writer.Header().Set("Access-Control-Max-Age", 
strconv.Itoa(mw.AccessControlMaxAge)) writer.WriteHeader(http.StatusOK) return } // Non-preflight requests for _, exposed := range mw.AccessControlExposeHeaders { writer.Header().Add("Access-Control-Expose-Headers", exposed) } writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) if mw.AccessControlAllowCredentials == true { writer.Header().Set("Access-Control-Allow-Credentials", "true") } // continure, execute the wrapped middleware handler(writer, request) return } }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/storage/easyjson.go#L235-L239
func (v *UntrackCacheStorageForOriginParams) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjsonC5a4559bDecodeGithubComChromedpCdprotoStorage2(&r, v) return r.Error() }
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/rsapi/auth.go#L298-L301
func (a *rl10Authenticator) CanAuthenticate(host string) error { client := httpclient.New() return testAuth(a, client, host, true) }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/animation/easyjson.go#L929-L933
func (v GetPlaybackRateReturns) MarshalJSON() ([]byte, error) { w := jwriter.Writer{} easyjsonC5a4559bEncodeGithubComChromedpCdprotoAnimation9(&w, v) return w.Buffer.BuildBytes(), w.Error }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/spyglass/gcsartifact_fetcher.go#L183-L185
// canonicalLink joins the source's link prefix, bucket, and job prefix into
// the canonical URL path for this GCS job source.
func (src *gcsJobSource) canonicalLink() string {
	return path.Join(src.linkPrefix, src.bucket, src.jobPrefix)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/obj/obj.go#L222-L224
// NewMicrosoftClient creates an Azure blob-storage Client for the given
// container, authenticated with the account name and key; it delegates to
// the unexported constructor.
func NewMicrosoftClient(container string, accountName string, accountKey string) (Client, error) {
	return newMicrosoftClient(container, accountName, accountKey)
}
https://github.com/nwaples/rardecode/blob/197ef08ef68c4454ae5970a9c2692d6056ceb8d7/huffman.go#L142-L208
// readCodeLengthTable reads a huffman code-length table from br into
// codeLength. The table is itself huffman-coded: a pre-table of 20 lengths
// (4 bits each) is read first, then the real lengths are decoded with it.
// When addOld is true, literal lengths are added to the existing table
// values modulo 16 (delta mode against the previous table).
func readCodeLengthTable(br bitReader, codeLength []byte, addOld bool) error {
	var bitlength [20]byte
	for i := 0; i < len(bitlength); i++ {
		n, err := br.readBits(4)
		if err != nil {
			return err
		}
		if n == 0xf {
			// 0xF escapes into a zero-run: a second nibble holds the count.
			cnt, err := br.readBits(4)
			if err != nil {
				return err
			}
			if cnt > 0 {
				// array already zero'd dont need to explicitly set
				i += cnt + 1
				continue
			}
			// cnt == 0: fall through and store the literal length 0xF.
		}
		bitlength[i] = byte(n)
	}
	var bl huffmanDecoder
	bl.init(bitlength[:])
	for i := 0; i < len(codeLength); i++ {
		l, err := bl.readSym(br)
		if err != nil {
			return err
		}
		if l < 16 {
			// Symbols 0..15 are literal code lengths.
			if addOld {
				codeLength[i] = (codeLength[i] + byte(l)) & 0xf
			} else {
				codeLength[i] = byte(l)
			}
			continue
		}
		// Symbols 16..19 encode repeat runs:
		//   16/17 repeat the previous length, 18/19 repeat zero;
		//   16/18 carry a 3-bit count + 3, 17/19 a 7-bit count + 11.
		var count int
		var value byte
		switch l {
		case 16, 18:
			count, err = br.readBits(3)
			count += 3
		default:
			count, err = br.readBits(7)
			count += 11
		}
		if err != nil {
			return err
		}
		if l < 18 {
			// Repeating the previous length is invalid at position 0.
			if i == 0 {
				return errInvalidLengthTable
			}
			value = codeLength[i-1]
		}
		for ; count > 0 && i < len(codeLength); i++ {
			codeLength[i] = value
			count--
		}
		// The outer loop increments i again; compensate by one.
		i--
	}
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/stream.go#L77-L110
// ToList collects the versions of key visible from itr into a pb.KVList.
// Iteration stops at the first deleted/expired item, at the first different
// key, after one item when the DB keeps only one version, or when an item
// marks earlier versions as discarded.
func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) {
	list := &pb.KVList{}
	for ; itr.Valid(); itr.Next() {
		item := itr.Item()
		if item.IsDeletedOrExpired() {
			// A deleted/expired version shadows everything older.
			break
		}
		if !bytes.Equal(key, item.Key()) {
			// Break out on the first encounter with another key.
			break
		}
		valCopy, err := item.ValueCopy(nil)
		if err != nil {
			return nil, err
		}
		kv := &pb.KV{
			Key:       item.KeyCopy(nil),
			Value:     valCopy,
			UserMeta:  []byte{item.UserMeta()},
			Version:   item.Version(),
			ExpiresAt: item.ExpiresAt(),
		}
		list.Kv = append(list.Kv, kv)
		if st.db.opt.NumVersionsToKeep == 1 {
			// Only the newest version is retained by this DB; stop early.
			break
		}
		if item.DiscardEarlierVersions() {
			break
		}
	}
	return list, nil
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/operations.go#L68-L163
// Description returns a human-readable, present-progressive description of
// the operation type, falling back to "Executing operation" for any type
// without a dedicated message.
func (t OperationType) Description() string {
	switch t {
	case OperationClusterBootstrap:
		return "Creating bootstrap node"
	case OperationClusterJoin:
		return "Joining cluster"
	case OperationBackupCreate:
		return "Backing up container"
	case OperationBackupRename:
		return "Renaming container backup"
	case OperationBackupRestore:
		return "Restoring backup"
	case OperationBackupRemove:
		return "Removing container backup"
	case OperationConsoleShow:
		return "Showing console"
	case OperationContainerCreate:
		return "Creating container"
	case OperationContainerUpdate:
		return "Updating container"
	case OperationContainerRename:
		return "Renaming container"
	case OperationContainerMigrate:
		return "Migrating container"
	case OperationContainerLiveMigrate:
		return "Live-migrating container"
	case OperationContainerFreeze:
		return "Freezing container"
	case OperationContainerUnfreeze:
		return "Unfreezing container"
	case OperationContainerDelete:
		return "Deleting container"
	case OperationContainerStart:
		return "Starting container"
	case OperationContainerStop:
		return "Stopping container"
	case OperationContainerRestart:
		return "Restarting container"
	case OperationCommandExec:
		return "Executing command"
	case OperationSnapshotCreate:
		return "Snapshotting container"
	case OperationSnapshotRename:
		return "Renaming snapshot"
	case OperationSnapshotRestore:
		return "Restoring snapshot"
	case OperationSnapshotTransfer:
		return "Transferring snapshot"
	case OperationSnapshotUpdate:
		return "Updating snapshot"
	case OperationSnapshotDelete:
		return "Deleting snapshot"
	case OperationImageDownload:
		return "Downloading image"
	case OperationImageDelete:
		return "Deleting image"
	case OperationImageToken:
		return "Image download token"
	case OperationImageRefresh:
		return "Refreshing image"
	case OperationVolumeCopy:
		return "Copying storage volume"
	case OperationVolumeCreate:
		return "Creating storage volume"
	case OperationVolumeMigrate:
		return "Migrating storage volume"
	case OperationVolumeMove:
		return "Moving storage volume"
	case OperationVolumeSnapshotCreate:
		return "Creating storage volume snapshot"
	case OperationVolumeSnapshotDelete:
		return "Deleting storage volume snapshot"
	case OperationVolumeSnapshotUpdate:
		return "Updating storage volume snapshot"
	case OperationProjectRename:
		return "Renaming project"
	case OperationImagesExpire:
		return "Cleaning up expired images"
	case OperationImagesPruneLeftover:
		return "Pruning leftover image files"
	case OperationImagesUpdate:
		return "Updating images"
	case OperationImagesSynchronize:
		return "Synchronizing images"
	case OperationLogsExpire:
		return "Expiring log files"
	case OperationInstanceTypesUpdate:
		return "Updating instance types"
	case OperationBackupsExpire:
		return "Cleaning up expired backups"
	case OperationSnapshotsExpire:
		return "Cleaning up expired snapshots"
	default:
		return "Executing operation"
	}
}
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/russian/step3.go#L9-L19
func step3(word *snowballword.SnowballWord) bool { // Search for a DERIVATIONAL ending in R2 (i.e. the entire // ending must lie in R2), and if one is found, remove it. suffix, _ := word.RemoveFirstSuffixIn(word.R2start, "ост", "ость") if suffix != "" { return true } return false }
https://github.com/bitgoin/lyra2rev2/blob/bae9ad2043bb55facb14c4918e909f88a7d3ed84/lyra2.go#L298-L360
// reducedDuplexRowSetup performs Lyra2's row-setup duplexing: for each of the
// nCols columns it absorbs the wordwise sum of a 12-word block from rowIn
// (M[prev]) and rowInOut (M[row*]) into the sponge state, applies the
// reduced-round Blake2b-Lyra permutation, writes M[prev][col] XOR state into
// rowOut (filled right-to-left), and XORs a one-word-rotated copy of the
// state back into rowInOut. The body is deliberately unrolled 12-wide
// (blockLenInt64 words per block).
func reducedDuplexRowSetup(state []uint64, rowIn []uint64, rowInOut []uint64, rowOut []uint64, nCols int) {
	ptrIn := 0
	ptrInOut := 0
	// rowOut is written from its last block backwards.
	ptrOut := (nCols - 1) * blockLenInt64
	for i := 0; i < nCols; i++ {
		ptrWordIn := rowIn[ptrIn:]          //In Lyra2: pointer to prev
		ptrWordOut := rowOut[ptrOut:]       //In Lyra2: pointer to row
		ptrWordInOut := rowInOut[ptrInOut:] //In Lyra2: pointer to row

		//Absorbing "M[prev] [+] M[row*]"
		state[0] ^= (ptrWordIn[0] + ptrWordInOut[0])
		state[1] ^= (ptrWordIn[1] + ptrWordInOut[1])
		state[2] ^= (ptrWordIn[2] + ptrWordInOut[2])
		state[3] ^= (ptrWordIn[3] + ptrWordInOut[3])
		state[4] ^= (ptrWordIn[4] + ptrWordInOut[4])
		state[5] ^= (ptrWordIn[5] + ptrWordInOut[5])
		state[6] ^= (ptrWordIn[6] + ptrWordInOut[6])
		state[7] ^= (ptrWordIn[7] + ptrWordInOut[7])
		state[8] ^= (ptrWordIn[8] + ptrWordInOut[8])
		state[9] ^= (ptrWordIn[9] + ptrWordInOut[9])
		state[10] ^= (ptrWordIn[10] + ptrWordInOut[10])
		state[11] ^= (ptrWordIn[11] + ptrWordInOut[11])

		//Applies the reduced-round transformation f to the sponge's state
		reducedBlake2bLyra(state)

		//M[row][col] = M[prev][col] XOR rand
		ptrWordOut[0] = ptrWordIn[0] ^ state[0]
		ptrWordOut[1] = ptrWordIn[1] ^ state[1]
		ptrWordOut[2] = ptrWordIn[2] ^ state[2]
		ptrWordOut[3] = ptrWordIn[3] ^ state[3]
		ptrWordOut[4] = ptrWordIn[4] ^ state[4]
		ptrWordOut[5] = ptrWordIn[5] ^ state[5]
		ptrWordOut[6] = ptrWordIn[6] ^ state[6]
		ptrWordOut[7] = ptrWordIn[7] ^ state[7]
		ptrWordOut[8] = ptrWordIn[8] ^ state[8]
		ptrWordOut[9] = ptrWordIn[9] ^ state[9]
		ptrWordOut[10] = ptrWordIn[10] ^ state[10]
		ptrWordOut[11] = ptrWordIn[11] ^ state[11]

		//M[row*][col] = M[row*][col] XOR rotW(rand)
		// (rotW rotates the state by one 64-bit word: word j gets state[j-1 mod 12])
		ptrWordInOut[0] ^= state[11]
		ptrWordInOut[1] ^= state[0]
		ptrWordInOut[2] ^= state[1]
		ptrWordInOut[3] ^= state[2]
		ptrWordInOut[4] ^= state[3]
		ptrWordInOut[5] ^= state[4]
		ptrWordInOut[6] ^= state[5]
		ptrWordInOut[7] ^= state[6]
		ptrWordInOut[8] ^= state[7]
		ptrWordInOut[9] ^= state[8]
		ptrWordInOut[10] ^= state[9]
		ptrWordInOut[11] ^= state[10]

		//Inputs: next column (i.e., next block in sequence)
		ptrInOut += blockLenInt64
		ptrIn += blockLenInt64
		//Output: goes to previous column
		ptrOut -= blockLenInt64
	}
}
https://github.com/ant0ine/go-json-rest/blob/ebb33769ae013bd5f518a8bac348c310dea768b8/rest/auth_basic.go#L30-L75
func (mw *AuthBasicMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { if mw.Realm == "" { log.Fatal("Realm is required") } if mw.Authenticator == nil { log.Fatal("Authenticator is required") } if mw.Authorizator == nil { mw.Authorizator = func(userId string, request *Request) bool { return true } } return func(writer ResponseWriter, request *Request) { authHeader := request.Header.Get("Authorization") if authHeader == "" { mw.unauthorized(writer) return } providedUserId, providedPassword, err := mw.decodeBasicAuthHeader(authHeader) if err != nil { Error(writer, "Invalid authentication", http.StatusBadRequest) return } if !mw.Authenticator(providedUserId, providedPassword) { mw.unauthorized(writer) return } if !mw.Authorizator(providedUserId, request) { mw.unauthorized(writer) return } request.Env["REMOTE_USER"] = providedUserId handler(writer, request) } }
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/goav2gen/api.go#L33-L43
// NewAPIAnalyzer returns an initialized API analyzer for the given API
// version, client name, parsed doc, and attribute/type override tables.
func NewAPIAnalyzer(version, clientName string, doc *Doc, attrOverrides, typeOverrides map[string]string) *APIAnalyzer {
	analyzer := &APIAnalyzer{
		Doc:           doc,
		Version:       version,
		ClientName:    clientName,
		AttrOverrides: attrOverrides,
		TypeOverrides: typeOverrides,
		refByType:     map[string]string{},
	}
	return analyzer
}
https://github.com/TheThingsNetwork/go-utils/blob/aa2a11bd59104d2a8609328c2b2b55da61826470/influx/writer.go#L155-L171
func NewBatchingWriter(log ttnlog.Interface, w BatchPointsWriter, opts ...BatchingWriterOption) *BatchingWriter { bw := &BatchingWriter{ log: log, writer: w, scalingInterval: DefaultScalingInterval, limit: DefaultInstanceLimit, pointChans: make(map[influxdb.BatchPointsConfig]chan *batchPoint), } for _, opt := range opts { opt(bw) } bw.log = bw.log.WithFields(ttnlog.Fields{ "limit": bw.limit, "scalingInterval": bw.scalingInterval, }) return bw }
https://github.com/op/go-logging/blob/970db520ece77730c7e4724c61121037378659d9/logger.go#L233-L235
// Noticef logs a formatted message at NOTICE level. The format string is
// passed by pointer to the internal log call (the logger's own convention).
func (l *Logger) Noticef(format string, args ...interface{}) {
	l.log(NOTICE, &format, args...)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/page.go#L59-L68
func (p *AddScriptToEvaluateOnNewDocumentParams) Do(ctx context.Context) (identifier ScriptIdentifier, err error) { // execute var res AddScriptToEvaluateOnNewDocumentReturns err = cdp.Execute(ctx, CommandAddScriptToEvaluateOnNewDocument, p, &res) if err != nil { return "", err } return res.Identifier, nil }
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L6610-L6617
func (r *NetworkOptionGroup) Locator(api *API) *NetworkOptionGroupLocator { for _, l := range r.Links { if l["rel"] == "self" { return api.NetworkOptionGroupLocator(l["href"]) } } return nil }
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/language/go/fix.go#L40-L52
func migrateLibraryEmbed(c *config.Config, f *rule.File) { for _, r := range f.Rules { if !isGoRule(r.Kind()) { continue } libExpr := r.Attr("library") if libExpr == nil || rule.ShouldKeep(libExpr) || r.Attr("embed") != nil { continue } r.DelAttr("library") r.SetAttr("embed", &bzl.ListExpr{List: []bzl.Expr{libExpr}}) } }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L7601-L7605
func (v AddScriptToEvaluateOnNewDocumentParams) MarshalJSON() ([]byte, error) { w := jwriter.Writer{} easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage84(&w, v) return w.Buffer.BuildBytes(), w.Error }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L1939-L1955
func (c *Client) FindIssues(query, sort string, asc bool) ([]Issue, error) { c.log("FindIssues", query) path := fmt.Sprintf("/search/issues?q=%s", url.QueryEscape(query)) if sort != "" { path += "&sort=" + url.QueryEscape(sort) if asc { path += "&order=asc" } } var issSearchResult IssuesSearchResult _, err := c.request(&request{ method: http.MethodGet, path: path, exitCodes: []int{200}, }, &issSearchResult) return issSearchResult.Issues, err }
https://github.com/skyrings/skyring-common/blob/d1c0bb1cbd5ed8438be1385c85c4f494608cde1e/dbprovider/mongodb/user.go#L39-L49
func (m MongoDb) Users(filter interface{}) (us []models.User, e error) { c := m.Connect(models.COLL_NAME_USER) defer m.Close(c) err := c.Find(filter).All(&us) if err != nil { logger.Get().Error("Error getting record from DB. error: %v", err) return us, mkmgoerror(err.Error()) } return us, nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/releasenote/releasenote.go#L325-L331
func getReleaseNote(body string) string { potentialMatch := noteMatcherRE.FindStringSubmatch(body) if potentialMatch == nil { return "" } return strings.TrimSpace(potentialMatch[1]) }
https://github.com/omniscale/go-mapnik/blob/710dfcc5e486e5760d0a5c46be909d91968e1ffb/mapnik.go#L108-L114
// NewSized allocates a mapnik map object with the given pixel dimensions.
func NewSized(width, height int) *Map {
	result := &Map{
		m:      C.mapnik_map(C.uint(width), C.uint(height)),
		width:  width,
		height: height,
	}
	return result
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L2612-L2630
func (c *Client) GetRepoProjects(owner, repo string) ([]Project, error) { c.log("GetOrgProjects", owner, repo) path := (fmt.Sprintf("/repos/%s/%s/projects", owner, repo)) var projects []Project err := c.readPaginatedResults( path, "application/vnd.github.inertia-preview+json", func() interface{} { return &[]Project{} }, func(obj interface{}) { projects = append(projects, *(obj.(*[]Project))...) }, ) if err != nil { return nil, err } return projects, nil }
https://github.com/jhillyerd/enmime/blob/874cc30e023f36bd1df525716196887b0f04851b/envelope.go#L159-L166
func ReadEnvelope(r io.Reader) (*Envelope, error) { // Read MIME parts from reader root, err := ReadParts(r) if err != nil { return nil, errors.WithMessage(err, "Failed to ReadParts") } return EnvelopeFromPart(root) }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/transport.go#L170-L182
func (t *transport) AppendEntries(id raft.ServerID, target raft.ServerAddress, args *raft.AppendEntriesRequest, resp *raft.AppendEntriesResponse) error { ae := appendEntries{ source: t.node, target: target, firstIndex: firstIndex(args), lastIndex: lastIndex(args), commitIndex: args.LeaderCommitIndex, } if len(t.ae) < cap(t.ae) { t.ae = append(t.ae, ae) } return t.sendRPC(string(target), args, resp) }
https://github.com/blacklabeldata/namedtuple/blob/c341f1db44f30b8164294aa8605ede42be604aba/integers.go#L38-L63
func (b *TupleBuilder) PutInt8(field string, value int8) (wrote uint64, err error) { // field type should be if err = b.typeCheck(field, Int8Field); err != nil { return 0, err } // minimum bytes is 2 (type code + value) if b.available() < 2 { return 0, xbinary.ErrOutOfRange } // write type code b.buffer[b.pos] = byte(Int8Code.OpCode) // write value b.buffer[b.pos+1] = byte(value) // set field offset b.offsets[field] = b.pos // incr pos b.pos += 2 return 2, nil }
https://github.com/coryb/figtree/blob/e5fa026ccd54e0a6a99b6d81f73bfcc8e6fe6a6b/gen-rawoption.go#L3918-L3923
func (o *ListUint32Option) Set(value string) error { val := Uint32Option{} val.Set(value) *o = append(*o, val) return nil }
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/hashtree/cache.go#L19-L36
func NewCache(size int) (*Cache, error) { c, err := lru.NewWithEvict(size, func(key interface{}, value interface{}) { go func() { tree, ok := value.(*dbHashTree) if !ok { logrus.Infof("non hashtree slice value of type: %v", reflect.TypeOf(value)) return } if err := tree.Destroy(); err != nil { logrus.Infof("failed to destroy hashtree: %v", err) } }() }) if err != nil { return nil, err } return &Cache{c}, nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/types.go#L492-L502
func (t *ScreencastFormat) UnmarshalEasyJSON(in *jlexer.Lexer) { switch ScreencastFormat(in.String()) { case ScreencastFormatJpeg: *t = ScreencastFormatJpeg case ScreencastFormatPng: *t = ScreencastFormatPng default: in.AddError(errors.New("unknown ScreencastFormat value")) } }
https://github.com/ant0ine/go-json-rest/blob/ebb33769ae013bd5f518a8bac348c310dea768b8/rest/access_log_apache.go#L191-L196
func (u *accessLogUtil) StartTime() *time.Time { if u.R.Env["START_TIME"] != nil { return u.R.Env["START_TIME"].(*time.Time) } return nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L1796-L1800
func (v *QuerySelectorReturns) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjsonC5a4559bDecodeGithubComChromedpCdprotoDom19(&r, v) return r.Error() }
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/ranges/ranges.go#L224-L244
func (r *InclusiveRange) Value(idx int) (int, error) { if idx < 0 { return 0, fmt.Errorf("Index %d is not >= 0", idx) } // Calculate the value as an offset from the start start := r.Start() end := r.End() step := r.Step() val := start + (step * idx) if start <= end && (val < start || val > end) { return 0, fmt.Errorf("Index %d exceeds max index of %d", idx, r.Len()-1) } else if end < start && (val > start || val < end) { return 0, fmt.Errorf("Index %d exceeds max index of %d", idx, r.Len()-1) } return val, nil }
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/surrogate_gossiper.go#L88-L94
func (d *surrogateGossipData) Merge(other GossipData) GossipData { o := other.(*surrogateGossipData) messages := make([][]byte, 0, len(d.messages)+len(o.messages)) messages = append(messages, d.messages...) messages = append(messages, o.messages...) return &surrogateGossipData{messages: messages} }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/har/easyjson.go#L2401-L2405
func (v *CacheData) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjsonC5a4559bDecodeGithubComChromedpCdprotoHar14(&r, v) return r.Error() }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L2741-L2745
func (v *MoveToParams) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjsonC5a4559bDecodeGithubComChromedpCdprotoDom30(&r, v) return r.Error() }
https://github.com/Knetic/govaluate/blob/9aa49832a739dcd78a5542ff189fb82c3e423116/parsing.go#L473-L504
func tryParseTime(candidate string) (time.Time, bool) { var ret time.Time var found bool timeFormats := [...]string{ time.ANSIC, time.UnixDate, time.RubyDate, time.Kitchen, time.RFC3339, time.RFC3339Nano, "2006-01-02", // RFC 3339 "2006-01-02 15:04", // RFC 3339 with minutes "2006-01-02 15:04:05", // RFC 3339 with seconds "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone "2006-01-02T15Z0700", // ISO8601 with hour "2006-01-02T15:04Z0700", // ISO8601 with minutes "2006-01-02T15:04:05Z0700", // ISO8601 with seconds "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds } for _, format := range timeFormats { ret, found = tryParseExactTime(candidate, format) if found { return ret, true } } return time.Now(), false }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domdebugger/easyjson.go#L740-L744
func (v *GetEventListenersReturns) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomdebugger8(&r, v) return r.Error() }
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/storage/storage_reference.go#L42-L52
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { repo := ref.Name() for _, name := range image.Names { if named, err := reference.ParseNormalizedNamed(name); err == nil { if named.Name() == repo { return true } } } return false }
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/manifest/docker_schema1.go#L55-L70
func Schema1FromManifest(manifest []byte) (*Schema1, error) { s1 := Schema1{} if err := json.Unmarshal(manifest, &s1); err != nil { return nil, err } if s1.SchemaVersion != 1 { return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) } if err := s1.initialize(); err != nil { return nil, err } if err := s1.fixManifestLayers(); err != nil { return nil, err } return &s1, nil }
https://github.com/enaml-ops/enaml/blob/4f847ee10b41afca41fe09fa839cb2f6ade06fb5/pull/releasepull.go#L26-L36
func (r *Release) Read(releaseLocation string) (io.ReadCloser, error) { local, err := r.Pull(releaseLocation) if err != nil { return nil, err } rr, err := os.Open(local) if err != nil { return nil, err } return rr, nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/experiment/resultstore/convert.go#L108-L233
// convert translates a downloaded GCS job result (started/finished metadata,
// artifact URLs, and parsed suite metadata) into a resultstore Invocation,
// Target, and Test. Artifacts under "artifacts/" belong to the test suite;
// everything else (started.json, etc.) belongs to the invocation; the build
// log becomes both the invocation log and the target log.
func convert(project, details string, url gcs.Path, result downloadResult) (resultstore.Invocation, resultstore.Target, resultstore.Test) {
	started := result.started
	finished := result.finished
	artifacts := result.artifactURLs

	basePath := trailingSlash(url.String())
	artifactsPath := basePath + "artifacts/"
	buildLog := basePath + "build-log.txt"
	bucket := url.Bucket()

	inv := resultstore.Invocation{
		Project: project,
		Details: details,
		Files: []resultstore.File{
			{
				ID:          resultstore.InvocationLog,
				ContentType: "text/plain",
				URL:         buildLog, // ensure build-log.txt appears as the invocation log
			},
		},
	}

	// Files need a unique identifier, trim the common prefix and provide this.
	uniqPath := func(s string) string { return strings.TrimPrefix(s, basePath) }

	// Rebuild each artifact entry as a fully-qualified gs:// URL.
	for i, a := range artifacts {
		artifacts[i] = "gs://" + bucket + "/" + a
	}

	for _, a := range artifacts {
		// add started.json, etc to the invocation artifact list.
		if strings.HasPrefix(a, artifactsPath) {
			continue // things under artifacts/ are owned by the test
		}
		if a == buildLog {
			continue // Handle this in InvocationLog
		}
		inv.Files = append(inv.Files, resultstore.File{
			ID:          uniqPath(a),
			ContentType: "text/plain",
			URL:         a,
		})
	}

	if started.Timestamp > 0 {
		inv.Start = time.Unix(started.Timestamp, 0)
		if finished.Timestamp != nil {
			inv.Duration = time.Duration(*finished.Timestamp-started.Timestamp) * time.Second
		}
	}

	const day = 24 * 60 * 60
	switch {
	case finished.Timestamp == nil && started.Timestamp < time.Now().Unix()+day:
		// NOTE(review): this condition holds for any start time in the past
		// (start < now+day), so the "Timed out" branch below appears
		// unreachable for unfinished jobs; the intent was presumably
		// started.Timestamp > time.Now().Unix()-day (started within the last
		// day). Confirm before changing.
		inv.Status = resultstore.Running
		inv.Description = "In progress..."
	case finished.Passed != nil && *finished.Passed:
		inv.Status = resultstore.Passed
		inv.Description = "Passed"
	case finished.Timestamp == nil:
		inv.Status = resultstore.Failed
		inv.Description = "Timed out"
	default:
		inv.Status = resultstore.Failed
		inv.Description = "Failed"
	}

	test := resultstore.Test{
		Action: resultstore.Action{
			Node: started.Node,
		},
		Suite: resultstore.Suite{
			Name: "test",
			Files: []resultstore.File{
				{
					ID:          resultstore.TargetLog,
					ContentType: "text/plain",
					URL:         buildLog, // ensure build-log.txt appears as the target log.
				},
			},
		},
	}

	// Fold each parsed junit suite into the top-level suite.
	for _, suiteMeta := range result.suiteMetas {
		child := convertSuiteMeta(suiteMeta)
		test.Suite.Suites = append(test.Suite.Suites, child)
		test.Suite.Files = append(test.Suite.Files, child.Files...)
	}

	for _, a := range artifacts {
		if !strings.HasPrefix(a, artifactsPath) {
			continue // Non-artifacts (started.json, etc) are owned by the invocation
		}
		if a == buildLog {
			continue // Already in the list.
		}
		// TODO(fejta): use set.Strings instead
		var found bool
		for _, sm := range result.suiteMetas {
			if sm.Path == a {
				found = true
				break
			}
		}
		if found {
			continue
		}
		test.Suite.Files = append(test.Suite.Files, resultstore.File{
			ID:          uniqPath(a),
			ContentType: "text/plain",
			URL:         a,
		})
	}

	// Mirror the invocation's timing/status onto the test and target.
	test.Suite.Start = inv.Start
	test.Action.Start = inv.Start
	test.Suite.Duration = inv.Duration
	test.Action.Duration = inv.Duration
	test.Status = inv.Status
	test.Description = inv.Description

	target := resultstore.Target{
		Start:       inv.Start,
		Duration:    inv.Duration,
		Status:      inv.Status,
		Description: inv.Description,
	}

	return inv, target, test
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/sync/sync.go#L146-L196
// Pull mirrors file from repo@commit into root on the local filesystem.
// Directories are created synchronously; file contents are either exposed as
// named pipes (pipes=true), written as empty placeholders (emptyFiles=true),
// or downloaded concurrently with at most `concurrency` transfers in flight.
// When statsTree is non-nil, the pulled hierarchy (rooted at statsRoot) is
// recorded into it, including per-file block references.
func (p *Puller) Pull(client *pachclient.APIClient, root string, repo, commit, file string, pipes bool, emptyFiles bool, concurrency int, statsTree *hashtree.Ordered, statsRoot string) error {
	limiter := limit.New(concurrency)
	var eg errgroup.Group
	if err := client.Walk(repo, commit, file, func(fileInfo *pfs.FileInfo) error {
		// Path of this entry relative to the pulled root.
		basepath, err := filepath.Rel(file, fileInfo.File.Path)
		if err != nil {
			return err
		}
		if statsTree != nil {
			// Record the entry (and, for files, its block refs) in the
			// stats tree.
			statsPath := filepath.Join(statsRoot, basepath)
			if fileInfo.FileType == pfs.FileType_DIR {
				statsTree.PutDir(statsPath)
			} else {
				var blockRefs []*pfs.BlockRef
				for _, object := range fileInfo.Objects {
					objectInfo, err := client.InspectObject(object.Hash)
					if err != nil {
						return err
					}
					blockRefs = append(blockRefs, objectInfo.BlockRef)
				}
				blockRefs = append(blockRefs, fileInfo.BlockRefs...)
				statsTree.PutFile(statsPath, fileInfo.Hash, int64(fileInfo.SizeBytes), &hashtree.FileNodeProto{BlockRefs: blockRefs})
			}
		}
		path := filepath.Join(root, basepath)
		if fileInfo.FileType == pfs.FileType_DIR {
			return os.MkdirAll(path, 0700)
		}
		if pipes {
			// Lazily serve content through a named pipe instead of
			// downloading it now.
			return p.makePipe(path, func(w io.Writer) error {
				return client.GetFile(repo, commit, fileInfo.File.Path, 0, 0, w)
			})
		}
		if emptyFiles {
			// Create the file but write no content.
			return p.makeFile(path, func(w io.Writer) error { return nil })
		}
		// Download concurrently; the limiter bounds in-flight transfers.
		eg.Go(func() (retErr error) {
			limiter.Acquire()
			defer limiter.Release()
			return p.makeFile(path, func(w io.Writer) error {
				return client.GetFile(repo, commit, fileInfo.File.Path, 0, 0, w)
			})
		})
		return nil
	}); err != nil {
		return err
	}
	// Wait for all downloads started above to finish.
	return eg.Wait()
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/sys/apparmor.go#L78-L118
func appArmorCanStack() bool { contentBytes, err := ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/stack") if err != nil { return false } if string(contentBytes) != "yes\n" { return false } contentBytes, err = ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/version") if err != nil { return false } content := string(contentBytes) parts := strings.Split(strings.TrimSpace(content), ".") if len(parts) == 0 { logger.Warn("Unknown apparmor domain version", log.Ctx{"version": content}) return false } major, err := strconv.Atoi(parts[0]) if err != nil { logger.Warn("Unknown apparmor domain version", log.Ctx{"version": content}) return false } minor := 0 if len(parts) == 2 { minor, err = strconv.Atoi(parts[1]) if err != nil { logger.Warn("Unknown apparmor domain version", log.Ctx{"version": content}) return false } } return major >= 1 && minor >= 2 }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/token-counter/token-counter.go#L97-L111
func CreateTokenHandlers(tokenFiles []string, influxdb *InfluxDB) ([]TokenHandler, error) { tokens := []TokenHandler{} for _, tokenFile := range tokenFiles { f, err := os.Open(tokenFile) if err != nil { return nil, fmt.Errorf("Can't open token-file (%s): %s", tokenFile, err) } token, err := CreateTokenHandler(f, influxdb) if err != nil { return nil, fmt.Errorf("Failed to create token (%s): %s", tokenFile, err) } tokens = append(tokens, *token) } return tokens, nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/webaudio/easyjson.go#L160-L164
// UnmarshalJSON supports json.Unmarshaler interface (easyjson-generated
// decoder; do not edit by hand).
func (v *GetRealtimeDataParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoWebaudio1(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/types.go#L203-L205
// UnmarshalJSON satisfies json.Unmarshaler by delegating to the
// easyjson-generated decoder for DialogType.
func (t *DialogType) UnmarshalJSON(buf []byte) error {
	return easyjson.Unmarshal(buf, t)
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L3500-L3502
// IdentityProviderLocator builds a locator for the identity provider
// resource at the given href.
func (api *API) IdentityProviderLocator(href string) *IdentityProviderLocator {
	locator := &IdentityProviderLocator{Href(href), api}
	return locator
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/worker/api_server.go#L2202-L2216
func mergeStats(x, y *pps.ProcessStats) error { var err error if x.DownloadTime, err = plusDuration(x.DownloadTime, y.DownloadTime); err != nil { return err } if x.ProcessTime, err = plusDuration(x.ProcessTime, y.ProcessTime); err != nil { return err } if x.UploadTime, err = plusDuration(x.UploadTime, y.UploadTime); err != nil { return err } x.DownloadBytes += y.DownloadBytes x.UploadBytes += y.UploadBytes return nil }
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/archive/src.go#L17-L29
func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { if ref.destinationRef != nil { logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") } src, err := tarfile.NewSourceFromFile(ref.path) if err != nil { return nil, err } return &archiveImageSource{ Source: src, ref: ref, }, nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L1223-L1227
// MarshalJSON supports json.Marshaler interface (easyjson-generated
// encoder; do not edit by hand).
func (v SetBreakpointOnFunctionCallParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoDebugger13(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/rule/rule.go#L693-L696
func (r *Rule) DelAttr(key string) { delete(r.attrs, key) r.updated = true }
https://github.com/coryb/figtree/blob/e5fa026ccd54e0a6a99b6d81f73bfcc8e6fe6a6b/gen-rawoption.go#L4384-L4389
func (o *ListUint8Option) Set(value string) error { val := Uint8Option{} val.Set(value) *o = append(*o, val) return nil }
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dpdf/gc.go#L364-L379
// Restore pops the graphic state stack and re-applies the restored state to
// the underlying PDF writer. Several items (font data, line dash, path,
// font) are deliberately NOT restored because the PDF backend does not
// support them — do not re-enable the commented lines.
func (gc *GraphicContext) Restore() {
	gc.pdf.TransformEnd()
	gc.StackGraphicContext.Restore()
	c := gc.Current
	gc.SetFontSize(c.FontSize)
	// gc.SetFontData(c.FontData) unsupported, causes bug (do not enable)
	gc.SetLineWidth(c.LineWidth)
	gc.SetStrokeColor(c.StrokeColor)
	gc.SetFillColor(c.FillColor)
	gc.SetFillRule(c.FillRule)
	// gc.SetLineDash(c.Dash, c.DashOffset) // TODO
	gc.SetLineCap(c.Cap)
	gc.SetLineJoin(c.Join)
	// c.Path unsupported
	// c.Font unsupported
}
https://github.com/profitbricks/profitbricks-sdk-go/blob/1d2db5f00bf5dd0b6c29273541c71c60cdf4d4d4/request.go#L88-L93
func (c *Client) GetRequestStatus(path string) (*RequestStatus, error) { url := path + `?depth=` + c.client.depth + `&pretty=` + strconv.FormatBool(c.client.pretty) ret := &RequestStatus{} err := c.client.GetRequestStatus(url, ret, http.StatusOK) return ret, err }
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/admin.go#L114-L138
// RestoreReader restores a cluster from backup data read from r. Ops are
// decoded one at a time and streamed to the admin Restore RPC. The stream is
// always closed via the deferred CloseAndRecv; a close error is surfaced
// only when no earlier error has already been recorded in retErr.
func (c APIClient) RestoreReader(r io.Reader) (retErr error) {
	restoreClient, err := c.AdminAPIClient.Restore(c.Ctx())
	if err != nil {
		return grpcutil.ScrubGRPC(err)
	}
	defer func() {
		// Always finalize the stream; keep the first error seen.
		if _, err := restoreClient.CloseAndRecv(); err != nil && retErr == nil {
			retErr = grpcutil.ScrubGRPC(err)
		}
	}()
	reader := pbutil.NewReader(r)
	op := &admin.Op{}
	for {
		// io.EOF marks a clean end of the backup stream.
		if err := reader.Read(op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := restoreClient.Send(&admin.RestoreRequest{Op: op}); err != nil {
			return grpcutil.ScrubGRPC(err)
		}
	}
	return nil
}
https://github.com/golang/debug/blob/19561fee47cf8cd0400d1b094c5898002f97cf90/internal/gocore/module.go#L58-L86
// readFunc parses a runtime._func located at region r, using the module's
// pcln table, into a *Func. The pcdata and funcdata arrays are laid out in
// memory immediately after the fixed-size _func struct.
func (m *module) readFunc(r region, pcln region) *Func {
	f := &Func{module: m, r: r}
	f.entry = core.Address(r.Field("entry").Uintptr())
	// nameoff and pcsp are byte offsets into the pcln table.
	f.name = r.p.proc.ReadCString(pcln.SliceIndex(int64(r.Field("nameoff").Int32())).a)
	f.frameSize.read(r.p.proc, pcln.SliceIndex(int64(r.Field("pcsp").Int32())).a)

	// Parse pcdata and funcdata, which are laid out beyond the end of the _func.
	a := r.a.Add(int64(r.p.findType("runtime._func").Size))
	n := r.Field("npcdata").Int32()
	for i := int32(0); i < n; i++ {
		// Each pcdata entry is a 4-byte offset.
		f.pcdata = append(f.pcdata, r.p.proc.ReadInt32(a))
		a = a.Add(4)
	}
	// funcdata entries are pointer-aligned and pointer-sized.
	a = a.Align(r.p.proc.PtrSize())
	n = r.Field("nfuncdata").Int32()
	for i := int32(0); i < n; i++ {
		f.funcdata = append(f.funcdata, r.p.proc.ReadPtr(a))
		a = a.Add(r.p.proc.PtrSize())
	}

	// Read pcln tables we need.
	if stackmap := int(r.p.rtConstants["_PCDATA_StackMapIndex"]); stackmap < len(f.pcdata) {
		f.stackMap.read(r.p.proc, pcln.SliceIndex(int64(f.pcdata[stackmap])).a)
	} else {
		f.stackMap.setEmpty()
	}

	return f
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/pkg/io/opener.go#L101-L110
func (o opener) Reader(ctx context.Context, path string) (io.ReadCloser, error) { g, err := o.openGCS(path) if err != nil { return nil, fmt.Errorf("bad gcs path: %v", err) } if g == nil { return os.Open(path) } return g.NewReader(ctx) }
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/config/config.go#L105-L116
func (c *Config) Clone() *Config { cc := *c cc.Exts = make(map[string]interface{}) for k, v := range c.Exts { cc.Exts[k] = v } cc.KindMap = make(map[string]MappedKind) for k, v := range c.KindMap { cc.KindMap[k] = v } return &cc }
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/snowballword/snowballword.go#L75-L89
func (w *SnowballWord) ReplaceSuffixRunes(suffixRunes []rune, replacementRunes []rune, force bool) bool { if force || w.HasSuffixRunes(suffixRunes) { lenWithoutSuffix := len(w.RS) - len(suffixRunes) w.RS = append(w.RS[:lenWithoutSuffix], replacementRunes...) // If R, R2, & RV are now beyond the length // of the word, they are set to the length // of the word. Otherwise, they are left // as they were. w.resetR1R2() return true } return false }
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/cmd/autogazelle/autogazelle.go#L120-L132
func restoreBuildFilesInRepo() { err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { if err != nil { log.Print(err) return nil } restoreBuildFilesInDir(path) return nil }) if err != nil { log.Print(err) } }
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/images.go#L158-L191
// ImageSourceGetCachedFingerprint returns the fingerprint of the most
// recently created auto-updating image cached from the given
// server/protocol/alias source. It returns ErrNoSuchObject when no matching
// image exists and an error for an unknown protocol name.
func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string) (string, error) {
	// Map the protocol name to the integer ID stored in the database.
	protocolInt := -1
	for protoInt, protoString := range ImageSourceProtocol {
		if protoString == protocol {
			protocolInt = protoInt
		}
	}
	if protocolInt == -1 {
		return "", fmt.Errorf("Invalid protocol: %s", protocol)
	}
	// Most recent matching entry wins (ORDER BY creation_date DESC).
	q := `SELECT images.fingerprint FROM images_source INNER JOIN images ON images_source.image_id=images.id WHERE server=? AND protocol=? AND alias=? AND auto_update=1 ORDER BY creation_date DESC`
	fingerprint := ""
	arg1 := []interface{}{server, protocolInt, alias}
	arg2 := []interface{}{&fingerprint}
	err := dbQueryRowScan(c.db, q, arg1, arg2)
	if err != nil {
		// Translate "no rows" into the package-level sentinel.
		if err == sql.ErrNoRows {
			return "", ErrNoSuchObject
		}
		return "", err
	}
	return fingerprint, nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pod-utils/gcs/upload.go#L98-L107
func DataUploadWithMetadata(src io.Reader, metadata map[string]string) UploadFunc { return func(obj *storage.ObjectHandle) error { writer := obj.NewWriter(context.Background()) writer.Metadata = metadata _, copyErr := io.Copy(writer, src) closeErr := writer.Close() return errorutil.NewAggregate(copyErr, closeErr) } }
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/container_metadata.go#L266-L317
// containerMetadataTemplatesDelete handles DELETE requests for a container
// metadata template. The template to remove is named by the "path" form
// value; the request is forwarded when the container lives on another node.
func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response {
	project := projectParam(r)
	name := mux.Vars(r)["name"]

	// Handle requests targeted to a container on a different node
	response, err := ForwardedResponseIfContainerIsRemote(d, r, project, name)
	if err != nil {
		return SmartError(err)
	}
	if response != nil {
		return response
	}

	// Load the container
	c, err := containerLoadByProjectAndName(d.State(), project, name)
	if err != nil {
		return SmartError(err)
	}

	// Start the storage if needed; stop it again only if we started it.
	ourStart, err := c.StorageStart()
	if err != nil {
		return SmartError(err)
	}
	if ourStart {
		defer c.StorageStop()
	}

	// Look at the request
	templateName := r.FormValue("path")
	if templateName == "" {
		return BadRequest(fmt.Errorf("missing path argument"))
	}

	templatePath, err := getContainerTemplatePath(c, templateName)
	if err != nil {
		return SmartError(err)
	}

	if !shared.PathExists(templatePath) {
		return NotFound(fmt.Errorf("Path '%s' not found", templatePath))
	}

	// Delete the template
	err = os.Remove(templatePath)
	if err != nil {
		return InternalError(err)
	}

	return EmptySyncResponse
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/obj/obj.go#L405-L425
func NewAmazonClientFromEnv() (Client, error) { region, ok := os.LookupEnv(AmazonRegionEnvVar) if !ok { return nil, fmt.Errorf("%s not found", AmazonRegionEnvVar) } bucket, ok := os.LookupEnv(AmazonBucketEnvVar) if !ok { return nil, fmt.Errorf("%s not found", AmazonBucketEnvVar) } var creds AmazonCreds creds.ID, _ = os.LookupEnv(AmazonIDEnvVar) creds.Secret, _ = os.LookupEnv(AmazonSecretEnvVar) creds.Token, _ = os.LookupEnv(AmazonTokenEnvVar) creds.VaultAddress, _ = os.LookupEnv(AmazonVaultAddrEnvVar) creds.VaultRole, _ = os.LookupEnv(AmazonVaultRoleEnvVar) creds.VaultToken, _ = os.LookupEnv(AmazonVaultTokenEnvVar) distribution, _ := os.LookupEnv(AmazonDistributionEnvVar) return NewAmazonClient(region, bucket, &creds, distribution) }
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/profiles.go#L414-L447
// profileDelete handles DELETE requests for a profile. The built-in
// "default" profile can never be deleted, and a profile that is still in use
// by any entity is rejected. All checks and the deletion itself run inside a
// single cluster transaction.
func profileDelete(d *Daemon, r *http.Request) Response {
	project := projectParam(r)
	name := mux.Vars(r)["name"]

	if name == "default" {
		return Forbidden(errors.New("The 'default' profile cannot be deleted"))
	}

	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
		// Projects without the profiles feature fall back to the
		// "default" project's profiles.
		hasProfiles, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check project features")
		}
		if !hasProfiles {
			project = "default"
		}

		profile, err := tx.ProfileGet(project, name)
		if err != nil {
			return err
		}
		// Refuse deletion while anything still references the profile.
		if len(profile.UsedBy) > 0 {
			return fmt.Errorf("Profile is currently in use")
		}

		return tx.ProfileDelete(project, name)
	})
	if err != nil {
		return SmartError(err)
	}

	return EmptySyncResponse
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/worker/master.go#L766-L780
// deleteJob removes the etcd record for the given job inside the supplied
// STM transaction, first decrementing the owning pipeline's per-state job
// counter.
func (a *APIServer) deleteJob(stm col.STM, jobPtr *pps.EtcdJobInfo) error {
	pipelinePtr := &pps.EtcdPipelineInfo{}
	if err := a.pipelines.ReadWrite(stm).Update(jobPtr.Pipeline.Name, pipelinePtr, func() error {
		// Lazily initialize the counter map (older records may lack it).
		if pipelinePtr.JobCounts == nil {
			pipelinePtr.JobCounts = make(map[int32]int32)
		}
		// Guard against underflowing a counter that is already zero.
		if pipelinePtr.JobCounts[int32(jobPtr.State)] != 0 {
			pipelinePtr.JobCounts[int32(jobPtr.State)]--
		}
		return nil
	}); err != nil {
		return err
	}
	return a.jobs.ReadWrite(stm).Delete(jobPtr.Job.ID)
}
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/json/call.go#L165-L172
func CallSC(ctx Context, sc *tchannel.SubChannel, method string, arg, resp interface{}) error { call, err := sc.BeginCall(ctx, method, &tchannel.CallOptions{Format: tchannel.JSON}) if err != nil { return err } return wrapCall(ctx, call, method, arg, resp) }
https://github.com/intelsdi-x/gomit/blob/286c3ad6599724faed681cd18a9ff595b00d9f3f/event_controller.go#L104-L111
func (e *EventController) UnregisterHandler(n string) error { e.handlerMutex.Lock() delete(e.Handlers, n) e.handlerMutex.Unlock() return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/check.go#L106-L116
func NewCheckCommand() *cobra.Command { cc := &cobra.Command{ Use: "check <subcommand>", Short: "commands for checking properties of the etcd cluster", } cc.AddCommand(NewCheckPerfCommand()) cc.AddCommand(NewCheckDatascaleCommand()) return cc }
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm16/codegen_client.go#L391-L393
// Locator returns a locator for this deployment resource, built from its
// own href.
func (r *Deployment) Locator(api *API) *DeploymentLocator {
	href := r.Href
	return api.DeploymentLocator(href)
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dpdf/gc.go#L194-L198
// Stroke draws the given paths with the PDF "D" (draw/stroke) operation and
// then clears the current path.
func (gc *GraphicContext) Stroke(paths ...*draw2d.Path) {
	// Only the alpha component of the stroke color is passed along; the
	// color itself is part of the current graphic state.
	_, _, _, alphaS := gc.Current.StrokeColor.RGBA()
	gc.draw("D", alphaS, paths...)
	gc.Current.Path.Clear()
}
https://github.com/pandemicsyn/oort/blob/fca1d3baddc1d944387cc8bbe8b21f911ec9091b/oort/cmdctrl.go#L129-L144
// Exit shuts the server down under the command-control lock. If the server
// was already stopped it only stops the backend; otherwise it closes the
// control channel, stops the listener, waits for in-flight work, stops the
// backend, and marks the server stopped. shutdownFinished runs (via defer,
// after the lock is released is NOT the case — defers run LIFO, so it fires
// before the mutex unlock defer) on both paths.
func (o *Server) Exit() error {
	o.cmdCtrlLock.Lock()
	defer o.cmdCtrlLock.Unlock()
	if o.stopped {
		o.backend.Stop()
		defer o.shutdownFinished()
		return nil
	}
	close(o.ch)
	o.backend.StopListenAndServe()
	o.backend.Wait()
	o.backend.Stop()
	o.stopped = true
	defer o.shutdownFinished()
	return nil
}
https://github.com/xwb1989/sqlparser/blob/120387863bf27d04bc07db8015110a6e96d0146c/ast.go#L3220-L3227
func (node *SetExpr) Format(buf *TrackedBuffer) { // We don't have to backtick set variable names. if node.Name.EqualString("charset") || node.Name.EqualString("names") { buf.Myprintf("%s %v", node.Name.String(), node.Expr) } else { buf.Myprintf("%s = %v", node.Name.String(), node.Expr) } }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domsnapshot/easyjson.go#L1606-L1610
// UnmarshalJSON supports json.Unmarshaler interface (easyjson-generated
// decoder; do not edit by hand).
func (v *LayoutTreeSnapshot) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomsnapshot6(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L7281-L7285
// MarshalJSON supports json.Marshaler interface (easyjson-generated
// encoder; do not edit by hand).
func (v CaptureScreenshotParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage80(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/input/input.go#L286-L288
// Do executes the Input.dispatchTouchEvent CDP command with p as the
// parameters; the command returns no result payload.
func (p *DispatchTouchEventParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandDispatchTouchEvent, p, nil)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/arena.go#L108-L114
// getNode converts an arena offset into a *node pointer. Offset 0 is the
// reserved nil sentinel. The unsafe cast aliases the arena's backing buffer
// directly — NOTE(review): the returned pointer presumably stays valid only
// while s.buf is not reallocated; confirm against the arena's growth policy.
func (s *Arena) getNode(offset uint32) *node {
	if offset == 0 {
		return nil
	}

	return (*node)(unsafe.Pointer(&s.buf[offset]))
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/copy/copy.go#L435-L522
// copyLayers copies every layer of the source image to the destination,
// bounded by a semaphore when parallel copying is enabled, and records the
// resulting blob infos and diff IDs in ic.manifestUpdates.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
	srcInfos := ic.src.LayerInfos()
	numLayers := len(srcInfos)
	// The source may substitute its own layer list (e.g. for format
	// conversion); that requires permission to edit the manifest.
	updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
	if err != nil {
		return err
	}
	srcInfosUpdated := false
	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
		if !ic.canModifyManifest {
			return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
		}
		srcInfos = updatedSrcInfos
		srcInfosUpdated = true
	}

	type copyLayerData struct {
		destInfo types.BlobInfo
		diffID   digest.Digest
		err      error
	}

	// copyGroup is used to determine if all layers are copied
	copyGroup := sync.WaitGroup{}
	copyGroup.Add(numLayers)
	// copySemaphore is used to limit the number of parallel downloads to
	// avoid malicious images causing troubles and to be nice to servers.
	var copySemaphore *semaphore.Weighted
	if ic.c.copyInParallel {
		copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
	} else {
		copySemaphore = semaphore.NewWeighted(int64(1))
	}

	data := make([]copyLayerData, numLayers)
	copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
		defer copySemaphore.Release(1)
		defer copyGroup.Done()
		cld := copyLayerData{}
		if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
			// DiffIDs are, currently, needed only when converting from schema1.
			// In which case src.LayerInfos will not have URLs because schema1
			// does not support them.
			if ic.diffIDsAreNeeded {
				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
			} else {
				cld.destInfo = srcLayer
				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
			}
		} else {
			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
		}
		data[index] = cld
	}

	func() { // A scope for defer
		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
		defer progressCleanup()

		for i, srcLayer := range srcInfos {
			// NOTE(review): the error from Acquire (e.g. on context
			// cancellation) is ignored here — confirm whether a cancelled
			// ctx can let more goroutines start than intended.
			copySemaphore.Acquire(ctx, 1)
			go copyLayerHelper(i, srcLayer, progressPool)
		}

		// Wait for all layers to be copied
		copyGroup.Wait()
	}()

	destInfos := make([]types.BlobInfo, numLayers)
	diffIDs := make([]digest.Digest, numLayers)
	for i, cld := range data {
		if cld.err != nil {
			return cld.err
		}
		destInfos[i] = cld.destInfo
		diffIDs[i] = cld.diffID
	}

	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
	if ic.diffIDsAreNeeded {
		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	}
	// Only rewrite the manifest's layer list if something actually changed.
	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
		ic.manifestUpdates.LayerInfos = destInfos
	}
	return nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/worker/client.go#L47-L70
func Cancel(ctx context.Context, pipelineRcName string, etcdClient *etcd.Client, etcdPrefix string, workerGrpcPort uint16, jobID string, dataFilter []string) error { workerClients, err := Clients(ctx, pipelineRcName, etcdClient, etcdPrefix, workerGrpcPort) if err != nil { return err } success := false for _, workerClient := range workerClients { resp, err := workerClient.Cancel(ctx, &CancelRequest{ JobID: jobID, DataFilters: dataFilter, }) if err != nil { return err } if resp.Success { success = true } } if !success { return fmt.Errorf("datum matching filter %+v could not be found for jobID %s", dataFilter, jobID) } return nil }
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/sequence.go#L270-L275
func (s *FileSequence) Start() int { if s.frameSet == nil { return 0 } return s.frameSet.Start() }
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/callbacks.go#L254-L284
// DependentResourcesValidator returns a delete-only callback that rejects
// the deletion while any of the given models still reference the deleted
// model. pairs maps each dependent model to the field that holds the
// reference.
func DependentResourcesValidator(pairs map[coal.Model]string) *Callback {
	return C("DependentResourcesValidator", Only(Delete), func(ctx *Context) error {
		// check all relations
		for model, field := range pairs {
			// prepare query
			query := bson.M{coal.F(model, field): ctx.Model.ID()}

			// exclude soft deleted documents if supported
			if sdm := coal.L(model, "fire-soft-delete", false); sdm != "" {
				query[coal.F(model, sdm)] = nil
			}

			// count referencing documents (Limit(1): existence is enough)
			ctx.Tracer.Push("mgo/Query.Count")
			ctx.Tracer.Tag("query", query)
			n, err := ctx.Store.DB().C(coal.C(model)).Find(query).Limit(1).Count()
			if err != nil {
				// NOTE(review): the tracer span pushed above is not popped
				// on this error path — confirm whether that leaks a span.
				return err
			}
			ctx.Tracer.Pop()

			// return err if documents are found
			if n != 0 {
				return E("resource has dependent resources")
			}
		}

		// pass validation
		return nil
	})
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L766-L776
// RemoveServer submits a configuration change that removes the server with
// the given ID from the cluster. It requires protocol version 2 or higher;
// prevIndex and timeout are forwarded to the configuration change request.
func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
	if r.protocolVersion < 2 {
		return errorFuture{ErrUnsupportedProtocol}
	}
	return r.requestConfigChange(configurationChangeRequest{
		command:   RemoveServer,
		serverID:  id,
		prevIndex: prevIndex,
	}, timeout)
}
https://github.com/DamienFontaine/lunarc/blob/2e7332a51f554794a549a313430eaa7dec8d13cc/smtp/config.go#L58-L65
func (se *SMTPEnvironment) GetEnvironment(environment string) interface{} { for env, conf := range se.Env { if strings.Compare(environment, env) == 0 { return conf } } return nil }
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L7352-L7356
// UnmarshalJSON supports json.Unmarshaler interface (easyjson-generated
// decoder; do not edit by hand).
func (v *BringToFrontParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage81(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/profiler/profiler.go#L112-L114
// Do executes the Profiler.start CDP command. The params struct carries no
// fields, so nil is deliberately passed as the command parameters.
func (p *StartParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandStart, nil, nil)
}
https://github.com/DamienFontaine/lunarc/blob/2e7332a51f554794a549a313430eaa7dec8d13cc/security/response.go#L29-L31
// NewResponse builds a Response value from its individual fields.
func NewResponse(clientID, redirectURI, userID, exp, code string) Response {
	resp := Response{
		ClientID:    clientID,
		RedirectURI: redirectURI,
		UserID:      userID,
		Exp:         exp,
		Code:        code,
	}
	return resp
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/fetch/easyjson.go#L1363-L1367
// UnmarshalJSON supports json.Unmarshaler interface (easyjson-generated
// decoder; do not edit by hand).
func (v *ContinueWithAuthParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoFetch12(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/debugger.go#L250-L259
// Do executes the Debugger.getPossibleBreakpoints CDP command and returns
// the possible break locations reported by the target.
func (p *GetPossibleBreakpointsParams) Do(ctx context.Context) (locations []*BreakLocation, err error) {
	// execute
	var res GetPossibleBreakpointsReturns
	err = cdp.Execute(ctx, CommandGetPossibleBreakpoints, p, &res)
	if err != nil {
		return nil, err
	}

	return res.Locations, nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L714-L742
// GetBuilds lists all Jenkins builds for the given job, keyed by their prow
// job ID. A 404 from Jenkins is tolerated (returns nil, nil) so that one
// missing job does not block processing of the rest.
func (c *Client) GetBuilds(job string) (map[string]Build, error) {
	c.logger.Debugf("GetBuilds(%v)", job)
	data, err := c.Get(fmt.Sprintf("/job/%s/api/json?tree=builds[number,result,actions[parameters[name,value]]]", job))
	if err != nil {
		// Ignore 404s so we will not block processing the rest of the jobs.
		if _, isNotFound := err.(NotFoundError); isNotFound {
			c.logger.WithError(err).Warnf("Cannot list builds for job %q", job)
			return nil, nil
		}
		return nil, fmt.Errorf("cannot list builds for job %q: %v", job, err)
	}
	page := struct {
		Builds []Build `json:"builds"`
	}{}
	if err := json.Unmarshal(data, &page); err != nil {
		return nil, fmt.Errorf("cannot unmarshal builds for job %q: %v", job, err)
	}
	jenkinsBuilds := make(map[string]Build)
	for _, jb := range page.Builds {
		prowJobID := jb.ProwJobID()
		// Ignore builds with missing buildID parameters.
		if prowJobID == "" {
			continue
		}
		jenkinsBuilds[prowJobID] = jb
	}
	return jenkinsBuilds, nil
}
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/spark/watcher.go#L38-L87
// Add registers a stream with the watcher and opens it. Events from the
// stream are translated (including mapping soft-deletes to Deleted events)
// and broadcast to all subscribers. Panics if a stream with the same name is
// already registered.
func (w *Watcher) Add(stream *Stream) {
	// initialize model
	coal.Init(stream.Model)

	// check existence
	if w.streams[stream.Name()] != nil {
		panic(fmt.Sprintf(`spark: stream with name "%s" already exists`, stream.Name()))
	}

	// save stream
	w.streams[stream.Name()] = stream

	// open stream
	coal.OpenStream(stream.Store, stream.Model, nil, func(e coal.Event, id bson.ObjectId, m coal.Model, token []byte) {
		// ignore real deleted events when soft delete has been enabled
		if stream.SoftDelete && e == coal.Deleted {
			return
		}

		// handle soft deleted documents
		if stream.SoftDelete && e == coal.Updated {
			// get soft delete field
			softDeleteField := coal.L(stream.Model, "fire-soft-delete", true)

			// get deleted time
			t := m.MustGet(softDeleteField).(*time.Time)

			// change type if document has been soft deleted
			if t != nil && !t.IsZero() {
				e = coal.Deleted
			}
		}

		// create event
		evt := &Event{
			Type:   e,
			ID:     id,
			Model:  m,
			Stream: stream,
		}

		// broadcast event
		w.manager.broadcast(evt)
	}, nil, func(err error) bool {
		// report error and keep the stream open
		w.Reporter(err)
		return true
	})
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/worker/master.go#L393-L745
// waitJob waits for the job given in jobInfo to finish: it watches the
// output commit for external termination, enforces the job timeout, writes
// the chunk plan, watches chunk and merge progress, and finally finishes the
// output (and stats) commits and sets the job's terminal state. It retries
// forever on transient errors, incrementing the job's restart count.
func (a *APIServer) waitJob(pachClient *client.APIClient, jobInfo *pps.JobInfo, logger *taggedLogger) (retErr error) {
	logger.Logf("waitJob: %s", jobInfo.Job.ID)
	ctx, cancel := context.WithCancel(pachClient.Ctx())
	pachClient = pachClient.WithCtx(ctx)

	// Watch the output commit to see if it's terminated (KILLED, FAILED, or
	// SUCCESS) and if so, cancel the current context
	go func() {
		backoff.RetryNotify(func() error {
			commitInfo, err := pachClient.PfsAPIClient.InspectCommit(ctx, &pfs.InspectCommitRequest{
				Commit:     jobInfo.OutputCommit,
				BlockState: pfs.CommitState_FINISHED,
			})
			if err != nil {
				if pfsserver.IsCommitNotFoundErr(err) || pfsserver.IsCommitDeletedErr(err) {
					defer cancel() // whether we return error or nil, job is done
					// Output commit was deleted. Delete job as well
					if _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {
						// Delete the job if no other worker has deleted it yet
						jobPtr := &pps.EtcdJobInfo{}
						if err := a.jobs.ReadWrite(stm).Get(jobInfo.Job.ID, jobPtr); err != nil {
							return err
						}
						return a.deleteJob(stm, jobPtr)
					}); err != nil && !col.IsErrNotFound(err) {
						return err
					}
					return nil
				}
				return err
			}
			// A finished commit with no trees means the job was killed.
			if commitInfo.Trees == nil {
				defer cancel() // whether job state update succeeds or not, job is done
				if _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {
					// Read an up to date version of the jobInfo so that we
					// don't overwrite changes that have happened since this
					// function started.
					jobPtr := &pps.EtcdJobInfo{}
					if err := a.jobs.ReadWrite(stm).Get(jobInfo.Job.ID, jobPtr); err != nil {
						return err
					}
					if !ppsutil.IsTerminal(jobPtr.State) {
						return ppsutil.UpdateJobState(a.pipelines.ReadWrite(stm), a.jobs.ReadWrite(stm), jobPtr, pps.JobState_JOB_KILLED, "")
					}
					return nil
				}); err != nil {
					return err
				}
			}
			return nil
		}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {
			if isDone(ctx) {
				return err // exit retry loop
			}
			return nil // retry again
		})
	}()

	// Enforce the job timeout by finishing the output commit empty when the
	// deadline (measured from the job's start time) passes.
	if jobInfo.JobTimeout != nil {
		startTime, err := types.TimestampFromProto(jobInfo.Started)
		if err != nil {
			return err
		}
		timeout, err := types.DurationFromProto(jobInfo.JobTimeout)
		if err != nil {
			return err
		}
		afterTime := startTime.Add(timeout).Sub(time.Now())
		logger.Logf("cancelling job at: %+v", afterTime)
		timer := time.AfterFunc(afterTime, func() {
			if _, err := pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
				Commit: jobInfo.OutputCommit,
				Empty:  true,
			}); err != nil {
				logger.Logf("error from FinishCommit while timing out job: %+v", err)
			}
		})
		defer timer.Stop()
	}

	backoff.RetryNotify(func() (retErr error) {
		// block until job inputs are ready
		// TODO(bryce) This should be removed because it is no longer applicable.
		failedInputs, err := a.failedInputs(ctx, jobInfo)
		if err != nil {
			return err
		}
		if len(failedInputs) > 0 {
			reason := fmt.Sprintf("inputs %s failed", strings.Join(failedInputs, ", "))
			if err := a.updateJobState(ctx, jobInfo, nil, pps.JobState_JOB_FAILURE, reason); err != nil {
				return err
			}
			_, err := pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
				Commit: jobInfo.OutputCommit,
				Empty:  true,
			})
			return err
		}
		// Create a datum factory pointing at the job's inputs and split up the
		// input data into chunks
		df, err := NewDatumFactory(pachClient, jobInfo.Input)
		if err != nil {
			return err
		}
		parallelism, err := ppsutil.GetExpectedNumWorkers(a.kubeClient, a.pipelineInfo.ParallelismSpec)
		if err != nil {
			return fmt.Errorf("error from GetExpectedNumWorkers: %v", err)
		}
		numHashtrees, err := ppsutil.GetExpectedNumHashtrees(a.pipelineInfo.HashtreeSpec)
		if err != nil {
			return fmt.Errorf("error from GetExpectedNumHashtrees: %v", err)
		}
		plan := &Plan{}
		// Get stats commit
		var statsCommit *pfs.Commit
		var statsTrees []*pfs.Object
		var statsSize uint64
		if jobInfo.EnableStats {
			ci, err := pachClient.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
			if err != nil {
				return err
			}
			// The stats commit is the subvenant commit in the output repo.
			for _, commitRange := range ci.Subvenance {
				if commitRange.Lower.Repo.Name == jobInfo.OutputRepo.Name && commitRange.Upper.Repo.Name == jobInfo.OutputRepo.Name {
					statsCommit = commitRange.Lower
				}
			}
		}
		// Read the job document, and either resume (if we're recovering from
		// a crash) or mark it running. Also write the input chunks calculated
		// above into plansCol.
		jobID := jobInfo.Job.ID
		if _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {
			jobs := a.jobs.ReadWrite(stm)
			jobPtr := &pps.EtcdJobInfo{}
			if err := jobs.Get(jobID, jobPtr); err != nil {
				return err
			}
			if jobPtr.State == pps.JobState_JOB_KILLED {
				return nil
			}
			jobPtr.DataTotal = int64(df.Len())
			jobPtr.StatsCommit = statsCommit
			if err := ppsutil.UpdateJobState(a.pipelines.ReadWrite(stm), a.jobs.ReadWrite(stm), jobPtr, pps.JobState_JOB_RUNNING, ""); err != nil {
				return err
			}
			// An existing plan means we're resuming after a crash.
			plansCol := a.plans.ReadWrite(stm)
			if err := plansCol.Get(jobID, plan); err == nil {
				return nil
			}
			plan = newPlan(df, jobInfo.ChunkSpec, parallelism, numHashtrees)
			return plansCol.Put(jobID, plan)
		}); err != nil {
			return err
		}
		// On clean exit, clear out this job's chunk and plan records.
		defer func() {
			if retErr == nil {
				if _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {
					chunksCol := a.chunks(jobID).ReadWrite(stm)
					chunksCol.DeleteAll()
					plansCol := a.plans.ReadWrite(stm)
					return plansCol.Delete(jobID)
				}); err != nil {
					retErr = err
				}
			}
		}()
		// Handle the case when there are no datums
		if df.Len() == 0 {
			if err := a.updateJobState(ctx, jobInfo, nil, pps.JobState_JOB_SUCCESS, ""); err != nil {
				return err
			}
			if jobInfo.EnableStats {
				if _, err = pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
					Commit:    statsCommit,
					Trees:     statsTrees,
					SizeBytes: statsSize,
				}); err != nil {
					return err
				}
			}
			_, err := pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
				Commit: jobInfo.OutputCommit,
				Empty:  true,
			})
			return err
		}
		// Watch the chunks in order
		chunks := a.chunks(jobInfo.Job.ID).ReadOnly(ctx)
		var failedDatumID string
		for _, high := range plan.Chunks {
			chunkState := &ChunkState{}
			if err := chunks.WatchOneF(fmt.Sprint(high), func(e *watch.Event) error {
				var key string
				if err := e.Unmarshal(&key, chunkState); err != nil {
					return err
				}
				if key != fmt.Sprint(high) {
					return nil
				}
				// Stop watching once the chunk leaves RUNNING.
				if chunkState.State != State_RUNNING {
					if chunkState.State == State_FAILED {
						failedDatumID = chunkState.DatumID
					}
					return errutil.ErrBreak
				}
				return nil
			}); err != nil {
				return err
			}
		}
		if err := a.updateJobState(ctx, jobInfo, nil, pps.JobState_JOB_MERGING, ""); err != nil {
			return err
		}
		var trees []*pfs.Object
		var size uint64
		if failedDatumID == "" || jobInfo.EnableStats {
			// Wait for all merges to happen.
			merges := a.merges(jobInfo.Job.ID).ReadOnly(ctx)
			for merge := int64(0); merge < plan.Merges; merge++ {
				mergeState := &MergeState{}
				if err := merges.WatchOneF(fmt.Sprint(merge), func(e *watch.Event) error {
					var key string
					if err := e.Unmarshal(&key, mergeState); err != nil {
						return err
					}
					if key != fmt.Sprint(merge) {
						return nil
					}
					if mergeState.State != State_RUNNING {
						trees = append(trees, mergeState.Tree)
						size += mergeState.SizeBytes
						statsTrees = append(statsTrees, mergeState.StatsTree)
						statsSize += mergeState.StatsSizeBytes
						return errutil.ErrBreak
					}
					return nil
				}); err != nil {
					return err
				}
			}
		}
		if jobInfo.EnableStats {
			if _, err = pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
				Commit:    statsCommit,
				Trees:     statsTrees,
				SizeBytes: statsSize,
			}); err != nil {
				return err
			}
		}
		// If the job failed we finish the commit with an empty tree but only
		// after we've set the state, otherwise the job will be considered
		// killed.
		if failedDatumID != "" {
			reason := fmt.Sprintf("failed to process datum: %v", failedDatumID)
			if err := a.updateJobState(ctx, jobInfo, statsCommit, pps.JobState_JOB_FAILURE, reason); err != nil {
				return err
			}
			_, err = pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
				Commit: jobInfo.OutputCommit,
				Empty:  true,
			})
			return err
		}
		// Write out the datums processed/skipped and merged for this job
		buf := &bytes.Buffer{}
		pbw := pbutil.NewWriter(buf)
		for i := 0; i < df.Len(); i++ {
			files := df.Datum(i)
			datumHash := HashDatum(a.pipelineInfo.Pipeline.Name, a.pipelineInfo.Salt, files)
			if _, err := pbw.WriteBytes([]byte(datumHash)); err != nil {
				return err
			}
		}
		datums, _, err := pachClient.PutObject(buf)
		if err != nil {
			return err
		}
		// Finish the job's output commit
		_, err = pachClient.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
			Commit:    jobInfo.OutputCommit,
			Trees:     trees,
			SizeBytes: size,
			Datums:    datums,
		})
		if err != nil && !pfsserver.IsCommitFinishedErr(err) {
			if pfsserver.IsCommitNotFoundErr(err) || pfsserver.IsCommitDeletedErr(err) {
				// output commit was deleted during e.g. FinishCommit, which means this job
				// should be deleted. Goro from top of waitJob() will observe the deletion,
				// delete the jobPtr and call cancel()--wait for that.
				<-ctx.Done() // wait for cancel()
				return nil
			}
			return err
		}
		// Handle egress
		if err := a.egress(pachClient, logger, jobInfo); err != nil {
			reason := fmt.Sprintf("egress error: %v", err)
			return a.updateJobState(ctx, jobInfo, statsCommit, pps.JobState_JOB_FAILURE, reason)
		}
		return a.updateJobState(ctx, jobInfo, statsCommit, pps.JobState_JOB_SUCCESS, "")
	}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {
		logger.Logf("error in waitJob %v, retrying in %v", err, d)
		select {
		case <-ctx.Done():
			if err := ctx.Err(); err != nil {
				if err == context.DeadlineExceeded {
					reason := fmt.Sprintf("job exceeded timeout (%v)", jobInfo.JobTimeout)
					// Mark the job as failed.
					// Workers subscribe to etcd for this state change to cancel their work
					_, err := col.NewSTM(context.Background(), a.etcdClient, func(stm col.STM) error {
						jobs := a.jobs.ReadWrite(stm)
						jobID := jobInfo.Job.ID
						jobPtr := &pps.EtcdJobInfo{}
						if err := jobs.Get(jobID, jobPtr); err != nil {
							return err
						}
						// NOTE(review): an UpdateJobState error is swallowed
						// here (returns nil either way) — confirm intent.
						err = ppsutil.UpdateJobState(a.pipelines.ReadWrite(stm), a.jobs.ReadWrite(stm), jobPtr, pps.JobState_JOB_FAILURE, reason)
						if err != nil {
							return nil
						}
						return nil
					})
					if err != nil {
						return err
					}
				}
				return err
			}
			return err
		default:
		}
		// Increment the job's restart count
		_, err = col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {
			jobs := a.jobs.ReadWrite(stm)
			jobID := jobInfo.Job.ID
			jobPtr := &pps.EtcdJobInfo{}
			if err := jobs.Get(jobID, jobPtr); err != nil {
				return err
			}
			jobPtr.Restart++
			return jobs.Put(jobID, jobPtr)
		})
		if err != nil {
			logger.Logf("error incrementing job %s's restart count", jobInfo.Job.ID)
		}
		return nil
	})
	return nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L883-L887
// MarshalJSON supports json.Marshaler interface (easyjson-generated
// encoder; do not edit by hand).
func (v SetBreakpointsActiveParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoDebugger9(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/kpango/glg/blob/68d2670cb2dbff047331daad841149a82ac37796/glg.go#L493-L502
func (g *Glg) DisableLevelColor(lv LEVEL) *Glg { ins, ok := g.logger.Load(lv) if ok { l := ins.(*logger) l.isColor = false l.updateMode() g.logger.Store(lv, l) } return g }
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/exp/cpp/export/export.go#L502-L507
func FileSequence_SetBasename(id FileSeqId, base *C.char) { if fs, ok := sFileSeqs.Get(id); ok { str := C.GoString(base) fs.SetBasename(str) } }