https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/praxisgen/main.go#L159-L169
func unmarshal(path string, target *map[string]interface{}) error {
    content, err := loadFile(path)
    if err != nil {
        return fmt.Errorf("Failed to load media type JSON from '%s': %s", path, err)
    }
    err = json.Unmarshal(content, target)
    if err != nil {
        return fmt.Errorf("Cannot unmarshal JSON read from '%s': %s", path, err)
    }
    return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L760-L789
func (db *DB) ensureRoomForWrite() error {
    var err error
    db.Lock()
    defer db.Unlock()
    if db.mt.MemSize() < db.opt.MaxTableSize {
        return nil
    }

    y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
    select {
    case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
        db.elog.Printf("Flushing value log to disk if async mode.")
        // Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
        err = db.vlog.sync(db.vhead.Fid)
        if err != nil {
            return err
        }

        db.elog.Printf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
            db.mt.MemSize(), len(db.flushChan))
        // We manage to push this task. Let's modify imm.
        db.imm = append(db.imm, db.mt)
        db.mt = skl.NewSkiplist(arenaSize(db.opt))
        // New memtable is empty. We certainly have room.
        return nil
    default:
        // We need to do this to unlock and allow the flusher to modify imm.
        return errNoRoom
    }
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L229-L231
func (f *FakeClient) GetSingleCommit(org, repo, SHA string) (github.SingleCommit, error) {
    return f.Commits[SHA], nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/indexeddb/easyjson.go#L90-L94
func (v *RequestDatabaseReturns) UnmarshalJSON(data []byte) error {
    r := jlexer.Lexer{Data: data}
    easyjsonC5a4559bDecodeGithubComChromedpCdprotoIndexeddb(&r, v)
    return r.Error()
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/ca/cac/codegen_client.go#L355-L357
func (api *API) BudgetAlertLocator(href string) *BudgetAlertLocator {
    return &BudgetAlertLocator{Href(href), api}
}
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/walk/walk.go#L105-L172
func Walk(c *config.Config, cexts []config.Configurer, dirs []string, mode Mode, wf WalkFunc) {
    knownDirectives := make(map[string]bool)
    for _, cext := range cexts {
        for _, d := range cext.KnownDirectives() {
            knownDirectives[d] = true
        }
    }

    symlinks := symlinkResolver{visited: []string{c.RepoRoot}}

    updateRels := buildUpdateRelMap(c.RepoRoot, dirs)

    var visit func(*config.Config, string, string, bool)
    visit = func(c *config.Config, dir, rel string, updateParent bool) {
        haveError := false

        // TODO: OPT: ReadDir stats all the files, which is slow. We just care about
        // names and modes, so we should use something like
        // golang.org/x/tools/internal/fastwalk to speed this up.
        files, err := ioutil.ReadDir(dir)
        if err != nil {
            log.Print(err)
            return
        }

        f, err := loadBuildFile(c, rel, dir, files)
        if err != nil {
            log.Print(err)
            haveError = true
        }

        c = configure(cexts, knownDirectives, c, rel, f)
        wc := getWalkConfig(c)

        if wc.isExcluded(rel, ".") {
            return
        }

        var subdirs, regularFiles []string
        for _, fi := range files {
            base := fi.Name()
            switch {
            case base == "" || wc.isExcluded(rel, base):
                continue
            case fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 && symlinks.follow(c, dir, rel, base):
                subdirs = append(subdirs, base)
            default:
                regularFiles = append(regularFiles, base)
            }
        }

        shouldUpdate := shouldUpdate(rel, mode, updateParent, updateRels)
        for _, sub := range subdirs {
            if subRel := path.Join(rel, sub); shouldVisit(subRel, mode, updateRels) {
                visit(c, filepath.Join(dir, sub), subRel, shouldUpdate)
            }
        }

        update := !haveError && !wc.ignore && shouldUpdate
        if shouldCall(rel, mode, updateRels) {
            genFiles := findGenFiles(wc, f)
            wf(dir, rel, c, update, f, subdirs, regularFiles, genFiles)
        }
    }
    visit(c, c.RepoRoot, "", false)
}
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/spanish/common.go#L98-L146
func isStopWord(word string) bool {
    switch word {
    case "de", "la", "que", "el", "en", "y", "a", "los", "del", "se",
        "las", "por", "un", "para", "con", "no", "una", "su", "al", "lo",
        "como", "más", "pero", "sus", "le", "ya", "o", "este", "sí", "porque",
        "esta", "entre", "cuando", "muy", "sin", "sobre", "también", "me", "hasta", "hay",
        "donde", "quien", "desde", "todo", "nos", "durante", "todos", "uno", "les", "ni",
        "contra", "otros", "ese", "eso", "ante", "ellos", "e", "esto", "mí", "antes",
        "algunos", "qué", "unos", "yo", "otro", "otras", "otra", "él", "tanto", "esa",
        "estos", "mucho", "quienes", "nada", "muchos", "cual", "poco", "ella", "estar", "estas",
        "algunas", "algo", "nosotros", "mi", "mis", "tú", "te", "ti", "tu", "tus",
        "ellas", "nosotras", "vosostros", "vosostras", "os", "mío", "mía", "míos", "mías", "tuyo",
        "tuya", "tuyos", "tuyas", "suyo", "suya", "suyos", "suyas", "nuestro", "nuestra", "nuestros",
        "nuestras", "vuestro", "vuestra", "vuestros", "vuestras", "esos", "esas", "estoy", "estás", "está",
        "estamos", "estáis", "están", "esté", "estés", "estemos", "estéis", "estén", "estaré", "estarás",
        "estará", "estaremos", "estaréis", "estarán", "estaría", "estarías", "estaríamos", "estaríais", "estarían", "estaba",
        "estabas", "estábamos", "estabais", "estaban", "estuve", "estuviste", "estuvo", "estuvimos", "estuvisteis", "estuvieron",
        "estuviera", "estuvieras", "estuviéramos", "estuvierais", "estuvieran", "estuviese", "estuvieses", "estuviésemos", "estuvieseis", "estuviesen",
        "estando", "estado", "estada", "estados", "estadas", "estad", "he", "has", "ha", "hemos",
        "habéis", "han", "haya", "hayas", "hayamos", "hayáis", "hayan", "habré", "habrás", "habrá",
        "habremos", "habréis", "habrán", "habría", "habrías", "habríamos", "habríais", "habrían", "había", "habías",
        "habíamos", "habíais", "habían", "hube", "hubiste", "hubo", "hubimos", "hubisteis", "hubieron", "hubiera",
        "hubieras", "hubiéramos", "hubierais", "hubieran", "hubiese", "hubieses", "hubiésemos", "hubieseis", "hubiesen", "habiendo",
        "habido", "habida", "habidos", "habidas", "soy", "eres", "es", "somos", "sois", "son",
        "sea", "seas", "seamos", "seáis", "sean", "seré", "serás", "será", "seremos", "seréis",
        "serán", "sería", "serías", "seríamos", "seríais", "serían", "era", "eras", "éramos", "erais",
        "eran", "fui", "fuiste", "fue", "fuimos", "fuisteis", "fueron", "fuera", "fueras", "fuéramos",
        "fuerais", "fueran", "fuese", "fueses", "fuésemos", "fueseis", "fuesen", "sintiendo", "sentido", "sentida",
        "sentidos", "sentidas", "siente", "sentid", "tengo", "tienes", "tiene", "tenemos", "tenéis", "tienen",
        "tenga", "tengas", "tengamos", "tengáis", "tengan", "tendré", "tendrás", "tendrá", "tendremos", "tendréis",
        "tendrán", "tendría", "tendrías", "tendríamos", "tendríais", "tendrían", "tenía", "tenías", "teníamos", "teníais",
        "tenían", "tuve", "tuviste", "tuvo", "tuvimos", "tuvisteis", "tuvieron", "tuviera", "tuvieras", "tuviéramos",
        "tuvierais", "tuvieran", "tuviese", "tuvieses", "tuviésemos", "tuvieseis", "tuviesen", "teniendo", "tenido", "tenida",
        "tenidos", "tenidas", "tened":
        return true
    }
    return false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/cluster.go#L654-L676
func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error {
    ems := existing.Members()
    lms := local.Members()
    if len(ems) != len(lms) {
        return fmt.Errorf("member count is unequal")
    }

    sort.Sort(MembersByPeerURLs(ems))
    sort.Sort(MembersByPeerURLs(lms))

    ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
    defer cancel()
    for i := range ems {
        if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
            return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
        }
        lms[i].ID = ems[i].ID
    }
    local.members = make(map[types.ID]*Member)
    for _, m := range lms {
        local.members[m.ID] = m
    }
    return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L1677-L1687
func (c *Client) CreateReview(org, repo string, number int, r DraftReview) error {
    c.log("CreateReview", org, repo, number, r)
    _, err := c.request(&request{
        method:      http.MethodPost,
        path:        fmt.Sprintf("/repos/%s/%s/pulls/%d/reviews", org, repo, number),
        accept:      "application/vnd.github.black-cat-preview+json",
        requestBody: r,
        exitCodes:   []int{200},
    }, nil)
    return err
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/emulation/easyjson.go#L909-L913
func (v SetEmitTouchEventsForMouseParams) MarshalJSON() ([]byte, error) {
    w := jwriter.Writer{}
    easyjsonC5a4559bEncodeGithubComChromedpCdprotoEmulation10(&w, v)
    return w.Buffer.BuildBytes(), w.Error
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L101-L113
func (o Owners) GetReverseMap(approvers map[string]sets.String) map[string]sets.String {
    approverOwnersfiles := map[string]sets.String{}
    for ownersFile, approvers := range approvers {
        for approver := range approvers {
            if _, ok := approverOwnersfiles[approver]; ok {
                approverOwnersfiles[approver].Insert(ownersFile)
            } else {
                approverOwnersfiles[approver] = sets.NewString(ownersFile)
            }
        }
    }
    return approverOwnersfiles
}
https://github.com/jhillyerd/enmime/blob/874cc30e023f36bd1df525716196887b0f04851b/match.go#L87-L107
func (p *Part) DepthMatchAll(matcher PartMatcher) []*Part {
    root := p
    matches := make([]*Part, 0, 10)
    for {
        if matcher(p) {
            matches = append(matches, p)
        }
        c := p.FirstChild
        if c != nil {
            p = c
        } else {
            for p.NextSibling == nil {
                if p == root {
                    return matches
                }
                p = p.Parent
            }
            p = p.NextSibling
        }
    }
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/git/git.go#L101-L106
func (c *Client) SetCredentials(user string, tokenGenerator func() []byte) {
    c.credLock.Lock()
    defer c.credLock.Unlock()
    c.user = user
    c.tokenGenerator = tokenGenerator
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/branchprotector/request.go#L84-L103
func makeReviews(rp *branchprotection.ReviewPolicy) *github.RequiredPullRequestReviews {
    switch {
    case rp == nil:
        return nil
    case rp.Approvals == nil:
        logrus.Warn("WARNING: required_pull_request_reviews policy does not specify required_approving_review_count, disabling")
        return nil
    case *rp.Approvals == 0:
        return nil
    }
    rprr := github.RequiredPullRequestReviews{
        DismissStaleReviews:          makeBool(rp.DismissStale),
        RequireCodeOwnerReviews:      makeBool(rp.RequireOwners),
        RequiredApprovingReviewCount: *rp.Approvals,
    }
    if rp.DismissalRestrictions != nil {
        rprr.DismissalRestrictions = *makeRestrictions(rp.DismissalRestrictions)
    }
    return &rprr
}
https://github.com/pandemicsyn/oort/blob/fca1d3baddc1d944387cc8bbe8b21f911ec9091b/api/valuestore_GEN_.go#L151-L156
func (stor *valueStore) Close() {
    stor.lock.Lock()
    stor.shutdown()
    close(stor.handlersDoneChan)
    stor.lock.Unlock()
}
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/encoder.go#L15-L17
func EncodeInt8(w io.Writer, i int8) (err error) {
    return EncodeVarint(w, int64(i))
}
https://github.com/hooklift/govix/blob/063702285520a992b920fc1575e305dc9ffd6ffe/vm.go#L1872-L1894
func (v *VM) InstallTools(options InstallToolsOption) error {
    var jobHandle C.VixHandle = C.VIX_INVALID_HANDLE
    var err C.VixError = C.VIX_OK

    jobHandle = C.VixVM_InstallTools(v.handle,
        C.int(options), // options
        nil,            // commandLineArgs
        nil,            // callbackProc
        nil)            // clientData

    defer C.Vix_ReleaseHandle(jobHandle)

    err = C.vix_job_wait(jobHandle)
    if C.VIX_OK != err {
        return &Error{
            Operation: "vm.InstallTools",
            Code:      int(err & 0xFFFF),
            Text:      C.GoString(C.Vix_GetErrorText(err, nil)),
        }
    }

    return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L1101-L1110
func (c *Client) GetPullRequestPatch(org, repo string, number int) ([]byte, error) {
    c.log("GetPullRequestPatch", org, repo, number)
    _, patch, err := c.requestRaw(&request{
        accept:    "application/vnd.github.VERSION.patch",
        method:    http.MethodGet,
        path:      fmt.Sprintf("/repos/%s/%s/pulls/%d", org, repo, number),
        exitCodes: []int{200},
    })
    return patch, err
}
https://github.com/TheThingsNetwork/go-utils/blob/aa2a11bd59104d2a8609328c2b2b55da61826470/influx/writer.go#L58-L62
func (w *SinglePointWriter) Write(bpConf influxdb.BatchPointsConfig, p *influxdb.Point) error {
    bp := newBatchPoints(bpConf)
    bp.AddPoint(p)
    return w.writer.Write(bp)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/shared/util_linux.go#L128-L146
func llistxattr(path string, list []byte) (sz int, err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 unsafe.Pointer
    if len(list) > 0 {
        _p1 = unsafe.Pointer(&list[0])
    } else {
        _p1 = unsafe.Pointer(nil)
    }
    r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(list)))
    sz = int(r0)
    if e1 != 0 {
        err = e1
    }
    return
}
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/build/payment.go#L87-L97
func (m Destination) MutatePayment(o interface{}) error {
    switch o := o.(type) {
    default:
        return errors.New("Unexpected operation type")
    case *xdr.PaymentOp:
        return setAccountId(m.AddressOrSeed, &o.Destination)
    case *xdr.PathPaymentOp:
        return setAccountId(m.AddressOrSeed, &o.Destination)
    }
    return nil
}
https://github.com/google/subcommands/blob/d47216cd17848d55a33e6f651cbe408243ed55b8/subcommands.go#L419-L421
func Execute(ctx context.Context, args ...interface{}) ExitStatus {
    return DefaultCommander.Execute(ctx, args...)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/page.go#L1114-L1116
func (p *SetLifecycleEventsEnabledParams) Do(ctx context.Context) (err error) {
    return cdp.Execute(ctx, CommandSetLifecycleEventsEnabled, p, nil)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/util.go#L61-L78
func isValidSeq(lg *zap.Logger, names []string) bool {
    var lastSeq uint64
    for _, name := range names {
        curSeq, _, err := parseWALName(name)
        if err != nil {
            if lg != nil {
                lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
            } else {
                plog.Panicf("parse correct name should never fail: %v", err)
            }
        }
        if lastSeq != 0 && lastSeq != curSeq-1 {
            return false
        }
        lastSeq = curSeq
    }
    return true
}
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L3338-L3345
func (u ChangeTrustResult) ArmForSwitch(sw int32) (string, bool) {
    switch ChangeTrustResultCode(sw) {
    case ChangeTrustResultCodeChangeTrustSuccess:
        return "", true
    default:
        return "", true
    }
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L1658-L1662
func (v PropertyDescriptor) MarshalJSON() ([]byte, error) {
    w := jwriter.Writer{}
    easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime15(&w, v)
    return w.Buffer.BuildBytes(), w.Error
}
https://github.com/pivotal-pez/pezdispenser/blob/768e2777520868857916b66cfd4cfb7149383ca5/fakes/fake.go#L170-L177
func FakeNewCollectionDialer(c taskmanager.Task) func(url, dbname, collectionname string) (col integrations.Collection, err error) {
    return func(url, dbname, collectionname string) (col integrations.Collection, err error) {
        col = &FakeCollection{
            ControlTask: c,
        }
        return
    }
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/debugger.go#L551-L553
func (p *SetBlackboxPatternsParams) Do(ctx context.Context) (err error) {
    return cdp.Execute(ctx, CommandSetBlackboxPatterns, p, nil)
}
https://github.com/taskcluster/taskcluster-client-go/blob/ef6acd428ae5844a933792ed6479d0e7dca61ef8/tcec2manager/types.go#L526-L529
func (this *Var1) MarshalJSON() ([]byte, error) {
    x := json.RawMessage(*this)
    return (&x).MarshalJSON()
}
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/cmd/seqls/manager.go#L57-L99
func (w *workManager) Process(rootPaths []string) error {
    if w.hasRun {
        return errors.New("Process() cannot be called more than once")
    }
    w.hasRun = true

    // Start the workers to find sequences
    var wg sync.WaitGroup
    for i := 0; i < numWorkers; i++ {
        wg.Add(1)
        go func() {
            // Processes work from the input chans
            // and places them into the output chans
            w.processSources()
            wg.Done()
        }()
    }

    // Load the root paths into the source channel
    go func() {
        if Options.Recurse {
            // Perform a recursive walk on all paths
            w.loadRecursive(rootPaths)
        } else {
            // Load each root path directly into chan
            w.load(rootPaths)
        }
        w.closeInputs()
    }()

    // Will close the output channel when no more source
    // paths are being added
    go func() {
        wg.Wait()
        w.closeOutput()
    }()

    // Pull out all processed sequences and print
    w.processResults()
    return nil
}
https://github.com/xwb1989/sqlparser/blob/120387863bf27d04bc07db8015110a6e96d0146c/ast.go#L63-L69
func ParseStrictDDL(sql string) (Statement, error) {
    tokenizer := NewStringTokenizer(sql)
    if yyParse(tokenizer) != 0 {
        return nil, tokenizer.LastError
    }
    return tokenizer.ParseTree, nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/grift/options.go#L25-L37
func (opts *Options) Validate() error {
    if len(opts.Args) == 0 {
        return errors.New("you need to provide a name for the grift task")
    }

    opts.Namespaced = strings.Contains(opts.Args[0], ":")

    for _, n := range strings.Split(opts.Args[0], ":") {
        opts.Parts = append(opts.Parts, name.New(n))
    }
    opts.Name = opts.Parts[len(opts.Parts)-1]

    return nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/localcache/cache.go#L30-L49
func (c *Cache) Put(key string, value io.Reader) (retErr error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    f, err := os.Create(filepath.Join(c.root, key))
    if err != nil {
        return err
    }
    defer func() {
        if err := f.Close(); err != nil && retErr == nil {
            retErr = err
        }
    }()
    buf := grpcutil.GetBuffer()
    defer grpcutil.PutBuffer(buf)
    if _, err := io.CopyBuffer(f, value, buf); err != nil {
        return err
    }
    c.keys[key] = true
    return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L136-L140
func (rn *RawNode) Campaign() error {
    return rn.raft.Step(pb.Message{
        Type: pb.MsgHup,
    })
}
https://github.com/urandom/handler/blob/61508044a5569d1609521d81e81f6737567fd104/security/nonce.go#L180-L188
func StoreNonce(w http.ResponseWriter, r *http.Request) (err error) {
    if c := r.Context().Value(nonceSetterKey); c != nil {
        if setter, ok := c.(func(http.ResponseWriter, *http.Request) error); ok {
            err = setter(w, r)
        }
    }

    return err
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/debugger.go#L790-L792
func (p *SetBreakpointsActiveParams) Do(ctx context.Context) (err error) {
    return cdp.Execute(ctx, CommandSetBreakpointsActive, p, nil)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/container_lxc.go#L1925-L2485
func (c *containerLXC) startCommon() (string, error) {
    // Load the go-lxc struct
    err := c.initLXC(true)
    if err != nil {
        return "", errors.Wrap(err, "Load go-lxc struct")
    }

    // Check that we're not already running
    if c.IsRunning() {
        return "", fmt.Errorf("The container is already running")
    }

    // Sanity checks for devices
    for name, m := range c.expandedDevices {
        switch m["type"] {
        case "disk":
            // When we want to attach a storage volume created via
            // the storage api m["source"] only contains the name of
            // the storage volume, not the path where it is mounted.
            // So do only check for the existence of m["source"]
            // when m["pool"] is empty.
            if m["pool"] == "" && m["source"] != "" && !shared.IsTrue(m["optional"]) && !shared.PathExists(shared.HostPath(m["source"])) {
                return "", fmt.Errorf("Missing source '%s' for disk '%s'", m["source"], name)
            }
        case "nic":
            if m["parent"] != "" && !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", m["parent"])) {
                return "", fmt.Errorf("Missing parent '%s' for nic '%s'", m["parent"], name)
            }
        case "unix-char", "unix-block":
            srcPath, exist := m["source"]
            if !exist {
                srcPath = m["path"]
            }

            if srcPath != "" && m["required"] != "" && !shared.IsTrue(m["required"]) {
                err = deviceInotifyAddClosestLivingAncestor(c.state, filepath.Dir(srcPath))
                if err != nil {
                    logger.Errorf("Failed to add \"%s\" to inotify targets", srcPath)
                    return "", fmt.Errorf("Failed to setup inotify watch for '%s': %v", srcPath, err)
                }
            } else if srcPath != "" && m["major"] == "" && m["minor"] == "" && !shared.PathExists(srcPath) {
                return "", fmt.Errorf("Missing source '%s' for device '%s'", srcPath, name)
            }
        }
    }

    // Load any required kernel modules
    kernelModules := c.expandedConfig["linux.kernel_modules"]
    if kernelModules != "" {
        for _, module := range strings.Split(kernelModules, ",") {
            module = strings.TrimPrefix(module, " ")
            err := util.LoadModule(module)
            if err != nil {
                return "", fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
            }
        }
    }

    var ourStart bool
    newSize, ok := c.LocalConfig()["volatile.apply_quota"]
    if ok {
        err := c.initStorage()
        if err != nil {
            return "", errors.Wrap(err, "Initialize storage")
        }

        size, err := shared.ParseByteSizeString(newSize)
        if err != nil {
            return "", err
        }
        err = c.storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, c)
        if err != nil {
            return "", errors.Wrap(err, "Set storage quota")
        }

        // Remove the volatile key from the DB
        err = c.state.Cluster.ContainerConfigRemove(c.id, "volatile.apply_quota")
        if err != nil {
            return "", errors.Wrap(err, "Remove volatile.apply_quota config key")
        }

        // Remove the volatile key from the in-memory configs
        delete(c.localConfig, "volatile.apply_quota")
        delete(c.expandedConfig, "volatile.apply_quota")
    }

    /* Deal with idmap changes */
    nextIdmap, err := c.NextIdmap()
    if err != nil {
        return "", errors.Wrap(err, "Set ID map")
    }

    diskIdmap, err := c.DiskIdmap()
    if err != nil {
        return "", errors.Wrap(err, "Set last ID map")
    }

    if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && c.state.OS.Shiftfs) {
        if shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
            return "", fmt.Errorf("Container is protected against filesystem shifting")
        }

        logger.Debugf("Container idmap changed, remapping")
        c.updateProgress("Remapping container filesystem")

        ourStart, err = c.StorageStart()
        if err != nil {
            return "", errors.Wrap(err, "Storage start")
        }

        if diskIdmap != nil {
            if c.Storage().GetStorageType() == storageTypeZfs {
                err = diskIdmap.UnshiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
            } else {
                err = diskIdmap.UnshiftRootfs(c.RootfsPath(), nil)
            }
            if err != nil {
                if ourStart {
                    c.StorageStop()
                }
                return "", err
            }
        }

        if nextIdmap != nil && !c.state.OS.Shiftfs {
            if c.Storage().GetStorageType() == storageTypeZfs {
                err = nextIdmap.ShiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
            } else {
                err = nextIdmap.ShiftRootfs(c.RootfsPath(), nil)
            }
            if err != nil {
                if ourStart {
                    c.StorageStop()
                }
                return "", err
            }
        }

        jsonDiskIdmap := "[]"
        if nextIdmap != nil && !c.state.OS.Shiftfs {
            idmapBytes, err := json.Marshal(nextIdmap.Idmap)
            if err != nil {
                return "", err
            }
            jsonDiskIdmap = string(idmapBytes)
        }

        err = c.ConfigKeySet("volatile.last_state.idmap", jsonDiskIdmap)
        if err != nil {
            return "", errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", c.name, c.id)
        }

        c.updateProgress("")
    }

    var idmapBytes []byte
    if nextIdmap == nil {
        idmapBytes = []byte("[]")
    } else {
        idmapBytes, err = json.Marshal(nextIdmap.Idmap)
        if err != nil {
            return "", err
        }
    }

    if c.localConfig["volatile.idmap.current"] != string(idmapBytes) {
        err = c.ConfigKeySet("volatile.idmap.current", string(idmapBytes))
        if err != nil {
            return "", errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", c.name, c.id)
        }
    }

    // Generate the Seccomp profile
    if err := SeccompCreateProfile(c); err != nil {
        return "", err
    }

    // Cleanup any existing leftover devices
    c.removeUnixDevices()
    c.removeDiskDevices()
    c.removeNetworkFilters()
    c.removeProxyDevices()

    var usbs []usbDevice
    var sriov []string
    diskDevices := map[string]types.Device{}

    // Create the devices
    for _, k := range c.expandedDevices.DeviceNames() {
        m := c.expandedDevices[k]
        if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) {
            // Unix device
            paths, err := c.createUnixDevice(fmt.Sprintf("unix.%s", k), m, true)
            if err != nil {
                // Deal with device hotplug
                if m["required"] == "" || shared.IsTrue(m["required"]) {
                    return "", err
                }

                srcPath := m["source"]
                if srcPath == "" {
                    srcPath = m["path"]
                }
                srcPath = shared.HostPath(srcPath)

                err = deviceInotifyAddClosestLivingAncestor(c.state, srcPath)
                if err != nil {
                    logger.Errorf("Failed to add \"%s\" to inotify targets", srcPath)
                    return "", err
                }
                continue
            }
            devPath := paths[0]
            if c.isCurrentlyPrivileged() && !c.state.OS.RunningInUserNS && c.state.OS.CGroupDevicesController {
                // Add the new device cgroup rule
                dType, dMajor, dMinor, err := deviceGetAttributes(devPath)
                if err != nil {
                    if m["required"] == "" || shared.IsTrue(m["required"]) {
                        return "", err
                    }
                } else {
                    err = lxcSetConfigItem(c.c, "lxc.cgroup.devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor))
                    if err != nil {
                        return "", fmt.Errorf("Failed to add cgroup rule for device")
                    }
                }
            }
        } else if m["type"] == "usb" {
            if usbs == nil {
                usbs, err = deviceLoadUsb()
                if err != nil {
                    return "", err
                }
            }

            for _, usb := range usbs {
                if (m["vendorid"] != "" && usb.vendor != m["vendorid"]) || (m["productid"] != "" && usb.product != m["productid"]) {
                    continue
                }

                err := c.setupUnixDevice(fmt.Sprintf("unix.%s", k), m, usb.major, usb.minor, usb.path, shared.IsTrue(m["required"]), false)
                if err != nil {
                    return "", err
                }
            }
        } else if m["type"] == "gpu" {
            allGpus := deviceWantsAllGPUs(m)
            gpus, nvidiaDevices, err := deviceLoadGpu(allGpus)
            if err != nil {
                return "", err
            }

            sawNvidia := false
            found := false
            for _, gpu := range gpus {
                if (m["vendorid"] != "" && gpu.vendorID != m["vendorid"]) ||
                    (m["pci"] != "" && gpu.pci != m["pci"]) ||
                    (m["productid"] != "" && gpu.productID != m["productid"]) ||
                    (m["id"] != "" && gpu.id != m["id"]) {
                    continue
                }

                found = true

                err := c.setupUnixDevice(fmt.Sprintf("unix.%s", k), m, gpu.major, gpu.minor, gpu.path, true, false)
                if err != nil {
                    return "", err
                }

                if !gpu.isNvidia {
                    continue
                }

                if gpu.nvidia.path != "" {
                    err = c.setupUnixDevice(fmt.Sprintf("unix.%s", k), m, gpu.nvidia.major, gpu.nvidia.minor, gpu.nvidia.path, true, false)
                    if err != nil {
                        return "", err
                    }
                } else if !allGpus {
                    errMsg := fmt.Errorf("Failed to detect correct \"/dev/nvidia\" path")
                    logger.Errorf("%s", errMsg)
                    return "", errMsg
                }

                sawNvidia = true
            }

            if sawNvidia {
                for _, gpu := range nvidiaDevices {
                    if shared.IsTrue(c.expandedConfig["nvidia.runtime"]) {
                        if !gpu.isCard {
                            continue
                        }
                    }
                    err := c.setupUnixDevice(fmt.Sprintf("unix.%s", k), m, gpu.major, gpu.minor, gpu.path, true, false)
                    if err != nil {
                        return "", err
                    }
                }
            }

            if !found {
                msg := "Failed to detect requested GPU device"
                logger.Error(msg)
                return "", fmt.Errorf(msg)
            }
        } else if m["type"] == "disk" {
            if m["path"] != "/" {
                diskDevices[k] = m
            }
        } else if m["type"] == "nic" || m["type"] == "infiniband" {
            var err error
            var infiniband map[string]IBF
            if m["type"] == "infiniband" {
                infiniband, err = deviceLoadInfiniband()
                if err != nil {
                    return "", err
                }
            }

            networkKeyPrefix := "lxc.net"
            if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
                networkKeyPrefix = "lxc.network"
            }

            m, err = c.fillNetworkDevice(k, m)
            if err != nil {
                return "", err
            }

            networkidx := -1
            reserved := []string{}
            // Record nictype == physical devices since those won't
            // be available for nictype == sriov.
            for _, dName := range c.expandedDevices.DeviceNames() {
                m := c.expandedDevices[dName]
                if m["type"] != "nic" && m["type"] != "infiniband" {
                    continue
                }
                if m["nictype"] != "physical" {
                    continue
                }
                reserved = append(reserved, m["parent"])
            }

            for _, dName := range c.expandedDevices.DeviceNames() {
                m := c.expandedDevices[dName]
                if m["type"] != "nic" && m["type"] != "infiniband" {
                    continue
                }
                networkidx++

                if shared.StringInSlice(dName, sriov) {
                    continue
                } else {
                    sriov = append(sriov, dName)
                }

                if m["nictype"] != "sriov" {
                    continue
                }

                m, err = c.fillSriovNetworkDevice(dName, m, reserved)
                if err != nil {
                    return "", err
                }

                // Make sure that no one called dibs.
                reserved = append(reserved, m["host_name"])

                val := c.c.ConfigItem(fmt.Sprintf("%s.%d.type", networkKeyPrefix, networkidx))
                if len(val) == 0 || val[0] != "phys" {
                    return "", fmt.Errorf("Network index corresponds to false network")
                }

                // Fill in correct name right now
                err = lxcSetConfigItem(c.c, fmt.Sprintf("%s.%d.link", networkKeyPrefix, networkidx), m["host_name"])
                if err != nil {
                    return "", err
                }

                if m["type"] == "infiniband" {
                    key := m["host_name"]
                    ifDev, ok := infiniband[key]
                    if !ok {
                        return "", fmt.Errorf("Specified infiniband device \"%s\" not found", key)
                    }

                    err := c.addInfinibandDevices(dName, &ifDev, false)
                    if err != nil {
                        return "", err
                    }
                }
            }

            if m["type"] == "infiniband" && m["nictype"] == "physical" {
                key := m["parent"]
                ifDev, ok := infiniband[key]
                if !ok {
                    return "", fmt.Errorf("Specified infiniband device \"%s\" not found", key)
                }

                err := c.addInfinibandDevices(k, &ifDev, false)
                if err != nil {
                    return "", err
                }
            }

            if m["nictype"] == "bridged" && shared.IsTrue(m["security.mac_filtering"]) {
                // Read device name from config
                vethName := ""
                for i := 0; i < len(c.c.ConfigItem(networkKeyPrefix)); i++ {
                    val := c.c.ConfigItem(fmt.Sprintf("%s.%d.hwaddr", networkKeyPrefix, i))
                    if len(val) == 0 || val[0] != m["hwaddr"] {
                        continue
                    }

                    val = c.c.ConfigItem(fmt.Sprintf("%s.%d.link", networkKeyPrefix, i))
                    if len(val) == 0 || val[0] != m["parent"] {
                        continue
                    }

                    val = c.c.ConfigItem(fmt.Sprintf("%s.%d.veth.pair", networkKeyPrefix, i))
                    if len(val) == 0 {
                        continue
                    }

                    vethName = val[0]
                    break
                }

                if vethName == "" {
                    return "", fmt.Errorf("Failed to find device name for mac_filtering")
                }

                err = c.createNetworkFilter(vethName, m["parent"], m["hwaddr"])
                if err != nil {
                    return "", err
                }
            }

            // Create VLAN devices
            if shared.StringInSlice(m["nictype"], []string{"macvlan", "physical"}) && m["vlan"] != "" {
                device := networkGetHostDevice(m["parent"], m["vlan"])
                if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", device)) {
                    _, err := shared.RunCommand("ip", "link", "add", "link", m["parent"], "name", device, "up", "type", "vlan", "id", m["vlan"])
                    if err != nil {
                        return "", err
                    }

                    // Attempt to disable IPv6 router advertisement acceptance
                    networkSysctlSet(fmt.Sprintf("ipv6/conf/%s/accept_ra", device), "0")
                }
            }
        }
    }

    err = c.addDiskDevices(diskDevices, func(name string, d types.Device) error {
        _, err := c.createDiskDevice(name, d)
        return err
    })
    if err != nil {
        return "", err
    }

    // Create any missing directory
    err = os.MkdirAll(c.LogPath(), 0700)
    if err != nil {
        return "", err
    }

    err = os.MkdirAll(c.DevicesPath(), 0711)
    if err != nil {
        return "", err
    }

    err = os.MkdirAll(c.ShmountsPath(), 0711)
    if err != nil {
        return "", err
    }

    // Rotate the log file
    logfile := c.LogFilePath()
    if shared.PathExists(logfile) {
        os.Remove(logfile + ".old")
        err := os.Rename(logfile, logfile+".old")
        if err != nil {
            return "", err
        }
    }

    // Storage is guaranteed to be mountable now.
    ourStart, err = c.StorageStart()
    if err != nil {
        return "", err
    }

    // Generate the LXC config
    configPath := filepath.Join(c.LogPath(), "lxc.conf")
    err = c.c.SaveConfigFile(configPath)
    if err != nil {
        os.Remove(configPath)
        return "", err
    }

    // Undo liblxc modifying container directory ownership
    err = os.Chown(c.Path(), 0, 0)
    if err != nil {
        if ourStart {
            c.StorageStop()
        }
        return "", err
    }

    // Set right permission to allow traversal
    var mode os.FileMode
    if c.isCurrentlyPrivileged() {
        mode = 0700
    } else {
        mode = 0711
    }

    err = os.Chmod(c.Path(), mode)
    if err != nil {
        if ourStart {
            c.StorageStop()
        }
        return "", err
    }

    // Update the backup.yaml file
    err = writeBackupFile(c)
    if err != nil {
        if ourStart {
            c.StorageStop()
        }
        return "", err
    }

    if !c.IsStateful() && shared.PathExists(c.StatePath()) {
        os.RemoveAll(c.StatePath())
    }

    _, err = c.StorageStop()
    if err != nil {
        return "", err
    }

    // Update time container was last started
    err = c.state.Cluster.ContainerLastUsedUpdate(c.id, time.Now().UTC())
    if err != nil {
        return "", fmt.Errorf("Error updating last used: %v", err)
    }

    // Unmount any previously mounted shiftfs
    syscall.Unmount(c.RootfsPath(), syscall.MNT_DETACH)

    return configPath, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/util.go#L56-L68
func generateUUID() string {
    buf := make([]byte, 16)
    if _, err := crand.Read(buf); err != nil {
        panic(fmt.Errorf("failed to read random bytes: %v", err))
    }

    return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
        buf[0:4],
        buf[4:6],
        buf[6:8],
        buf[8:10],
        buf[10:16])
}
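A brief usage sketch (mine, not from the hashicorp/raft source; it assumes generateUUID above is in scope along with "fmt"). Note that the function hex-formats 16 random bytes in the 8-4-4-4-12 layout, so it produces UUID-shaped strings without setting the RFC 4122 version or variant bits.

func exampleGenerateUUID() {
    id := generateUUID()
    fmt.Println(id)      // 36-character string, e.g. "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    fmt.Println(len(id)) // 36: 32 hex digits plus 4 dashes
}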
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/client/lxd_containers.go#L1784-L1797
func (r *ProtocolLXD) GetContainerBackup(containerName string, name string) (*api.ContainerBackup, string, error) {
    if !r.HasExtension("container_backup") {
        return nil, "", fmt.Errorf("The server is missing the required \"container_backup\" API extension")
    }

    // Fetch the raw value
    backup := api.ContainerBackup{}
    etag, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/backups/%s", url.QueryEscape(containerName), url.QueryEscape(name)), nil, "", &backup)
    if err != nil {
        return nil, "", err
    }

    return &backup, etag, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domdebugger/easyjson.go#L236-L240
func (v *SetEventListenerBreakpointParams) UnmarshalJSON(data []byte) error {
    r := jlexer.Lexer{Data: data}
    easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomdebugger2(&r, v)
    return r.Error()
}
https://github.com/kubicorn/kubicorn/blob/c4a4b80994b4333709c0f8164faabd801866b986/pkg/agent/agent.go#L122-L134
func (k *Keyring) RemoveKeyUsingFile(pubkey string) error {
    p, err := ioutil.ReadFile(pubkey)
    if err != nil {
        return err
    }

    // Capture the parse error; the original discarded it and then checked a
    // stale err value, so a malformed key file was never reported.
    key, _, _, _, err := ssh.ParseAuthorizedKey(p)
    if err != nil {
        return err
    }

    return k.RemoveKey(key)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pkg/shard/shard.go#L20-L22
func NewSharder(discoveryClient discovery.Client, numShards uint64, namespace string) Sharder {
    return newSharder(discoveryClient, numShards, namespace)
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/docker_client.go#L449-L454
func (c *dockerClient) doHTTP(req *http.Request) (*http.Response, error) {
    tr := tlsclientconfig.NewTransport()
    tr.TLSClientConfig = c.tlsClientConfig
    httpClient := &http.Client{Transport: tr}
    return httpClient.Do(req)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/webaudio/easyjson.go#L89-L93
func (v *GetRealtimeDataReturns) UnmarshalJSON(data []byte) error {
    r := jlexer.Lexer{Data: data}
    easyjsonC5a4559bDecodeGithubComChromedpCdprotoWebaudio(&r, v)
    return r.Error()
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/containers.go#L1092-L1104
func (c *Cluster) ContainerBackupRemove(name string) error {
    id, err := c.ContainerBackupID(name)
    if err != nil {
        return err
    }

    err = exec(c.db, "DELETE FROM containers_backups WHERE id=?", id)
    if err != nil {
        return err
    }

    return nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/worker/api_server.go#L1680-L1829
func (a *APIServer) worker() {
    logger := a.getWorkerLogger() // this worker's formatting logger

    // claim a shard if one is available or becomes available
    go a.claimShard(a.pachClient.Ctx())

    // Process incoming jobs
    backoff.RetryNotify(func() (retErr error) {
        retryCtx, retryCancel := context.WithCancel(a.pachClient.Ctx())
        defer retryCancel()
        watcher, err := a.jobs.ReadOnly(retryCtx).WatchByIndex(ppsdb.JobsPipelineIndex, a.pipelineInfo.Pipeline)
        if err != nil {
            return fmt.Errorf("error creating watch: %v", err)
        }
        defer watcher.Close()
    NextJob:
        for e := range watcher.Watch() {
            // Clear chunk caches from previous job
            if err := a.chunkCache.Clear(); err != nil {
                logger.Logf("error clearing chunk cache: %v", err)
            }
            if err := a.chunkStatsCache.Clear(); err != nil {
                logger.Logf("error clearing chunk stats cache: %v", err)
            }
            if e.Type == watch.EventError {
                return fmt.Errorf("worker watch error: %v", e.Err)
            } else if e.Type == watch.EventDelete {
                // Job was deleted, e.g. because input commit was deleted. This is
                // handled by cancelCtxIfJobFails goro, which was spawned when job was
                // created. Nothing to do here
                continue
            }

            // 'e' is a Put event -- new job
            var jobID string
            jobPtr := &pps.EtcdJobInfo{}
            if err := e.Unmarshal(&jobID, jobPtr); err != nil {
                return fmt.Errorf("error unmarshalling: %v", err)
            }
            if ppsutil.IsTerminal(jobPtr.State) {
                // previously-created job has finished, or job was finished during backoff
                // or in the 'watcher' queue
                logger.Logf("skipping job %v as it is already in state %v", jobID, jobPtr.State)
                continue NextJob
            }

            // create new ctx for this job, and don't use retryCtx as the
            // parent. Just because another job's etcd write failed doesn't
            // mean this job shouldn't run
            jobCtx, jobCancel := context.WithCancel(a.pachClient.Ctx())
            defer jobCancel() // cancel the job ctx
            pachClient := a.pachClient.WithCtx(jobCtx)

            // Watch for any changes to EtcdJobInfo corresponding to jobID; if
            // the EtcdJobInfo is marked 'FAILED', call jobCancel().
            // ('watcher' above can't detect job state changes--it's watching
            // an index and so only emits when jobs are created or deleted).
            go a.cancelCtxIfJobFails(jobCtx, jobCancel, jobID)

            // Inspect the job and make sure it's relevant, as this worker may be old
            jobInfo, err := pachClient.InspectJob(jobID, false)
            if err != nil {
                if col.IsErrNotFound(err) {
                    continue NextJob // job was deleted--no sense retrying
                }
                return fmt.Errorf("error from InspectJob(%v): %+v", jobID, err)
            }
            if jobInfo.PipelineVersion < a.pipelineInfo.Version {
                continue
            }
            if jobInfo.PipelineVersion > a.pipelineInfo.Version {
                return fmt.Errorf("job %s's version (%d) greater than pipeline's "+
                    "version (%d), this should automatically resolve when the worker "+
                    "is updated", jobID, jobInfo.PipelineVersion, a.pipelineInfo.Version)
            }

            // Read the chunks laid out by the master and create the datum factory
            plan := &Plan{}
            if err := a.plans.ReadOnly(jobCtx).GetBlock(jobInfo.Job.ID, plan); err != nil {
                return err
            }
            df, err := NewDatumFactory(pachClient, jobInfo.Input)
            if err != nil {
                return fmt.Errorf("error from NewDatumFactory: %v", err)
            }

            // Compute the datums to skip
            skip := make(map[string]struct{})
            var useParentHashTree bool
            parentCommitInfo, err := a.getParentCommitInfo(jobCtx, pachClient, jobInfo.OutputCommit)
            if err != nil {
                return err
            }
            if parentCommitInfo != nil {
                var err error
                skip, err = a.getCommitDatums(jobCtx, pachClient, parentCommitInfo)
                if err != nil {
                    return err
                }
                var count int
                for i := 0; i < df.Len(); i++ {
                    files := df.Datum(i)
                    datumHash := HashDatum(a.pipelineInfo.Pipeline.Name, a.pipelineInfo.Salt, files)
                    if _, ok := skip[datumHash]; ok {
                        count++
                    }
                }
                if len(skip) == count {
                    useParentHashTree = true
                }
            }

            // Get updated job info from master
            jobInfo, err = pachClient.InspectJob(jobID, false)
            if err != nil {
                return err
            }
            eg, ctx := errgroup.WithContext(jobCtx)
            // If a datum fails, acquireDatums updates the relevant lock in
            // etcd, which causes the master to fail the job (which is
            // handled above in the JOB_FAILURE case). There's no need to
            // handle failed datums here, just failed etcd writes.
            eg.Go(func() error {
                return a.acquireDatums(
                    ctx, jobID, plan, logger,
                    func(low, high int64) (*processResult, error) {
                        processResult, err := a.processDatums(pachClient, logger, jobInfo, df, low, high, skip, useParentHashTree)
                        if err != nil {
                            return nil, err
                        }
                        return processResult, nil
                    },
                )
            })
            eg.Go(func() error {
                return a.mergeDatums(ctx, pachClient, jobInfo, jobID, plan, logger, df, skip, useParentHashTree)
            })
            if err := eg.Wait(); err != nil {
                if jobCtx.Err() == context.Canceled {
                    continue NextJob // job cancelled--don't restart, just wait for next job
                }
                return fmt.Errorf("acquire/process/merge datums for job %s exited with err: %v", jobID, err)
            }
        }
        return fmt.Errorf("worker: jobs.WatchByIndex(pipeline = %s) closed unexpectedly", a.pipelineInfo.Pipeline.Name)
    }, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {
        logger.Logf("worker: watch closed or error running the worker process: %v; retrying in %v", err, d)
        return nil
    })
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/states.go#L49-L51
func (a *ActiveState) Age(t time.Time) time.Duration {
    return t.Sub(a.startTime)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/type_filter_wrapper.go#L49-L52
func (t *TypeFilterWrapperPlugin) AddFlags(cmd *cobra.Command) {
    cmd.Flags().BoolVar(&t.pullRequests, "no-pull-requests", false, "Ignore pull-requests")
    cmd.Flags().BoolVar(&t.issues, "no-issues", false, "Ignore issues")
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/spyglass/podlogartifact.go#L122-L142
func (a *PodLogArtifact) ReadAtMost(n int64) ([]byte, error) {
    logs, err := a.jobAgent.GetJobLog(a.name, a.buildID)
    if err != nil {
        return nil, fmt.Errorf("error getting pod log: %v", err)
    }
    reader := bytes.NewReader(logs)
    var byteCount int64
    var p []byte
    for byteCount < n {
        b, err := reader.ReadByte()
        if err == io.EOF {
            return p, io.EOF
        }
        if err != nil {
            return nil, fmt.Errorf("error reading pod log: %v", err)
        }
        p = append(p, b)
        byteCount++
    }
    return p, nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pfs.go#L166-L179
func (c APIClient) BuildCommit(repoName string, branch string, parent string, treeObject string) (*pfs.Commit, error) {
    commit, err := c.PfsAPIClient.BuildCommit(
        c.Ctx(),
        &pfs.BuildCommitRequest{
            Parent: NewCommit(repoName, parent),
            Branch: branch,
            Tree:   &pfs.Object{Hash: treeObject},
        },
    )
    if err != nil {
        return nil, grpcutil.ScrubGRPC(err)
    }
    return commit, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L6693-L6697
func (v *CreateIsolatedWorldReturns) UnmarshalJSON(data []byte) error {
    r := jlexer.Lexer{Data: data}
    easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage72(&r, v)
    return r.Error()
}
https://github.com/iron-io/functions_go/blob/91b84f5bbb17095bf1c7028ec6e70a3dc06a5893/client/apps/delete_apps_app_parameters.go#L88-L91
func (o *DeleteAppsAppParams) WithContext(ctx context.Context) *DeleteAppsAppParams {
    o.SetContext(ctx)
    return o
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/client.go#L578-L582
func (c *APIClient) WithCtx(ctx context.Context) *APIClient {
    result := *c // copy c
    result.ctx = ctx
    return &result
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/secret/secret.go#L40-L46
func LoadSingleSecret(path string) ([]byte, error) {
    b, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("error reading %s: %v", path, err)
    }
    return bytes.TrimSpace(b), nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/gopherage/pkg/cov/junit/calculation/coverage.go#L27-L32
func (c *Coverage) Ratio() float32 {
    if c.NumAllStmts == 0 {
        return 1
    }
    return float32(c.NumCoveredStmts) / float32(c.NumAllStmts)
}
https://github.com/jinzhu/now/blob/8ec929ed50c3ac25ce77ba4486e1f277c552c591/main.go#L131-L133
func MustParseInLocation(loc *time.Location, strs ...string) time.Time {
    return New(time.Now().In(loc)).MustParse(strs...)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/report/report.go#L85-L106
func reportStatus(ghc GitHubClient, pj prowapi.ProwJob) error {
    refs := pj.Spec.Refs
    if pj.Spec.Report {
        contextState, err := prowjobStateToGitHubStatus(pj.Status.State)
        if err != nil {
            return err
        }
        sha := refs.BaseSHA
        if len(refs.Pulls) > 0 {
            sha = refs.Pulls[0].SHA
        }
        if err := ghc.CreateStatus(refs.Org, refs.Repo, sha, github.Status{
            State:       contextState,
            Description: truncate(pj.Status.Description),
            Context:     pj.Spec.Context, // consider truncating this too
            TargetURL:   pj.Status.URL,
        }); err != nil {
            return err
        }
    }
    return nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pkg/require/require.go#L235-L256
func oneOfEquals(sliceName string, slice interface{}, elem interface{}) (bool, error) {
    e := reflect.ValueOf(elem)
    sl := reflect.ValueOf(slice)
    if slice == nil || sl.IsNil() {
        sl = reflect.MakeSlice(reflect.SliceOf(e.Type()), 0, 0)
    }
    if sl.Kind() != reflect.Slice {
        return false, fmt.Errorf("\"%s\" must a be a slice, but instead was %s", sliceName, sl.Type().String())
    }
    if e.Type() != sl.Type().Elem() {
        return false, nil
    }
    arePtrs := e.Kind() == reflect.Ptr
    for i := 0; i < sl.Len(); i++ {
        if !arePtrs && reflect.DeepEqual(e.Interface(), sl.Index(i).Interface()) {
            return true, nil
        } else if arePtrs && reflect.DeepEqual(e.Elem().Interface(), sl.Index(i).Elem().Interface()) {
            return true, nil
        }
    }
    return false, nil
}
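A hypothetical call (mine, not from the pachyderm source; it assumes oneOfEquals above is in scope along with "fmt") showing the type-gated reflect.DeepEqual comparison:

func exampleOneOfEquals() {
    ok, err := oneOfEquals("xs", []int{1, 2, 3}, 2)
    fmt.Println(ok, err) // true <nil>

    // The element's type must exactly match the slice's element type;
    // otherwise the function reports false without an error.
    ok, err = oneOfEquals("xs", []int{1, 2, 3}, int64(2))
    fmt.Println(ok, err) // false <nil>
}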
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/schedule/schedule.go#L75-L90
func (f *fifo) Schedule(j Job) {
    f.mu.Lock()
    defer f.mu.Unlock()

    if f.cancel == nil {
        panic("schedule: schedule to stopped scheduler")
    }

    if len(f.pendings) == 0 {
        select {
        case f.resume <- struct{}{}:
        default:
        }
    }
    f.pendings = append(f.pendings, j)
}
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/json-decode.go#L168-L227
func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
    if !rv.CanAddr() {
        panic("rv not addressable")
    }
    if printLog {
        fmt.Println("(d) decodeReflectJSONInterface")
        defer func() {
            fmt.Printf("(d) -> err: %v\n", err)
        }()
    }

    /*
        We don't make use of user-provided interface values because there are
        a lot of edge cases.

        * What if the type is mismatched?
        * What if the JSON field entry is missing?
        * Circular references?
    */
    if !rv.IsNil() {
        // We don't strictly need to set it nil, but lets keep it here for a
        // while in case we forget, for defensive purposes.
        rv.Set(iinfo.ZeroValue)
    }

    // Consume type wrapper info.
    name, bz, err := decodeInterfaceJSON(bz)
    if err != nil {
        return
    }
    // XXX: Check name against interface to make sure that it actually
    // matches, and return an error if it doesn't.
    // NOTE: Unlike decodeReflectBinaryInterface, we already dealt with nil in decodeReflectJSON.
    // NOTE: We also "consumed" the interface wrapper by replacing `bz` above.

    // Get concrete type info.
    // NOTE: Unlike decodeReflectBinaryInterface, uses the full name string.
    var cinfo *TypeInfo
    cinfo, err = cdc.getTypeInfoFromName_rlock(name)
    if err != nil {
        return
    }

    // Construct the concrete type.
    var crv, irvSet = constructConcreteType(cinfo)

    // Decode into the concrete type.
    err = cdc.decodeReflectJSON(bz, cinfo, crv, fopts)
    if err != nil {
        rv.Set(irvSet) // Helps with debugging
        return
    }

    // We need to set here, for when !PointerPreferred and the type
    // is say, an array of bytes (e.g. [32]byte), then we must call
    // rv.Set() *after* the value was acquired.
    rv.Set(irvSet)
    return
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L3918-L3922
func (v EventScriptFailedToParse) MarshalJSON() ([]byte, error) {
    w := jwriter.Writer{}
    easyjsonC5a4559bEncodeGithubComChromedpCdprotoDebugger38(&w, v)
    return w.Buffer.BuildBytes(), w.Error
}
https://github.com/t3rm1n4l/nitro/blob/937fe99f63a01a8bea7661c49e2f3f8af6541d7c/nodetable/table.go#L105-L107
func (nt *NodeTable) MemoryInUse() int64 {
    return int64(approxItemSize * (nt.fastHTCount + nt.slowHTCount))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L731-L738
func (s *store) Save() ([]byte, error) {
    b, err := json.Marshal(s.Clone())
    if err != nil {
        return nil, err
    }
    return b, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/log/types.go#L112-L114
func (t *Source) UnmarshalJSON(buf []byte) error {
    return easyjson.Unmarshal(buf, t)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/storage/easyjson.go#L294-L298
func (v TrackIndexedDBForOriginParams) MarshalJSON() ([]byte, error) {
    w := jwriter.Writer{}
    easyjsonC5a4559bEncodeGithubComChromedpCdprotoStorage3(&w, v)
    return w.Buffer.BuildBytes(), w.Error
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/hashtree/db.go#L710-L712
func (h *dbHashTree) PutFileOverwrite(path string, objects []*pfs.Object, overwriteIndex *pfs.OverwriteIndex, sizeDelta int64) error {
    return h.putFile(path, objects, overwriteIndex, sizeDelta, false)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/types.go#L366-L368
func (t ResourcePriority) MarshalEasyJSON(out *jwriter.Writer) {
    out.String(string(t))
}
https://github.com/bsm/sarama-cluster/blob/d5779253526cc8a3129a0e5d7cc429f4b4473ab4/consumer.go#L812-L842
func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
    memberID, _ := c.membership()
    sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))

    // Create partitionConsumer
    pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
    if err != nil {
        return err
    }

    // Store in subscriptions
    c.subs.Store(topic, partition, pc)

    // Start partition consumer goroutine
    tomb.Go(func(stopper <-chan none) {
        if c.client.config.Group.Mode == ConsumerModePartitions {
            pc.waitFor(stopper)
        } else {
            pc.multiplex(stopper, c.messages, c.errors)
        }
    })

    if c.client.config.Group.Mode == ConsumerModePartitions {
        select {
        case c.partitions <- pc:
        case <-c.dying:
            pc.Close()
        }
    }
    return nil
}
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/callbacks.go#L133-L149
func ModelValidator() *Callback {
    return C("fire/ModelValidator", Only(Create, Update), func(ctx *Context) error {
        // check model
        m, ok := ctx.Model.(ValidatableModel)
        if !ok {
            return fmt.Errorf("model is not validatable")
        }

        // validate model
        err := m.Validate()
        if err != nil {
            return err
        }

        return nil
    })
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/container_lxc.go#L1843-L1856
func (c *containerLXC) initStorage() error {
    if c.storage != nil {
        return nil
    }

    s, err := storagePoolVolumeContainerLoadInit(c.state, c.Project(), c.Name())
    if err != nil {
        return err
    }

    c.storage = s

    return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L90-L92
func (op Op) Txn() ([]Cmp, []Op, []Op) {
    return op.cmps, op.thenOps, op.elseOps
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/images.go#L23-L62
func (c *Cluster) ImagesGet(project string, public bool) ([]string, error) {
    err := c.Transaction(func(tx *ClusterTx) error {
        enabled, err := tx.ProjectHasImages(project)
        if err != nil {
            return errors.Wrap(err, "Check if project has images")
        }
        if !enabled {
            project = "default"
        }
        return nil
    })
    if err != nil {
        return nil, err
    }

    q := `
SELECT fingerprint
  FROM images
  JOIN projects ON projects.id = images.project_id
 WHERE projects.name = ?
`
    if public == true {
        q += " AND public=1"
    }

    var fp string
    inargs := []interface{}{project}
    outfmt := []interface{}{fp}
    dbResults, err := queryScan(c.db, q, inargs, outfmt)
    if err != nil {
        return []string{}, err
    }

    results := []string{}
    for _, r := range dbResults {
        results = append(results, r[0].(string))
    }

    return results, nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/string.go#L35-L38
func String(s string, args ...interface{}) Renderer {
    e := New(Options{})
    return e.String(s, args...)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/entrypoint/options.go#L68-L74
func (o *Options) Validate() error {
    if len(o.Args) == 0 {
        return errors.New("no process to wrap specified")
    }

    return o.Options.Validate()
}
https://github.com/segmentio/objconv/blob/7a1d7b8e6f3551b30751e6b2ea6bae500883870e/objutil/duration.go#L90-L108
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
    // Omit trailing zeros up to and including decimal point.
    w := len(buf)
    print := false
    for i := 0; i < prec; i++ {
        digit := v % 10
        print = print || digit != 0
        if print {
            w--
            buf[w] = byte(digit) + '0'
        }
        v /= 10
    }
    if print {
        w--
        buf[w] = '.'
    }
    return w, v
}
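A worked example of the buffer-fills-backwards convention (my sketch, not from the objconv source; it assumes fmtFrac above is in scope along with "fmt"): formatting 1.5s from a nanosecond count with prec=9, the fraction ".5" lands in the tail of the buffer and the remaining whole seconds come back as nv.

func exampleFmtFrac() {
    var buf [16]byte
    w, sec := fmtFrac(buf[:], 1500000000, 9)    // 1.5s expressed in nanoseconds
    fmt.Printf("%q %d\n", string(buf[w:]), sec) // ".5" 1
}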
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/status.go#L409-L426
func (sc *statusController) waitSync() {
    // wait for the min sync period time to elapse if needed.
    wait := time.After(time.Until(sc.lastSyncStart.Add(sc.config().Tide.StatusUpdatePeriod)))
    for {
        select {
        case <-wait:
            sc.Lock()
            pool := sc.poolPRs
            sc.Unlock()
            sc.sync(pool)
            return
        case more := <-sc.newPoolPending:
            if !more {
                return
            }
        }
    }
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/pkg/blobinfocache/memory/memory.go#L38-L44
func New() types.BlobInfoCache {
    return &cache{
        uncompressedDigests:   map[digest.Digest]digest.Digest{},
        digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
        knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
    }
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/easyjson.go#L3914-L3918
func (v *ReplayXHRParams) UnmarshalJSON(data []byte) error {
    r := jlexer.Lexer{Data: data}
    easyjsonC5a4559bDecodeGithubComChromedpCdprotoNetwork26(&r, v)
    return r.Error()
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pkg/grpcutil/stream.go#L18-L28
func Chunk(data []byte, chunkSize int) [][]byte {
    var result [][]byte
    for i := 0; i < len(data); i += chunkSize {
        end := i + chunkSize
        if end > len(data) {
            end = len(data)
        }
        result = append(result, data[i:end])
    }
    return result
}
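A small usage sketch (mine, not from the pachyderm source; it assumes Chunk above is in scope along with "fmt"). Note that the chunks are subslices of the input, so they alias the same backing array rather than copying it.

func exampleChunk() {
    chunks := Chunk([]byte("0123456789"), 4)
    for _, c := range chunks {
        fmt.Println(string(c)) // "0123", then "4567", then "89"
    }
}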
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/performance/types.go#L48-L58
func (t *SetTimeDomainTimeDomain) UnmarshalEasyJSON(in *jlexer.Lexer) {
    switch SetTimeDomainTimeDomain(in.String()) {
    case SetTimeDomainTimeDomainTimeTicks:
        *t = SetTimeDomainTimeDomainTimeTicks
    case SetTimeDomainTimeDomainThreadTicks:
        *t = SetTimeDomainTimeDomainThreadTicks

    default:
        in.AddError(errors.New("unknown SetTimeDomainTimeDomain value"))
    }
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pfs/server/server.go#L35-L79
func NewBlockAPIServer(dir string, cacheBytes int64, backend string, etcdAddress string) (BlockAPIServer, error) {
    switch backend {
    case MinioBackendEnvVar:
        // S3 compatible doesn't like leading slashes
        if len(dir) > 0 && dir[0] == '/' {
            dir = dir[1:]
        }
        blockAPIServer, err := newMinioBlockAPIServer(dir, cacheBytes, etcdAddress)
        if err != nil {
            return nil, err
        }
        return blockAPIServer, nil
    case AmazonBackendEnvVar:
        // amazon doesn't like leading slashes
        if len(dir) > 0 && dir[0] == '/' {
            dir = dir[1:]
        }
        blockAPIServer, err := newAmazonBlockAPIServer(dir, cacheBytes, etcdAddress)
        if err != nil {
            return nil, err
        }
        return blockAPIServer, nil
    case GoogleBackendEnvVar:
        // TODO figure out if google likes leading slashses
        blockAPIServer, err := newGoogleBlockAPIServer(dir, cacheBytes, etcdAddress)
        if err != nil {
            return nil, err
        }
        return blockAPIServer, nil
    case MicrosoftBackendEnvVar:
        blockAPIServer, err := newMicrosoftBlockAPIServer(dir, cacheBytes, etcdAddress)
        if err != nil {
            return nil, err
        }
        return blockAPIServer, nil
    case LocalBackendEnvVar:
        fallthrough
    default:
        blockAPIServer, err := newLocalBlockAPIServer(dir, cacheBytes, etcdAddress)
        if err != nil {
            return nil, err
        }
        return blockAPIServer, nil
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L88-L104
func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
    if l.matchTerm(index, logTerm) {
        lastnewi = index + uint64(len(ents))
        ci := l.findConflict(ents)
        switch {
        case ci == 0:
        case ci <= l.committed:
            l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
        default:
            offset := index + 1
            l.append(ents[ci-offset:]...)
        }
        l.commitTo(min(committed, lastnewi))
        return lastnewi, true
    }
    return 0, false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L724-L788
func (s *EtcdServer) start() {
	lg := s.getLogger()

	if s.Cfg.SnapshotCount == 0 {
		if lg != nil {
			lg.Info(
				"updating snapshot-count to default",
				zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
				zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
			)
		} else {
			plog.Infof("set snapshot count to default %d", DefaultSnapshotCount)
		}
		s.Cfg.SnapshotCount = DefaultSnapshotCount
	}
	if s.Cfg.SnapshotCatchUpEntries == 0 {
		if lg != nil {
			lg.Info(
				"updating snapshot catch-up entries to default",
				zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
				zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
			)
		}
		s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
	}

	s.w = wait.New()
	s.applyWait = wait.NewTimeList()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stopping = make(chan struct{})
	s.ctx, s.cancel = context.WithCancel(context.Background())
	s.readwaitc = make(chan struct{}, 1)
	s.readNotifier = newNotifier()
	s.leaderChanged = make(chan struct{})
	if s.ClusterVersion() != nil {
		if lg != nil {
			lg.Info(
				"starting etcd server",
				zap.String("local-member-id", s.ID().String()),
				zap.String("local-server-version", version.Version),
				zap.String("cluster-id", s.Cluster().ID().String()),
				zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
			)
		} else {
			plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
		}
		membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1)
	} else {
		if lg != nil {
			lg.Info(
				"starting etcd server",
				zap.String("local-member-id", s.ID().String()),
				zap.String("local-server-version", version.Version),
				zap.String("cluster-version", "to_be_decided"),
			)
		} else {
			plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
		}
	}

	// TODO: if this is an empty log, writes all peer infos
	// into the first entry
	go s.run()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L3238-L3242
func (v EventInspectRequested) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime29(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/cluster/notify.go#L33-L112
func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolicy) (Notifier, error) {
	address, err := node.ClusterAddress(state.Node)
	if err != nil {
		return nil, errors.Wrap(err, "failed to fetch node address")
	}

	// Fast-track the case where we're not clustered at all.
	if address == "" {
		nullNotifier := func(func(lxd.ContainerServer) error) error { return nil }
		return nullNotifier, nil
	}

	peers := []string{}
	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		offlineThreshold, err := tx.NodeOfflineThreshold()
		if err != nil {
			return err
		}

		nodes, err := tx.Nodes()
		if err != nil {
			return err
		}

		for _, node := range nodes {
			if node.Address == address || node.Address == "0.0.0.0" {
				continue // Exclude ourselves
			}

			if node.IsOffline(offlineThreshold) {
				switch policy {
				case NotifyAll:
					return fmt.Errorf("peer node %s is down", node.Address)
				case NotifyAlive:
					continue // Just skip this node
				}
			}

			peers = append(peers, node.Address)
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	notifier := func(hook func(lxd.ContainerServer) error) error {
		errs := make([]error, len(peers))
		wg := sync.WaitGroup{}
		wg.Add(len(peers))
		for i, address := range peers {
			logger.Debugf("Notify node %s of state changes", address)
			go func(i int, address string) {
				defer wg.Done()

				client, err := Connect(address, cert, true)
				if err != nil {
					errs[i] = errors.Wrapf(err, "failed to connect to peer %s", address)
					return
				}

				err = hook(client)
				if err != nil {
					errs[i] = errors.Wrapf(err, "failed to notify peer %s", address)
				}
			}(i, address)
		}
		wg.Wait()

		// TODO: aggregate all errors?
		for i, err := range errs {
			if err != nil {
				// FIXME: unfortunately the LXD client currently does not
				// provide a way to differentiate between errors
				if isClientConnectionError(err) && policy == NotifyAlive {
					logger.Warnf("Could not notify node %s", peers[i])
					continue
				}
				return err
			}
		}

		return nil
	}

	return notifier, nil
}
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/peer.go#L124-L141
func (l *PeerList) GetNew(prevSelected map[string]struct{}) (*Peer, error) {
	l.Lock()
	defer l.Unlock()

	if l.peerHeap.Len() == 0 {
		return nil, ErrNoPeers
	}

	// Select a peer, avoiding previously selected peers. If all peers have been previously
	// selected, then it's OK to repick them.
	peer := l.choosePeer(prevSelected, true /* avoidHost */)
	if peer == nil {
		peer = l.choosePeer(prevSelected, false /* avoidHost */)
	}
	if peer == nil {
		return nil, ErrNoNewPeers
	}
	return peer, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/page.go#L332-L341
func (p *GetInstallabilityErrorsParams) Do(ctx context.Context) (errors []string, err error) {
	// execute
	var res GetInstallabilityErrorsReturns
	err = cdp.Execute(ctx, CommandGetInstallabilityErrors, nil, &res)
	if err != nil {
		return nil, err
	}

	return res.Errors, nil
}
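A usage sketch for a Do-style cdproto command like this one, driven through the chromedp client (assumes a local Chrome that chromedp can launch; the signature returning []string matches the pinned commit above, not necessarily newer cdproto versions):

package main

import (
	"context"
	"log"

	"github.com/chromedp/cdproto/page"
	"github.com/chromedp/chromedp"
)

func main() {
	ctx, cancel := chromedp.NewContext(context.Background())
	defer cancel()

	var installErrs []string
	err := chromedp.Run(ctx,
		chromedp.Navigate("https://example.com"),
		// Commands with return values are typically invoked via ActionFunc
		// so the Do result can be captured.
		chromedp.ActionFunc(func(ctx context.Context) error {
			var err error
			installErrs, err = page.GetInstallabilityErrors().Do(ctx)
			return err
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("installability errors: %v", installErrs)
}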
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_zfs_utils.go#L30-L39
func zfsToolVersionGet() (string, error) {
	// This function is only really ever relevant on Ubuntu, as the only
	// distro that ships out-of-sync tools and kernel modules.
	out, err := shared.RunCommand("dpkg-query", "--showformat=${Version}", "--show", "zfsutils-linux")
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(string(out)), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/repair.go#L134-L141
func openLast(lg *zap.Logger, dirpath string) (*fileutil.LockedFile, error) {
	names, err := readWALNames(lg, dirpath)
	if err != nil {
		return nil, err
	}
	last := filepath.Join(dirpath, names[len(names)-1])
	return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
}
https://github.com/go-bongo/bongo/blob/761759e31d8fed917377aa7085db01d218ce50d8/collection.go#L93-L95
func (c *Collection) collectionOnSession(sess *mgo.Session) *mgo.Collection {
	return sess.DB(c.Database).C(c.Name)
}
https://github.com/enaml-ops/enaml/blob/4f847ee10b41afca41fe09fa839cb2f6ade06fb5/cloudconfig_manifest_primatives.go#L17-L21
func NewCloudConfigManifest(b []byte) *CloudConfigManifest {
	cm := new(CloudConfigManifest)
	// Note: the unmarshal error is discarded, so invalid YAML yields a
	// zero-value manifest rather than a failure.
	yaml.Unmarshal(b, cm)
	return cm
}
https://github.com/op/go-logging/blob/970db520ece77730c7e4724c61121037378659d9/log_nix.go#L80-L91
func ConvertColors(colors []int, bold bool) []string {
	converted := []string{}
	for _, i := range colors {
		if bold {
			converted = append(converted, ColorSeqBold(color(i)))
		} else {
			converted = append(converted, ColorSeq(color(i)))
		}
	}

	return converted
}
https://github.com/TheThingsNetwork/go-utils/blob/aa2a11bd59104d2a8609328c2b2b55da61826470/influx/writer.go#L73-L76
func (p *batchPoint) pushError(err error) {
	p.errch <- err
	close(p.errch)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/dom.go#L1006-L1008
func (p *RedoParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandRedo, nil, nil)
}
https://github.com/sclevine/agouti/blob/96599c91888f1b1cf2dccc7f1776ba7f511909e5/agouti.go#L101-L106
func SauceLabs(name, platform, browser, version, username, accessKey string, options ...Option) (*Page, error) {
	url := fmt.Sprintf("http://%s:%s@ondemand.saucelabs.com/wd/hub", username, accessKey)
	capabilities := NewCapabilities().Browser(browser).Platform(platform).Version(version)
	capabilities["name"] = name
	return NewPage(url, append([]Option{Desired(capabilities)}, options...)...)
}
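A minimal usage sketch, assuming a valid Sauce Labs account (the username and access key below are hypothetical placeholders):

package main

import (
	"log"

	"github.com/sclevine/agouti"
)

func main() {
	// Placeholder credentials; substitute real Sauce Labs account values.
	page, err := agouti.SauceLabs("smoke test", "Linux", "chrome", "latest", "my-user", "my-access-key")
	if err != nil {
		log.Fatal(err)
	}
	defer page.Destroy()

	if err := page.Navigate("https://example.com"); err != nil {
		log.Fatal(err)
	}
}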
https://github.com/go-bongo/bongo/blob/761759e31d8fed917377aa7085db01d218ce50d8/utils.go#L10-L15
func lowerInitial(str string) string {
	// Lowercase only the first rune; the loop returns on its first
	// iteration. utf8.RuneLen handles multi-byte initials, which the
	// original `str[i+1:]` slice would have split mid-rune.
	for _, v := range str {
		return string(unicode.ToLower(v)) + str[utf8.RuneLen(v):]
	}
	return ""
}
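A quick standalone check of the behavior (helper copied inline, with the multi-byte fix applied above, so it runs as-is):

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

func lowerInitial(str string) string {
	for _, v := range str {
		return string(unicode.ToLower(v)) + str[utf8.RuneLen(v):]
	}
	return ""
}

func main() {
	fmt.Println(lowerInitial("CamelCase")) // camelCase
	fmt.Println(lowerInitial("Übung"))     // übung (multi-byte initial)
	fmt.Println(lowerInitial(""))          // empty in, empty out
}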
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L1447-L1451
func (v PropertyPreview) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime14(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/nicholasjackson/bench/blob/2df9635f0ad020b2e82616b0fd87130aaa1ee12e/output/tablewriter.go#L11-L21
func WriteTabularData(interval time.Duration, r results.ResultSet, w io.Writer) {
	set := r.Reduce(interval)
	t := results.TabularResults{}
	rows := t.Tabulate(set)
	for _, row := range rows {
		w.Write([]byte(row.String()))
		w.Write([]byte("\n"))
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1006-L1027
func (m *member) Close() {
	if m.grpcBridge != nil {
		m.grpcBridge.Close()
		m.grpcBridge = nil
	}
	if m.serverClient != nil {
		m.serverClient.Close()
		m.serverClient = nil
	}
	if m.grpcServer != nil {
		m.grpcServer.Stop()
		m.grpcServer.GracefulStop()
		m.grpcServer = nil
		m.grpcServerPeer.Stop()
		m.grpcServerPeer.GracefulStop()
		m.grpcServerPeer = nil
	}
	m.s.HardStop()
	for _, f := range m.serverClosers {
		f()
	}
}
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/json-encode.go#L22-L128
func (cdc *Codec) encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
	if !rv.IsValid() {
		panic("should not happen")
	}
	if printLog {
		spew.Printf("(E) encodeReflectJSON(info: %v, rv: %#v (%v), fopts: %v)\n",
			info, rv.Interface(), rv.Type(), fopts)
		defer func() {
			fmt.Printf("(E) -> err: %v\n", err)
		}()
	}

	// Dereference value if pointer.
	var isNilPtr bool
	rv, _, isNilPtr = derefPointers(rv)

	// Write null if necessary.
	if isNilPtr {
		err = writeStr(w, `null`)
		return
	}

	// Special case:
	if rv.Type() == timeType {
		// Amino time strips the timezone.
		// NOTE: This must be done before the json.Marshaler override below.
		ct := rv.Interface().(time.Time).Round(0).UTC()
		rv = reflect.ValueOf(ct)
	}

	// Handle override if rv implements json.Marshaler.
	if rv.CanAddr() { // Try pointer first.
		if rv.Addr().Type().Implements(jsonMarshalerType) {
			err = invokeMarshalJSON(w, rv.Addr())
			return
		}
	} else if rv.Type().Implements(jsonMarshalerType) {
		err = invokeMarshalJSON(w, rv)
		return
	}

	// Handle override if rv implements amino.Marshaler.
	if info.IsAminoMarshaler {
		// First, encode rv into repr instance.
		var rrv, rinfo = reflect.Value{}, (*TypeInfo)(nil)
		rrv, err = toReprObject(rv)
		if err != nil {
			return
		}
		rinfo, err = cdc.getTypeInfo_wlock(info.AminoMarshalReprType)
		if err != nil {
			return
		}
		// Then, encode the repr instance.
		err = cdc.encodeReflectJSON(w, rinfo, rrv, fopts)
		return
	}

	switch info.Type.Kind() {

	//----------------------------------------
	// Complex

	case reflect.Interface:
		return cdc.encodeReflectJSONInterface(w, info, rv, fopts)

	case reflect.Array, reflect.Slice:
		return cdc.encodeReflectJSONList(w, info, rv, fopts)

	case reflect.Struct:
		return cdc.encodeReflectJSONStruct(w, info, rv, fopts)

	case reflect.Map:
		return cdc.encodeReflectJSONMap(w, info, rv, fopts)

	//----------------------------------------
	// Signed, Unsigned

	case reflect.Int64, reflect.Int:
		_, err = fmt.Fprintf(w, `"%d"`, rv.Int()) // JS can't handle int64
		return

	case reflect.Uint64, reflect.Uint:
		_, err = fmt.Fprintf(w, `"%d"`, rv.Uint()) // JS can't handle uint64
		return

	case reflect.Int32, reflect.Int16, reflect.Int8,
		reflect.Uint32, reflect.Uint16, reflect.Uint8:
		return invokeStdlibJSONMarshal(w, rv.Interface())

	//----------------------------------------
	// Misc

	case reflect.Float64, reflect.Float32:
		if !fopts.Unsafe {
			return errors.New("Amino.JSON float* support requires `amino:\"unsafe\"`.")
		}
		fallthrough
	case reflect.Bool, reflect.String:
		return invokeStdlibJSONMarshal(w, rv.Interface())

	//----------------------------------------
	// Default

	default:
		panic(fmt.Sprintf("unsupported type %v", info.Type.Kind()))
	}
}
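To see the int64-as-string behavior from the switch above through the public codec API, a small sketch (the struct and field names are hypothetical; the expected output follows from the quoting logic shown above):

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// Point is a hypothetical example type; int64 fields exercise the
// string-quoted integer path in encodeReflectJSON.
type Point struct {
	X int64 `json:"x"`
	Y int64 `json:"y"`
}

func main() {
	cdc := amino.NewCodec()
	bz, err := cdc.MarshalJSON(Point{X: 3, Y: 4})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // expected: {"x":"3","y":"4"}
}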