https://github.com/taskcluster/taskcluster-client-go/blob/ef6acd428ae5844a933792ed6479d0e7dca61ef8/time.go#L30-L36
func (t *Time) UnmarshalJSON(data []byte) (err error) {
	// Fractional seconds are handled implicitly by Parse.
	x := new(time.Time)
	*x, err = time.Parse(`"`+time.RFC3339+`"`, string(data))
	*t = Time(*x)
	return
}
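A minimal usage sketch (not from the source; it assumes encoding/json and the Time type above are in scope): because the method is defined on *Time, json.Unmarshal invokes it for a quoted RFC 3339 timestamp.

// Hypothetical caller for the UnmarshalJSON method above.
var ts Time
if err := json.Unmarshal([]byte(`"2019-01-02T15:04:05Z"`), &ts); err != nil {
	// handle parse error
}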
https://github.com/apparentlymart/go-rundeck-api/blob/2c962acae81080a937c350a5bea054c239f27a81/rundeck/client.go#L50-L72
func NewClient(config *ClientConfig) (*Client, error) {
	t := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: config.AllowUnverifiedSSL,
		},
	}
	httpClient := &http.Client{
		Transport: t,
	}
	apiPath, _ := url.Parse("api/13/")
	baseURL, err := url.Parse(config.BaseURL)
	if err != nil {
		return nil, fmt.Errorf("invalid base URL: %s", err.Error())
	}
	apiURL := baseURL.ResolveReference(apiPath)
	return &Client{
		httpClient: httpClient,
		apiURL:     apiURL,
		authToken:  config.AuthToken,
	}, nil
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/oci/archive/oci_dest.go#L54-L56
func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error {
	return d.unpackedDest.SupportsSignatures(ctx)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pps.go#L423-L433
func (l *LogsIter) Next() bool {
	if l.err != nil {
		l.msg = nil
		return false
	}
	l.msg, l.err = l.logsClient.Recv()
	if l.err != nil {
		return false
	}
	return true
}
https://github.com/naoina/genmai/blob/78583835e1e41e3938e1ddfffd7101f8ad27fae0/genmai.go#L1139-L1141
func (c *Condition) And(cond interface{}, args ...interface{}) *Condition {
	return c.appendQueryByCondOrExpr("And", 100, And, cond, args...)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/node.go#L29-L31
func (n NodeInfo) IsOffline(threshold time.Duration) bool {
	return nodeIsOffline(threshold, n.Heartbeat)
}
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/taskqueue/taskqueue.go#L449-L458
func Purge(c context.Context, queueName string) error {
	if queueName == "" {
		queueName = "default"
	}
	req := &pb.TaskQueuePurgeQueueRequest{
		QueueName: []byte(queueName),
	}
	res := &pb.TaskQueuePurgeQueueResponse{}
	return internal.Call(c, "taskqueue", "PurgeQueue", req, res)
}
https://github.com/go-opencv/go-opencv/blob/a4fe8ec027ccc9eb8b7d0797db7c76e61083f1db/opencv/cxcore.go#L814-L816
func (seq *Seq) Push(element unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(C.cvSeqPush((*C.struct_CvSeq)(seq), element))
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/printer.go#L10-L16
func PromptConfirmation(format string, a ...interface{}) string {
	m := fmt.Sprintf(format, a...)
	fmt.Fprintf(out, m)
	var yn string
	fmt.Fscanf(in, "%s", &yn)
	return yn
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/dom.go#L265-L268
func (p FocusParams) WithBackendNodeID(backendNodeID cdp.BackendNodeID) *FocusParams {
	p.BackendNodeID = backendNodeID
	return &p
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/image/manifest.go#L51-L64
func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
	switch manifest.NormalizedMIMEType(mt) {
	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
		return manifestSchema1FromManifest(manblob)
	case imgspecv1.MediaTypeImageManifest:
		return manifestOCI1FromManifest(src, manblob)
	case manifest.DockerV2Schema2MediaType:
		return manifestSchema2FromManifest(src, manblob)
	case manifest.DockerV2ListMediaType:
		return manifestSchema2FromManifestList(ctx, sys, src, manblob)
	default:
		// Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
	}
}
https://github.com/golang/debug/blob/19561fee47cf8cd0400d1b094c5898002f97cf90/internal/gocore/object.go#L212-L227
func (p *Process) ForEachRoot(fn func(r *Root) bool) {
	for _, r := range p.globals {
		if !fn(r) {
			return
		}
	}
	for _, g := range p.goroutines {
		for _, f := range g.frames {
			for _, r := range f.roots {
				if !fn(r) {
					return
				}
			}
		}
	}
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L4887-L4891
func (v *ContinueToLocationParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDebugger47(&r, v)
	return r.Error()
}
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/parser/kolonish/kolonish.go#L66-L70
func (p *Kolonish) ParseReader(name string, rdr io.Reader) (*parser.AST, error) {
	b := parser.NewBuilder()
	lex := NewReaderLexer(rdr)
	return b.Parse(name, lex)
}
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/typed/buffer.go#L423-L427
func (ref BytesRef) Update(b []byte) {
	if ref != nil {
		copy(ref, b)
	}
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/input/types.go#L412-L426
func (t *TouchType) UnmarshalEasyJSON(in *jlexer.Lexer) {
	switch TouchType(in.String()) {
	case TouchStart:
		*t = TouchStart
	case TouchEnd:
		*t = TouchEnd
	case TouchMove:
		*t = TouchMove
	case TouchCancel:
		*t = TouchCancel
	default:
		in.AddError(errors.New("unknown TouchType value"))
	}
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/ppsutil/util.go#L208-L234
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
	// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
	branchToCommit := make(map[string]*pfs.Commit)
	key := path.Join
	for _, prov := range outputCommitInfo.Provenance {
		branchToCommit[key(prov.Commit.Repo.Name, prov.Branch.Name)] = prov.Commit
	}
	jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
	pps.VisitInput(jobInput, func(input *pps.Input) {
		if input.Pfs != nil {
			if commit, ok := branchToCommit[key(input.Pfs.Repo, input.Pfs.Branch)]; ok {
				input.Pfs.Commit = commit.ID
			}
		}
		if input.Cron != nil {
			if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
				input.Cron.Commit = commit.ID
			}
		}
		if input.Git != nil {
			if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
				input.Git.Commit = commit.ID
			}
		}
	})
	return jobInput
}
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/xslate.go#L336-L343
func (tx *Xslate) RenderInto(w io.Writer, template string, vars Vars) error {
	bc, err := tx.Loader.Load(template)
	if err != nil {
		return err
	}
	tx.VM.Run(bc, vm.Vars(vars), w)
	return nil
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/docker_client.go#L160-L196
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
	if sys != nil && sys.DockerCertPath != "" {
		return sys.DockerCertPath, nil
	}
	if sys != nil && sys.DockerPerHostCertDirPath != "" {
		return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
	}

	var (
		hostCertDir     string
		fullCertDirPath string
	)
	for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
			hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
		} else {
			hostCertDir = systemPerHostCertDirPath
		}
		fullCertDirPath = filepath.Join(hostCertDir, hostPort)

		_, err := os.Stat(fullCertDirPath)
		if err == nil {
			break
		}
		if os.IsNotExist(err) {
			continue
		}
		if os.IsPermission(err) {
			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
			continue
		}
		if err != nil {
			return "", err
		}
	}
	return fullCertDirPath, nil
}
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/datastore/key.go#L265-L267
func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
	return NewKey(c, kind, "", 0, parent)
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/manifest/docker_schema1.go#L155-L162
func (m *Schema1) Serialize() ([]byte, error) {
	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
	unsigned, err := json.Marshal(*m)
	if err != nil {
		return nil, err
	}
	return AddDummyV2S1Signature(unsigned)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/urlsmap.go#L58-L67
func (c URLsMap) String() string {
	var pairs []string
	for name, urls := range c {
		for _, url := range urls {
			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
		}
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm16/codegen_client.go#L1336-L1338
func (api *API) NetworkInterfaceLocator(href string) *NetworkInterfaceLocator {
	return &NetworkInterfaceLocator{Href(href), api}
}
https://github.com/bluekeyes/hatpear/blob/ffb42d5bb417aa8e12b3b7ff73d028b915dafa10/hatpear.go#L26-L35
func Store(r *http.Request, err error) {
	errptr, ok := r.Context().Value(errorKey).(*error)
	if !ok {
		panic("hatpear: request not configured to store errors")
	}
	// check err after checking context to fail fast if unconfigured
	if err != nil {
		*errptr = err
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/maintenance/aws-janitor/resources/nat_gateway.go#L64-L85
func (NATGateway) ListAll(sess *session.Session, acct, region string) (*Set, error) {
	svc := ec2.New(sess, &aws.Config{Region: aws.String(region)})
	set := NewSet(0)
	inp := &ec2.DescribeNatGatewaysInput{}

	err := svc.DescribeNatGatewaysPages(inp, func(page *ec2.DescribeNatGatewaysOutput, _ bool) bool {
		for _, gw := range page.NatGateways {
			now := time.Now()
			arn := natGateway{
				Account: acct,
				Region:  region,
				ID:      *gw.NatGatewayId,
			}.ARN()
			set.firstSeen[arn] = now
		}
		return true
	})

	return set, errors.Wrapf(err, "couldn't describe nat gateways for %q in %q", acct, region)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/indexeddb/types.go#L175-L177
func (t *KeyPathType) UnmarshalJSON(buf []byte) error {
	return easyjson.Unmarshal(buf, t)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/heapprofiler/easyjson.go#L1860-L1864
func (v *CollectGarbageParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoHeapprofiler22(&r, v)
	return r.Error()
}
https://github.com/opentracing/opentracing-go/blob/659c90643e714681897ec2521c60567dd21da733/mocktracer/mockspan.go#L170-L174
func (s *MockSpan) BaggageItem(key string) string {
	s.RLock()
	defer s.RUnlock()
	return s.SpanContext.Baggage[key]
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/level_handler.go#L78-L101
func (s *levelHandler) deleteTables(toDel []*table.Table) error {
	s.Lock() // s.Unlock() below

	toDelMap := make(map[uint64]struct{})
	for _, t := range toDel {
		toDelMap[t.ID()] = struct{}{}
	}

	// Make a copy as iterators might be keeping a slice of tables.
	var newTables []*table.Table
	for _, t := range s.tables {
		_, found := toDelMap[t.ID()]
		if !found {
			newTables = append(newTables, t)
			continue
		}
		s.totalSize -= t.Size()
	}
	s.tables = newTables

	s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.

	return decrRefs(toDel)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/workload/workload.go#L14-L35
func RunWorkload(
	client *client.APIClient,
	rand *rand.Rand,
	size int,
) error {
	worker := newWorker(rand)
	for i := 0; i < size; i++ {
		if err := worker.work(client); err != nil {
			return err
		}
	}
	for _, job := range worker.startedJobs {
		jobInfo, err := client.InspectJob(job.ID, true)
		if err != nil {
			return err
		}
		if jobInfo.State != pps.JobState_JOB_SUCCESS {
			return fmt.Errorf("job %s failed", job.ID)
		}
	}
	return nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/memory/easyjson.go#L223-L227
func (v *SimulatePressureNotificationParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoMemory2(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/memory/memory.go#L157-L159
func (p *StartSamplingParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandStartSampling, p, nil)
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pkg/tracing/tracing.go#L161-L164
func UnaryClientInterceptor() grpc.UnaryClientInterceptor {
	return otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer(),
		otgrpc.IncludingSpans(addTraceIfTracingEnabled))
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_zfs.go#L826-L830
func (s *storageZfs) ContainerStorageReady(container container) bool {
	volumeName := projectPrefix(container.Project(), container.Name())
	fs := fmt.Sprintf("containers/%s", volumeName)
	return zfsFilesystemEntityExists(s.getOnDiskPoolName(), fs)
}
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/introspection.go#L244-L265
func (ch *Channel) IntrospectOthers(opts *IntrospectionOptions) map[string][]ChannelInfo {
	if !opts.IncludeOtherChannels {
		return nil
	}

	channelMap.Lock()
	defer channelMap.Unlock()

	states := make(map[string][]ChannelInfo)
	for svc, channels := range channelMap.existing {
		channelInfos := make([]ChannelInfo, 0, len(channels))
		for _, otherChan := range channels {
			if ch == otherChan {
				continue
			}
			channelInfos = append(channelInfos, otherChan.ReportInfo(opts))
		}
		states[svc] = channelInfos
	}

	return states
}
https://github.com/stefantalpalaru/pool/blob/df8b849d27751462526089005979b28064c1e08e/pool.go#L293-L301
func (pool *Pool) Status() stats {
	stats_pipe := make(chan stats)
	if pool.supervisor_started {
		pool.stats_wanted_pipe <- stats_pipe
		return <-stats_pipe
	}
	// the supervisor wasn't started so we return a zeroed structure
	return stats{}
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dpdf/gc.go#L159-L169
func (gc *GraphicContext) CreateStringPath(text string, x, y float64) (cursor float64) {
	//fpdf uses the top left corner
	left, top, right, bottom := gc.GetStringBounds(text)
	w := right - left
	h := bottom - top
	// gc.pdf.SetXY(x, y-h) do not use this as y-h might be negative
	margin := gc.pdf.GetCellMargin()
	gc.pdf.MoveTo(x-left-margin, y+top)
	gc.pdf.CellFormat(w, h, text, "", 0, "BL", false, 0, "")
	return w
}
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/exp/cpp/export/export.go#L571-L586
func FramesToFrameRange(frames *C.int, num C.size_t, sorted bool, zfill int) *C.char {
	// caller must free string
	if num == C.size_t(0) {
		return C.CString("")
	}
	n := int(num)
	slice := (*[1 << 30]C.int)(unsafe.Pointer(frames))[:n:n]
	ints := make([]int, n)
	for i := 0; i < n; i++ {
		ints[i] = int(slice[i])
	}
	return C.CString(fileseq.FramesToFrameRange(ints, sorted, zfill))
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_lvm.go#L423-L474
func (s *storageLvm) StoragePoolMount() (bool, error) {
	source := s.pool.Config["source"]
	if source == "" {
		return false, fmt.Errorf("no \"source\" property found for the storage pool")
	}

	if !filepath.IsAbs(source) {
		return true, nil
	}

	poolMountLockID := getPoolMountLockID(s.pool.Name)
	lxdStorageMapLock.Lock()
	if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
		lxdStorageMapLock.Unlock()
		if _, ok := <-waitChannel; ok {
			logger.Warnf("Received value over semaphore, this should not have happened")
		}
		// Give the benefit of the doubt and assume that the other
		// thread actually succeeded in mounting the storage pool.
		return false, nil
	}

	lxdStorageOngoingOperationMap[poolMountLockID] = make(chan bool)
	lxdStorageMapLock.Unlock()

	removeLockFromMap := func() {
		lxdStorageMapLock.Lock()
		if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
			close(waitChannel)
			delete(lxdStorageOngoingOperationMap, poolMountLockID)
		}
		lxdStorageMapLock.Unlock()
	}
	defer removeLockFromMap()

	if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
		// Try to prepare new loop device.
		loopF, loopErr := prepareLoopDev(source, 0)
		if loopErr != nil {
			return false, loopErr
		}
		// Make sure that LO_FLAGS_AUTOCLEAR is unset.
		loopErr = unsetAutoclearOnLoopDev(int(loopF.Fd()))
		if loopErr != nil {
			return false, loopErr
		}
		s.loopInfo = loopF
	}

	return true, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/serviceworker/easyjson.go#L788-L792
func (v *Registration) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoServiceworker8(&r, v)
	return r.Error()
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/deploy.go#L151-L174
func diffDeploy(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	writer := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, "")
	defer writer.Stop()
	fmt.Fprint(w, "Saving the difference between the old and new code\n")
	appName := r.URL.Query().Get(":appname")
	diff := InputValue(r, "customdata")
	instance, err := app.GetByName(appName)
	if err != nil {
		return &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	if t.GetAppName() != app.InternalAppName {
		canDiffDeploy := permission.Check(t, permission.PermAppReadDeploy, contextsForApp(instance)...)
		if !canDiffDeploy {
			return &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}
		}
	}
	evt, err := event.GetRunning(appTarget(appName), permission.PermAppDeploy.FullName())
	if err != nil {
		return err
	}
	return evt.SetOtherCustomData(map[string]string{
		"diff": diff,
	})
}
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/local_peer.go#L114-L118
func (peer *localPeer) doConnectionEstablished(conn ourConnection) {
	peer.actionChan <- func() {
		peer.handleConnectionEstablished(conn)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/strings.go#L38-L47
func NewStringsValue(s string) (ss *StringsValue) {
	if s == "" {
		return &StringsValue{}
	}
	ss = new(StringsValue)
	if err := ss.Set(s); err != nil {
		plog.Panicf("new StringsValue should never fail: %v", err)
	}
	return ss
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/tcp_transport.go#L100-L102
func (t *TCPStreamLayer) Accept() (c net.Conn, err error) {
	return t.listener.Accept()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/css.go#L762-L771
func (p *StopRuleUsageTrackingParams) Do(ctx context.Context) (ruleUsage []*RuleUsage, err error) {
	// execute
	var res StopRuleUsageTrackingReturns
	err = cdp.Execute(ctx, CommandStopRuleUsageTracking, nil, &res)
	if err != nil {
		return nil, err
	}

	return res.RuleUsage, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L963-L967
func (v *SetFontFamiliesParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage10(&r, v)
	return r.Error()
}
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/label/label.go#L146-L151
func (l Label) Abs(repo, pkg string) Label {
	if !l.Relative {
		return l
	}
	return Label{Repo: repo, Pkg: pkg, Name: l.Name}
}
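A minimal usage sketch with hypothetical values (not from the source): a relative label keeps only its name and picks up the repository and package it is resolved against.

// Hypothetical caller; field names come from the Abs method above.
rel := Label{Relative: true, Name: "go_default_library"}
abs := rel.Abs("my_repo", "pkg/foo")
// abs is Label{Repo: "my_repo", Pkg: "pkg/foo", Name: "go_default_library"}
_ = abs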
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L703-L707
func (v RunScriptParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime6(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/memcache/memcache.go#L152-L154
func Delete(c context.Context, key string) error {
	return singleError(DeleteMulti(c, []string{key}))
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/images.go#L709-L713
func (c *Cluster) ImageLastAccessInit(fingerprint string) error {
	stmt := `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`
	err := exec(c.db, stmt, fingerprint)
	return err
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/ppsutil/util.go#L161-L168
func GetExpectedNumHashtrees(spec *ppsclient.HashtreeSpec) (int64, error) {
	if spec == nil || spec.Constant == 0 {
		return 1, nil
	} else if spec.Constant > 0 {
		return int64(spec.Constant), nil
	}
	return 0, fmt.Errorf("unable to interpret HashtreeSpec %+v", spec)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/shared/termios/termios_unix.go#L28-L40
func GetState(fd int) (*State, error) {
	termios := syscall.Termios{}

	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(&termios)))
	if ret != 0 {
		return nil, err.(syscall.Errno)
	}

	state := State{}
	state.Termios = termios

	return &state, nil
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/pkg/blobinfocache/boltdb/boltdb.go#L102-L125
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
	// a read lock, blocking any future writes.
	// Hence this preliminary check, which is RACY: Another process could remove the file
	// between the Lstat call and opening the database.
	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
		return err
	}

	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer func() {
		if err := db.Close(); retErr == nil && err != nil {
			retErr = err
		}
	}()

	return db.View(fn)
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/target/easyjson.go#L1651-L1655
func (v *EventDetachedFromTarget) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoTarget18(&r, v)
	return r.Error()
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/policyconfiguration/naming.go#L13-L30
func DockerReferenceIdentity(ref reference.Named) (string, error) {
	res := ref.Name()
	tagged, isTagged := ref.(reference.NamedTagged)
	digested, isDigested := ref.(reference.Canonical)
	switch {
	case isTagged && isDigested:
		// Note that this CAN actually happen.
		return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
	case !isTagged && !isDigested:
		// This should not happen, the caller is expected to ensure !reference.IsNameOnly()
		return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
	case isTagged:
		res = res + ":" + tagged.Tag()
	case isDigested:
		res = res + "@" + digested.Digest().String()
	default:
		// Coverage: The above was supposed to be exhaustive.
		return "", errors.New("Internal inconsistency, unexpected default branch")
	}
	return res, nil
}
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/cmdutil/env.go#L27-L35
func Main(do func(interface{}) error, appEnv interface{}, decoders ...Decoder) {
	if err := Populate(appEnv, decoders...); err != nil {
		mainError(err)
	}
	if err := do(appEnv); err != nil {
		mainError(err)
	}
	os.Exit(0)
}
https://github.com/alexflint/go-arg/blob/fb1ae1c3e0bd00d45333c3d51384afc05846f7a0/parse.go#L59-L65
func Parse(dest ...interface{}) error {
	p, err := NewParser(Config{}, dest...)
	if err != nil {
		return err
	}
	return p.Parse(flags())
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/hook/server.go#L210-L229
func (s *Server) dispatch(endpoint string, payload []byte, h http.Header) error {
	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	req.Header = h
	resp, err := s.do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("response has status %q and body %q", resp.Status, string(rb))
	}
	return nil
}
https://github.com/iron-io/functions_go/blob/91b84f5bbb17095bf1c7028ec6e70a3dc06a5893/client/apps/apps_client.go#L122-L145
func (a *Client) PatchAppsApp(params *PatchAppsAppParams) (*PatchAppsAppOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewPatchAppsAppParams()
	}

	result, err := a.transport.Submit(&runtime.ClientOperation{
		ID:                 "PatchAppsApp",
		Method:             "PATCH",
		PathPattern:        "/apps/{app}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http", "https"},
		Params:             params,
		Reader:             &PatchAppsAppReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	})
	if err != nil {
		return nil, err
	}
	return result.(*PatchAppsAppOK), nil
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/writers/helpers.go#L206-L222
func flagType(param *gen.ActionParam) string {
	path := param.QueryName
	if _, ok := param.Type.(*gen.ArrayDataType); ok {
		return "[]string"
	} else if _, ok := param.Type.(*gen.EnumerableDataType); ok {
		return "map"
	} else if _, ok := param.Type.(*gen.SourceUploadDataType); ok {
		return "sourcefile"
	} else if _, ok := param.Type.(*gen.UploadDataType); ok {
		return "file"
	}
	b, ok := param.Type.(*gen.BasicDataType)
	if !ok {
		panic("Wooaat? a object leaf??? - " + path)
	}
	return string(*b)
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/docker_image_src.go#L330-L369
func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
	switch url.Scheme {
	case "file":
		logrus.Debugf("Reading %s", url.Path)
		sig, err := ioutil.ReadFile(url.Path)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, true, nil
			}
			return nil, false, err
		}
		return sig, false, nil

	case "http", "https":
		logrus.Debugf("GET %s", url)
		req, err := http.NewRequest("GET", url.String(), nil)
		if err != nil {
			return nil, false, err
		}
		req = req.WithContext(ctx)
		res, err := s.c.doHTTP(req)
		if err != nil {
			return nil, false, err
		}
		defer res.Body.Close()
		if res.StatusCode == http.StatusNotFound {
			return nil, true, nil
		} else if res.StatusCode != http.StatusOK {
			return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
		}
		sig, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, false, err
		}
		return sig, false, nil

	default:
		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/client.go#L79-L83
func (c *Client) Namespace(ns string) *Client {
	nc := *c
	nc.namespace = ns
	return &nc
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/input/input.go#L378-L380
func (p *SetIgnoreInputEventsParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandSetIgnoreInputEvents, p, nil)
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/daemon.go#L124-L132
func NewDaemon(config *DaemonConfig, os *sys.OS) *Daemon {
	return &Daemon{
		config:       config,
		os:           os,
		setupChan:    make(chan struct{}),
		readyChan:    make(chan struct{}),
		shutdownChan: make(chan struct{}),
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/set.go#L92-L98
func (us *unsafeSet) Values() (values []string) {
	values = make([]string, 0)
	for val := range us.d {
		values = append(values, val)
	}
	return values
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gcsupload/run.go#L41-L62
func (o Options) Run(spec *downwardapi.JobSpec, extra map[string]gcs.UploadFunc) error {
	uploadTargets := o.assembleTargets(spec, extra)

	if !o.DryRun {
		ctx := context.Background()
		gcsClient, err := storage.NewClient(ctx, option.WithCredentialsFile(o.GcsCredentialsFile))
		if err != nil {
			return fmt.Errorf("could not connect to GCS: %v", err)
		}

		if err := gcs.Upload(gcsClient.Bucket(o.Bucket), uploadTargets); err != nil {
			return fmt.Errorf("failed to upload to GCS: %v", err)
		}
	} else {
		for destination := range uploadTargets {
			logrus.WithField("dest", destination).Info("Would upload")
		}
	}

	logrus.Info("Finished upload to GCS")
	return nil
}
https://github.com/naoina/genmai/blob/78583835e1e41e3938e1ddfffd7101f8ad27fae0/genmai.go#L1199-L1201
func (c *Condition) Limit(lim int) *Condition {
	return c.appendQuery(500, Limit, lim)
}
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/french/step2b.go#L10-L59
func step2b(word *snowballword.SnowballWord) bool {
	// Search for the longest among the following suffixes in RV.
	//
	suffix, suffixRunes := word.FirstSuffixIn(word.RVstart, len(word.RS),
		"eraIent", "assions", "erions", "assiez", "assent", "èrent",
		"eront", "erons", "eriez", "erait", "erais", "asses", "antes",
		"aIent", "âtes", "âmes", "ions", "erez", "eras", "erai", "asse",
		"ants", "ante", "ées", "iez", "era", "ant", "ait", "ais", "és",
		"ée", "ât", "ez", "er", "as", "ai", "é", "a",
	)

	switch suffix {
	case "ions":
		// Delete if in R2
		suffixLen := len(suffixRunes)
		if word.FitsInR2(suffixLen) {
			word.RemoveLastNRunes(suffixLen)
			return true
		}
		return false

	case "é", "ée", "ées", "és", "èrent", "er", "era", "erai", "eraIent",
		"erais", "erait", "eras", "erez", "eriez", "erions", "erons",
		"eront", "ez", "iez":
		// Delete
		word.RemoveLastNRunes(len(suffixRunes))
		return true

	case "âmes", "ât", "âtes", "a", "ai", "aIent", "ais", "ait", "ant",
		"ante", "antes", "ants", "as", "asse", "assent", "asses",
		"assiez", "assions":
		// Delete
		word.RemoveLastNRunes(len(suffixRunes))

		// If preceded by e (unicode code point 101), delete
		//
		idx := len(word.RS) - 1
		if idx >= 0 && word.RS[idx] == 101 && word.FitsInRV(1) {
			word.RemoveLastNRunes(1)
		}
		return true
	}
	return false
}
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/binary-decode.go#L891-L912
func decodeFieldNumberAndTyp3(bz []byte) (num uint32, typ Typ3, n int, err error) {

	// Read uvarint value.
	var value64 = uint64(0)
	value64, n, err = DecodeUvarint(bz)
	if err != nil {
		return
	}

	// Decode first typ3 byte.
	typ = Typ3(value64 & 0x07)

	// Decode num.
	var num64 uint64
	num64 = value64 >> 3
	if num64 > (1<<29 - 1) {
		err = fmt.Errorf("invalid field num %v", num64)
		return
	}
	num = uint32(num64)
	return
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/config.go#L861-L868
func ConfigPath(value string) string {
	if value != "" {
		return value
	}
	logrus.Warningf("defaulting to %s until 15 July 2019, please migrate", DefaultConfigPath)
	return DefaultConfigPath
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/storage/types.go#L49-L77
func (t *Type) UnmarshalEasyJSON(in *jlexer.Lexer) {
	switch Type(in.String()) {
	case TypeAppcache:
		*t = TypeAppcache
	case TypeCookies:
		*t = TypeCookies
	case TypeFileSystems:
		*t = TypeFileSystems
	case TypeIndexeddb:
		*t = TypeIndexeddb
	case TypeLocalStorage:
		*t = TypeLocalStorage
	case TypeShaderCache:
		*t = TypeShaderCache
	case TypeWebsql:
		*t = TypeWebsql
	case TypeServiceWorkers:
		*t = TypeServiceWorkers
	case TypeCacheStorage:
		*t = TypeCacheStorage
	case TypeAll:
		*t = TypeAll
	case TypeOther:
		*t = TypeOther
	default:
		in.AddError(errors.New("unknown Type value"))
	}
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/openshift/openshift-copies.go#L711-L722
func resolvePaths(refs []*string, base string) error {
	for _, ref := range refs {
		// Don't resolve empty paths
		if len(*ref) > 0 {
			// Don't resolve absolute paths
			if !filepath.IsAbs(*ref) {
				*ref = filepath.Join(base, *ref)
			}
		}
	}
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L708-L716
func (r *Raft) quorumSize() int {
	voters := 0
	for _, server := range r.configurations.latest.Servers {
		if server.Suffrage == Voter {
			voters++
		}
	}
	return voters/2 + 1
}
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/httpclient/http.go#L533-L547
func filterHeaders(
	dumpFormat Format,
	hiddenHeaders map[string]bool,
	headers http.Header,
	iterator headerIterator) {

	for k, v := range headers {
		if !dumpFormat.IsVerbose() {
			if _, ok := hiddenHeaders[k]; ok {
				continue
			}
		}
		iterator(k, v)
	}
}
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/image/oci.go#L85-L95
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
	cb, err := m.ConfigBlob(ctx)
	if err != nil {
		return nil, err
	}
	configOCI := &imgspecv1.Image{}
	if err := json.Unmarshal(cb, configOCI); err != nil {
		return nil, err
	}
	return configOCI, nil
}
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/samples/geometry/geometry.go#L91-L106
func Dash(gc draw2d.GraphicContext, x, y, width, height float64) {
	sx, sy := width/162, height/205
	gc.SetStrokeColor(image.Black)
	gc.SetLineDash([]float64{height / 10, height / 50, height / 50, height / 50}, -50.0)
	gc.SetLineCap(draw2d.ButtCap)
	gc.SetLineJoin(draw2d.RoundJoin)
	gc.SetLineWidth(height / 50)

	gc.MoveTo(x+sx*60.0, y)
	gc.LineTo(x+sx*60.0, y)
	gc.LineTo(x+sx*162, y+sy*205)
	rLineTo(gc, sx*-102.4, 0)
	gc.CubicCurveTo(x+sx*-17, y+sy*205, x+sx*-17, y+sy*103, x+sx*60.0, y+sy*103.0)
	gc.Stroke()
	gc.SetLineDash(nil, 0.0)
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/provision/kubernetes/pkg/apis/tsuru/v1/register.go#L34-L45
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&App{},
		&AppList{},
	)
	scheme.AddKnownTypes(SchemeGroupVersion,
		&metav1.Status{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
https://github.com/VividCortex/ewma/blob/43880d236f695d39c62cf7aa4ebd4508c258e6c0/ewma.go#L66-L72
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}
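A minimal usage sketch (not from the source; it assumes the zero value of SimpleEWMA is ready to use and that the package exposes a Value accessor for the smoothed result): the first sample seeds the average and later samples are blended in with the DECAY weight shown above.

// Hypothetical caller for the Add method above.
var avg SimpleEWMA
for _, sample := range []float64{10, 12, 9, 11} {
	avg.Add(sample)
}
current := avg.Value() // assumed accessor returning the current moving average
_ = current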
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/ci/ci.go#L13-L64
func New(opts *Options) (*genny.Generator, error) {
	g := genny.New()

	if err := opts.Validate(); err != nil {
		return g, err
	}

	g.Transformer(genny.Replace("-no-pop", ""))
	g.Transformer(genny.Dot())

	box := packr.New("buffalo:genny:ci", "../ci/templates")

	var fname string
	switch opts.Provider {
	case "travis", "travis-ci":
		fname = "-dot-travis.yml.tmpl"
	case "gitlab", "gitlab-ci":
		if opts.App.WithPop {
			fname = "-dot-gitlab-ci.yml.tmpl"
		} else {
			fname = "-dot-gitlab-ci-no-pop.yml.tmpl"
		}
	default:
		return g, fmt.Errorf("could not find a template for %s", opts.Provider)
	}

	f, err := box.FindString(fname)
	if err != nil {
		return g, err
	}
	g.File(genny.NewFileS(fname, f))

	data := map[string]interface{}{
		"opts": opts,
	}

	if opts.DBType == "postgres" {
		data["testDbUrl"] = "postgres://postgres:postgres@postgres:5432/" + opts.App.Name.File().String() + "_test?sslmode=disable"
	} else if opts.DBType == "mysql" {
		data["testDbUrl"] = "mysql://root:root@(mysql:3306)/" + opts.App.Name.File().String() + "_test?parseTime=true&multiStatements=true&readTimeout=1s"
	} else {
		data["testDbUrl"] = ""
	}

	helpers := template.FuncMap{}

	t := gogen.TemplateTransformer(data, helpers)
	g.Transformer(t)

	return g, nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L1855-L1859
func (v *RemoveScriptToEvaluateOnNewDocumentParams) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage20(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L5773-L5777
func (v *EventFrameStoppedLoading) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoPage60(&r, v)
	return r.Error()
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/cast/easyjson.go#L140-L144
func (v StartTabMirroringParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoCast1(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
https://github.com/sclevine/agouti/blob/96599c91888f1b1cf2dccc7f1776ba7f511909e5/page.go#L492-L498
func (p *Page) DoubleClick() error {
	if err := p.session.DoubleClick(); err != nil {
		return fmt.Errorf("failed to double click: %s", err)
	}
	return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L467-L473
func finalState(status prowjobv1.ProwJobState) bool {
	switch status {
	case "", prowjobv1.PendingState, prowjobv1.TriggeredState:
		return false
	}
	return true
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/fetcher/issues.go#L46-L83
func UpdateIssues(db *gorm.DB, client ClientInterface) {
	latest, err := findLatestIssueUpdate(db, client.RepositoryName())
	if err != nil {
		glog.Error("Failed to find last issue update: ", err)
		return
	}
	c := make(chan *github.Issue, 200)

	go client.FetchIssues(latest, c)
	for issue := range c {
		issueOrm, err := NewIssue(issue, client.RepositoryName())
		if err != nil {
			glog.Error("Can't create issue:", err)
			continue
		}
		if db.Create(issueOrm).Error != nil {
			// We assume record already exists. Let's
			// update. First we need to delete labels and
			// assignees, as they are just concatenated
			// otherwise.
			db.Delete(sql.Label{}, "issue_id = ? AND repository = ?", issueOrm.ID, client.RepositoryName())
			db.Delete(sql.Assignee{}, "issue_id = ? AND repository = ?", issueOrm.ID, client.RepositoryName())
			if err := db.Save(issueOrm).Error; err != nil {
				glog.Error("Failed to update database issue: ", err)
			}
		}
		// Issue is updated, find if we have new comments
		UpdateComments(*issue.Number, issueOrm.IsPR, db, client)
		// and find if we have new events
		UpdateIssueEvents(*issue.Number, db, client)
	}
}
https://github.com/dailyburn/bigquery/blob/b6f18972580ed8882d195da0e9b7c9b94902a1ea/client/client.go#L214-L295
func (c *Client) largeDataPagedQuery(service *bigquery.Service, pageSize int, dataset, project, queryStr string, dataChan chan Data) ([][]interface{}, []string, error) {
	c.printDebug("largeDataPagedQuery starting")
	ts := time.Now()
	// start query
	tableRef := bigquery.TableReference{DatasetId: dataset, ProjectId: project, TableId: c.tempTableName}
	jobConfigQuery := bigquery.JobConfigurationQuery{}

	datasetRef := &bigquery.DatasetReference{
		DatasetId: dataset,
		ProjectId: project,
	}

	jobConfigQuery.AllowLargeResults = true
	jobConfigQuery.Query = queryStr
	jobConfigQuery.DestinationTable = &tableRef
	jobConfigQuery.DefaultDataset = datasetRef
	if !c.flattenResults {
		c.printDebug("setting FlattenResults to false")
		// need a pointer to bool
		f := false
		jobConfigQuery.FlattenResults = &f
	}
	jobConfigQuery.WriteDisposition = "WRITE_TRUNCATE"
	jobConfigQuery.CreateDisposition = "CREATE_IF_NEEDED"

	jobConfig := bigquery.JobConfiguration{}
	jobConfig.Query = &jobConfigQuery

	job := bigquery.Job{}
	job.Configuration = &jobConfig

	jobInsert := service.Jobs.Insert(project, &job)
	runningJob, jerr := jobInsert.Do()
	if jerr != nil {
		c.printDebug("Error inserting job!", jerr)
		if dataChan != nil {
			dataChan <- Data{Err: jerr}
		}
		return nil, nil, jerr
	}

	var qr *bigquery.GetQueryResultsResponse
	var rows [][]interface{}
	var headers []string
	var err error

	// Periodically, job references are not created, but errors are also not thrown.
	// In this scenario, retry up to 5 times to get a job reference before giving up.
	for i := 1; ; i++ {
		r := service.Jobs.GetQueryResults(project, runningJob.JobReference.JobId)
		r.TimeoutMs(c.RequestTimeout)
		qr, err = r.Do()
		headers, rows = c.headersAndRows(qr.Schema, qr.Rows)
		if i >= maxRequestRetry || qr.JobReference != nil || err != nil {
			if i > 1 {
				c.printDebug("Took %v tries to get a job reference", i)
			}
			break
		}
	}

	if err == nil && qr.JobReference == nil {
		err = fmt.Errorf("missing job reference")
	}
	if err != nil {
		c.printDebug("Error loading query: ", err)
		if dataChan != nil {
			dataChan <- Data{Err: err}
		}
		return nil, nil, err
	}

	rows, headers, err = c.processPagedQuery(qr.JobReference, qr.PageToken, dataChan, headers, rows)
	c.printDebug("largeDataPagedQuery completed in ", time.Now().Sub(ts).Seconds(), "s")
	return rows, headers, err
}
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/server.go#L159-L495
func RunServer(dry bool) http.Handler { err := log.Init() if err != nil { stdLog.Fatalf("unable to initialize logging: %v", err) } err = InitializeDBServices() if err != nil { fatal(err) } m := apiRouter.NewRouter() for _, handler := range tsuruHandlerList { m.Add(handler.version, handler.method, handler.path, handler.h) } if disableIndex, _ := config.GetBool("disable-index-page"); !disableIndex { m.Add("1.0", "Get", "/", Handler(index)) } m.Add("1.0", "Get", "/info", Handler(info)) m.Add("1.0", "Get", "/services/instances", AuthorizationRequiredHandler(serviceInstances)) m.Add("1.0", "Get", "/services/{service}/instances/{instance}", AuthorizationRequiredHandler(serviceInstance)) m.Add("1.0", "Delete", "/services/{service}/instances/{instance}", AuthorizationRequiredHandler(removeServiceInstance)) m.Add("1.0", "Post", "/services/{service}/instances", AuthorizationRequiredHandler(createServiceInstance)) m.Add("1.0", "Put", "/services/{service}/instances/{instance}", AuthorizationRequiredHandler(updateServiceInstance)) m.Add("1.0", "Put", "/services/{service}/instances/{instance}/{app}", AuthorizationRequiredHandler(bindServiceInstance)) m.Add("1.0", "Delete", "/services/{service}/instances/{instance}/{app}", AuthorizationRequiredHandler(unbindServiceInstance)) m.Add("1.0", "Get", "/services/{service}/instances/{instance}/status", AuthorizationRequiredHandler(serviceInstanceStatus)) m.Add("1.0", "Put", "/services/{service}/instances/permission/{instance}/{team}", AuthorizationRequiredHandler(serviceInstanceGrantTeam)) m.Add("1.0", "Delete", "/services/{service}/instances/permission/{instance}/{team}", AuthorizationRequiredHandler(serviceInstanceRevokeTeam)) proxyInstanceHandler := AuthorizationRequiredHandler(serviceInstanceProxy) proxyServiceHandler := AuthorizationRequiredHandler(serviceProxy) m.AddAll("1.0", "/services/{service}/proxy/{instance}", proxyInstanceHandler) m.AddAll("1.0", "/services/proxy/service/{service}", proxyServiceHandler) m.Add("1.0", "Get", "/services", AuthorizationRequiredHandler(serviceList)) m.Add("1.0", "Post", "/services", AuthorizationRequiredHandler(serviceCreate)) m.Add("1.0", "Put", "/services/{name}", AuthorizationRequiredHandler(serviceUpdate)) m.Add("1.0", "Delete", "/services/{name}", AuthorizationRequiredHandler(serviceDelete)) m.Add("1.0", "Get", "/services/{name}", AuthorizationRequiredHandler(serviceInfo)) m.Add("1.0", "Get", "/services/{name}/plans", AuthorizationRequiredHandler(servicePlans)) m.Add("1.0", "Get", "/services/{name}/doc", AuthorizationRequiredHandler(serviceDoc)) m.Add("1.0", "Put", "/services/{name}/doc", AuthorizationRequiredHandler(serviceAddDoc)) m.Add("1.0", "Put", "/services/{service}/team/{team}", AuthorizationRequiredHandler(grantServiceAccess)) m.Add("1.0", "Delete", "/services/{service}/team/{team}", AuthorizationRequiredHandler(revokeServiceAccess)) m.Add("1.0", "Delete", "/apps/{app}", AuthorizationRequiredHandler(appDelete)) m.Add("1.0", "Get", "/apps/{app}", AuthorizationRequiredHandler(appInfo)) m.Add("1.0", "Post", "/apps/{app}/cname", AuthorizationRequiredHandler(setCName)) m.Add("1.0", "Delete", "/apps/{app}/cname", AuthorizationRequiredHandler(unsetCName)) runHandler := AuthorizationRequiredHandler(runCommand) m.Add("1.0", "Post", "/apps/{app}/run", runHandler) m.Add("1.0", "Post", "/apps/{app}/restart", AuthorizationRequiredHandler(restart)) m.Add("1.0", "Post", "/apps/{app}/start", AuthorizationRequiredHandler(start)) m.Add("1.0", "Post", "/apps/{app}/stop", AuthorizationRequiredHandler(stop)) m.Add("1.0", 
"Post", "/apps/{app}/sleep", AuthorizationRequiredHandler(sleep)) m.Add("1.0", "Get", "/apps/{appname}/quota", AuthorizationRequiredHandler(getAppQuota)) m.Add("1.0", "Put", "/apps/{appname}/quota", AuthorizationRequiredHandler(changeAppQuota)) m.Add("1.0", "Put", "/apps/{appname}", AuthorizationRequiredHandler(updateApp)) m.Add("1.0", "Get", "/apps/{app}/env", AuthorizationRequiredHandler(getEnv)) m.Add("1.0", "Post", "/apps/{app}/env", AuthorizationRequiredHandler(setEnv)) m.Add("1.0", "Delete", "/apps/{app}/env", AuthorizationRequiredHandler(unsetEnv)) m.Add("1.0", "Get", "/apps", AuthorizationRequiredHandler(appList)) m.Add("1.0", "Post", "/apps", AuthorizationRequiredHandler(createApp)) forceDeleteLockHandler := AuthorizationRequiredHandler(forceDeleteLock) m.Add("1.0", "Delete", "/apps/{app}/lock", forceDeleteLockHandler) m.Add("1.0", "Put", "/apps/{app}/units", AuthorizationRequiredHandler(addUnits)) m.Add("1.0", "Delete", "/apps/{app}/units", AuthorizationRequiredHandler(removeUnits)) registerUnitHandler := AuthorizationRequiredHandler(registerUnit) m.Add("1.0", "Post", "/apps/{app}/units/register", registerUnitHandler) setUnitStatusHandler := AuthorizationRequiredHandler(setUnitStatus) m.Add("1.0", "Post", "/apps/{app}/units/{unit}", setUnitStatusHandler) m.Add("1.0", "Put", "/apps/{app}/teams/{team}", AuthorizationRequiredHandler(grantAppAccess)) m.Add("1.0", "Delete", "/apps/{app}/teams/{team}", AuthorizationRequiredHandler(revokeAppAccess)) m.Add("1.0", "Get", "/apps/{app}/log", AuthorizationRequiredHandler(appLog)) logPostHandler := AuthorizationRequiredHandler(addLog) m.Add("1.0", "Post", "/apps/{app}/log", logPostHandler) m.Add("1.0", "Post", "/apps/{appname}/deploy/rollback", AuthorizationRequiredHandler(deployRollback)) m.Add("1.4", "Put", "/apps/{appname}/deploy/rollback/update", AuthorizationRequiredHandler(deployRollbackUpdate)) m.Add("1.3", "Post", "/apps/{appname}/deploy/rebuild", AuthorizationRequiredHandler(deployRebuild)) m.Add("1.0", "Get", "/apps/{app}/metric/envs", AuthorizationRequiredHandler(appMetricEnvs)) m.Add("1.0", "Post", "/apps/{app}/routes", AuthorizationRequiredHandler(appRebuildRoutes)) m.Add("1.2", "Get", "/apps/{app}/certificate", AuthorizationRequiredHandler(listCertificates)) m.Add("1.2", "Put", "/apps/{app}/certificate", AuthorizationRequiredHandler(setCertificate)) m.Add("1.2", "Delete", "/apps/{app}/certificate", AuthorizationRequiredHandler(unsetCertificate)) m.Add("1.5", "Post", "/apps/{app}/routers", AuthorizationRequiredHandler(addAppRouter)) m.Add("1.5", "Put", "/apps/{app}/routers/{router}", AuthorizationRequiredHandler(updateAppRouter)) m.Add("1.5", "Delete", "/apps/{app}/routers/{router}", AuthorizationRequiredHandler(removeAppRouter)) m.Add("1.5", "Get", "/apps/{app}/routers", AuthorizationRequiredHandler(listAppRouters)) m.Add("1.0", "Post", "/node/status", AuthorizationRequiredHandler(setNodeStatus)) m.Add("1.0", "Get", "/deploys", AuthorizationRequiredHandler(deploysList)) m.Add("1.0", "Get", "/deploys/{deploy}", AuthorizationRequiredHandler(deployInfo)) m.Add("1.1", "Get", "/events", AuthorizationRequiredHandler(eventList)) m.Add("1.3", "Get", "/events/blocks", AuthorizationRequiredHandler(eventBlockList)) m.Add("1.3", "Post", "/events/blocks", AuthorizationRequiredHandler(eventBlockAdd)) m.Add("1.3", "Delete", "/events/blocks/{uuid}", AuthorizationRequiredHandler(eventBlockRemove)) m.Add("1.1", "Get", "/events/kinds", AuthorizationRequiredHandler(kindList)) m.Add("1.1", "Get", "/events/{uuid}", 
AuthorizationRequiredHandler(eventInfo)) m.Add("1.1", "Post", "/events/{uuid}/cancel", AuthorizationRequiredHandler(eventCancel)) m.Add("1.6", "Get", "/events/webhooks", AuthorizationRequiredHandler(webhookList)) m.Add("1.6", "Post", "/events/webhooks", AuthorizationRequiredHandler(webhookCreate)) m.Add("1.6", "Get", "/events/webhooks/{name}", AuthorizationRequiredHandler(webhookInfo)) m.Add("1.6", "Put", "/events/webhooks/{name}", AuthorizationRequiredHandler(webhookUpdate)) m.Add("1.6", "Delete", "/events/webhooks/{name}", AuthorizationRequiredHandler(webhookDelete)) m.Add("1.0", "Get", "/platforms", AuthorizationRequiredHandler(platformList)) m.Add("1.0", "Post", "/platforms", AuthorizationRequiredHandler(platformAdd)) m.Add("1.0", "Put", "/platforms/{name}", AuthorizationRequiredHandler(platformUpdate)) m.Add("1.0", "Delete", "/platforms/{name}", AuthorizationRequiredHandler(platformRemove)) m.Add("1.6", "Get", "/platforms/{name}", AuthorizationRequiredHandler(platformInfo)) m.Add("1.6", "Post", "/platforms/{name}/rollback", AuthorizationRequiredHandler(platformRollback)) // These handlers don't use {app} on purpose. Using :app means that only // the token generate for the given app is valid, but these handlers // use a token generated for Gandalf. m.Add("1.0", "Post", "/apps/{appname}/repository/clone", AuthorizationRequiredHandler(deploy)) m.Add("1.0", "Post", "/apps/{appname}/deploy", AuthorizationRequiredHandler(deploy)) diffDeployHandler := AuthorizationRequiredHandler(diffDeploy) m.Add("1.0", "Post", "/apps/{appname}/diff", diffDeployHandler) m.Add("1.5", "Post", "/apps/{appname}/build", AuthorizationRequiredHandler(build)) // Shell also doesn't use {app} on purpose. Middlewares don't play well // with websocket. m.Add("1.0", "Get", "/apps/{appname}/shell", http.HandlerFunc(remoteShellHandler)) m.Add("1.0", "Get", "/users", AuthorizationRequiredHandler(listUsers)) m.Add("1.0", "Post", "/users", Handler(createUser)) m.Add("1.0", "Get", "/users/info", AuthorizationRequiredHandler(userInfo)) m.Add("1.0", "Get", "/auth/scheme", Handler(authScheme)) m.Add("1.0", "Post", "/auth/login", Handler(login)) m.Add("1.0", "Post", "/auth/saml", Handler(samlCallbackLogin)) m.Add("1.0", "Get", "/auth/saml", Handler(samlMetadata)) m.Add("1.0", "Post", "/users/{email}/password", Handler(resetPassword)) m.Add("1.0", "Post", "/users/{email}/tokens", Handler(login)) m.Add("1.0", "Get", "/users/{email}/quota", AuthorizationRequiredHandler(getUserQuota)) m.Add("1.0", "Put", "/users/{email}/quota", AuthorizationRequiredHandler(changeUserQuota)) m.Add("1.0", "Delete", "/users/tokens", AuthorizationRequiredHandler(logout)) m.Add("1.0", "Put", "/users/password", AuthorizationRequiredHandler(changePassword)) m.Add("1.0", "Delete", "/users", AuthorizationRequiredHandler(removeUser)) m.Add("1.0", "Get", "/users/keys", AuthorizationRequiredHandler(listKeys)) m.Add("1.0", "Post", "/users/keys", AuthorizationRequiredHandler(addKeyToUser)) m.Add("1.0", "Delete", "/users/keys/{key}", AuthorizationRequiredHandler(removeKeyFromUser)) m.Add("1.0", "Get", "/users/api-key", AuthorizationRequiredHandler(showAPIToken)) m.Add("1.0", "Post", "/users/api-key", AuthorizationRequiredHandler(regenerateAPIToken)) m.Add("1.0", "Get", "/logs", websocket.Handler(addLogs)) m.Add("1.0", "Get", "/teams", AuthorizationRequiredHandler(teamList)) m.Add("1.0", "Post", "/teams", AuthorizationRequiredHandler(createTeam)) m.Add("1.0", "Delete", "/teams/{name}", AuthorizationRequiredHandler(removeTeam)) m.Add("1.6", "Put", "/teams/{name}", 
AuthorizationRequiredHandler(updateTeam)) m.Add("1.4", "Get", "/teams/{name}", AuthorizationRequiredHandler(teamInfo)) m.Add("1.0", "Post", "/swap", AuthorizationRequiredHandler(swap)) m.Add("1.0", "Get", "/healthcheck/", http.HandlerFunc(healthcheck)) m.Add("1.0", "Get", "/healthcheck", http.HandlerFunc(healthcheck)) m.Add("1.0", "Get", "/iaas/machines", AuthorizationRequiredHandler(machinesList)) m.Add("1.0", "Delete", "/iaas/machines/{machine_id}", AuthorizationRequiredHandler(machineDestroy)) m.Add("1.0", "Get", "/iaas/templates", AuthorizationRequiredHandler(templatesList)) m.Add("1.0", "Post", "/iaas/templates", AuthorizationRequiredHandler(templateCreate)) m.Add("1.0", "Put", "/iaas/templates/{template_name}", AuthorizationRequiredHandler(templateUpdate)) m.Add("1.0", "Delete", "/iaas/templates/{template_name}", AuthorizationRequiredHandler(templateDestroy)) m.Add("1.0", "Get", "/plans", AuthorizationRequiredHandler(listPlans)) m.Add("1.0", "Post", "/plans", AuthorizationRequiredHandler(addPlan)) m.Add("1.0", "Delete", "/plans/{planname}", AuthorizationRequiredHandler(removePlan)) m.Add("1.0", "Get", "/pools", AuthorizationRequiredHandler(poolList)) m.Add("1.0", "Post", "/pools", AuthorizationRequiredHandler(addPoolHandler)) m.Add("1.0", "Delete", "/pools/{name}", AuthorizationRequiredHandler(removePoolHandler)) m.Add("1.0", "Put", "/pools/{name}", AuthorizationRequiredHandler(poolUpdateHandler)) m.Add("1.0", "Post", "/pools/{name}/team", AuthorizationRequiredHandler(addTeamToPoolHandler)) m.Add("1.0", "Delete", "/pools/{name}/team", AuthorizationRequiredHandler(removeTeamToPoolHandler)) m.Add("1.3", "Get", "/constraints", AuthorizationRequiredHandler(poolConstraintList)) m.Add("1.3", "Put", "/constraints", AuthorizationRequiredHandler(poolConstraintSet)) m.Add("1.0", "Get", "/roles", AuthorizationRequiredHandler(listRoles)) m.Add("1.4", "Put", "/roles", AuthorizationRequiredHandler(roleUpdate)) m.Add("1.0", "Post", "/roles", AuthorizationRequiredHandler(addRole)) m.Add("1.0", "Get", "/roles/{name}", AuthorizationRequiredHandler(roleInfo)) m.Add("1.0", "Delete", "/roles/{name}", AuthorizationRequiredHandler(removeRole)) m.Add("1.0", "Post", "/roles/{name}/permissions", AuthorizationRequiredHandler(addPermissions)) m.Add("1.0", "Delete", "/roles/{name}/permissions/{permission}", AuthorizationRequiredHandler(removePermissions)) m.Add("1.0", "Post", "/roles/{name}/user", AuthorizationRequiredHandler(assignRole)) m.Add("1.0", "Delete", "/roles/{name}/user/{email}", AuthorizationRequiredHandler(dissociateRole)) m.Add("1.0", "Get", "/role/default", AuthorizationRequiredHandler(listDefaultRoles)) m.Add("1.0", "Post", "/role/default", AuthorizationRequiredHandler(addDefaultRole)) m.Add("1.0", "Delete", "/role/default", AuthorizationRequiredHandler(removeDefaultRole)) m.Add("1.0", "Get", "/permissions", AuthorizationRequiredHandler(listPermissions)) m.Add("1.6", "Post", "/roles/{name}/token", AuthorizationRequiredHandler(assignRoleToToken)) m.Add("1.6", "Delete", "/roles/{name}/token/{token_id}", AuthorizationRequiredHandler(dissociateRoleFromToken)) m.Add("1.0", "Get", "/debug/goroutines", AuthorizationRequiredHandler(dumpGoroutines)) m.Add("1.0", "Get", "/debug/pprof/", AuthorizationRequiredHandler(indexHandler)) m.Add("1.0", "Get", "/debug/pprof/cmdline", AuthorizationRequiredHandler(cmdlineHandler)) m.Add("1.0", "Get", "/debug/pprof/profile", AuthorizationRequiredHandler(profileHandler)) m.Add("1.0", "Get", "/debug/pprof/symbol", AuthorizationRequiredHandler(symbolHandler)) m.Add("1.0", 
"Get", "/debug/pprof/heap", AuthorizationRequiredHandler(indexHandler)) m.Add("1.0", "Get", "/debug/pprof/goroutine", AuthorizationRequiredHandler(indexHandler)) m.Add("1.0", "Get", "/debug/pprof/threadcreate", AuthorizationRequiredHandler(indexHandler)) m.Add("1.0", "Get", "/debug/pprof/block", AuthorizationRequiredHandler(indexHandler)) m.Add("1.0", "Get", "/debug/pprof/trace", AuthorizationRequiredHandler(traceHandler)) m.Add("1.3", "GET", "/node/autoscale", AuthorizationRequiredHandler(autoScaleHistoryHandler)) m.Add("1.3", "GET", "/node/autoscale/config", AuthorizationRequiredHandler(autoScaleGetConfig)) m.Add("1.3", "POST", "/node/autoscale/run", AuthorizationRequiredHandler(autoScaleRunHandler)) m.Add("1.3", "GET", "/node/autoscale/rules", AuthorizationRequiredHandler(autoScaleListRules)) m.Add("1.3", "POST", "/node/autoscale/rules", AuthorizationRequiredHandler(autoScaleSetRule)) m.Add("1.3", "DELETE", "/node/autoscale/rules", AuthorizationRequiredHandler(autoScaleDeleteRule)) m.Add("1.3", "DELETE", "/node/autoscale/rules/{id}", AuthorizationRequiredHandler(autoScaleDeleteRule)) m.Add("1.2", "GET", "/node", AuthorizationRequiredHandler(listNodesHandler)) m.Add("1.2", "GET", "/node/apps/{appname}/containers", AuthorizationRequiredHandler(listUnitsByApp)) m.Add("1.2", "GET", "/node/{address:.*}/containers", AuthorizationRequiredHandler(listUnitsByNode)) m.Add("1.2", "POST", "/node", AuthorizationRequiredHandler(addNodeHandler)) m.Add("1.2", "PUT", "/node", AuthorizationRequiredHandler(updateNodeHandler)) m.Add("1.2", "DELETE", "/node/{address:.*}", AuthorizationRequiredHandler(removeNodeHandler)) m.Add("1.3", "POST", "/node/rebalance", AuthorizationRequiredHandler(rebalanceNodesHandler)) m.Add("1.6", "GET", "/node/{address:.*}", AuthorizationRequiredHandler(infoNodeHandler)) m.Add("1.2", "GET", "/nodecontainers", AuthorizationRequiredHandler(nodeContainerList)) m.Add("1.2", "POST", "/nodecontainers", AuthorizationRequiredHandler(nodeContainerCreate)) m.Add("1.2", "GET", "/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerInfo)) m.Add("1.2", "DELETE", "/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerDelete)) m.Add("1.2", "POST", "/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerUpdate)) m.Add("1.2", "POST", "/nodecontainers/{name}/upgrade", AuthorizationRequiredHandler(nodeContainerUpgrade)) m.Add("1.2", "POST", "/install/hosts", AuthorizationRequiredHandler(installHostAdd)) m.Add("1.2", "GET", "/install/hosts", AuthorizationRequiredHandler(installHostList)) m.Add("1.2", "GET", "/install/hosts/{name}", AuthorizationRequiredHandler(installHostInfo)) m.Add("1.2", "GET", "/healing/node", AuthorizationRequiredHandler(nodeHealingRead)) m.Add("1.2", "POST", "/healing/node", AuthorizationRequiredHandler(nodeHealingUpdate)) m.Add("1.2", "DELETE", "/healing/node", AuthorizationRequiredHandler(nodeHealingDelete)) m.Add("1.3", "GET", "/healing", AuthorizationRequiredHandler(healingHistoryHandler)) m.Add("1.3", "GET", "/routers", AuthorizationRequiredHandler(listRouters)) m.Add("1.2", "GET", "/metrics", promhttp.Handler()) m.Add("1.7", "GET", "/provisioner", AuthorizationRequiredHandler(provisionerList)) m.Add("1.3", "POST", "/provisioner/clusters", AuthorizationRequiredHandler(createCluster)) m.Add("1.4", "POST", "/provisioner/clusters/{name}", AuthorizationRequiredHandler(updateCluster)) m.Add("1.3", "GET", "/provisioner/clusters", AuthorizationRequiredHandler(listClusters)) m.Add("1.3", "DELETE", "/provisioner/clusters/{name}", 
AuthorizationRequiredHandler(deleteCluster)) m.Add("1.4", "GET", "/volumes", AuthorizationRequiredHandler(volumesList)) m.Add("1.4", "GET", "/volumes/{name}", AuthorizationRequiredHandler(volumeInfo)) m.Add("1.4", "DELETE", "/volumes/{name}", AuthorizationRequiredHandler(volumeDelete)) m.Add("1.4", "POST", "/volumes", AuthorizationRequiredHandler(volumeCreate)) m.Add("1.4", "POST", "/volumes/{name}", AuthorizationRequiredHandler(volumeUpdate)) m.Add("1.4", "POST", "/volumes/{name}/bind", AuthorizationRequiredHandler(volumeBind)) m.Add("1.4", "DELETE", "/volumes/{name}/bind", AuthorizationRequiredHandler(volumeUnbind)) m.Add("1.4", "GET", "/volumeplans", AuthorizationRequiredHandler(volumePlansList)) m.Add("1.6", "GET", "/tokens", AuthorizationRequiredHandler(tokenList)) m.Add("1.7", "GET", "/tokens/{token_id}", AuthorizationRequiredHandler(tokenInfo)) m.Add("1.6", "POST", "/tokens", AuthorizationRequiredHandler(tokenCreate)) m.Add("1.6", "DELETE", "/tokens/{token_id}", AuthorizationRequiredHandler(tokenDelete)) m.Add("1.6", "PUT", "/tokens/{token_id}", AuthorizationRequiredHandler(tokenUpdate)) m.Add("1.7", "GET", "/brokers", AuthorizationRequiredHandler(serviceBrokerList)) m.Add("1.7", "POST", "/brokers", AuthorizationRequiredHandler(serviceBrokerAdd)) m.Add("1.7", "PUT", "/brokers/{broker}", AuthorizationRequiredHandler(serviceBrokerUpdate)) m.Add("1.7", "DELETE", "/brokers/{broker}", AuthorizationRequiredHandler(serviceBrokerDelete)) // Handlers for compatibility reasons, should be removed on tsuru 2.0. m.Add("1.4", "Post", "/teams/{name}", AuthorizationRequiredHandler(updateTeam)) m.Add("1.0", "GET", "/docker/node", AuthorizationRequiredHandler(listNodesHandler)) m.Add("1.0", "GET", "/docker/node/apps/{appname}/containers", AuthorizationRequiredHandler(listUnitsByApp)) m.Add("1.0", "GET", "/docker/node/{address:.*}/containers", AuthorizationRequiredHandler(listUnitsByNode)) m.Add("1.0", "POST", "/docker/node", AuthorizationRequiredHandler(addNodeHandler)) m.Add("1.0", "PUT", "/docker/node", AuthorizationRequiredHandler(updateNodeHandler)) m.Add("1.0", "DELETE", "/docker/node/{address:.*}", AuthorizationRequiredHandler(removeNodeHandler)) m.Add("1.0", "POST", "/docker/containers/rebalance", AuthorizationRequiredHandler(rebalanceNodesHandler)) m.Add("1.0", "GET", "/docker/nodecontainers", AuthorizationRequiredHandler(nodeContainerList)) m.Add("1.0", "POST", "/docker/nodecontainers", AuthorizationRequiredHandler(nodeContainerCreate)) m.Add("1.0", "GET", "/docker/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerInfo)) m.Add("1.0", "DELETE", "/docker/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerDelete)) m.Add("1.0", "POST", "/docker/nodecontainers/{name}", AuthorizationRequiredHandler(nodeContainerUpdate)) m.Add("1.0", "POST", "/docker/nodecontainers/{name}/upgrade", AuthorizationRequiredHandler(nodeContainerUpgrade)) m.Add("1.0", "GET", "/docker/healing/node", AuthorizationRequiredHandler(nodeHealingRead)) m.Add("1.0", "POST", "/docker/healing/node", AuthorizationRequiredHandler(nodeHealingUpdate)) m.Add("1.0", "DELETE", "/docker/healing/node", AuthorizationRequiredHandler(nodeHealingDelete)) m.Add("1.0", "GET", "/docker/healing", AuthorizationRequiredHandler(healingHistoryHandler)) m.Add("1.0", "GET", "/docker/autoscale", AuthorizationRequiredHandler(autoScaleHistoryHandler)) m.Add("1.0", "GET", "/docker/autoscale/config", AuthorizationRequiredHandler(autoScaleGetConfig)) m.Add("1.0", "POST", "/docker/autoscale/run", 
AuthorizationRequiredHandler(autoScaleRunHandler)) m.Add("1.0", "GET", "/docker/autoscale/rules", AuthorizationRequiredHandler(autoScaleListRules)) m.Add("1.0", "POST", "/docker/autoscale/rules", AuthorizationRequiredHandler(autoScaleSetRule)) m.Add("1.0", "DELETE", "/docker/autoscale/rules", AuthorizationRequiredHandler(autoScaleDeleteRule)) m.Add("1.0", "DELETE", "/docker/autoscale/rules/{id}", AuthorizationRequiredHandler(autoScaleDeleteRule)) m.Add("1.0", "GET", "/plans/routers", AuthorizationRequiredHandler(listRouters)) n := negroni.New() n.Use(negroni.NewRecovery()) n.Use(negroni.HandlerFunc(contextClearerMiddleware)) if !dry { n.Use(newLoggerMiddleware()) } n.UseHandler(m) n.Use(negroni.HandlerFunc(flushingWriterMiddleware)) n.Use(negroni.HandlerFunc(setRequestIDHeaderMiddleware)) n.Use(negroni.HandlerFunc(errorHandlingMiddleware)) n.Use(negroni.HandlerFunc(setVersionHeadersMiddleware)) n.Use(negroni.HandlerFunc(authTokenMiddleware)) n.Use(&appLockMiddleware{excludedHandlers: []http.Handler{ logPostHandler, runHandler, forceDeleteLockHandler, registerUnitHandler, setUnitStatusHandler, diffDeployHandler, }}) n.UseHandler(http.HandlerFunc(runDelayedHandler)) if !dry { err := startServer(n) if err != nil { fatal(err) } } return n }
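The block above finishes wiring tsuru's versioned routes and then wraps the mux in a negroni middleware chain. A minimal sketch of that same wrap-a-mux-in-negroni shape, using only negroni and net/http; the route, middleware set, and import path are placeholders, not tsuru's actual setup:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "WORKING")
	})

	// Recovery first, then any other middleware, then the router itself.
	n := negroni.New()
	n.Use(negroni.NewRecovery())
	n.Use(negroni.NewLogger())
	n.UseHandler(mux)

	log.Fatal(http.ListenAndServe(":8080", n))
}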
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/pprof/pprof.go#L41-L54
func Register(registrar tchannel.Registrar) {
	handler := func(ctx context.Context, call *tchannel.InboundCall) {
		req, err := thttp.ReadRequest(call)
		if err != nil {
			registrar.Logger().WithFields(
				tchannel.LogField{Key: "err", Value: err.Error()},
			).Warn("Failed to read HTTP request.")
			return
		}
		serveHTTP(req, call.Response())
	}
	registrar.Register(tchannel.HandlerFunc(handler), "_pprof")
}
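A minimal usage sketch: a *tchannel.Channel satisfies the Registrar interface, so registering the pprof handlers is one call. The service name is a placeholder.

package main

import (
	"log"

	"github.com/uber/tchannel-go"
	"github.com/uber/tchannel-go/pprof"
)

func main() {
	// "pprof-demo" is an illustrative service name.
	ch, err := tchannel.NewChannel("pprof-demo", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	// Exposes the "_pprof" method registered by the function above.
	pprof.Register(ch)
}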
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_zfs.go#L2345-L2471
func (s *storageZfs) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
	logger.Debugf("Creating ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)

	poolName := s.getOnDiskPoolName()
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	fs := fmt.Sprintf("images/%s", fingerprint)
	revert := true
	subrevert := true

	err := s.createImageDbPoolVolume(fingerprint)
	if err != nil {
		return err
	}
	defer func() {
		if !subrevert {
			return
		}
		s.deleteImageDbPoolVolume(fingerprint)
	}()

	if zfsFilesystemEntityExists(poolName, fmt.Sprintf("deleted/%s", fs)) {
		if err := zfsPoolVolumeRename(poolName, fmt.Sprintf("deleted/%s", fs), fs, true); err != nil {
			return err
		}

		defer func() {
			if !revert {
				return
			}
			s.ImageDelete(fingerprint)
		}()

		// In case this is an image from an older lxd instance, wipe the
		// mountpoint.
		err = zfsPoolVolumeSet(poolName, fs, "mountpoint", "none")
		if err != nil {
			return err
		}

		revert = false
		subrevert = false

		return nil
	}

	if !shared.PathExists(imageMntPoint) {
		err := os.MkdirAll(imageMntPoint, 0700)
		if err != nil {
			return err
		}
		defer func() {
			if !subrevert {
				return
			}
			os.RemoveAll(imageMntPoint)
		}()
	}

	// Create temporary mountpoint directory.
	tmp := getImageMountPoint(s.pool.Name, "")
	tmpImageDir, err := ioutil.TempDir(tmp, "")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpImageDir)

	imagePath := shared.VarPath("images", fingerprint)

	// Create a new storage volume on the storage pool for the image.
	dataset := fmt.Sprintf("%s/%s", poolName, fs)
	msg, err := zfsPoolVolumeCreate(dataset, "mountpoint=none")
	if err != nil {
		logger.Errorf("Failed to create ZFS dataset \"%s\" on storage pool \"%s\": %s", dataset, s.pool.Name, msg)
		return err
	}
	subrevert = false
	defer func() {
		if !revert {
			return
		}
		s.ImageDelete(fingerprint)
	}()

	// Set a temporary mountpoint for the image.
	err = zfsPoolVolumeSet(poolName, fs, "mountpoint", tmpImageDir)
	if err != nil {
		return err
	}

	// Make sure that the image actually got mounted.
	if !shared.IsMountPoint(tmpImageDir) {
		zfsMount(poolName, fs)
	}

	// Unpack the image into the temporary mountpoint.
	err = unpackImage(imagePath, tmpImageDir, storageTypeZfs, s.s.OS.RunningInUserNS, nil)
	if err != nil {
		return err
	}

	// Mark the new storage volume for the image as readonly.
	if err = zfsPoolVolumeSet(poolName, fs, "readonly", "on"); err != nil {
		return err
	}

	// Remove the temporary mountpoint from the image storage volume.
	if err = zfsPoolVolumeSet(poolName, fs, "mountpoint", "none"); err != nil {
		return err
	}

	// Make sure that the image actually got unmounted.
	if shared.IsMountPoint(tmpImageDir) {
		zfsUmount(poolName, fs, tmpImageDir)
	}

	// Create a snapshot of that image on the storage pool which we clone for
	// container creation.
	err = zfsPoolVolumeSnapshotCreate(poolName, fs, "readonly")
	if err != nil {
		return err
	}

	revert = false

	logger.Debugf("Created ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
	return nil
}
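ImageCreate leans on the revert/subrevert booleans: cleanups are registered with defer right after each step succeeds and disarmed only once the whole operation has succeeded. A minimal standalone sketch of that idiom, with purely illustrative names and paths:

package main

import (
	"fmt"
	"os"
)

// createWorkDir shows the revert-flag pattern: the deferred cleanup stays
// armed until the very end, so any early return undoes the partial work.
func createWorkDir(path string) error {
	revert := true

	if err := os.Mkdir(path, 0700); err != nil {
		return err
	}
	defer func() {
		if revert {
			os.RemoveAll(path) // undo the partial work on any later failure
		}
	}()

	if err := os.WriteFile(path+"/ready", []byte("ok"), 0600); err != nil {
		return err // cleanup above still armed, directory is removed
	}

	revert = false // success: disarm the cleanup
	return nil
}

func main() {
	fmt.Println(createWorkDir("/tmp/revert-demo"))
}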
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd-p2c/transfer.go#L52-L139
func rsyncSendSetup(path string, rsyncArgs string) (*exec.Cmd, net.Conn, io.ReadCloser, error) {
	auds := fmt.Sprintf("@lxd-p2c/%s", uuid.NewRandom().String())
	if len(auds) > shared.ABSTRACT_UNIX_SOCK_LEN-1 {
		auds = auds[:shared.ABSTRACT_UNIX_SOCK_LEN-1]
	}
	l, err := net.Listen("unix", auds)
	if err != nil {
		return nil, nil, nil, err
	}

	execPath, err := os.Readlink("/proc/self/exe")
	if err != nil {
		return nil, nil, nil, err
	}

	if !shared.PathExists(execPath) {
		execPath = os.Args[0]
	}

	rsyncCmd := fmt.Sprintf("sh -c \"%s netcat %s\"", execPath, auds)

	args := []string{
		"-ar",
		"--devices",
		"--numeric-ids",
		"--partial",
		"--sparse",
		"--xattrs",
		"--delete",
		"--compress",
		"--compress-level=2",
	}

	// Ignore deletions (requires 3.1 or higher)
	rsyncCheckVersion := func(min string) bool {
		out, err := shared.RunCommand("rsync", "--version")
		if err != nil {
			return false
		}

		fields := strings.Split(out, " ")
		curVer, err := version.Parse(fields[3])
		if err != nil {
			return false
		}

		minVer, err := version.Parse(min)
		if err != nil {
			return false
		}

		return curVer.Compare(minVer) >= 0
	}

	if rsyncCheckVersion("3.1.0") {
		args = append(args, "--ignore-missing-args")
	}

	if rsyncArgs != "" {
		args = append(args, strings.Split(rsyncArgs, " ")...)
	}

	args = append(args, []string{path, "localhost:/tmp/foo"}...)
	args = append(args, []string{"-e", rsyncCmd}...)

	cmd := exec.Command("rsync", args...)
	cmd.Stdout = os.Stderr

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, nil, nil, err
	}

	if err := cmd.Start(); err != nil {
		return nil, nil, nil, err
	}

	conn, err := l.Accept()
	if err != nil {
		cmd.Process.Kill()
		cmd.Wait()
		return nil, nil, nil, err
	}
	l.Close()

	return cmd, conn, stderr, nil
}
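A hypothetical caller, relying only on the return values shown above (the real lxd-p2c caller forwards conn over a websocket instead of a plain io.Writer). This is a same-package fragment and assumes the io, ioutil, and fmt imports:

// sendPath streams the rsync byte stream to dst, then reaps the process.
func sendPath(path string, dst io.Writer) error {
	cmd, conn, stderr, err := rsyncSendSetup(path, "")
	if err != nil {
		return err
	}
	defer conn.Close()

	// Copy whatever rsync writes to the abstract unix socket.
	if _, err := io.Copy(dst, conn); err != nil {
		return err
	}

	// Surface rsync's own error output if the process fails.
	output, _ := ioutil.ReadAll(stderr)
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("rsync failed: %v (%s)", err, string(output))
	}
	return nil
}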
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/protocol_crypto.go#L85-L87
func (sender *gobTCPSender) Send(msg []byte) error {
	return sender.encoder.Encode(msg)
}
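The sender is just a gob encoder wrapped around a TCP connection. A minimal stdlib-only sketch of that pattern; the address is illustrative and mesh's own handshake and crypto are not reproduced here:

package main

import (
	"encoding/gob"
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:6783")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	enc := gob.NewEncoder(conn)
	// Each Encode call writes one self-delimiting gob value, which is what
	// gives the plain TCP sender its message framing.
	if err := enc.Encode([]byte("hello peer")); err != nil {
		log.Fatal(err)
	}
}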
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/cluster/config.go#L68-L73
func (c *Config) CandidServer() (string, string, int64, string) {
	return c.m.GetString("candid.api.url"),
		c.m.GetString("candid.api.key"),
		c.m.GetInt64("candid.expiry"),
		c.m.GetString("candid.domains")
}
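A call-site fragment, assuming a *Config named config already loaded from the cluster database and a standard library log import; how the config is loaded is outside this sketch:

// Unpack the four Candid-related settings in one call.
apiURL, apiKey, expiry, domains := config.CandidServer()
if apiURL != "" {
	log.Printf("candid endpoint %s (key=%q, expiry=%ds, domains=%q)",
		apiURL, apiKey, expiry, domains)
}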
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L7423-L7430
func (r *PlacementGroup) Locator(api *API) *PlacementGroupLocator {
	for _, l := range r.Links {
		if l["rel"] == "self" {
			return api.PlacementGroupLocator(l["href"])
		}
	}
	return nil
}
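A fragment showing the intended call pattern; it uses only the method above and assumes pg and api were obtained from a prior API call (imports: errors and github.com/rightscale/rsc/cm15):

// locatorFor resolves a placement group's own locator via its "self" link.
func locatorFor(pg *cm15.PlacementGroup, api *cm15.API) (*cm15.PlacementGroupLocator, error) {
	loc := pg.Locator(api)
	if loc == nil {
		return nil, errors.New("placement group has no self link")
	}
	// loc can now drive follow-up show/update/destroy requests.
	return loc, nil
}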
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/version/client.go#L52-L54
func PrettyPrintVersionNoAdditional(version *pb.Version) string {
	return fmt.Sprintf("%d.%d.%d", version.Major, version.Minor, version.Micro)
}
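A tiny fragment, using the same pb alias as the snippet above; the version numbers are made up:

v := &pb.Version{Major: 1, Minor: 9, Micro: 7}
fmt.Println(PrettyPrintVersionNoAdditional(v)) // -> 1.9.7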
https://github.com/segmentio/nsq-go/blob/ff4eef968f46eb580d9dba4f637c5dfb1e5b2208/message.go#L33-L45
func (id MessageID) WriteTo(w io.Writer) (int64, error) {
	a := [16]byte{}
	b := strconv.AppendUint(a[:0], uint64(id), 16)
	n := len(a) - len(b)

	copy(a[n:], b)

	for i := 0; i != n; i++ {
		a[i] = '0'
	}

	c, e := w.Write(a[:])
	return int64(c), e
}
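A self-contained check of the zero-padding behaviour, assuming MessageID is the uint64-based type the conversion above implies:

package main

import (
	"bytes"
	"fmt"

	nsq "github.com/segmentio/nsq-go"
)

func main() {
	var buf bytes.Buffer
	// 0xdead serializes as a 16-character, zero-padded hex string.
	if _, err := nsq.MessageID(0xdead).WriteTo(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // 000000000000dead
}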
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/easyjson.go#L5842-L5846
func (v *EventWebSocketClosed) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoNetwork46(&r, v)
	return r.Error()
}
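Because the generated method satisfies json.Unmarshaler, plain encoding/json dispatches to it. The JSON keys and the RequestID field follow the CDP event shape and are assumptions insofar as they are not shown above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/chromedp/cdproto/network"
)

func main() {
	raw := []byte(`{"requestId":"1000.1","timestamp":123456.789}`)

	var ev network.EventWebSocketClosed
	// json.Unmarshal calls the generated UnmarshalJSON above.
	if err := json.Unmarshal(raw, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.RequestID)
}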
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/server.go#L21-L98
func (a *App) Serve(srvs ...servers.Server) error {
	a.Logger.Infof("Starting application at %s", a.Options.Addr)

	payload := events.Payload{
		"app": a,
	}
	if err := events.EmitPayload(EvtAppStart, payload); err != nil {
		return err
	}

	if len(srvs) == 0 {
		if strings.HasPrefix(a.Options.Addr, "unix:") {
			tcp, err := servers.UnixSocket(a.Options.Addr[5:])
			if err != nil {
				return err
			}
			srvs = append(srvs, tcp)
		} else {
			srvs = append(srvs, servers.New())
		}
	}

	ctx, cancel := sigtx.WithCancel(a.Context, syscall.SIGTERM, os.Interrupt)
	defer cancel()

	go func() {
		// gracefully shut down the application when the context is cancelled
		<-ctx.Done()
		a.Logger.Info("Shutting down application")

		events.EmitError(EvtAppStop, ctx.Err(), payload)

		if err := a.Stop(ctx.Err()); err != nil {
			events.EmitError(EvtAppStopErr, err, payload)
			a.Logger.Error(err)
		}

		if !a.WorkerOff {
			// stop the workers
			a.Logger.Info("Shutting down worker")
			events.EmitPayload(EvtWorkerStop, payload)
			if err := a.Worker.Stop(); err != nil {
				events.EmitError(EvtWorkerStopErr, err, payload)
				a.Logger.Error(err)
			}
		}

		for _, s := range srvs {
			if err := s.Shutdown(ctx); err != nil {
				a.Logger.Error(err)
			}
		}
	}()

	// if configured to do so, start the workers
	if !a.WorkerOff {
		go func() {
			events.EmitPayload(EvtWorkerStart, payload)
			if err := a.Worker.Start(ctx); err != nil {
				a.Stop(err)
			}
		}()
	}

	for _, s := range srvs {
		s.SetAddr(a.Addr)
		go func(s servers.Server) {
			if err := s.Start(ctx, a); err != nil {
				a.Stop(err)
			}
		}(s)
	}

	<-ctx.Done()

	return a.Context.Err()
}
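A typical call site from a generated Buffalo project's main package; the actions package path and its App() constructor are project-local scaffolding, so treat them as assumptions here:

package main

import (
	"log"

	"github.com/myorg/myapp/actions" // hypothetical generated project package
)

func main() {
	app := actions.App()
	// Serve with no arguments picks a TCP or unix-socket server
	// based on app.Options.Addr, as shown above.
	if err := app.Serve(); err != nil {
		log.Fatal(err)
	}
}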
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/storage/storage_image.go#L520-L552
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
	// Build the diffID list. We need the decompressed sums that we've been calculating to
	// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
	// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
	var diffIDs []digest.Digest
	switch m := m.(type) {
	case *manifest.Schema1:
		// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
		// in reverse of the order in which they were originally listed.
		for i, compat := range m.ExtractedV1Compatibility {
			if compat.ThrowAway {
				continue
			}
			blobSum := m.FSLayers[i].BlobSum
			diffID, ok := s.blobDiffIDs[blobSum]
			if !ok {
				logrus.Infof("error looking up diffID for layer %q", blobSum.String())
				return ""
			}
			diffIDs = append([]digest.Digest{diffID}, diffIDs...)
		}
	case *manifest.Schema2, *manifest.OCI1:
		// We know the ID calculation for these formats doesn't actually use the diffIDs,
		// so we don't need to populate the diffID list.
	default:
		return ""
	}
	id, err := m.ImageID(diffIDs)
	if err != nil {
		return ""
	}
	return id
}
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/cluster/update.go#L688-L793
func updateFromV5(tx *sql.Tx) error {
	// Fetch the IDs of all existing nodes.
	nodeIDs, err := query.SelectIntegers(tx, "SELECT id FROM nodes")
	if err != nil {
		return errors.Wrap(err, "failed to get IDs of current nodes")
	}

	// Fetch the IDs of all existing ceph volumes.
	volumeIDs, err := query.SelectIntegers(tx, `
SELECT storage_volumes.id FROM storage_volumes
    JOIN storage_pools ON storage_volumes.storage_pool_id=storage_pools.id
    WHERE storage_pools.driver='ceph'
`)
	if err != nil {
		return errors.Wrap(err, "failed to get IDs of current ceph volumes")
	}

	// Fetch all existing ceph volumes.
	volumes := make([]struct {
		ID            int
		Name          string
		StoragePoolID int
		NodeID        int
		Type          int
		Description   string
	}, len(volumeIDs))
	sql := `
SELECT
    storage_volumes.id,
    storage_volumes.name,
    storage_volumes.storage_pool_id,
    storage_volumes.node_id,
    storage_volumes.type,
    storage_volumes.description
FROM storage_volumes
    JOIN storage_pools ON storage_volumes.storage_pool_id=storage_pools.id
    WHERE storage_pools.driver='ceph'
`
	stmt, err := tx.Prepare(sql)
	if err != nil {
		return err
	}
	defer stmt.Close()
	err = query.SelectObjects(stmt, func(i int) []interface{} {
		return []interface{}{
			&volumes[i].ID,
			&volumes[i].Name,
			&volumes[i].StoragePoolID,
			&volumes[i].NodeID,
			&volumes[i].Type,
			&volumes[i].Description,
		}
	})
	if err != nil {
		return errors.Wrap(err, "failed to fetch current volumes")
	}

	// Duplicate each volume row across all nodes, and keep track of the
	// new volume IDs that we've inserted.
	created := make(map[int][]int64, 0) // Existing volume ID to new volumes IDs.
	columns := []string{"name", "storage_pool_id", "node_id", "type", "description"}
	for _, volume := range volumes {
		for _, nodeID := range nodeIDs {
			if volume.NodeID == nodeID {
				// This node already has the volume row
				continue
			}
			values := []interface{}{
				volume.Name,
				volume.StoragePoolID,
				nodeID,
				volume.Type,
				volume.Description,
			}
			id, err := query.UpsertObject(tx, "storage_volumes", columns, values)
			if err != nil {
				return errors.Wrap(err, "failed to insert new volume")
			}
			_, ok := created[volume.ID]
			if !ok {
				created[volume.ID] = make([]int64, 0)
			}
			created[volume.ID] = append(created[volume.ID], id)
		}
	}

	// Duplicate each volume config row across all nodes.
	for id, newIDs := range created {
		config, err := query.SelectConfig(tx, "storage_volumes_config", "storage_volume_id=?", id)
		if err != nil {
			return errors.Wrap(err, "failed to fetch volume config")
		}
		for _, newID := range newIDs {
			for key, value := range config {
				_, err := tx.Exec(`
INSERT INTO storage_volumes_config(storage_volume_id, key, value) VALUES(?, ?, ?)
`, newID, key, value)
				if err != nil {
					return errors.Wrap(err, "failed to insert new volume config")
				}
			}
		}
	}

	return nil
}
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/indexeddb/easyjson.go#L1715-L1719
func (v DeleteObjectStoreEntriesParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoIndexeddb15(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
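As with the network event above, the generated method plugs into encoding/json. The field names follow the CDP IndexedDB.deleteObjectStoreEntries parameters rather than anything shown in this snippet, and the values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/chromedp/cdproto/indexeddb"
)

func main() {
	p := indexeddb.DeleteObjectStoreEntriesParams{
		SecurityOrigin:  "https://example.com",
		DatabaseName:    "appdb",
		ObjectStoreName: "sessions",
	}
	// json.Marshal goes through the generated MarshalJSON above.
	buf, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}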