| _id (stringlengths 86–170) | text (stringlengths 54–39.3k) |
|---|---|
https://github.com/golang/debug/blob/19561fee47cf8cd0400d1b094c5898002f97cf90/internal/gocore/dominator.go#L269-L275
|
// eval returns the label associated with v in the link-eval forest,
// compressing the ancestor path first. A root (ancestor == -1) is its
// own answer.
func (d *ltDom) eval(v vName) vName {
	if d.ancestor[v] != -1 {
		d.compress(v)
		return d.labels[v]
	}
	return v
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/pretty/pretty.go#L38-L41
|
// Duration renders a protobuf Duration in human-readable form.
// The conversion error is deliberately ignored; on failure the zero
// duration is formatted instead. NOTE(review): confirm callers accept
// a zero-duration rendering for malformed input.
func Duration(d *types.Duration) string {
	duration, _ := types.DurationFromProto(d)
	return units.HumanDuration(duration)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/clonerefs/options.go#L176-L183
|
// Set parses value as a refs spec and appends the result to the
// collection. It implements the flag.Value interface.
func (r *gitRefs) Set(value string) error {
	parsed, parseErr := ParseRefs(value)
	if parseErr != nil {
		return parseErr
	}
	r.gitRefs = append(r.gitRefs, *parsed)
	return nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/devlxd.go#L397-L407
|
// extractUnderlyingConn digs the *net.UnixConn out of an
// http.ResponseWriter via reflection and unsafe, reading the unexported
// conn and rwc fields of the concrete response value.
// NOTE(review): this depends on net/http internals and may break across
// Go releases — confirm against the Go version in use.
func extractUnderlyingConn(w http.ResponseWriter) *net.UnixConn {
	// Unwrap the concrete value behind the interface.
	v := reflect.Indirect(reflect.ValueOf(w))
	connPtr := v.FieldByName("conn")
	conn := reflect.Indirect(connPtr)
	// rwc holds the raw connection underlying the server's conn wrapper.
	rwc := conn.FieldByName("rwc")
	// The field is unexported, so take its address through unsafe to
	// read it.
	netConnPtr := (*net.Conn)(unsafe.Pointer(rwc.UnsafeAddr()))
	unixConnPtr := (*netConnPtr).(*net.UnixConn)
	return unixConnPtr
}
|
https://github.com/sclevine/agouti/blob/96599c91888f1b1cf2dccc7f1776ba7f511909e5/page.go#L502-L519
|
// Click performs the given click event (single, hold, or release) with
// the given mouse button, wrapping any failure with event and button
// context.
func (p *Page) Click(event Click, button Button) error {
	var err error
	apiButton := api.Button(button)
	switch event {
	case SingleClick:
		err = p.session.Click(apiButton)
	case HoldClick:
		err = p.session.ButtonDown(apiButton)
	case ReleaseClick:
		err = p.session.ButtonUp(apiButton)
	default:
		err = errors.New("invalid touch event")
	}
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to %s %s: %s", event, button, err)
}
|
https://github.com/hooklift/govix/blob/063702285520a992b920fc1575e305dc9ffd6ffe/vm.go#L33-L51
|
// NewVirtualMachine wraps an existing VIX handle and its VMX file into
// a *VM. The VMX file is loaded into memory up front; a finalizer is
// registered so VIX resources are released when the VM value is
// garbage collected.
func NewVirtualMachine(handle C.VixHandle, vmxpath string) (*VM, error) {
	vmxfile := &VMXFile{path: vmxpath}
	// Loads VMX file in memory
	if err := vmxfile.Read(); err != nil {
		return nil, err
	}
	vm := &VM{
		handle:  handle,
		vmxfile: vmxfile,
	}
	runtime.SetFinalizer(vm, cleanupVM)
	return vm, nil
}
|
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/exp/cpp/export/export.go#L442-L450
|
// FileSequence_Frame_Int returns the path for the given frame of the
// identified sequence as a C string. The caller must free the returned
// string. An unknown id yields an empty string.
func FileSequence_Frame_Int(id FileSeqId, frame int) *C.char {
	fs, ok := sFileSeqs.Get(id)
	if !ok {
		return C.CString("")
	}
	// Frame's error is deliberately ignored — NOTE(review): confirm an
	// empty/partial result is acceptable on error.
	s, _ := fs.Frame(frame)
	return C.CString(s)
}
|
https://github.com/pact-foundation/pact-go/blob/467dea56d27e154363e1975f6e9f4dbf66148e79/client/service_manager.go#L104-L111
|
// Command builds an *exec.Cmd for the managed service, with the current
// process environment plus s.Env appended after it.
func (s *ServiceManager) Command() *exec.Cmd {
	cmd := exec.Command(s.Cmd, s.Args...)
	cmd.Env = append(os.Environ(), s.Env...)
	return cmd
}
|
https://github.com/kokardy/listing/blob/795534c33c5ab6be8b85a15951664ab11fb70ea7/perm.go#L66-L84
|
// repeated_permutations streams every permutation with repetition of
// length select_num drawn from list over a channel with buffer buf.
// The channel is closed after the final permutation is sent.
func repeated_permutations(list []int, select_num, buf int) (c chan []int) {
	c = make(chan []int, buf)
	go func() {
		defer close(c)
		if select_num == 1 {
			// Base case: one singleton per element.
			for _, v := range list {
				c <- []int{v}
			}
			return
		}
		// Prefix each element onto every shorter permutation.
		for _, head := range list {
			for tail := range repeated_permutations(list, select_num-1, buf) {
				c <- append([]int{head}, tail...)
			}
		}
	}()
	return
}
|
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/app/app.go#L1644-L1654
|
// AddCName runs the CName-addition pipeline for the app. A route
// rebuild is enqueued regardless of whether the pipeline succeeded.
func (app *App) AddCName(cnames ...string) error {
	pipeline := action.NewPipeline(
		&validateNewCNames,
		&setNewCNamesToProvisioner,
		&saveCNames,
		&updateApp,
	)
	err := pipeline.Execute(app, cnames)
	rebuild.RoutesRebuildOrEnqueue(app.Name)
	return err
}
|
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/internal/api_common.go#L93-L95
|
// NamespacedContext returns a copy of ctx associated with the given
// namespace.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
	return withNamespace(ctx, namespace)
}
|
https://github.com/siddontang/go-log/blob/1e957dd83bed18c84716181da7b80d4af48eaefe/log/log.go#L26-L29
|
// Fatal logs the arguments at fatal level and terminates the process
// with exit status 1. Deferred functions do not run (os.Exit).
// The calldepth of 2 attributes the log line to Fatal's caller.
func Fatal(args ...interface{}) {
	logger.Output(2, LevelFatal, fmt.Sprint(args...))
	os.Exit(1)
}
|
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/provision/docker/handlers.go#L56-L100
|
func moveContainerHandler(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {
params := map[string]string{}
err = api.ParseInput(r, ¶ms)
if err != nil {
return err
}
contId := r.URL.Query().Get(":id")
to := params["to"]
if to == "" {
return &tsuruErrors.ValidationError{Message: fmt.Sprintf("Invalid params: id: %q - to: %q", contId, to)}
}
cont, err := mainDockerProvisioner.GetContainer(contId)
if err != nil {
return &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
}
permContexts, err := moveContainersPermissionContexts(cont.HostAddr, to)
if err != nil {
return err
}
if !permission.Check(t, permission.PermNodeUpdateMoveContainer, permContexts...) {
return permission.ErrUnauthorized
}
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeContainer, Value: contId},
Kind: permission.PermNodeUpdateMoveContainer,
Owner: t,
CustomData: event.FormToCustomData(r.Form),
Allowed: event.Allowed(permission.PermPoolReadEvents, permContexts...),
})
if err != nil {
return err
}
defer func() { evt.Done(err) }()
w.Header().Set("Content-Type", "application/x-json-stream")
keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
defer keepAliveWriter.Stop()
writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
evt.SetLogWriter(writer)
_, err = mainDockerProvisioner.moveContainer(contId, to, evt)
if err != nil {
return errors.Wrap(err, "Error trying to move container")
}
fmt.Fprintf(writer, "Containers moved successfully!\n")
return nil
}
|
https://github.com/kpango/glg/blob/68d2670cb2dbff047331daad841149a82ac37796/glg.go#L570-L586
|
// HTTPLoggerFunc wraps hf in a handler that, after the wrapped handler
// returns, logs the method, URI, handler name and elapsed time.
// If logging fails, the failure is logged at error level; if that also
// fails it is printed to stdout as a last resort.
func (g *Glg) HTTPLoggerFunc(name string, hf http.HandlerFunc) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := fastime.Now()
		hf(w, r)
		err := g.Logf("Method: %s\tURI: %s\tName: %s\tTime: %s",
			r.Method, r.RequestURI, name, fastime.Now().Sub(start).String())
		if err != nil {
			err = g.Error(err)
			if err != nil {
				fmt.Println(err)
			}
		}
	})
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/fragmenting_writer.go#L246-L296
|
// Close finishes the argument currently being written. It finalizes
// the in-progress chunk and then either completes the stream (last
// argument), leaves the current fragment open for the next argument,
// or flushes the full fragment and starts a new one with an empty
// terminating chunk.
func (w *fragmentingWriter) Close() error {
	last := w.state == fragmentingWriteInLastArgument
	if w.err != nil {
		return w.err
	}
	if !w.state.isWritingArgument() {
		w.err = errNotWritingArgument
		return w.err
	}
	w.curChunk.finish()
	// There are three possibilities here:
	// 1. There are no more arguments
	//    flush with more_fragments=false, mark the stream as complete
	// 2. There are more arguments, but we can't fit more data into this fragment
	//    flush with more_fragments=true, start new fragment, write empty chunk to indicate
	//    the current argument is complete
	// 3. There are more arguments, and we can fit more data into this fragment
	//    update the chunk but leave the current fragment open
	if last {
		// No more arguments - flush this final fragment and mark ourselves complete
		w.state = fragmentingWriteComplete
		w.curFragment.finish(false)
		w.err = w.sender.flushFragment(w.curFragment)
		w.sender.doneSending()
		return w.err
	}
	w.state = fragmentingWriteWaitingForArgument
	if w.curFragment.contents.BytesRemaining() > chunkHeaderSize {
		// There's enough room in this fragment for the next argument's
		// initial chunk, so we're done here
		return nil
	}
	// This fragment is full - flush and prepare for another argument
	w.curFragment.finish(true)
	if w.err = w.sender.flushFragment(w.curFragment); w.err != nil {
		return w.err
	}
	if w.curFragment, w.err = w.sender.newFragment(false, w.checksum); w.err != nil {
		return w.err
	}
	// Write an empty chunk to indicate this argument has ended
	w.curFragment.contents.WriteUint16(0)
	return nil
}
|
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/example/models.go#L58-L70
|
// Validate checks that the item has a non-empty name and that both
// timestamps have been set, returning the first violation found.
func (i *Item) Validate() error {
	switch {
	case utf8.RuneCountInString(i.Name) < 1:
		// check name
		return fire.E("missing name")
	case i.Created.IsZero() || i.Updated.IsZero():
		// check timestamps
		return fire.E("missing timestamp")
	}
	return nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/zap_grpc.go#L26-L32
|
// NewGRPCLoggerV2 builds a grpclog.LoggerV2 from the given zap config.
func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
	// Skip one caller frame so log sites outside "logutil" are annotated.
	lg, err := lcfg.Build(zap.AddCallerSkip(1))
	if err != nil {
		return nil, err
	}
	logger := &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
	return logger, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/robots/issue-creator/sources/flakyjob-reporter.go#L165-L167
|
// Title renders the issue title for this flaky job.
// NOTE(review): dereferences fj.FlakeCount without a nil check — a nil
// pointer would panic here; confirm callers always populate it.
func (fj *FlakyJob) Title() string {
	return fmt.Sprintf("%s flaked %d times in the past week", fj.Name, *fj.FlakeCount)
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/praxisgen/action_analysis.go#L275-L291
|
// toPattern builds a gen.PathPattern for the verb/path pair: a
// printf-style pattern, a capturing regexp for the path variables, and
// the list of variable names (when the path has any).
func toPattern(verb, path string) *gen.PathPattern {
	pattern := &gen.PathPattern{
		HTTPMethod: verb,
		Path:       path,
		Pattern:    pathVariablesRegexp.ReplaceAllLiteralString(path, "/%s"),
		Regexp: pathVariablesRegexp.ReplaceAllLiteralString(regexp.QuoteMeta(path),
			`/([^/]+)`),
	}
	matches := pathVariablesRegexp.FindAllStringSubmatch(path, -1)
	if len(matches) > 0 {
		pattern.Variables = make([]string, len(matches))
		for i, m := range matches {
			pattern.Variables[i] = m[1]
		}
	}
	return pattern
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L665-L668
|
// String returns the name of the account flag, or the empty string for
// an unknown value. (Note: this file is code-generated; the fix drops
// an unnecessary blank-identifier assignment — a map index already
// yields the zero value for missing keys.)
func (e AccountFlags) String() string {
	return accountFlagsMap[int32(e)]
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gerrit/client/client.go#L225-L237
|
// GetBranchRevision returns the current revision of branch in project
// on the given gerrit instance.
func (c *Client) GetBranchRevision(instance, project, branch string) (string, error) {
	handler, found := c.handlers[instance]
	if !found {
		return "", fmt.Errorf("not activated gerrit instance: %s", instance)
	}
	res, _, err := handler.projectService.GetBranch(project, branch)
	if err != nil {
		return "", err
	}
	return res.Revision, nil
}
|
https://github.com/segmentio/nsq-go/blob/ff4eef968f46eb580d9dba4f637c5dfb1e5b2208/consumer.go#L114-L122
|
// StartConsumer creates a consumer from config and starts it, returning
// the running consumer.
func StartConsumer(config ConsumerConfig) (c *Consumer, err error) {
	if c, err = NewConsumer(config); err != nil {
		return c, err
	}
	c.Start()
	return c, err
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/releasenote/releasenote.go#L308-L321
|
// determineReleaseNoteLabel maps the release-note block in a PR body to
// the label that should be applied.
func determineReleaseNoteLabel(body string) string {
	note := strings.ToLower(strings.TrimSpace(getReleaseNote(body)))
	switch {
	case note == "":
		return ReleaseNoteLabelNeeded
	case noneRe.MatchString(note):
		return releaseNoteNone
	case strings.Contains(note, actionRequiredNote):
		return releaseNoteActionRequired
	default:
		return releaseNote
	}
}
|
https://github.com/Knetic/govaluate/blob/9aa49832a739dcd78a5542ff189fb82c3e423116/stagePlanner.go#L181-L196
|
// planStages compiles tokens into an evaluation stage tree, then
// re-orders same-precedence operators and applies literal elision.
func planStages(tokens []ExpressionToken) (*evaluationStage, error) {
	root, err := planTokens(newTokenStream(tokens))
	if err != nil {
		return nil, err
	}
	// while we're now fully-planned, we now need to re-order same-precedence operators.
	// this could probably be avoided with a different planning method
	reorderStages(root)
	return elideLiterals(root), nil
}
|
https://github.com/naoina/genmai/blob/78583835e1e41e3938e1ddfffd7101f8ad27fae0/genmai.go#L1204-L1206
|
// Offset appends an OFFSET clause with the given value to the condition.
// NOTE(review): 700 appears to be the clause's ordering key passed to
// appendQuery — confirm against appendQuery's contract.
func (c *Condition) Offset(offset int) *Condition {
	return c.appendQuery(700, Offset, offset)
}
|
https://github.com/antlinker/go-dirtyfilter/blob/533f538ffaa112776b1258c3db63e6f55648e18b/nodefilter.go#L48-L56
|
// NewNodeFilter builds a DirtyFilter seeded with the given dirty words.
func NewNodeFilter(text []string) DirtyFilter {
	filter := &nodeFilter{root: newNode()}
	for _, word := range text {
		filter.addDirtyWords(word)
	}
	return filter
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/serviceworker/easyjson.go#L962-L966
|
// MarshalJSON implements json.Marshaler via the easyjson-generated
// encoder. This code is generated — do not edit by hand.
func (v EventWorkerVersionUpdated) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoServiceworker10(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L6409-L6413
|
// MarshalJSON implements json.Marshaler via the easyjson-generated
// encoder. This code is generated — do not edit by hand.
func (v EventDomContentEventFired) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage68(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/tarfile/src.go#L262-L267
|
// Close removes the temporary tarball when this Source owns it;
// otherwise it is a no-op.
func (s *Source) Close() error {
	if !s.removeTarPathOnClose {
		return nil
	}
	return os.Remove(s.tarPath)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/pkg/ghclient/core.go#L123-L146
|
// depaginate repeatedly invokes call, advancing opts.Page until the
// last page reported by the API has been fetched, and returns the
// concatenated items. Each page fetch goes through c.retry.
func (c *Client) depaginate(action string, opts *github.ListOptions, call func() ([]interface{}, *github.Response, error)) ([]interface{}, error) {
	var allItems []interface{}
	// wrapper accumulates successful page results in allItems so that
	// c.retry only has to deal with the response and error.
	wrapper := func() (*github.Response, error) {
		items, resp, err := call()
		if err == nil {
			allItems = append(allItems, items...)
		}
		return resp, err
	}
	opts.Page = 1
	opts.PerPage = 100
	lastPage := 1
	for ; opts.Page <= lastPage; opts.Page++ {
		resp, err := c.retry(action, wrapper)
		if err != nil {
			// Items gathered so far are returned alongside the error.
			return allItems, fmt.Errorf("error while depaginating page %d/%d: %v", opts.Page, lastPage, err)
		}
		// A zero LastPage means the response carried no pagination
		// info; keep the current bound.
		if resp.LastPage > 0 {
			lastPage = resp.LastPage
		}
	}
	return allItems, nil
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L4961-L4968
|
// Locator returns a locator built from the binding's "self" link, or
// nil when the resource has no such link.
func (r *IpAddressBinding) Locator(api *API) *IpAddressBindingLocator {
	for _, link := range r.Links {
		if link["rel"] != "self" {
			continue
		}
		return api.IpAddressBindingLocator(link["href"])
	}
	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/emulation/emulation.go#L512-L515
|
// WithBudget returns a copy of the params with Budget set. The value
// receiver means p is copied on call, so the caller's params are left
// unmodified.
func (p SetVirtualTimePolicyParams) WithBudget(budget float64) *SetVirtualTimePolicyParams {
	p.Budget = budget
	return &p
}
|
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L1024-L1041
|
// Read returns the value bytes stored at vp, a callback to release the
// underlying buffer, and an error. Reads at or beyond the current
// write offset of the active (max fid) log file are rejected.
func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
	// Check for valid offset if we are reading to writable log.
	maxFid := atomic.LoadUint32(&vlog.maxFid)
	if vp.Fid == maxFid && vp.Offset >= vlog.woffset() {
		return nil, nil, errors.Errorf(
			"Invalid value pointer offset: %d greater than current offset: %d",
			vp.Offset, vlog.woffset())
	}
	buf, cb, err := vlog.readValueBytes(vp, s)
	if err != nil {
		return nil, cb, err
	}
	// The buffer holds a header, the key, then the value; slice out
	// just the value bytes.
	var h header
	h.Decode(buf)
	n := uint32(headerBufSize) + h.klen
	return buf[n : n+h.vlen], cb, nil
}
|
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/vm/state.go#L119-L125
|
// CurrentMark returns the most recent mark on the mark stack, or 0
// when the stack is empty (Top returned an error).
func (st *State) CurrentMark() int {
	x, err := st.markstack.Top()
	if err != nil {
		// Empty stack: fall back to mark 0.
		x = 0
	}
	return x.(int)
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/copy/copy.go#L771-L890
|
// copyBlobFromStream copies srcStream (described by srcInfo) to c.dest,
// verifying the source digest along the way and — when canModifyBlob —
// compressing or decompressing the stream to match the destination's
// desired layer compression. When getOriginalLayerCopyWriter is
// non-nil, a copy of the original stream is tee'd to the writer it
// returns. It returns the BlobInfo describing the uploaded blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
	canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
	// The copying happens through a pipeline of connected io.Readers.
	// === Input: srcStream
	// === Process input through digestingReader to validate against the expected digest.
	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
	// use a separate validation failure indicator.
	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
	// dest.PutBlob may detect that the layer already exists, in which case we don't
	// read stream to the end, and validation does not happen.
	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
	if err != nil {
		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
	}
	var destStream io.Reader = digestingReader
	// === Detect compression of the input stream.
	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
	decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
	if err != nil {
		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	isCompressed := decompressor != nil
	destStream = bar.ProxyReader(destStream)
	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
	if getOriginalLayerCopyWriter != nil {
		destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
		originalLayerReader = destStream
	}
	// === Deal with layer compression/decompression if necessary
	var inputInfo types.BlobInfo
	var compressionOperation types.LayerCompression
	if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
		logrus.Debugf("Compressing blob on the fly")
		compressionOperation = types.Compress
		pipeReader, pipeWriter := io.Pipe()
		defer pipeReader.Close()
		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
		// we don’t care.
		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
		destStream = pipeReader
		// Size/digest are unknown after recompression; PutBlob computes them.
		inputInfo.Digest = ""
		inputInfo.Size = -1
	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
		logrus.Debugf("Blob will be decompressed")
		compressionOperation = types.Decompress
		s, err := decompressor(destStream)
		if err != nil {
			return types.BlobInfo{}, err
		}
		defer s.Close()
		destStream = s
		inputInfo.Digest = ""
		inputInfo.Size = -1
	} else {
		logrus.Debugf("Using original blob without modification")
		compressionOperation = types.PreserveOriginal
		inputInfo = srcInfo
	}
	// === Report progress using the c.progress channel, if required.
	if c.progress != nil && c.progressInterval > 0 {
		destStream = &progressReader{
			source:   destStream,
			channel:  c.progress,
			interval: c.progressInterval,
			artifact: srcInfo,
			lastTime: time.Now(),
		}
	}
	// === Finally, send the layer stream to dest.
	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
	if err != nil {
		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
	}
	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
	// So, read everything from originalLayerReader, which will cause the rest to be
	// sent there if we are not already at EOF.
	if getOriginalLayerCopyWriter != nil {
		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
		_, err := io.Copy(ioutil.Discard, originalLayerReader)
		if err != nil {
			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
		}
	}
	if digestingReader.validationFailed { // Coverage: This should never happen.
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
	}
	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
	}
	if digestingReader.validationSucceeded {
		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
		// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
		// (because inputInfo.Digest == "", this must have been computed afresh).
		switch compressionOperation {
		case types.PreserveOriginal:
			break // Do nothing, we have only one digest and we might not have even verified it.
		case types.Compress:
			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
		case types.Decompress:
			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
		default:
			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
		}
	}
	return uploadedInfo, nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/endpoints/network.go#L35-L40
|
// NetworkCert returns the certificate info used by the network
// endpoint. Safe for concurrent use; guarded by the read lock.
func (e *Endpoints) NetworkCert() *shared.CertInfo {
	e.mu.RLock()
	defer e.mu.RUnlock()
	return e.cert
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pfs/server/obj_block_api_server.go#L69-L110
|
// newObjBlockAPIServer builds a block API server backed by objClient,
// splitting cacheBytes across the object, tag, object-info and block
// groupcache groups. In test mode the group names get a UUID suffix so
// repeated instantiations don't collide, and cache stats are not
// registered. A GC watcher goroutine is started against etcdAddress.
func newObjBlockAPIServer(dir string, cacheBytes int64, etcdAddress string, objClient obj.Client, test bool) (*objBlockAPIServer, error) {
	// defensive measure to make sure storage is working and error early if it's not
	// this is where we'll find out if the credentials have been misconfigured
	if err := obj.TestStorage(context.Background(), objClient); err != nil {
		return nil, err
	}
	// One "share" of the total budget; each cache gets a weighted
	// multiple of this.
	oneCacheShare := cacheBytes / (objectCacheShares + tagCacheShares + objectInfoCacheShares + blockCacheShares)
	s := &objBlockAPIServer{
		Logger:           log.NewLogger("pfs.BlockAPI.Obj"),
		dir:              dir,
		objClient:        objClient,
		objectIndexes:    make(map[string]*pfsclient.ObjectIndex),
		objectCacheBytes: oneCacheShare * objectCacheShares,
	}
	objectGroupName := "object"
	tagGroupName := "tag"
	objectInfoGroupName := "objectInfo"
	blockGroupName := "block"
	if test {
		uuid := uuid.New()
		objectGroupName += uuid
		tagGroupName += uuid
		objectInfoGroupName += uuid
		blockGroupName += uuid
	}
	s.objectCache = groupcache.NewGroup(objectGroupName, oneCacheShare*objectCacheShares, groupcache.GetterFunc(s.objectGetter))
	s.tagCache = groupcache.NewGroup(tagGroupName, oneCacheShare*tagCacheShares, groupcache.GetterFunc(s.tagGetter))
	s.objectInfoCache = groupcache.NewGroup(objectInfoGroupName, oneCacheShare*objectInfoCacheShares, groupcache.GetterFunc(s.objectInfoGetter))
	s.blockCache = groupcache.NewGroup(blockGroupName, oneCacheShare*blockCacheShares, groupcache.GetterFunc(s.blockGetter))
	if !test {
		RegisterCacheStats("tag", &s.tagCache.Stats)
		RegisterCacheStats("object", &s.objectCache.Stats)
		RegisterCacheStats("object_info", &s.objectInfoCache.Stats)
	}
	go s.watchGC(etcdAddress)
	return s, nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_ceph_utils.go#L1078-L1239
|
// cephContainerSnapshotDelete deletes the RBD snapshot of the given
// storage volume, or — when clones still depend on it — marks it as a
// zombie instead. Return values: 0 when the snapshot (and, for zombie
// parents, the image) was fully deleted; 1 when the snapshot had to be
// kept (renamed to a zombie or clones remain); -1 on error.
//
// Fixes vs. the original: logger.Debug with format arguments is now
// logger.Debugf (the format was never applied), and the renamed-snapshot
// log entry uses the same "%s/%s_%s@%s" field order
// (pool/type_name@snap) as logSnapshotEntry.
func cephContainerSnapshotDelete(clusterName string, poolName string,
	volumeName string, volumeType string, snapshotName string,
	userName string) int {
	logImageEntry := fmt.Sprintf("%s/%s_%s", poolName, volumeType, volumeName)
	logSnapshotEntry := fmt.Sprintf("%s/%s_%s@%s", poolName, volumeType,
		volumeName, snapshotName)
	clones, err := cephRBDSnapshotListClones(clusterName, poolName,
		volumeName, volumeType, snapshotName, userName)
	if err != nil {
		if err != db.ErrNoSuchObject {
			logger.Errorf(`Failed to list clones of RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
			return -1
		}
		logger.Debugf(`RBD snapshot "%s" of RBD storage volume "%s" does not have any clones`, logSnapshotEntry, logImageEntry)
		// unprotect
		err = cephRBDSnapshotUnprotect(clusterName, poolName, volumeName,
			volumeType, snapshotName, userName)
		if err != nil {
			logger.Errorf(`Failed to unprotect RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
			return -1
		}
		logger.Debugf(`Unprotected RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
		// unmap
		err = cephRBDVolumeSnapshotUnmap(clusterName, poolName,
			volumeName, volumeType, snapshotName, userName, true)
		if err != nil {
			logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
			return -1
		}
		logger.Debugf(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
		// delete
		err = cephRBDSnapshotDelete(clusterName, poolName, volumeName,
			volumeType, snapshotName, userName)
		if err != nil {
			logger.Errorf(`Failed to delete RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
			return -1
		}
		logger.Debugf(`Deleted RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
		// Only delete the parent image if it is a zombie. If it is not
		// we know that LXD is still using it.
		if strings.HasPrefix(volumeType, "zombie_") {
			ret := cephContainerDelete(clusterName, poolName,
				volumeName, volumeType, userName)
			if ret < 0 {
				logger.Errorf(`Failed to delete RBD storage volume "%s"`,
					logImageEntry)
				return -1
			}
			logger.Debugf(`Deleted RBD storage volume "%s"`, logImageEntry)
		}
		return 0
	} else {
		logger.Debugf(`Detected "%v" as clones of RBD snapshot "%s" of RBD storage volume "%s"`, clones, logSnapshotEntry, logImageEntry)
		canDelete := true
		for _, clone := range clones {
			clonePool, cloneType, cloneName, err := parseClone(clone)
			if err != nil {
				logger.Errorf(`Failed to parse clone "%s" of RBD snapshot "%s" of RBD storage volume "%s"`, clone, logSnapshotEntry, logImageEntry)
				return -1
			}
			logger.Debugf(`Split clone "%s" of RBD snapshot "%s" of RBD storage volume "%s" into pool name "%s", volume type "%s", and volume name "%s"`, clone, logSnapshotEntry, logImageEntry, clonePool, cloneType, cloneName)
			if !strings.HasPrefix(cloneType, "zombie_") {
				// A live clone blocks deletion of the snapshot.
				canDelete = false
				continue
			}
			ret := cephContainerDelete(clusterName, clonePool,
				cloneName, cloneType, userName)
			if ret < 0 {
				logger.Errorf(`Failed to delete clone "%s" of RBD snapshot "%s" of RBD storage volume "%s"`, clone, logSnapshotEntry, logImageEntry)
				return -1
			} else if ret == 1 {
				// Only marked as zombie
				canDelete = false
			}
		}
		if canDelete {
			logger.Debugf(`Deleted all clones of RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			// unprotect
			err = cephRBDSnapshotUnprotect(clusterName, poolName,
				volumeName, volumeType, snapshotName, userName)
			if err != nil {
				logger.Errorf(`Failed to unprotect RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
				return -1
			}
			logger.Debugf(`Unprotected RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			// unmap
			err = cephRBDVolumeSnapshotUnmap(clusterName, poolName,
				volumeName, volumeType, snapshotName, userName,
				true)
			if err != nil {
				logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
				return -1
			}
			logger.Debugf(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			// delete
			err = cephRBDSnapshotDelete(clusterName, poolName,
				volumeName, volumeType, snapshotName, userName)
			if err != nil {
				logger.Errorf(`Failed to delete RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
				return -1
			}
			logger.Debugf(`Deleted RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			// Only delete the parent image if it is a zombie. If it
			// is not we know that LXD is still using it.
			if strings.HasPrefix(volumeType, "zombie_") {
				ret := cephContainerDelete(clusterName,
					poolName, volumeName, volumeType,
					userName)
				if ret < 0 {
					logger.Errorf(`Failed to delete RBD storage volume "%s"`, logImageEntry)
					return -1
				}
				logger.Debugf(`Deleted RBD storage volume "%s"`,
					logImageEntry)
			}
		} else {
			logger.Debugf(`Could not delete all clones of RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			if strings.HasPrefix(snapshotName, "zombie_") {
				// Already a zombie; nothing more to do.
				return 1
			}
			err := cephRBDVolumeSnapshotUnmap(clusterName, poolName,
				volumeName, volumeType, snapshotName, userName,
				true)
			if err != nil {
				logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
				return -1
			}
			// Fixed: was logger.Debug, which does not interpolate.
			logger.Debugf(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
			newSnapshotName := fmt.Sprintf("zombie_%s", snapshotName)
			// Fixed: field order now matches logSnapshotEntry
			// (pool/type_name@snap).
			logSnapshotNewEntry := fmt.Sprintf("%s/%s_%s@%s",
				poolName, volumeType, volumeName, newSnapshotName)
			err = cephRBDVolumeSnapshotRename(clusterName, poolName,
				volumeName, volumeType, snapshotName,
				newSnapshotName, userName)
			if err != nil {
				logger.Errorf(`Failed to rename RBD snapshot "%s" of RBD storage volume "%s" to %s`, logSnapshotEntry, logImageEntry, logSnapshotNewEntry)
				return -1
			}
			logger.Debugf(`Renamed RBD snapshot "%s" of RBD storage volume "%s" to %s`, logSnapshotEntry, logImageEntry, logSnapshotNewEntry)
		}
	}
	return 1
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/hook/server.go#L164-L190
|
// needDemux returns the external plugins registered for srcRepo (or its
// org) that subscribe to eventType, or to all events when their Events
// list is empty.
// Fix: the range variable was named "plugins", shadowing the imported
// plugins package used in the signature; renamed to repoPlugins.
func (s *Server) needDemux(eventType, srcRepo string) []plugins.ExternalPlugin {
	var matching []plugins.ExternalPlugin
	srcOrg := strings.Split(srcRepo, "/")[0]
	for repo, repoPlugins := range s.Plugins.Config().ExternalPlugins {
		// Make sure the repositories match
		if repo != srcRepo && repo != srcOrg {
			continue
		}
		// Make sure the events match
		for _, p := range repoPlugins {
			if len(p.Events) == 0 {
				// No event filter: the plugin receives everything.
				matching = append(matching, p)
				continue
			}
			for _, et := range p.Events {
				if et == eventType {
					matching = append(matching, p)
					break
				}
			}
		}
	}
	return matching
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L234-L277
|
// dialSetupOpts assembles the grpc.DialOptions used to dial an
// endpoint: optional client keepalive, the caller's extra options, a
// context-aware net dialer, transport credentials (or insecure), and
// the retry/backoff stream and unary interceptors.
func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
	if c.cfg.DialKeepAliveTime > 0 {
		params := keepalive.ClientParameters{
			Time:                c.cfg.DialKeepAliveTime,
			Timeout:             c.cfg.DialKeepAliveTimeout,
			PermitWithoutStream: c.cfg.PermitWithoutStream,
		}
		opts = append(opts, grpc.WithKeepaliveParams(params))
	}
	opts = append(opts, dopts...)
	// Provide a net dialer that supports cancelation and timeout.
	f := func(dialEp string, t time.Duration) (net.Conn, error) {
		// The parse error is ignored — NOTE(review): confirm malformed
		// endpoints are filtered before dialing.
		proto, host, _ := endpoint.ParseEndpoint(dialEp)
		// Fail fast if the client context was already canceled.
		select {
		case <-c.ctx.Done():
			return nil, c.ctx.Err()
		default:
		}
		dialer := &net.Dialer{Timeout: t}
		return dialer.DialContext(c.ctx, proto, host)
	}
	opts = append(opts, grpc.WithDialer(f))
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	// Interceptor retry and backoff.
	// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
	// once it is available.
	rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
	opts = append(opts,
		// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
		// Streams that are safe to retry are enabled individually.
		grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
		grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
	)
	return opts, nil
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/ss/ssd/codegen_client.go#L389-L391
|
// TemplateLocator builds a TemplateLocator for the resource at href.
func (api *API) TemplateLocator(href string) *TemplateLocator {
	return &TemplateLocator{Href(href), api}
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/queue.go#L43-L77
|
// Dequeue removes and returns the first item in the queue, blocking
// until one is available. If another client claims the candidate key
// first, or the listing was partial, it retries via recursion.
// NOTE(review): heavy contention could recurse deeply — confirm this
// is acceptable for expected workloads.
func (q *Queue) Dequeue() (string, error) {
	// TODO: fewer round trips by fetching more than one key
	resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...)
	if err != nil {
		return "", err
	}
	kv, err := claimFirstKey(q.client, resp.Kvs)
	if err != nil {
		return "", err
	} else if kv != nil {
		return string(kv.Value), nil
	} else if resp.More {
		// missed some items, retry to read in more
		return q.Dequeue()
	}
	// nothing yet; wait on elements
	ev, err := WaitPrefixEvents(
		q.client,
		q.keyPrefix,
		resp.Header.Revision,
		[]mvccpb.Event_EventType{mvccpb.PUT})
	if err != nil {
		return "", err
	}
	// Claim the newly observed key; if someone else got it first,
	// start over.
	ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision)
	if err != nil {
		return "", err
	} else if !ok {
		return q.Dequeue()
	}
	return string(ev.Kv.Value), err
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/pkg/sysregistriesv2/system_registries_v2.go#L405-L408
|
// readRegistryConf reads the registries configuration file at configPath,
// returning its raw bytes or the underlying read error.
func readRegistryConf(configPath string) ([]byte, error) {
	// Return the (bytes, error) pair directly; the intermediate variables
	// in the previous version added nothing.
	return ioutil.ReadFile(configPath)
}
|
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/french/step1.go#L9-L255
|
// step1 applies the "standard suffix removal" step of the Snowball French
// stemmer: it finds the longest matching suffix from a fixed list and
// removes or rewrites it depending on which region (R1/R2/RV) it falls in.
// It returns true when the word was changed in a way that ends step 1;
// some rewrites deliberately return false so steps 2a/2b still run.
func step1(word *snowballword.SnowballWord) bool {
	// Longest-match suffix search over the step-1 suffix inventory.
	suffix, suffixRunes := word.FirstSuffix(
		"issements", "issement", "atrices", "utions", "usions", "logies",
		"emment", "ements", "atrice", "ations", "ateurs", "amment", "ution",
		"usion", "ments", "logie", "istes", "ismes", "iqUes", "euses",
		"ences", "ement", "ation", "ateur", "ances", "ables", "ment",
		"ités", "iste", "isme", "iqUe", "euse", "ence", "eaux", "ance",
		"able", "ives", "ité", "eux", "aux", "ive", "ifs", "if",
	)
	if suffix == "" {
		return false
	}
	// A suffix lies in a region when the region starts at or before the
	// suffix's first rune.
	isInR1 := (word.R1start <= len(word.RS)-len(suffixRunes))
	isInR2 := (word.R2start <= len(word.RS)-len(suffixRunes))
	isInRV := (word.RVstart <= len(word.RS)-len(suffixRunes))
	// Handle simple replacements & deletions in R2 first
	if isInR2 {
		// Handle simple replacements in R2
		repl := ""
		switch suffix {
		case "logie", "logies":
			repl = "log"
		case "usion", "ution", "usions", "utions":
			repl = "u"
		case "ence", "ences":
			repl = "ent"
		}
		if repl != "" {
			word.ReplaceSuffixRunes(suffixRunes, []rune(repl), true)
			return true
		}
		// Handle simple deletions in R2
		switch suffix {
		case "ance", "iqUe", "isme", "able", "iste", "eux", "ances", "iqUes", "ismes", "ables", "istes":
			word.RemoveLastNRunes(len(suffixRunes))
			return true
		}
	}
	// Handle simple replacements in RV
	if isInRV {
		// NOTE: these are "special" suffixes in that
		// we must still do steps 2a and 2b of the
		// French stemmer even when these suffixes are
		// found in step1. Therefore, we are returning
		// `false` here.
		repl := ""
		switch suffix {
		case "amment":
			repl = "ant"
		case "emment":
			repl = "ent"
		}
		if repl != "" {
			word.ReplaceSuffixRunes(suffixRunes, []rune(repl), true)
			return false
		}
		// Delete if preceded by a vowel that is also in RV
		if suffix == "ment" || suffix == "ments" {
			idx := len(word.RS) - len(suffixRunes) - 1
			if idx >= word.RVstart && isLowerVowel(word.RS[idx]) {
				word.RemoveLastNRunes(len(suffixRunes))
				return false
			}
			return false
		}
	}
	// Handle all the other "special" cases. All of these
	// return true immediately after changing the word.
	//
	switch suffix {
	case "eaux":
		// Replace with eau
		word.ReplaceSuffixRunes(suffixRunes, []rune("eau"), true)
		return true
	case "aux":
		// Replace with al if in R1
		if isInR1 {
			word.ReplaceSuffixRunes(suffixRunes, []rune("al"), true)
			return true
		}
	case "euse", "euses":
		// Delete if in R2, else replace by eux if in R1
		if isInR2 {
			word.RemoveLastNRunes(len(suffixRunes))
			return true
		} else if isInR1 {
			word.ReplaceSuffixRunes(suffixRunes, []rune("eux"), true)
			return true
		}
	case "issement", "issements":
		// Delete if in R1 and preceded by a non-vowel
		if isInR1 {
			idx := len(word.RS) - len(suffixRunes) - 1
			if idx >= 0 && isLowerVowel(word.RS[idx]) == false {
				word.RemoveLastNRunes(len(suffixRunes))
				return true
			}
		}
		return false
	case "atrice", "ateur", "ation", "atrices", "ateurs", "ations":
		// Delete if in R2
		if isInR2 {
			word.RemoveLastNRunes(len(suffixRunes))
			// If preceded by "ic", delete if in R2, else replace by "iqU".
			newSuffix, newSuffixRunes := word.FirstSuffix("ic")
			if newSuffix != "" {
				if word.FitsInR2(len(newSuffixRunes)) {
					word.RemoveLastNRunes(len(newSuffixRunes))
				} else {
					word.ReplaceSuffixRunes(newSuffixRunes, []rune("iqU"), true)
				}
			}
			return true
		}
	case "ement", "ements":
		if isInRV {
			// Delete if in RV
			word.RemoveLastNRunes(len(suffixRunes))
			// If preceded by "iv", delete if in R2
			// (and if further preceded by "at", delete if in R2)
			newSuffix, newSuffixRunes := word.RemoveFirstSuffixIfIn(word.R2start, "iv")
			if newSuffix != "" {
				word.RemoveFirstSuffixIfIn(word.R2start, "at")
				return true
			}
			// If preceded by "eus", delete if in R2, else replace by "eux" if in R1
			newSuffix, newSuffixRunes = word.FirstSuffix("eus")
			if newSuffix != "" {
				newSuffixLen := len(newSuffixRunes)
				if word.FitsInR2(newSuffixLen) {
					word.RemoveLastNRunes(newSuffixLen)
				} else if word.FitsInR1(newSuffixLen) {
					word.ReplaceSuffixRunes(newSuffixRunes, []rune("eux"), true)
				}
				return true
			}
			// If preceded by abl or iqU, delete if in R2, otherwise,
			newSuffix, newSuffixRunes = word.FirstSuffix("abl", "iqU")
			if newSuffix != "" {
				newSuffixLen := len(newSuffixRunes)
				if word.FitsInR2(newSuffixLen) {
					word.RemoveLastNRunes(newSuffixLen)
				}
				return true
			}
			// If preceded by ièr or Ièr, replace by i if in RV
			newSuffix, newSuffixRunes = word.FirstSuffix("ièr", "Ièr")
			if newSuffix != "" {
				if word.FitsInRV(len(newSuffixRunes)) {
					word.ReplaceSuffixRunes(newSuffixRunes, []rune("i"), true)
				}
				return true
			}
			return true
		}
	case "ité", "ités":
		if isInR2 {
			// Delete if in R2
			word.RemoveLastNRunes(len(suffixRunes))
			// If preceded by "abil", delete if in R2, else replace by "abl"
			newSuffix, newSuffixRunes := word.FirstSuffix("abil")
			if newSuffix != "" {
				newSuffixLen := len(newSuffixRunes)
				if word.FitsInR2(newSuffixLen) {
					word.RemoveLastNRunes(newSuffixLen)
				} else {
					word.ReplaceSuffixRunes(newSuffixRunes, []rune("abl"), true)
				}
				return true
			}
			// If preceded by "ic", delete if in R2, else replace by "iqU"
			newSuffix, newSuffixRunes = word.FirstSuffix("ic")
			if newSuffix != "" {
				newSuffixLen := len(newSuffixRunes)
				if word.FitsInR2(newSuffixLen) {
					word.RemoveLastNRunes(newSuffixLen)
				} else {
					word.ReplaceSuffixRunes(newSuffixRunes, []rune("iqU"), true)
				}
				return true
			}
			// If preceded by "iv", delete if in R2
			newSuffix, newSuffixRunes = word.RemoveFirstSuffixIfIn(word.R2start, "iv")
			return true
		}
	case "if", "ive", "ifs", "ives":
		if isInR2 {
			// Delete if in R2
			word.RemoveLastNRunes(len(suffixRunes))
			// If preceded by at, delete if in R2
			newSuffix, newSuffixRunes := word.RemoveFirstSuffixIfIn(word.R2start, "at")
			if newSuffix != "" {
				// And if further preceded by ic, delete if in R2, else replace by iqU
				newSuffix, newSuffixRunes = word.FirstSuffix("ic")
				if newSuffix != "" {
					newSuffixLen := len(newSuffixRunes)
					if word.FitsInR2(newSuffixLen) {
						word.RemoveLastNRunes(newSuffixLen)
					} else {
						word.ReplaceSuffixRunes(newSuffixRunes, []rune("iqU"), true)
					}
				}
			}
			return true
		}
	}
	return false
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L868-L872
|
// MarshalJSON supports json.Marshaler via the easyjson encoder.
func (v SetFontSizesParams) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage9(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L339-L357
|
// GetFile returns the fake remote contents of file at the given commit,
// defaulting to the "master" ref when commit is empty.
func (f *FakeClient) GetFile(org, repo, file, commit string) ([]byte, error) {
	contents, ok := f.RemoteFiles[file]
	if !ok {
		return nil, fmt.Errorf("could not find file %s", file)
	}
	if commit == "" {
		master, found := contents["master"]
		if !found {
			return nil, fmt.Errorf("could not find file %s in master", file)
		}
		return []byte(master), nil
	}
	content, found := contents[commit]
	if !found {
		return nil, fmt.Errorf("could not find file %s with ref %s", file, commit)
	}
	return []byte(content), nil
}
|
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/atomic_rbuf.go#L37-L41
|
// Readable reports how many bytes are currently available to read.
func (b *AtomicFixedSizeRingBuf) Readable() int {
	b.tex.Lock()
	n := b.readable
	b.tex.Unlock()
	return n
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/docker_image_src.go#L150-L159
|
// GetManifest returns the image manifest and its MIME type. With a non-nil
// instanceDigest it fetches that specific manifest; otherwise it serves the
// lazily-loaded cached one.
func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
	if instanceDigest != nil {
		return s.fetchManifest(ctx, instanceDigest.String())
	}
	if err := s.ensureManifestIsLoaded(ctx); err != nil {
		return nil, "", err
	}
	return s.cachedManifest, s.cachedManifestMIMEType, nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/util.go#L145-L154
|
// defrag runs a defragmentation against endpoint ep with a 30s deadline,
// exiting the process on failure.
func defrag(c *v3.Client, ep string) {
	fmt.Printf("Defragmenting %q\n", ep)
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	_, err := c.Defragment(ctx, ep)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	fmt.Printf("Defragmented %q\n", ep)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/overlay/easyjson.go#L1602-L1606
|
// UnmarshalJSON supports json.Unmarshaler via the easyjson decoder.
func (v *GetHighlightObjectForTestReturns) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoOverlay15(&lex, v)
	return lex.Error()
}
|
https://github.com/mrd0ll4r/tbotapi/blob/edc257282178bb5cebbfcc41260ec04c1ec7ac19/outgoing.go#L357-L360
|
// SetOffset sets the photo offset and returns the request for chaining.
func (r *OutgoingUserProfilePhotosRequest) SetOffset(to int) *OutgoingUserProfilePhotosRequest {
	r.Offset = to
	return r
}
|
https://github.com/xwb1989/sqlparser/blob/120387863bf27d04bc07db8015110a6e96d0146c/normalizer.go#L62-L74
|
// WalkStatement normalizes literal values in a statement tree. SELECTs are
// handed to WalkSelect and not descended into again here.
func (nz *normalizer) WalkStatement(node SQLNode) (bool, error) {
	switch n := node.(type) {
	case *Select:
		_ = Walk(nz.WalkSelect, n)
		// WalkSelect already covered this subtree; don't continue.
		return false, nil
	case *SQLVal:
		nz.convertSQLVal(n)
	case *ComparisonExpr:
		nz.convertComparison(n)
	}
	return true, nil
}
|
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/loader/cache.go#L128-L134
|
// GetCachePath maps a cache key to a file path under the cache directory by
// cleaning the key and stripping any leading slash so it stays relative.
func (c *FileCache) GetCachePath(key string) string {
	relative := strings.TrimPrefix(filepath.Clean(key), "/")
	return filepath.Join(c.Dir, relative)
}
|
https://github.com/sclevine/agouti/blob/96599c91888f1b1cf2dccc7f1776ba7f511909e5/webdriver.go#L60-L68
|
// NewPage opens a WebDriver session using the driver defaults merged with
// the provided options and returns a Page bound to it.
func (w *WebDriver) NewPage(options ...Option) (*Page, error) {
	merged := w.defaultOptions.Merge(options)
	session, err := w.Open(merged.Capabilities())
	if err != nil {
		return nil, fmt.Errorf("failed to connect to WebDriver: %s", err)
	}
	return newPage(session), nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/mkbuild-cluster/main.go#L217-L308
|
// do builds a kube.Cluster entry for the cluster described by o (optionally
// refreshing client certs first), validates the credentials unless asked to
// skip, and then either prints the single entry, prints just the data, or
// reads a secret from stdin and emits it with the entry added under o.alias.
func do(o options) error {
	// Refresh credentials if requested
	if o.getClientCert {
		if err := getCredentials(o); err != nil {
			return fmt.Errorf("get client cert: %v", err)
		}
	}
	// Create the new cluster entry
	d, err := describeCluster(o)
	if err != nil {
		return fmt.Errorf("describe auth: %v", err)
	}
	newCluster := kube.Cluster{
		Endpoint:                 "https://" + d.Endpoint,
		ClusterCACertificate:     d.Auth.ClusterCACertificate,
		ClientKey:                d.Auth.ClientKey,
		ClientCertificate:        d.Auth.ClientCertificate,
	}
	// Try to use this entry
	if !o.skipCheck {
		c, err := kube.NewClient(&newCluster, "kube-system")
		if err != nil {
			return fmt.Errorf("create client: %v", err)
		}
		// Listing a well-known pod set proves the credentials work.
		if _, err = c.ListPods("k8s-app=kube-dns"); err != nil {
			logrus.WithError(err).Errorf("Failed to validate credentials (consider --get-client-cert)")
			return fmt.Errorf("list all pods to check new credentials: %v", err)
		}
	}
	// Just print this entry if requested
	if o.printEntry {
		data, err := kube.MarshalClusterMap(map[string]kube.Cluster{o.alias: newCluster})
		if err != nil {
			return fmt.Errorf("marshal %s: %v", o.alias, err)
		}
		fmt.Println(string(data))
		return nil
	}
	// Append the new entry to the current secret
	// First read in the secret from stdin
	b, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return fmt.Errorf("read stdin: %v", err)
	}
	var s coreapi.Secret
	if err := yaml.Unmarshal(b, &s); err != nil {
		return fmt.Errorf("unmarshal stdin: %v", err)
	}
	// Now decode the {alias: cluster} map and print out current keys
	clusters, err := kube.UnmarshalClusterMap(s.Data["cluster"])
	if err != nil {
		return fmt.Errorf("unmarshal secret: %v", err)
	}
	var existing []string
	for a := range clusters {
		existing = append(existing, a)
	}
	logrus.Infof("Existing clusters: %s", strings.Join(existing, ", "))
	// Add new key; refuse to clobber an existing alias unless --overwrite.
	_, ok := clusters[o.alias]
	if ok && !o.overwrite {
		return fmt.Errorf("cluster %s already exists", o.alias)
	}
	clusters[o.alias] = newCluster
	logrus.Infof("New cluster: %s", o.alias)
	// Marshal the {alias: cluster} map back into secret data
	data, err := kube.MarshalClusterMap(clusters)
	if err != nil {
		return fmt.Errorf("marshal clusters: %v", err)
	}
	if o.printData { // Just print the data outside of the secret
		fmt.Println(string(data))
		return nil
	}
	// Output the new secret
	s.Data["cluster"] = data
	buf, err := yaml.Marshal(s)
	if err != nil {
		return fmt.Errorf("marshal secret: %v", err)
	}
	fmt.Println(string(buf))
	return nil
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/oci/layout/oci_dest.go#L270-L279
|
// Commit finalizes the OCI layout: it writes the oci-layout version marker
// and then serializes the accumulated image index to index.json.
func (d *ociImageDestination) Commit(ctx context.Context) error {
	layout := []byte(`{"imageLayoutVersion": "1.0.0"}`)
	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), layout, 0644); err != nil {
		return err
	}
	indexJSON, err := json.Marshal(d.index)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/peer_heap.go#L58-L63
|
// Push appends a peer score to the heap's backing slice (heap.Interface);
// the item's index is recorded so it can be fixed up by the heap package.
func (ph *peerHeap) Push(x interface{}) {
	item := x.(*peerScore)
	item.index = len(ph.peerScores)
	ph.peerScores = append(ph.peerScores, item)
}
|
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/callbacks.go#L59-L70
|
// Only returns a matcher that accepts a context exactly when its operation
// is one of the listed ops.
func Only(ops ...Operation) Matcher {
	return func(ctx *Context) bool {
		for _, candidate := range ops {
			if candidate == ctx.Operation {
				return true
			}
		}
		return false
	}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/input/types.go#L354-L356
|
// MarshalEasyJSON writes the pointer type as a JSON string.
func (t DispatchMouseEventPointerType) MarshalEasyJSON(w *jwriter.Writer) {
	w.String(string(t))
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L4041-L4045
|
// MarshalJSON supports json.Marshaler via the easyjson encoder.
func (v GetDocumentReturns) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoDom46(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/taskcluster/taskcluster-client-go/blob/ef6acd428ae5844a933792ed6479d0e7dca61ef8/tchooks/tchooks.go#L104-L108
|
// ListHookGroups fetches all hook groups via GET /hooks.
//
// NOTE(review): the result is type-asserted even when err is non-nil;
// presumably APICall always returns the supplied *HookGroups value —
// confirm against the generated tcclient contract.
func (hooks *Hooks) ListHookGroups() (*HookGroups, error) {
	cd := tcclient.Client(*hooks)
	responseObject, _, err := (&cd).APICall(nil, "GET", "/hooks", new(HookGroups), nil)
	return responseObject.(*HookGroups), err
}
|
https://github.com/profitbricks/profitbricks-sdk-go/blob/1d2db5f00bf5dd0b6c29273541c71c60cdf4d4d4/ipblock.go#L64-L69
|
// UpdateIPBlock PATCHes the given properties onto the IP block and returns
// the updated resource.
func (c *Client) UpdateIPBlock(ipblockid string, props IPBlockProperties) (*IPBlock, error) {
	url := ipblockPath(ipblockid) + "?depth=" + c.client.depth + "&pretty=" + strconv.FormatBool(c.client.pretty)
	result := &IPBlock{}
	err := c.client.Patch(url, props, result, http.StatusAccepted)
	return result, err
}
|
https://github.com/libp2p/go-libp2p-crypto/blob/9d2fed53443f745e6dc4d02bdcc94d9742a0ca84/secp256k1.go#L117-L125
|
// Verify checks a DER-encoded ECDSA signature over the SHA-256 digest of
// data against this secp256k1 public key. It returns an error only when the
// signature bytes cannot be parsed; a well-formed but wrong signature
// yields (false, nil).
func (k *Secp256k1PublicKey) Verify(data []byte, sigStr []byte) (bool, error) {
	sig, err := btcec.ParseDERSignature(sigStr, btcec.S256())
	if err != nil {
		return false, err
	}
	// The signature covers the fixed-size digest, not the raw message.
	hash := sha256.Sum256(data)
	return sig.Verify(hash[:], (*btcec.PublicKey)(k)), nil
}
|
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L167-L173
|
// randomHeight draws a skiplist tower height in [1, maxHeight]; each extra
// level is added with probability governed by heightIncrease.
func randomHeight() int {
	height := 1
	for height < maxHeight && rand.Uint32() <= heightIncrease {
		height++
	}
	return height
}
|
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/service.go#L363-L388
|
// serviceAddDoc handles updating a service's documentation: it loads the
// service named by the :name URL parameter, checks the caller's
// service.update.doc permission, opens an audit event, and persists the new
// doc text from the "doc" form value.
func serviceAddDoc(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {
	serviceName := r.URL.Query().Get(":name")
	s, err := getService(serviceName)
	if err != nil {
		return err
	}
	allowed := permission.Check(t, permission.PermServiceUpdateDoc,
		contextsForServiceProvision(&s)...,
	)
	if !allowed {
		return permission.ErrUnauthorized
	}
	s.Doc = InputValue(r, "doc")
	evt, err := event.New(&event.Opts{
		Target:     serviceTarget(s.Name),
		Kind:       permission.PermServiceUpdateDoc,
		Owner:      t,
		CustomData: event.FormToCustomData(InputFields(r)),
		Allowed:    event.Allowed(permission.PermServiceReadEvents, contextsForServiceProvision(&s)...),
	})
	if err != nil {
		return err
	}
	// The named return lets the deferred Done record the final outcome of
	// service.Update below.
	defer func() { evt.Done(err) }()
	return service.Update(s)
}
|
https://github.com/michaelbironneau/garbler/blob/2018e2dc9c1173564cc1352ccb90bdc0ac29c12f/lib/garbler.go#L93-L110
|
// NewPasswords generates n passwords satisfying reqs; a nil reqs falls back
// to the Medium preset. Requirements are sanity-checked before generation.
func NewPasswords(reqs *PasswordStrengthRequirements, n int) ([]string, error) {
	if reqs == nil {
		reqs = &Medium
	}
	if ok, problems := reqs.sanityCheck(); !ok {
		return nil, errors.New("requirements failed validation: " + problems)
	}
	gen := Garbler{}
	out := make([]string, n)
	for i := range out {
		p, err := gen.password(*reqs)
		if err != nil {
			return nil, err
		}
		out[i] = p
	}
	return out, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/client.go#L2408-L2431
|
// ListCollaborators returns all collaborators on org/repo, following
// pagination. The fake client returns nothing.
func (c *Client) ListCollaborators(org, repo string) ([]User, error) {
	c.log("ListCollaborators", org, repo)
	if c.fake {
		return nil, nil
	}
	var users []User
	err := c.readPaginatedResults(
		fmt.Sprintf("/repos/%s/%s/collaborators", org, repo),
		// This accept header enables the nested teams preview.
		// https://developer.github.com/changes/2017-08-30-preview-nested-teams/
		"application/vnd.github.hellcat-preview+json",
		func() interface{} {
			return &[]User{}
		},
		func(obj interface{}) {
			users = append(users, *(obj.(*[]User))...)
		},
	)
	if err != nil {
		return nil, err
	}
	return users, nil
}
|
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L140-L233
|
// process runs the watermark bookkeeping loop until closer fires. It keeps
// a min-heap of begun indices with per-index pending counts, advances
// doneUntil to the highest contiguous fully-done index, and closes the
// channels of any waiters whose index has been reached.
func (w *WaterMark) process(closer *Closer) {
	defer closer.Done()
	var indices uint64Heap
	// pending maps raft proposal index to the number of pending mutations for this proposal.
	pending := make(map[uint64]int)
	waiters := make(map[uint64][]chan struct{})

	heap.Init(&indices)
	var loop uint64

	processOne := func(index uint64, done bool) {
		// If not already done, then set. Otherwise, don't undo a done entry.
		prev, present := pending[index]
		if !present {
			heap.Push(&indices, index)
		}

		delta := 1
		if done {
			delta = -1
		}
		pending[index] = prev + delta

		loop++
		if len(indices) > 0 && loop%10000 == 0 {
			min := indices[0]
			w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: %-4d. Value: %d\n",
				w.Name, index, len(indices), w.DoneUntil(), min, pending[min])
		}

		// Update mark by going through all indices in order; and checking if they have
		// been done. Stop at the first index, which isn't done.
		doneUntil := w.DoneUntil()
		if doneUntil > index {
			AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
		}

		until := doneUntil
		loops := 0

		for len(indices) > 0 {
			min := indices[0]
			if done := pending[min]; done > 0 {
				break // len(indices) will be > 0.
			}
			// Even if done is called multiple times causing it to become
			// negative, we should still pop the index.
			heap.Pop(&indices)
			delete(pending, min)
			until = min
			loops++
		}
		// Wake every waiter whose index is now covered by the new watermark.
		for i := doneUntil + 1; i <= until; i++ {
			toNotify := waiters[i]
			for _, ch := range toNotify {
				close(ch)
			}
			delete(waiters, i) // Release the memory back.
		}
		if until != doneUntil {
			AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
			w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops)
		}
	}

	for {
		select {
		case <-closer.HasBeenClosed():
			return
		case mark := <-w.markCh:
			if mark.waiter != nil {
				// A wait request: satisfy immediately if already done,
				// otherwise register the waiter channel.
				doneUntil := atomic.LoadUint64(&w.doneUntil)
				if doneUntil >= mark.index {
					close(mark.waiter)
				} else {
					ws, ok := waiters[mark.index]
					if !ok {
						waiters[mark.index] = []chan struct{}{mark.waiter}
					} else {
						waiters[mark.index] = append(ws, mark.waiter)
					}
				}
			} else {
				if mark.index > 0 {
					processOne(mark.index, mark.done)
				}
				for _, index := range mark.indices {
					processOne(index, mark.done)
				}
			}
		}
	}
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/logging.go#L20-L41
|
// expireLogsTask returns a daily task that runs log expiry wrapped in a
// tracked operation, logging (but not propagating) failures.
func expireLogsTask(state *state.State) (task.Func, task.Schedule) {
	run := func(ctx context.Context) {
		opRun := func(op *operation) error {
			return expireLogs(ctx, state)
		}
		op, err := operationCreate(state.Cluster, "", operationClassTask, db.OperationLogsExpire, nil, nil, opRun, nil, nil)
		if err != nil {
			logger.Error("Failed to start log expiry operation", log.Ctx{"err": err})
			return
		}
		logger.Infof("Expiring log files")
		if _, err := op.Run(); err != nil {
			logger.Error("Failed to expire logs", log.Ctx{"err": err})
		}
		logger.Infof("Done expiring log files")
	}
	return run, task.Daily()
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/fetch/fetch.go#L117-L123
|
// FulfillRequest builds the parameters for Fetch.fulfillRequest.
func FulfillRequest(requestID RequestID, responseCode int64, responseHeaders []*HeaderEntry) *FulfillRequestParams {
	p := &FulfillRequestParams{
		RequestID:       requestID,
		ResponseCode:    responseCode,
		ResponseHeaders: responseHeaders,
	}
	return p
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2http/client.go#L587-L618
|
// writeKeyError writes err to the HTTP response. v2 store errors are sent
// as-is; anything else is logged (known timeout errors less alarmingly)
// and wrapped as a raft-internal v2 error.
func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) {
	if err == nil {
		return
	}
	if e, ok := err.(*v2error.Error); ok {
		e.WriteTo(w)
		return
	}
	expected := err == etcdserver.ErrTimeoutDueToLeaderFail || err == etcdserver.ErrTimeoutDueToConnectionLost
	if lg != nil {
		msg := "unexpected v2 response error"
		if expected {
			msg = "v2 response error"
		}
		lg.Warn(msg, zap.String("internal-server-error", err.Error()))
	} else if expected {
		mlog.MergeError(err)
	} else {
		mlog.MergeErrorf("got unexpected response error (%v)", err)
	}
	ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0)
	ee.WriteTo(w)
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/client/lxd_containers.go#L598-L611
|
// RenameContainer renames a container via a POST to its endpoint; migration
// requests are rejected (use the migration API instead).
func (r *ProtocolLXD) RenameContainer(name string, container api.ContainerPost) (Operation, error) {
	// Sanity check
	if container.Migration {
		return nil, fmt.Errorf("Can't ask for a migration through RenameContainer")
	}
	// Send the request
	path := fmt.Sprintf("/containers/%s", url.QueryEscape(name))
	op, _, err := r.queryOperation("POST", path, container, "")
	if err != nil {
		return nil, err
	}
	return op, nil
}
|
https://github.com/libp2p/go-libp2p-pubsub/blob/9db3dbdde90f44d1c420192c5cefd60682fbdbb9/pubsub.go#L922-L927
|
// WithValidatorConcurrency sets the validator's concurrency throttle to n.
func WithValidatorConcurrency(n int) ValidatorOpt {
	return func(req *addValReq) error {
		req.throttle = n
		return nil
	}
}
|
https://github.com/cloudfoundry-incubator/cf-test-helpers/blob/83791edc4b0a2d48b602088c30332063b8f02f32/helpers/app_commands.go#L31-L34
|
// CurlAppRoot curls the root path of appName and returns the response body.
func CurlAppRoot(cfg helpersinternal.CurlConfig, appName string) string {
	curler := helpersinternal.NewAppCurler(Curl, cfg)
	return curler.CurlAndWait(cfg, appName, "/", CURL_TIMEOUT)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/easyjson.go#L4375-L4379
|
// UnmarshalJSON supports json.Unmarshaler via the easyjson decoder.
func (v *GetPlatformFontsForNodeParams) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoCss38(&lex, v)
	return lex.Error()
}
|
https://github.com/pact-foundation/pact-go/blob/467dea56d27e154363e1975f6e9f4dbf66148e79/dsl/verify_mesage_request.go#L52-L80
|
// Validate checks the request and assembles the CLI argument list in
// v.Args. PactURLs is mandatory; every other flag is optional.
func (v *VerifyMessageRequest) Validate() error {
	v.Args = []string{}
	if len(v.PactURLs) == 0 {
		return fmt.Errorf("Pact URLs is mandatory")
	}
	v.Args = append(v.Args, v.PactURLs...)
	v.Args = append(v.Args, "--format", "json")
	if v.BrokerUsername != "" {
		v.Args = append(v.Args, "--broker-username", v.BrokerUsername)
	}
	if v.BrokerPassword != "" {
		v.Args = append(v.Args, "--broker-password", v.BrokerPassword)
	}
	if v.ProviderVersion != "" {
		v.Args = append(v.Args, "--provider_app_version", v.ProviderVersion)
	}
	if v.PublishVerificationResults {
		v.Args = append(v.Args, "--publish_verification_results", "true")
	}
	return nil
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L12814-L12816
|
// ServerTemplateMultiCloudImageLocator builds a locator for the resource at href.
func (api *API) ServerTemplateMultiCloudImageLocator(href string) *ServerTemplateMultiCloudImageLocator {
	loc := &ServerTemplateMultiCloudImageLocator{Href(href), api}
	return loc
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/runtime.go#L763-L766
|
// WithAwaitPromise returns a copy of the params with AwaitPromise set.
func (p RunScriptParams) WithAwaitPromise(awaitPromise bool) *RunScriptParams {
	updated := p
	updated.AwaitPromise = awaitPromise
	return &updated
}
|
https://github.com/HiLittleCat/core/blob/ae2101184ecd36354d3fcff0ea69d67d3fdbe156/routergroup.go#L57-L63
|
// Group creates a child router group whose base path and handler chain
// extend this group's.
func (group *RouterGroup) Group(relativePath string, handlers ...RouterHandler) *RouterGroup {
	child := &RouterGroup{
		Handlers: group.combineHandlers(handlers),
		basePath: group.calculateAbsolutePath(relativePath),
		engine:   group.engine,
	}
	return child
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/channel.go#L191-L196
|
// Tracer returns the configured tracer, falling back to the global one.
func (ccc channelConnectionCommon) Tracer() opentracing.Tracer {
	if t := ccc.tracer; t != nil {
		return t
	}
	return opentracing.GlobalTracer()
}
|
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/exp/cpp/export/export.go#L375-L382
|
// FileSequence_FrameRange returns the frame range string for the sequence
// with the given id, or an empty C string if the id is unknown.
// The caller must free the returned C string.
func FileSequence_FrameRange(id FileSeqId) *C.char {
	fs, ok := sFileSeqs.Get(id)
	if !ok {
		return C.CString("")
	}
	return C.CString(fs.FrameRange())
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domstorage/easyjson.go#L903-L907
|
// MarshalJSON supports json.Marshaler via the easyjson encoder.
func (v EventDomStorageItemAdded) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoDomstorage8(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/easyjson.go#L7034-L7038
|
// MarshalJSON supports json.Marshaler via the easyjson encoder.
func (v EventLoadingFailed) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoNetwork54(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/datastore/load.go#L176-L280
|
// setVal assigns a datastore property value pValue into the struct field v,
// converting between datastore and Go representations per v's kind. It
// returns a non-empty human-readable reason on failure, "" on success.
// A nil pValue leaves the field at its zero value for most kinds.
func setVal(v reflect.Value, pValue interface{}) string {
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x, ok := pValue.(int64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		x, ok := pValue.(bool)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		v.SetBool(x)
	case reflect.String:
		// Strings may arrive as BlobKey, ByteString, or plain string.
		switch x := pValue.(type) {
		case appengine.BlobKey:
			v.SetString(string(x))
		case ByteString:
			v.SetString(string(x))
		case string:
			v.SetString(x)
		default:
			if pValue != nil {
				return typeMismatchReason(pValue, v)
			}
		}
	case reflect.Float32, reflect.Float64:
		x, ok := pValue.(float64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		// Only *Key pointer fields are supported.
		x, ok := pValue.(*Key)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if _, ok := v.Interface().(*Key); !ok {
			return typeMismatchReason(pValue, v)
		}
		v.Set(reflect.ValueOf(x))
	case reflect.Struct:
		switch v.Type() {
		case typeOfTime:
			x, ok := pValue.(time.Time)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		case typeOfGeoPoint:
			x, ok := pValue.(appengine.GeoPoint)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		default:
			ent, ok := pValue.(*Entity)
			if !ok {
				return typeMismatchReason(pValue, v)
			}
			// Recursively load nested struct
			pls, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return err.Error()
			}
			// if ent has a Key value and our struct has a Key field,
			// load the Entity's Key value into the Key field on the struct.
			if ent.Key != nil && pls.codec.keyField != -1 {
				pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
			}
			err = pls.Load(ent.Properties)
			if err != nil {
				return err.Error()
			}
		}
	case reflect.Slice:
		// Only byte slices are handled here (element kind must be uint8);
		// ByteString values are converted to []byte first.
		x, ok := pValue.([]byte)
		if !ok {
			if y, yok := pValue.(ByteString); yok {
				x, ok = []byte(y), true
			}
		}
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.Type().Elem().Kind() != reflect.Uint8 {
			return typeMismatchReason(pValue, v)
		}
		v.SetBytes(x)
	default:
		return typeMismatchReason(pValue, v)
	}
	return ""
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/config/schema.go#L30-L36
|
// Defaults returns a map from each key name to its declared default value.
func (s Schema) Defaults() map[string]interface{} {
	defaults := make(map[string]interface{}, len(s))
	for name, key := range s {
		defaults[name] = key.Default
	}
	return defaults
}
|
https://github.com/omniscale/go-mapnik/blob/710dfcc5e486e5760d0a5c46be909d91968e1ffb/mapnik.go#L332-L351
|
// RenderImage renders the map to a new NRGBA image using opts. A zero
// ScaleFactor is treated as 1.0.
func (m *Map) RenderImage(opts RenderOpts) (*image.NRGBA, error) {
	scaleFactor := opts.ScaleFactor
	if scaleFactor == 0.0 {
		scaleFactor = 1.0
	}
	i := C.mapnik_map_render_to_image(m.m, C.double(opts.Scale), C.double(scaleFactor))
	if i == nil {
		return nil, m.lastError()
	}
	// Freeing at return is safe: C.GoBytes below copies the pixel data
	// into Go-managed memory before the defer runs.
	defer C.mapnik_image_free(i)
	size := 0
	raw := C.mapnik_image_to_raw(i, (*C.size_t)(unsafe.Pointer(&size)))
	b := C.GoBytes(unsafe.Pointer(raw), C.int(size))
	// NOTE(review): assumes the raw buffer is 4 bytes per pixel laid out to
	// match m.width/m.height — confirm against mapnik_image_to_raw.
	img := &image.NRGBA{
		Pix: b,
		Stride: int(m.width * 4),
		Rect: image.Rect(0, 0, int(m.width), int(m.height)),
	}
	return img, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/genfiles/genfiles.go#L152-L170
|
// loadPaths reads newline-separated paths from r into g.Paths, skipping
// blank lines and '#' comments.
func (g *Group) loadPaths(r io.Reader) error {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// Ignore comments and empty lines.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		g.Paths[line] = true
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scan error: %v", err)
	}
	return nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L674-L709
|
// GetEnqueuedBuilds returns the queued Jenkins builds belonging to the given
// jobs, keyed by prow job ID. Builds without an ID or outside the requested
// job set are ignored.
func (c *Client) GetEnqueuedBuilds(jobs []BuildQueryParams) (map[string]Build, error) {
	c.logger.Debug("GetEnqueuedBuilds")

	data, err := c.Get("/queue/api/json?tree=items[task[name],actions[parameters[name,value]]]")
	if err != nil {
		return nil, fmt.Errorf("cannot list builds from the queue: %v", err)
	}
	var page struct {
		QueuedBuilds []Build `json:"items"`
	}
	if err := json.Unmarshal(data, &page); err != nil {
		return nil, fmt.Errorf("cannot unmarshal builds from the queue: %v", err)
	}

	// Index the requested job IDs for O(1) membership tests.
	wanted := make(map[string]bool, len(jobs))
	for _, job := range jobs {
		wanted[job.ProwJobID] = true
	}

	jenkinsBuilds := make(map[string]Build)
	for _, jb := range page.QueuedBuilds {
		prowJobID := jb.ProwJobID()
		// Skip builds with missing buildID parameters or for jobs we
		// didn't ask for.
		if prowJobID == "" || !wanted[prowJobID] {
			continue
		}
		jb.enqueued = true
		jenkinsBuilds[prowJobID] = jb
	}
	return jenkinsBuilds, nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L120-L129
|
// Write updates a file node's value and modified index; writing to a
// directory node is an EcodeNotFile error.
func (n *node) Write(value string, index uint64) *v2error.Error {
	if n.IsDir() {
		return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
	}
	n.Value, n.ModifiedIndex = value, index
	return nil
}
|
https://github.com/apparentlymart/go-rundeck-api/blob/2c962acae81080a937c350a5bea054c239f27a81/rundeck/key.go#L33-L40
|
// GetKeysInDirMeta lists the metadata of keys stored under path.
func (c *Client) GetKeysInDirMeta(path string) ([]KeyMeta, error) {
	listing := &keyMetaListContents{}
	if err := c.get([]string{"storage", "keys", path}, nil, listing); err != nil {
		return nil, err
	}
	return listing.Keys, nil
}
|
https://github.com/nwaples/rardecode/blob/197ef08ef68c4454ae5970a9c2692d6056ceb8d7/decode_reader.go#L107-L126
|
// read copies readable bytes from the circular window buffer into p and
// advances the read cursor, returning the number of bytes copied. When the
// read index is ahead of the write index the readable region wraps past the
// end of buf.
func (w *window) read(p []byte) (n int) {
	if w.r > w.w {
		// Wrapped region: drain from r to the end of buf first; the mask
		// (buffer size is a power of two) wraps the index back to zero.
		n = copy(p, w.buf[w.r:])
		w.r = (w.r + n) & w.mask
		p = p[n:]
	}
	if w.r < w.w {
		l := copy(p, w.buf[w.r:w.w])
		w.r += l
		n += l
	}
	if w.l > 0 && n > 0 {
		// if we have successfully read data, copy any
		// leftover data from a previous copyBytes.
		l := w.l
		w.l = 0
		w.copyBytes(l, w.o)
	}
	return n
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/typed/buffer.go#L430-L434
|
// UpdateString overwrites the referenced byte region in place with the
// contents of s; if s is longer than the region, the excess is dropped
// (copy stops at the shorter length). A nil reference is a no-op.
func (ref BytesRef) UpdateString(s string) {
	if ref == nil {
		return
	}
	copy(ref, s)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domdebugger/domdebugger.go#L229-L231
|
// Do executes the DOMDebugger.setEventListenerBreakpoint command with the
// receiver as its parameters; the command produces no result payload, so
// nil is passed as the result target.
func (p *SetEventListenerBreakpointParams) Do(ctx context.Context) (err error) {
	return cdp.Execute(ctx, CommandSetEventListenerBreakpoint, p, nil)
}
|
https://github.com/iron-io/functions_go/blob/91b84f5bbb17095bf1c7028ec6e70a3dc06a5893/client/routes/put_apps_app_routes_route_parameters.go#L109-L112
|
// WithBody sets the request body on the params and returns the receiver,
// enabling fluent builder-style chaining.
func (o *PutAppsAppRoutesRouteParams) WithBody(body *models.RouteWrapper) *PutAppsAppRoutesRouteParams {
	o.SetBody(body)
	return o
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/pkg/compression/compression.go#L20-L22
|
// GzipDecompressor wraps r in a parallel-gzip (pgzip) reader. The error
// is non-nil when r does not begin with a valid gzip header; the caller
// is responsible for closing the returned ReadCloser.
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
	return pgzip.NewReader(r)
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/revision.go#L132-L136
|
// Pause marks the revision compactor as paused, under the mutex that
// guards the flag.
func (rc *Revision) Pause() {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	rc.paused = true
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry_interceptor.go#L325-L329
|
// withRetryPolicy returns a retryOption whose apply function records the
// given retry policy on the call options.
func withRetryPolicy(rp retryPolicy) retryOption {
	apply := func(o *options) {
		o.retryPolicy = rp
	}
	return retryOption{applyFunc: apply}
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/update_command.go#L43-L63
|
// updateCommandFunc implements the etcdctl "update" command: it sets an
// existing key to a new value. PrevExist makes the server reject the set
// when the key does not already exist.
func updateCommandFunc(c *cli.Context, ki client.KeysAPI) {
	// NOTE(review): the error paths below fall through after handleError;
	// presumably handleError terminates the process — confirm.
	if len(c.Args()) == 0 {
		handleError(c, ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	// The value comes from the second positional argument, or stdin.
	value, err := argOrStdin(c.Args(), os.Stdin, 1)
	if err != nil {
		handleError(c, ExitBadArgs, errors.New("value required"))
	}
	// Optional TTL in seconds; 0 means no expiration.
	ttl := c.Int("ttl")
	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevExist})
	cancel()
	if err != nil {
		handleError(c, ExitServerError, err)
	}
	// Print the resulting key in the format selected by --output.
	printResponseKey(resp, c.GlobalString("output"))
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pps.go#L81-L87
|
// DatumTagPrefix returns the first four hex characters of the SHA-256
// digest of salt. The salt is hashed because UUIDs are not necessarily
// random in every bit.
func DatumTagPrefix(salt string) string {
	digest := sha256.Sum256([]byte(salt))
	return hex.EncodeToString(digest[:])[:4]
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/praxisgen/helpers.go#L26-L32
|
func toGoReturnTypeName(name string, slice bool) string {
slicePrefix := ""
if slice {
slicePrefix = "[]"
}
return fmt.Sprintf("%s*%s", slicePrefix, toGoTypeName(name))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.