| _id | text |
|---|---|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/root_peer_list.go#L76-L83
|
// GetOrAdd returns the peer registered for hostPort, creating and
// registering a new one when it is not already present.
func (l *RootPeerList) GetOrAdd(hostPort string) *Peer {
	if p, found := l.Get(hostPort); found {
		return p
	}
	// Not tracked yet: Add registers and returns a fresh peer.
	return l.Add(hostPort)
}
|
https://github.com/mrd0ll4r/tbotapi/blob/edc257282178bb5cebbfcc41260ec04c1ec7ac19/ctors.go#L226-L238
|
// NewOutgoingDocumentResend creates a document message that re-sends an
// already-uploaded file, identified by its Telegram file ID.
func (api *TelegramBotAPI) NewOutgoingDocumentResend(recipient Recipient, fileID string) *OutgoingDocument {
	base := outgoingBase{
		api:       api,
		Recipient: recipient,
	}
	doc := &OutgoingDocument{
		outgoingMessageBase: outgoingMessageBase{outgoingBase: base},
		outgoingFileBase:    outgoingFileBase{fileID: fileID},
	}
	return doc
}
|
https://github.com/enaml-ops/enaml/blob/4f847ee10b41afca41fe09fa839cb2f6ade06fb5/enaml.go#L22-L30
|
// Cloud renders the manifest of the given cloud config as a YAML string.
// On marshal failure it returns the empty string and the error.
func Cloud(config CloudConfig) (result string, err error) {
	manifest := config.GetManifest()
	raw, merr := yaml.Marshal(manifest)
	if merr != nil {
		return "", merr
	}
	return string(raw), nil
}
|
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/sequence.go#L863-L919
|
// FindSequenceOnDiskPad locates a file sequence on disk that matches the
// given sequence pattern, interpreting padding characters according to
// padStyle (which explicit FileOptPadStyle* options override).
//
// A pattern that fails to parse is treated as a non-match: (nil, nil).
// Returns (nil, nil) as well when no sequence on disk matches.
func FindSequenceOnDiskPad(pattern string, padStyle PadStyle, opts ...FileOption) (*FileSequence, error) {
	// Copy the options so we never mutate the caller's slice. The options
	// already include any pad-style flags, so there is no need to append
	// them again while scanning (the original code duplicated them).
	optsCopy := make([]FileOption, len(opts))
	copy(optsCopy, opts)

	var strictPadding bool
	for _, opt := range opts {
		switch opt {
		case FileOptPadStyleHash1:
			// An explicit option overrides the padStyle argument.
			padStyle = PadStyleHash1
		case FileOptPadStyleHash4:
			padStyle = PadStyleHash4
		case StrictPadding:
			strictPadding = true
		}
	}

	fs, err := NewFileSequencePad(pattern, padStyle)
	if err != nil {
		// Treat a bad pattern as a non-match, not an error.
		// (Removed a leftover debug fmt.Println of the error.)
		return nil, nil
	}

	seqs, err := FindSequencesOnDisk(fs.Dirname(), optsCopy...)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q: %s", pattern, err.Error())
	}

	base := fs.Basename()
	ext := fs.Ext()
	pad := fs.Padding()
	fill := fs.ZFill()

	for _, seq := range seqs {
		// Find the first match and return it.
		if seq.Basename() != base || seq.Ext() != ext {
			continue
		}
		seq.SetPaddingStyle(padStyle)
		// With strict padding, the zero-fill width must also agree.
		if strictPadding && pad != "" && seq.ZFill() != fill {
			continue
		}
		return seq, nil
	}
	// If we get this far, we didn't find a match.
	return nil, nil
}
|
https://github.com/taskcluster/taskcluster-client-go/blob/ef6acd428ae5844a933792ed6479d0e7dca61ef8/readwriteseeker/readwriteseeker.go#L15-L28
|
// Write copies p into the buffer at the current position, growing the
// buffer as needed, and advances the position. It always reports len(p)
// bytes written with a nil error.
func (rws *ReadWriteSeeker) Write(p []byte) (n int, err error) {
	needed := rws.pos + len(p)
	// Grow capacity (with headroom) when the write would not fit.
	if cap(rws.buf) < needed {
		grown := make([]byte, len(rws.buf), needed+len(p))
		copy(grown, rws.buf)
		rws.buf = grown
	}
	// Extend the visible length to cover the region being written.
	if len(rws.buf) < needed {
		rws.buf = rws.buf[:needed]
	}
	copy(rws.buf[rws.pos:], p)
	rws.pos += len(p)
	return len(p), nil
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pps.go#L368-L394
|
// ListDatumF streams the datums of a job page by page and invokes f for
// each one. Returning errutil.ErrBreak from f stops iteration without
// error; any other error from f or the stream is returned (gRPC errors
// scrubbed).
func (c APIClient) ListDatumF(jobID string, pageSize int64, page int64, f func(di *pps.DatumInfo) error) error {
	req := &pps.ListDatumRequest{
		Job:      NewJob(jobID),
		PageSize: pageSize,
		Page:     page,
	}
	stream, err := c.PpsAPIClient.ListDatumStream(c.Ctx(), req)
	if err != nil {
		return grpcutil.ScrubGRPC(err)
	}
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			// Stream exhausted: clean completion.
			return nil
		}
		if recvErr != nil {
			return grpcutil.ScrubGRPC(recvErr)
		}
		if cbErr := f(resp.DatumInfo); cbErr != nil {
			if cbErr == errutil.ErrBreak {
				// Caller requested early, successful termination.
				return nil
			}
			return cbErr
		}
	}
}
|
https://github.com/pandemicsyn/oort/blob/fca1d3baddc1d944387cc8bbe8b21f911ec9091b/api/groupreplstore_GEN_.go#L356-L364
|
// Startup launches the background ring-server connector exactly once;
// subsequent calls are no-ops. It never returns a non-nil error.
func (rs *ReplGroupStore) Startup(ctx context.Context) error {
	rs.ringLock.Lock()
	defer rs.ringLock.Unlock()
	if rs.ringServerExitChan != nil {
		// Already running.
		return nil
	}
	rs.ringServerExitChan = make(chan struct{})
	go rs.ringServerConnector(rs.ringServerExitChan)
	return nil
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/storage/storage_image.go#L131-L180
|
// getBlobAndLayerID returns a reader for the blob identified by info,
// its size (or -1 when unknown), and the ID of the layer it was served
// from ("" when the blob is an image data item rather than a layer).
func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
	var layer storage.Layer
	var diffOptions *storage.DiffOptions
	// We need a valid digest value.
	err = info.Digest.Validate()
	if err != nil {
		return nil, -1, "", err
	}
	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
	// callers should try to retrieve layers using their uncompressed digests, so no need to
	// check if they're using one of the compressed digests, which we can't reproduce anyway.
	// NOTE(review): the error from this lookup is deliberately not checked
	// here — an empty result (for any reason) falls through to the
	// data-item path below. Confirm this is intended.
	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
	// If it's not a layer, then it must be a data item.
	if len(layers) == 0 {
		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
		if err != nil {
			return nil, -1, "", err
		}
		r := bytes.NewReader(b)
		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
		return ioutil.NopCloser(r), int64(r.Len()), "", nil
	}
	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
	// just go ahead and use the first one every time.
	// The position counter is shared state; guard it so concurrent callers
	// round-robin over the candidate layers.
	s.getBlobMutex.Lock()
	i := s.layerPosition[info.Digest]
	s.layerPosition[info.Digest] = i + 1
	s.getBlobMutex.Unlock()
	// len(layers) > 0 always holds here (the empty case returned above).
	if len(layers) > 0 {
		layer = layers[i%len(layers)]
	}
	// Force the storage layer to not try to match any compression that was used when the layer was first
	// handed to it.
	noCompression := archive.Uncompressed
	diffOptions = &storage.DiffOptions{
		Compression: &noCompression,
	}
	// A negative recorded size means "unknown"; normalize to -1.
	if layer.UncompressedSize < 0 {
		n = -1
	} else {
		n = layer.UncompressedSize
	}
	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
	if err != nil {
		return nil, -1, "", err
	}
	return rc, n, layer.ID, err
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/daemon.go#L997-L1081
|
// Stop performs the daemon shutdown sequence: bring down the network
// endpoints, stop background tasks, close the databases and gateway,
// and unmount temporary filesystems when no containers are running.
// All non-fatal errors are collected and folded into the returned error.
func (d *Daemon) Stop() error {
	logger.Info("Starting shutdown sequence")
	errs := []error{}
	// trackError records any non-nil error for the final summary.
	trackError := func(err error) {
		if err != nil {
			errs = append(errs, err)
		}
	}

	if d.endpoints != nil {
		trackError(d.endpoints.Down())
	}

	trackError(d.tasks.Stop(3 * time.Second))        // Give tasks a bit of time to cleanup.
	trackError(d.clusterTasks.Stop(3 * time.Second)) // Give tasks a bit of time to cleanup.

	shouldUnmount := false
	if d.cluster != nil {
		// It might be that database nodes are all down, in that case
		// we don't want to wait too much.
		//
		// FIXME: it should be possible to provide a context or a
		// timeout for database queries.
		ch := make(chan bool)
		go func() {
			n, err := d.numRunningContainers()
			ch <- err != nil || n == 0
		}()
		select {
		case shouldUnmount = <-ch:
		case <-time.After(2 * time.Second):
			// Query stalled; assume it is safe to unmount.
			shouldUnmount = true
		}

		logger.Infof("Closing the database")
		err := d.cluster.Close()
		// If we got io.EOF the network connection was interrupted and
		// it's likely that the other node shutdown. Let's just log the
		// event and return cleanly.
		if errors.Cause(err) == driver.ErrBadConn {
			logger.Debugf("Could not close remote database cleanly: %v", err)
		} else {
			trackError(err)
		}
	}
	if d.db != nil {
		trackError(d.db.Close())
	}
	if d.gateway != nil {
		trackError(d.gateway.Shutdown())
	}
	// NOTE: d.endpoints.Down() was previously invoked two more times
	// here; the redundant calls have been removed — endpoints are taken
	// down once at the start of the sequence.

	if shouldUnmount {
		logger.Infof("Unmounting temporary filesystems")
		syscall.Unmount(shared.VarPath("devlxd"), syscall.MNT_DETACH)
		syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH)
		logger.Infof("Done unmounting temporary filesystems")
	} else {
		logger.Debugf(
			"Not unmounting temporary filesystems (containers are still running)")
	}

	// Fold the collected errors into a single error value.
	var err error
	if n := len(errs); n > 0 {
		format := "%v"
		if n > 1 {
			format += fmt.Sprintf(" (and %d more errors)", n)
		}
		err = fmt.Errorf(format, errs[0])
	}
	if err != nil {
		logger.Errorf("Failed to cleanly shutdown daemon: %v", err)
	}
	return err
}
|
https://github.com/justinfx/gofileseq/blob/2555f296b4493d1825f5f6fab4aa0ff51a8306cd/ranges/ranges.go#L331-L336
|
// Start returns the start value of the first block, or 0 when the range
// list is empty.
func (l *InclusiveRanges) Start() int {
	if len(l.blocks) == 0 {
		return 0
	}
	return l.blocks[0].Start()
}
|
https://github.com/kpango/glg/blob/68d2670cb2dbff047331daad841149a82ac37796/glg.go#L1063-L1072
|
// Fatalf logs a formatted message at FATAL level and exits the process
// with status 1. If the FATAL write fails, the failure is reported at
// ERROR level; if that also fails, Fatalf panics.
func (g *Glg) Fatalf(format string, val ...interface{}) {
	if outErr := g.out(FATAL, format, val...); outErr != nil {
		if logErr := g.Error(outErr.Error()); logErr != nil {
			panic(logErr)
		}
	}
	exit(1)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/profiler/easyjson.go#L1360-L1364
|
// UnmarshalJSON decodes data into v via the generated easyjson decoder
// and reports any lexer error encountered.
func (v *ScriptCoverage) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoProfiler15(&lex, v)
	return lex.Error()
}
|
https://github.com/apuigsech/seekret/blob/9b1f7ea1b3fd5bd29d93cf62102cb66e54428a49/models/rule.go#L37-L52
|
// NewRule builds a rule named name whose pattern is the case-insensitive
// form of match. It returns an error when match is not a valid regular
// expression. New rules start disabled.
func NewRule(name string, match string) (*Rule, error) {
	matchRegexp, err := regexp.Compile("(?i)" + match)
	if err != nil {
		return nil, err
	}
	// (Removed an unreachable second err check that could never fire
	// after the early return above.)
	return &Rule{
		Enabled: false, // rules must be enabled explicitly
		Name:    name,
		Match:   matchRegexp,
	}, nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/peer.go#L358-L370
|
// pick selects the channel (and its name) to use for sending message m:
// snapshots always use the pipeline; MsgApp prefers the v2 app stream;
// anything else prefers the message stream; the pipeline is the fallback
// when no stream writer is available.
func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
	var ok bool
	// Considering MsgSnap may have a big size, e.g., 1G, and will block
	// stream for a long time, only use one of the N pipelines to send MsgSnap.
	if isMsgSnap(m) {
		return p.pipeline.msgc, pipelineMsg
	} else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
		return writec, streamAppV2
	} else if writec, ok = p.writer.writec(); ok {
		return writec, streamMsg
	}
	// No stream writer is currently available; fall back to the pipeline.
	return p.pipeline.msgc, pipelineMsg
}
|
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/provision/kubernetes/pkg/client/clientset/versioned/typed/tsuru/v1/fake/fake_app.go#L41-L60
|
// List invokes a fake list action and filters the resulting AppList by
// the label selector carried in opts (no selector means "everything").
func (c *FakeApps) List(opts v1.ListOptions) (result *tsuru_v1.AppList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(appsResource, appsKind, c.ns, opts), &tsuru_v1.AppList{})
	if obj == nil {
		return nil, err
	}

	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	filtered := &tsuru_v1.AppList{}
	for _, app := range obj.(*tsuru_v1.AppList).Items {
		if !label.Matches(labels.Set(app.Labels)) {
			continue
		}
		filtered.Items = append(filtered.Items, app)
	}
	return filtered, err
}
|
https://github.com/janos/web/blob/0fb0203103deb84424510a8d5166ac00700f2b0e/templates/templates.go#L115-L117
|
// WithFunction returns an Option that registers fn under name in the
// template function map.
func WithFunction(name string, fn interface{}) Option {
	return func(o *Options) {
		o.functions[name] = fn
	}
}
|
https://github.com/HiLittleCat/core/blob/ae2101184ecd36354d3fcff0ea69d67d3fdbe156/log/log.go#L11-L16
|
func Stack(err interface{}) {
stack := make([]byte, 64<<10)
stack = stack[:runtime.Stack(stack, false)]
log.Printf("%v\n%s", err, stack)
}
|
https://github.com/go-opencv/go-opencv/blob/a4fe8ec027ccc9eb8b7d0797db7c76e61083f1db/opencv/cxcore.go#L181-L183
|
// Set1D sets the element at index x of the image to value, delegating
// directly to OpenCV's cvSet1D via cgo.
func (img *IplImage) Set1D(x int, value Scalar) {
	C.cvSet1D(unsafe.Pointer(img), C.int(x), (C.CvScalar)(value))
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/mkbuild-cluster/main.go#L173-L214
|
// describeCluster runs `gcloud container clusters describe` for the
// configured cluster and parses the YAML output, validating that the
// endpoint and auth material are present. When o.account is set, it
// temporarily switches gcloud accounts and restores the original on exit.
func describeCluster(o options) (*describe, error) {
	if o.account != "" {
		act, err := getAccount()
		if err != nil {
			return nil, fmt.Errorf("get current account: %v", err)
		}
		// Best-effort restore of the original account on the way out.
		defer setAccount(act)
		if err = setAccount(o.account); err != nil {
			return nil, fmt.Errorf("set account %s: %v", o.account, err)
		}
	}
	args, cmd := command(
		"gcloud", "container", "clusters", "describe", o.cluster,
		"--project", o.project,
		"--zone", o.zone,
		"--format=yaml",
	)
	data, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("%s: %v", strings.Join(args, " "), err)
	}
	var d describe
	// BUG FIX: the original checked the stale err from cmd.Output instead
	// of the yaml.Unmarshal result, silently ignoring parse failures.
	if err := yaml.Unmarshal(data, &d); err != nil {
		return nil, fmt.Errorf("unmarshal gcloud: %v", err)
	}
	if d.Endpoint == "" {
		return nil, errors.New("empty endpoint")
	}
	if len(d.Auth.ClusterCACertificate) == 0 {
		return nil, errors.New("empty clusterCaCertificate")
	}
	if len(d.Auth.ClientKey) == 0 {
		return nil, errors.New("empty clientKey, consider running with --get-client-cert")
	}
	if len(d.Auth.ClientCertificate) == 0 {
		return nil, errors.New("empty clientCertificate, consider running with --get-client-cert")
	}
	return &d, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L254-L256
|
// ListStatuses returns the statuses recorded for ref; it never errors.
// org and repo are accepted for interface compatibility but unused.
func (f *FakeClient) ListStatuses(org, repo, ref string) ([]github.Status, error) {
	statuses := f.CreatedStatuses[ref]
	return statuses, nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_ceph_utils.go#L1562-L1675
|
// cephRBDVolumeBackupCreate exports a container (or container snapshot)
// from ceph RBD into a backup directory under tmpPath: it snapshots the
// volume (unless the source already is a snapshot), clones and maps the
// snapshot, regenerates the filesystem UUID, mounts it read-only at a
// temporary mountpoint, and rsyncs the contents into the backup layout.
// All intermediate ceph/mount resources are torn down via defers in
// reverse order of creation.
func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup backup, source container) error {
	sourceIsSnapshot := source.IsSnapshot()
	sourceContainerName := source.Name()
	sourceContainerOnlyName := projectPrefix(source.Project(), sourceContainerName)
	sourceSnapshotOnlyName := ""

	// Prepare for rsync
	rsync := func(oldPath string, newPath string, bwlimit string) error {
		output, err := rsyncLocalCopy(oldPath, newPath, bwlimit)
		if err != nil {
			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
		}
		return nil
	}

	bwlimit := s.pool.Config["rsync.bwlimit"]

	// Create a temporary snapshot
	snapshotName := fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())
	if sourceIsSnapshot {
		// The source is already a snapshot: reuse its existing RBD snapshot
		// instead of creating a new one.
		sourceContainerOnlyName, sourceSnapshotOnlyName, _ = containerGetParentAndSnapshotName(sourceContainerName)
		sourceContainerOnlyName = projectPrefix(source.Project(), sourceContainerOnlyName)
		snapshotName = fmt.Sprintf("snapshot_%s", projectPrefix(source.Project(), sourceSnapshotOnlyName))
	} else {
		// This is costly but we need to ensure that all cached data has
		// been committed to disk. If we don't then the rbd snapshot of
		// the underlying filesystem can be inconsistent or - worst case
		// - empty.
		syscall.Sync()

		// create snapshot
		err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
		if err != nil {
			return err
		}
		// Remove the temporary snapshot when done.
		defer cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
	}

	// Protect volume so we can create clones of it
	err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
	if err != nil {
		return err
	}
	defer cephRBDSnapshotUnprotect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)

	// Create a new volume from the snapshot
	cloneName := uuid.NewRandom().String()
	err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.OSDPoolName, cloneName, "backup", s.UserName)
	if err != nil {
		return err
	}
	defer cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)

	// Map the new volume
	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)
	if err != nil {
		return err
	}
	defer cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName, true)

	// Generate a new UUID if needed, so the clone's filesystem does not
	// collide with the original volume's UUID when both are mounted.
	RBDFilesystem := s.getRBDFilesystem()
	msg, err := fsGenerateNewUUID(RBDFilesystem, RBDDevPath)
	if err != nil {
		logger.Errorf("Failed to create new UUID for filesystem \"%s\": %s: %s", RBDFilesystem, msg, err)
		return err
	}

	// Create a temporary mountpoint
	tmpContainerMntPoint, err := ioutil.TempDir("", "lxd_backup_")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpContainerMntPoint)

	err = os.Chmod(tmpContainerMntPoint, 0700)
	if err != nil {
		return err
	}

	// Mount the volume
	mountFlags, mountOptions := lxdResolveMountoptions(s.getRBDMountOptions())
	err = tryMount(RBDDevPath, tmpContainerMntPoint, RBDFilesystem, mountFlags, mountOptions)
	if err != nil {
		logger.Errorf("Failed to mount RBD device %s onto %s: %s", RBDDevPath, tmpContainerMntPoint, err)
		return err
	}
	logger.Debugf("Mounted RBD device %s onto %s", RBDDevPath, tmpContainerMntPoint)
	defer tryUnmount(tmpContainerMntPoint, syscall.MNT_DETACH)

	// Figure out the target name
	targetName := sourceContainerName
	if sourceIsSnapshot {
		_, targetName, _ = containerGetParentAndSnapshotName(sourceContainerName)
	}

	// Create the path for the backup.
	targetBackupMntPoint := fmt.Sprintf("%s/container", tmpPath)
	if sourceIsSnapshot {
		targetBackupMntPoint = fmt.Sprintf("%s/snapshots/%s", tmpPath, targetName)
	}
	err = os.MkdirAll(targetBackupMntPoint, 0711)
	if err != nil {
		return err
	}

	// Copy the mounted filesystem into the backup directory.
	err = rsync(tmpContainerMntPoint, targetBackupMntPoint, bwlimit)
	if err != nil {
		return err
	}
	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/layertree/layertree.go#L292-L301
|
// Do executes the LayerTree.replaySnapshot command and returns the
// resulting data URL.
func (p *ReplaySnapshotParams) Do(ctx context.Context) (dataURL string, err error) {
	var out ReplaySnapshotReturns
	if err = cdp.Execute(ctx, CommandReplaySnapshot, p, &out); err != nil {
		return "", err
	}
	return out.DataURL, nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/profiler/easyjson.go#L542-L546
|
// MarshalJSON implements json.Marshaler via the generated easyjson
// encoder; any encode error is surfaced from the writer.
func (v TakePreciseCoverageParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoProfiler5(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L340-L355
|
// Compare reports whether the node matches the given previous value and
// index (empty string / zero index act as wildcards), plus a code that
// identifies which part mismatched.
func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
	indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex
	valueMatch := prevValue == "" || n.Value == prevValue
	ok = indexMatch && valueMatch

	switch {
	case ok:
		which = CompareMatch
	case indexMatch:
		// index matched, so the value must be the mismatch
		which = CompareValueNotMatch
	case valueMatch:
		which = CompareIndexNotMatch
	default:
		which = CompareNotMatch
	}
	return ok, which
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/sys/os.go#L83-L109
|
// Init populates the OS state in a fixed order: directories, CPU
// architectures, container paths and backing filesystem, idmaps, exec
// path, userns detection, then AppArmor and cgroup initialization.
func (s *OS) Init() error {
	err := s.initDirs()
	if err != nil {
		return err
	}
	s.Architectures, err = util.GetArchitectures()
	if err != nil {
		return err
	}
	s.LxcPath = filepath.Join(s.VarDir, "containers")
	s.BackingFS, err = util.FilesystemDetect(s.LxcPath)
	if err != nil {
		// Non-fatal: log and continue with whatever BackingFS was set to.
		logger.Error("Error detecting backing fs", log.Ctx{"err": err})
	}
	s.IdmapSet = util.GetIdmapSet()
	s.ExecPath = util.GetExecPath()
	s.RunningInUserNS = shared.RunningInUserNS()
	s.initAppArmor()
	s.initCGroup()
	return nil
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/ss/ssm/codegen_client.go#L952-L954
|
// Locator builds an OperationLocator for this operation's href using the
// given API client.
func (r *Operation) Locator(api *API) *OperationLocator {
	return api.OperationLocator(r.Href)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gerrit/reporter/reporter.go#L44-L54
|
// NewReporter creates a gerrit reporter Client, starting the underlying
// gerrit client with the given cookie file before returning.
func NewReporter(cookiefilePath string, projects map[string][]string, lister pjlister.ProwJobLister) (*Client, error) {
	gerritClient, err := client.NewClient(projects)
	if err != nil {
		return nil, err
	}
	gerritClient.Start(cookiefilePath)
	c := &Client{
		gc:     gerritClient,
		lister: lister,
	}
	return c, nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/backup.go#L232-L321
|
// backupFixStoragePool rewrites the backup.yaml files of a restored
// backup (container plus snapshots) so that their storage pool and root
// disk device point at poolName — either the default profile's pool
// (useDefaultPool) or the pool recorded in the backup itself.
func backupFixStoragePool(c *db.Cluster, b backupInfo, useDefaultPool bool) error {
	var poolName string

	if useDefaultPool {
		// Get the default profile
		_, profile, err := c.ProfileGet("default", "default")
		if err != nil {
			return err
		}

		_, v, err := shared.GetRootDiskDevice(profile.Devices)
		if err != nil {
			return err
		}

		poolName = v["pool"]
	} else {
		poolName = b.Pool
	}

	// Get the default's profile pool
	_, pool, err := c.StoragePoolGet(poolName)
	if err != nil {
		return err
	}

	// f rewrites one backup.yaml at path: swap in the resolved pool and
	// point the root disk device (normal and expanded) at poolName.
	f := func(path string) error {
		// Read in the backup.yaml file.
		backup, err := slurpBackupFile(path)
		if err != nil {
			return err
		}

		rootDiskDeviceFound := false

		// Change the pool in the backup.yaml
		backup.Pool = pool
		if backup.Container.Devices != nil {
			devName, _, err := shared.GetRootDiskDevice(backup.Container.Devices)
			if err == nil {
				backup.Container.Devices[devName]["pool"] = poolName
				rootDiskDeviceFound = true
			}
		}

		if backup.Container.ExpandedDevices != nil {
			devName, _, err := shared.GetRootDiskDevice(backup.Container.ExpandedDevices)
			if err == nil {
				backup.Container.ExpandedDevices[devName]["pool"] = poolName
				rootDiskDeviceFound = true
			}
		}

		if !rootDiskDeviceFound {
			return fmt.Errorf("No root device could be found")
		}

		// Truncate and rewrite the file with the updated YAML.
		file, err := os.Create(path)
		if err != nil {
			return err
		}
		defer file.Close()

		data, err := yaml.Marshal(&backup)
		if err != nil {
			return err
		}

		_, err = file.Write(data)
		if err != nil {
			return err
		}
		return nil
	}

	// Fix the container's own backup.yaml…
	err = f(shared.VarPath("storage-pools", pool.Name, "containers", b.Name, "backup.yaml"))
	if err != nil {
		return err
	}

	// …and each snapshot's copy.
	for _, snap := range b.Snapshots {
		err = f(shared.VarPath("storage-pools", pool.Name, "containers-snapshots", b.Name, snap,
			"backup.yaml"))
		if err != nil {
			return err
		}
	}
	return nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/shared/log15/stack/stack.go#L156-L165
|
// Format writes the trace as a bracketed, space-separated list of its
// program counters, forwarding the format verb to each entry.
func (pcs Trace) Format(s fmt.State, c rune) {
	s.Write([]byte{'['})
	for i, pc := range pcs {
		if i != 0 {
			s.Write([]byte{' '})
		}
		pc.Format(s, c)
	}
	s.Write([]byte{']'})
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_lvm_utils.go#L369-L441
|
// copyContainerLv rsyncs the contents of source into target's LVM-backed
// mountpoint, creating the target volume first if needed, freezing a
// running source for consistency, and optionally marking the target
// logical volume read-only afterwards.
func (s *storageLvm) copyContainerLv(target container, source container, readonly bool, refresh bool) error {
	exists, err := storageLVExists(getLvmDevPath(target.Project(), s.getOnDiskPoolName(),
		storagePoolVolumeAPIEndpointContainers, containerNameToLVName(target.Name())))
	if err != nil {
		return err
	}

	// Only create container/snapshot if it doesn't already exist
	if !exists {
		err := s.ContainerCreate(target)
		if err != nil {
			return err
		}
	}

	targetName := target.Name()
	targetStart, err := target.StorageStart()
	if err != nil {
		return err
	}
	if targetStart {
		defer target.StorageStop()
	}

	sourceName := source.Name()
	sourceStart, err := source.StorageStart()
	if err != nil {
		return err
	}
	if sourceStart {
		defer source.StorageStop()
	}

	sourcePool, err := source.StoragePool()
	if err != nil {
		return err
	}
	sourceContainerMntPoint := getContainerMountPoint(source.Project(), sourcePool, sourceName)
	if source.IsSnapshot() {
		sourceContainerMntPoint = getSnapshotMountPoint(source.Project(), sourcePool, sourceName)
	}
	targetContainerMntPoint := getContainerMountPoint(target.Project(), s.pool.Name, targetName)
	if target.IsSnapshot() {
		// NOTE(review): this uses source.Project() for the *target*
		// snapshot mountpoint while the non-snapshot path uses
		// target.Project() — confirm this asymmetry is intentional.
		targetContainerMntPoint = getSnapshotMountPoint(source.Project(), s.pool.Name, targetName)
	}

	// Freeze a running source so the rsync sees a consistent filesystem.
	if source.IsRunning() {
		err = source.Freeze()
		if err != nil {
			return err
		}
		defer source.Unfreeze()
	}

	bwlimit := s.pool.Config["rsync.bwlimit"]
	output, err := rsyncLocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit)
	if err != nil {
		return fmt.Errorf("failed to rsync container: %s: %s", string(output), err)
	}

	if readonly {
		targetLvmName := containerNameToLVName(targetName)
		poolName := s.getOnDiskPoolName()
		// lvchange -pr sets the LV permission to read-only.
		// NOTE(review): the error message below says "read-write" even
		// though this command makes the LV read-only — looks inverted;
		// verify against upstream before relying on the message.
		output, err := shared.TryRunCommand("lvchange", "-pr", fmt.Sprintf("%s/%s_%s", poolName, storagePoolVolumeAPIEndpointContainers, targetLvmName))
		if err != nil {
			logger.Errorf("Failed to make LVM snapshot \"%s\" read-write: %s", targetName, output)
			return err
		}
	}

	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/input/types.go#L250-L252
|
// MarshalEasyJSON satisfies easyjson.Marshaler by writing the type as a
// JSON string.
func (t MouseType) MarshalEasyJSON(out *jwriter.Writer) {
	out.String(string(t))
}
|
https://github.com/HiLittleCat/core/blob/ae2101184ecd36354d3fcff0ea69d67d3fdbe156/store.go#L95-L101
|
// Destroy removes the session identified by sid from redis, returning
// the error (if any) from the DEL command.
func (rp *redisProvider) Destroy(sid string) error {
	var delErr error
	redisPool.Exec(func(c *redis.Client) {
		delErr = c.Del(sid).Err()
	})
	return delErr
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L5015-L5027
|
// ArmForSwitch returns the union arm name corresponding to the given
// ledger entry type, and whether sw is a valid switch value.
func (u LedgerKey) ArmForSwitch(sw int32) (string, bool) {
	switch entryType := LedgerEntryType(sw); entryType {
	case LedgerEntryTypeAccount:
		return "Account", true
	case LedgerEntryTypeTrustline:
		return "TrustLine", true
	case LedgerEntryTypeOffer:
		return "Offer", true
	case LedgerEntryTypeData:
		return "Data", true
	default:
		return "-", false
	}
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/inbound.go#L298-L307
|
// Response returns the response object for this call, propagating any
// call-level error onto it first so the exchange is not shut down twice.
func (call *InboundCall) Response() *InboundCallResponse {
	if call.err != nil {
		// While reading Thrift, we cannot distinguish between malformed Thrift and other errors,
		// and so we may try to respond with a bad request. We should ensure that the response
		// is marked as failed if the request has failed so that we don't try to shutdown the exchange
		// a second time.
		call.response.err = call.err
	}
	return call.response
}
|
https://github.com/kpango/glg/blob/68d2670cb2dbff047331daad841149a82ac37796/glg.go#L516-L522
|
// TagStringToLevel resolves tag to its registered LEVEL; unknown tags
// yield the sentinel value 255.
func (g *Glg) TagStringToLevel(tag string) LEVEL {
	if v, found := g.levelMap.Load(tag); found {
		return v.(LEVEL)
	}
	return 255 // sentinel: tag not registered
}
|
https://github.com/shogo82148/txmanager/blob/5c0985a3720f2c462fe54ce430cae4ccfaf49d31/txmanager.go#L180-L191
|
// Do runs f inside a transaction on d: the transaction is always
// finished via defer, f's error aborts the commit, and a successful f
// is followed by TxCommit.
func Do(d DB, f func(t Tx) error) error {
	tx, err := d.TxBegin()
	if err != nil {
		return err
	}
	defer tx.TxFinish()

	if ferr := f(tx); ferr != nil {
		return ferr
	}
	return tx.TxCommit()
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L375-L377
|
// NewSTMRepeatable runs apply as an STM transaction with repeatable-read
// isolation, aborting when ctx is cancelled.
//
// Deprecated-style convenience wrapper around NewSTM.
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
}
|
https://github.com/mota/klash/blob/ca6c37a4c8c2e69831c428cf0c6daac80ab56c22/params.go#L17-L22
|
// MakeParams allocates a Params whose ordered slice has capacity for
// fieldCount entries.
func MakeParams(fieldCount int) *Params {
	byName := make(map[string]*Parameter)
	ordered := make([]*Parameter, 0, fieldCount)
	return &Params{byName, ordered}
}
|
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/fbuf.go#L38-L48
|
// TwoContig returns the readable contents as at most two contiguous
// slices of the backing array: when the data does not wrap, everything
// is in first and second stays nil; otherwise first is the tail of the
// array and second the wrapped head.
func (b *Float64RingBuf) TwoContig(makeCopy bool) (first []float64, second []float64) {
	end := b.Beg + b.Readable
	if end <= b.N {
		// No wrap: one contiguous run; second remains an empty slice.
		return b.A[b.Beg:end], second
	}
	// Wrapped: tail of the array, then the head.
	return b.A[b.Beg:b.N], b.A[:end%b.N]
}
|
https://github.com/t3rm1n4l/nitro/blob/937fe99f63a01a8bea7661c49e2f3f8af6541d7c/nitro.go#L300-L305
|
// SetKeyComparator installs cmp as the key comparator and derives the
// insert, iterator, and existence comparators from it.
func (cfg *Config) SetKeyComparator(cmp KeyCompare) {
	cfg.keyCmp = cmp
	cfg.insCmp = newInsertCompare(cmp)
	cfg.iterCmp = newIterCompare(cmp)
	cfg.existCmp = newExistCompare(cmp)
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/oci/archive/oci_dest.go#L44-L47
|
// Close closes the unpacked destination; the temporary directory is
// removed afterwards via the defer (deliberately after the inner Close).
func (d *ociArchiveImageDestination) Close() error {
	defer d.tempDirRef.deleteTempDir()
	return d.unpackedDest.Close()
}
|
https://github.com/segmentio/nsq-go/blob/ff4eef968f46eb580d9dba4f637c5dfb1e5b2208/frame.go#L30-L44
|
// String returns a human-readable name for the frame type; unknown
// values render as "frame <N>".
func (t FrameType) String() string {
	if t == FrameTypeResponse {
		return "response"
	}
	if t == FrameTypeError {
		return "error"
	}
	if t == FrameTypeMessage {
		return "message"
	}
	return "frame <" + strconv.Itoa(int(t)) + ">"
}
|
https://github.com/malice-plugins/go-plugin-utils/blob/9ee76663c3b0a531b8c529f03f12a5a84ff9b61b/utils/utils.go#L45-L54
|
// CamelCase joins the word chunks of src into camelCase: the first chunk
// keeps its original case, every later chunk is title-cased, and the
// separators matched away by camelingRegex are dropped.
func CamelCase(src string) string {
	chunks := camelingRegex.FindAll([]byte(src), -1)
	for i := 1; i < len(chunks); i++ {
		chunks[i] = bytes.Title(chunks[i])
	}
	return string(bytes.Join(chunks, nil))
}
|
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/routes.go#L29-L46
|
// newRoutes constructs the routing state for ourself and launches its
// background worker goroutine, which services recalculation requests,
// wait requests, and queued actions.
func newRoutes(ourself *localPeer, peers *Peers) *routes {
	recalcCh := make(chan *struct{}, 1) // buffered: coalesce recalcs
	waitCh := make(chan chan struct{})
	actionCh := make(chan func())

	r := &routes{
		ourself:      ourself,
		peers:        peers,
		unicast:      unicastRoutes{ourself.Name: UnknownPeerName},
		unicastAll:   unicastRoutes{ourself.Name: UnknownPeerName},
		broadcast:    broadcastRoutes{ourself.Name: []PeerName{}},
		broadcastAll: broadcastRoutes{ourself.Name: []PeerName{}},
		recalc:       recalcCh,
		wait:         waitCh,
		action:       actionCh,
	}
	go r.run(recalcCh, waitCh, actionCh)
	return r
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/ostree/ostree_src.go#L266-L332
|
// GetBlob returns a reader for the blob identified by info, resolving it
// through the ostree repository: manifests are read directly from
// /content, while layers are reassembled from their tar-split metadata.
// NOTE(review): error paths return size -1 in one place and 0 elsewhere —
// inconsistent sentinels; confirm which callers rely on.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	blob := info.Digest.Hex()

	// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
	if s.compressed == nil {
		_, err := s.LayerInfosForCopy(ctx)
		if err != nil {
			return nil, -1, err
		}
	}
	// Prefer the compressed digest recorded for this blob, if any.
	compressedBlob, found := s.compressed[info.Digest]
	if found {
		blob = compressedBlob.Hex()
	}

	branch := fmt.Sprintf("ociimage/%s", blob)

	// Lazily open the ostree repository on first use.
	if s.repo == nil {
		repo, err := openRepo(s.ref.repo)
		if err != nil {
			return nil, 0, err
		}
		s.repo = repo
	}

	layerSize, err := s.getLayerSize(blob)
	if err != nil {
		return nil, 0, err
	}

	tarsplit, err := s.getTarSplitData(blob)
	if err != nil {
		return nil, 0, err
	}

	// if tarsplit is nil we are looking at the manifest. Return directly the file in /content
	if tarsplit == nil {
		file, err := s.readSingleFile(branch, "/content")
		if err != nil {
			return nil, 0, err
		}
		return file, layerSize, nil
	}

	// Reassemble the layer: decompress the tar-split metadata and stream
	// the tar back out of the ostree file store.
	mf := bytes.NewReader(tarsplit)
	mfz, err := pgzip.NewReader(mf)
	if err != nil {
		return nil, 0, err
	}
	metaUnpacker := storage.NewJSONUnpacker(mfz)

	getter, err := newOSTreePathFileGetter(s.repo, branch)
	if err != nil {
		// Don't leak the gzip reader on the error path.
		mfz.Close()
		return nil, 0, err
	}

	ots := asm.NewOutputTarStream(getter, metaUnpacker)

	// The wrapper closes the getter and gzip reader along with the stream.
	rc := ioutils.NewReadCloserWrapper(ots, func() error {
		getter.Close()
		mfz.Close()
		return ots.Close()
	})
	return rc, layerSize, nil
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/raw/call.go#L35-L47
|
// ReadArgsV2 reads arg2 and arg3 in full from r, returning both byte
// slices or the first read error encountered.
func ReadArgsV2(r tchannel.ArgReadable) ([]byte, []byte, error) {
	var arg2 []byte
	if err := tchannel.NewArgReader(r.Arg2Reader()).Read(&arg2); err != nil {
		return nil, nil, err
	}

	var arg3 []byte
	if err := tchannel.NewArgReader(r.Arg3Reader()).Read(&arg3); err != nil {
		return nil, nil, err
	}
	return arg2, arg3, nil
}
|
https://github.com/geoffgarside/ber/blob/27a1aff36ce64dbe5d93c08cc5f161983134ddc5/ber.go#L886-L888
|
// Unmarshal parses the BER-encoded data in b into val with default
// (empty) parameters, returning any bytes left over after the value.
func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
	return UnmarshalWithParams(b, val, "")
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/easyjson.go#L2171-L2175
|
// MarshalJSON implements json.Marshaler via the generated easyjson
// encoder; any encode error is surfaced from the writer.
func (v SetKeyframeKeyParams) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoCss20(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/jobs.go#L503-L520
|
// AllPostsubmits returns every postsubmit job, optionally restricted to
// the given repos; an empty repos slice means all repos.
func (c *JobConfig) AllPostsubmits(repos []string) []Postsubmit {
	var all []Postsubmit
	for repo, jobs := range c.Postsubmits {
		include := len(repos) == 0
		if !include {
			for _, want := range repos {
				if want == repo {
					include = true
					break
				}
			}
		}
		if include {
			all = append(all, jobs...)
		}
	}
	return all
}
|
https://github.com/jinzhu/now/blob/8ec929ed50c3ac25ce77ba4486e1f277c552c591/now.go#L57-L60
|
// BeginningOfYear returns midnight on January 1st of the current year,
// in the receiver's location.
func (now *Now) BeginningOfYear() time.Time {
	year, _, _ := now.Date()
	return time.Date(year, time.January, 1, 0, 0, 0, 0, now.Location())
}
|
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/merge.go#L155-L167
|
// Get returns the merged value for the operator's key by iterating and
// merging inside a read-only transaction. The errNoMerge sentinel from
// iterateAndMerge is treated as success (returning whatever was merged).
func (op *MergeOperator) Get() ([]byte, error) {
	op.RLock()
	defer op.RUnlock()
	var existing []byte
	// The closure writes its result into existing via the named return.
	err := op.db.View(func(txn *Txn) (err error) {
		existing, err = op.iterateAndMerge(txn)
		return err
	})
	if err == errNoMerge {
		return existing, nil
	}
	return existing, err
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/storage_ceph_utils.go#L512-L542
|
// cephRBDVolumeGetParent returns the parent image of an RBD volume by
// parsing the "parent:" line of `rbd info` output. It returns
// db.ErrNoSuchObject when the volume has no parent.
func cephRBDVolumeGetParent(clusterName string, poolName string,
	volumeName string, volumeType string, userName string) (string, error) {
	msg, err := shared.RunCommand(
		"rbd",
		"--id", userName,
		"--cluster", clusterName,
		"--pool", poolName,
		"info",
		fmt.Sprintf("%s_%s", volumeType, volumeName))
	if err != nil {
		return "", err
	}

	idx := strings.Index(msg, "parent: ")
	if idx == -1 {
		// No "parent:" line at all: the volume has no parent.
		return "", db.ErrNoSuchObject
	}

	// The parent name is the remainder of the "parent:" line. If that
	// line happens to be the last line of output there is no trailing
	// newline to search for, so fall back to the whole remainder instead
	// of failing (the original returned "Unexpected parsing error" here).
	msg = strings.TrimSpace(msg[idx+len("parent: "):])
	if nl := strings.Index(msg, "\n"); nl != -1 {
		msg = msg[:nl]
	}
	return strings.TrimSpace(msg), nil
}
|
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/info/info.go#L13-L29
|
// New builds a generator that prints application details and runs the
// config and package checks for the app described by opts.
func New(opts *Options) (*genny.Generator, error) {
	g := genny.New()
	if err := opts.Validate(); err != nil {
		return g, errors.WithStack(err)
	}

	g.RunFn(appDetails(opts))

	// Inspect the app's config directory, then the app root itself.
	configBox := packr.Folder(filepath.Join(opts.App.Root, "config"))
	g.RunFn(configs(opts, configBox))

	rootBox := packr.Folder(opts.App.Root)
	g.RunFn(pkgChecks(opts, rootBox))

	return g, nil
}
|
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/examples/increment-only-counter/state.go#L73-L75
|
// Merge folds the received gossip data into this state and returns the
// complete resulting state.
func (st *state) Merge(other mesh.GossipData) (complete mesh.GossipData) {
	incoming := other.(*state)
	return st.mergeComplete(incoming.copy().set)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/spyglass/artifacts.go#L94-L146
|
// FetchArtifacts resolves src to a GCS location and fetches the named
// artifacts from it, falling back to the pod log when build-log.txt is
// requested but unavailable in GCS. Artifacts that cannot be fetched are
// silently skipped (except for the pod-log fallback, which logs).
func (s *Spyglass) FetchArtifacts(src string, podName string, sizeLimit int64, artifactNames []string) ([]lenses.Artifact, error) {
	artStart := time.Now()
	arts := []lenses.Artifact{}
	keyType, key, err := splitSrc(src)
	if err != nil {
		return arts, fmt.Errorf("error parsing src: %v", err)
	}
	jobName, buildID, err := s.KeyToJob(src)
	if err != nil {
		return arts, fmt.Errorf("could not derive job: %v", err)
	}
	// Translate the source key into a GCS key; prow keys may fail to
	// translate, in which case gcsKey stays empty and lookups below fail.
	gcsKey := ""
	switch keyType {
	case gcsKeyType:
		gcsKey = strings.TrimSuffix(key, "/")
	case prowKeyType:
		if gcsKey, err = s.prowToGCS(key); err != nil {
			logrus.Warningln(err)
		}
	default:
		return nil, fmt.Errorf("invalid src: %v", src)
	}

	podLogNeeded := false
	for _, name := range artifactNames {
		art, err := s.GCSArtifactFetcher.artifact(gcsKey, name, sizeLimit)
		if err == nil {
			// Actually try making a request, because calling GCSArtifactFetcher.artifact does no I/O.
			// (these files are being explicitly requested and so will presumably soon be accessed, so
			// the extra network I/O should not be too problematic).
			_, err = art.Size()
		}
		if err != nil {
			// Missing build log triggers the pod-log fallback below.
			if name == "build-log.txt" {
				podLogNeeded = true
			}
			continue
		}
		arts = append(arts, art)
	}

	if podLogNeeded {
		art, err := s.PodLogArtifactFetcher.artifact(jobName, buildID, sizeLimit)
		if err != nil {
			logrus.Errorf("Failed to fetch pod log: %v", err)
		} else {
			arts = append(arts, art)
		}
	}

	logrus.WithField("duration", time.Since(artStart)).Infof("Retrieved artifacts for %v", src)
	return arts, nil
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/images.go#L107-L123
|
// ImageSourceInsert records a new source for the image with the given ID.
// The protocol name must be one of the known ImageSourceProtocol values.
func (c *Cluster) ImageSourceInsert(id int, server string, protocol string, certificate string, alias string) error {
	stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)`

	// Translate the protocol name into its integer code.
	code := -1
	for candidate, name := range ImageSourceProtocol {
		if name == protocol {
			code = candidate
		}
	}
	if code == -1 {
		return fmt.Errorf("Invalid protocol: %s", protocol)
	}

	return exec(c.db, stmt, id, server, code, certificate, alias)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/repoowners/repoowners.go#L457-L461
|
// ParseFullConfig parses the bytes of an OWNERS file in full-config
// format into a FullConfig value.
func ParseFullConfig(b []byte) (FullConfig, error) {
	var full FullConfig
	err := yaml.Unmarshal(b, &full)
	return full, err
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L5891-L5895
|
// MarshalJSON supports the json.Marshaler interface.
func (v EventFrameResized) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage62(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L50-L58
|
// Register maps a Handler to the given name. It returns an error if a
// handler is already registered under that name.
func (w *Simple) Register(name string, h Handler) error {
	w.moot.Lock()
	defer w.moot.Unlock()

	if _, exists := w.handlers[name]; exists {
		return fmt.Errorf("handler already mapped for name %s", name)
	}

	w.handlers[name] = h
	return nil
}
|
https://github.com/kljensen/snowball/blob/115fa8f6419dcfb9ec4653997b1c6803a5eff962/french/step2a.go#L10-L31
|
// step2a performs step 2a of the French Snowball stemmer: removes the
// longest matching "i"-verb suffix found in RV, but only when it is
// preceded by a non-vowel that also lies within RV. Returns true if a
// suffix was removed.
func step2a(word *snowballword.SnowballWord) bool {
	// Search for the longest among the following suffixes
	// in RV and if found, delete if preceded by a non-vowel.
	suffix, suffixRunes := word.FirstSuffixIn(word.RVstart, len(word.RS),
		"issantes", "issaIent", "issions", "issants", "issante",
		"iraIent", "issons", "issiez", "issent", "issant", "issait",
		"issais", "irions", "issez", "isses", "iront", "irons", "iriez",
		"irent", "irait", "irais", "îtes", "îmes", "isse", "irez",
		"iras", "irai", "ira", "ies", "ît", "it", "is", "ir", "ie", "i",
	)
	if suffix != "" {
		sLen := len(suffixRunes)
		// idx is the rune immediately before the suffix; it must exist,
		// the suffix plus that rune must fit in RV, and it must not be a
		// vowel for the deletion to apply.
		idx := len(word.RS) - sLen - 1
		if idx >= 0 && word.FitsInRV(sLen+1) && isLowerVowel(word.RS[idx]) == false {
			word.RemoveLastNRunes(len(suffixRunes))
			return true
		}
	}
	return false
}
|
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/token_bucket.go#L46-L48
|
// capacityToken returns the timestamp of a token one full refill
// duration in the past, rounded down to the token interval.
func (tb *tokenBucket) capacityToken() time.Time {
	earliest := time.Now().Add(-tb.refillDuration)
	return earliest.Truncate(tb.tokenInterval)
}
|
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/rule/rule.go#L674-L690
|
// AttrStrings returns the string values of a list attribute, or nil if
// the attribute is unset or is not a list. Non-string elements in the
// list are skipped.
func (r *Rule) AttrStrings(key string) []string {
	attr, ok := r.attrs[key]
	if !ok {
		return nil
	}
	list, ok := attr.RHS.(*bzl.ListExpr)
	if !ok {
		return nil
	}
	values := make([]string, 0, len(list.List))
	for _, expr := range list.List {
		str, isStr := expr.(*bzl.StringExpr)
		if isStr {
			values = append(values, str.Value)
		}
	}
	return values
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/lessor.go#L833-L838
|
// refresh extends the lease's expiry to now + extend + remaining TTL.
// NOTE(review): RemainingTTL is read before expiryMu is acquired —
// presumably it is guarded separately; confirm with the Lease type.
func (l *Lease) refresh(extend time.Duration) {
	newExpiry := time.Now().Add(extend + time.Duration(l.RemainingTTL())*time.Second)
	l.expiryMu.Lock()
	defer l.expiryMu.Unlock()
	l.expiry = newExpiry
}
|
https://github.com/dailyburn/bigquery/blob/b6f18972580ed8882d195da0e9b7c9b94902a1ea/client/client.go#L43-L59
|
// New instantiates a Client bound to the service-account PEM at pemPath
// and applies the given functional options.
//
// NOTE(review): if any option returns an error, New returns nil and the
// error is discarded — callers must nil-check the result.
func New(pemPath string, options ...func(*Client) error) *Client {
	c := Client{
		pemPath: pemPath,
		RequestTimeout: defaultRequestTimeout,
	}
	c.PrintDebug = false
	for _, option := range options {
		err := option(&c)
		if err != nil {
			return nil
		}
	}
	return &c
}
|
https://github.com/xwb1989/sqlparser/blob/120387863bf27d04bc07db8015110a6e96d0146c/ast.go#L1287-L1313
|
// Format formats the SHOW statement into buf. SHOW TABLES with options
// gets special handling for the database qualifier and LIKE/WHERE
// filter; all other forms emit "show [scope] type [on table]".
func (node *Show) Format(buf *TrackedBuffer) {
	if node.Type == "tables" && node.ShowTablesOpt != nil {
		opt := node.ShowTablesOpt
		if opt.DbName != "" {
			if opt.Filter != nil {
				buf.Myprintf("show %s%stables from %s %v", opt.Extended, opt.Full, opt.DbName, opt.Filter)
			} else {
				buf.Myprintf("show %s%stables from %s", opt.Extended, opt.Full, opt.DbName)
			}
		} else {
			if opt.Filter != nil {
				buf.Myprintf("show %s%stables %v", opt.Extended, opt.Full, opt.Filter)
			} else {
				buf.Myprintf("show %s%stables", opt.Extended, opt.Full)
			}
		}
		return
	}
	if node.Scope == "" {
		buf.Myprintf("show %s", node.Type)
	} else {
		buf.Myprintf("show %s %s", node.Scope, node.Type)
	}
	if node.HasOnTable() {
		buf.Myprintf(" on %v", node.OnTable)
	}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L1721-L1725
|
// UnmarshalJSON supports the json.Unmarshaler interface.
func (v *SetBlackboxPatternsParams) UnmarshalJSON(data []byte) error {
	var r jlexer.Lexer
	r.Data = data
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDebugger17(&r, v)
	return r.Error()
}
|
https://github.com/kpango/glg/blob/68d2670cb2dbff047331daad841149a82ac37796/glg.go#L793-L795
|
func Success(val ...interface{}) error {
return glg.out(OK, blankFormat(len(val)), val...)
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2v3/store.go#L582-L586
|
// mkPathDepth builds the v3 storage key for nodePath at the given extra
// depth: the store prefix, a zero-padded depth segment, "k", and the
// cleaned absolute path.
func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
	normalForm := path.Clean(path.Join("/", nodePath))
	totalDepth := strings.Count(normalForm, "/") + depth
	return fmt.Sprintf("%s/%03d/k/%s", s.pfx, totalDepth, normalForm)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/layertree/easyjson.go#L1761-L1765
|
// MarshalJSON supports the json.Marshaler interface.
func (v EventLayerTreeDidChange) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoLayertree15(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/taskcluster/taskcluster-client-go/blob/ef6acd428ae5844a933792ed6479d0e7dca61ef8/tcgithub/tcgithub.go#L126-L146
|
// Builds lists the builds registered in the manager, filtered by the
// non-empty query parameters. This is generated client code; edit the
// generator, not this function.
//
// NOTE(review): the final type assertion runs even when err != nil —
// presumably APICall always returns the allocated response object;
// confirm against tcclient.Client.APICall.
func (github *Github) Builds(continuationToken, limit, organization, repository, sha string) (*BuildsResponse, error) {
	v := url.Values{}
	if continuationToken != "" {
		v.Add("continuationToken", continuationToken)
	}
	if limit != "" {
		v.Add("limit", limit)
	}
	if organization != "" {
		v.Add("organization", organization)
	}
	if repository != "" {
		v.Add("repository", repository)
	}
	if sha != "" {
		v.Add("sha", sha)
	}
	cd := tcclient.Client(*github)
	responseObject, _, err := (&cd).APICall(nil, "GET", "/builds", new(BuildsResponse), v)
	return responseObject.(*BuildsResponse), err
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domsnapshot/easyjson.go#L1283-L1287
|
// UnmarshalJSON supports the json.Unmarshaler interface.
func (v *NameValue) UnmarshalJSON(data []byte) error {
	var r jlexer.Lexer
	r.Data = data
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomsnapshot5(&r, v)
	return r.Error()
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/overlay/easyjson.go#L1673-L1677
|
// UnmarshalJSON supports the json.Unmarshaler interface.
func (v *GetHighlightObjectForTestParams) UnmarshalJSON(data []byte) error {
	var r jlexer.Lexer
	r.Data = data
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoOverlay16(&r, v)
	return r.Error()
}
|
https://github.com/golang/debug/blob/19561fee47cf8cd0400d1b094c5898002f97cf90/internal/gocore/region.go#L98-L103
|
// Uint8 reads the uint8 stored in region r. It panics if the region's
// type is not a 1-byte unsigned integer.
func (r region) Uint8() uint8 {
	if !(r.typ.Kind == KindUint && r.typ.Size == 1) {
		panic("bad uint8 type " + r.typ.Name)
	}
	return r.p.proc.ReadUint8(r.a)
}
|
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/default_context.go#L112-L157
|
// Render renders rr with the context's data (params, flash, session,
// request, status) into a buffer, then writes the status and body to the
// response. Rendering to a buffer first ensures no partial body is sent
// when the renderer fails. A nil renderer writes only the status.
func (d *DefaultContext) Render(status int, rr render.Renderer) error {
	start := time.Now()
	defer func() {
		d.LogField("render", time.Since(start))
	}()
	if rr != nil {
		data := d.Data()
		// Flatten params to the first value of each key for templates.
		pp := map[string]string{}
		for k, v := range d.params {
			pp[k] = v[0]
		}
		data["params"] = pp
		data["flash"] = d.Flash().data
		data["session"] = d.Session()
		data["request"] = d.Request()
		data["status"] = status
		bb := &bytes.Buffer{}
		err := rr.Render(bb, data)
		if err != nil {
			// Renderers may signal a redirect via a sentinel error.
			if er, ok := errors.Cause(err).(render.ErrRedirect); ok {
				return d.Redirect(er.Status, er.URL)
			}
			return HTTPError{Status: 500, Cause: err}
		}
		if d.Session() != nil {
			// Flash messages are one-shot: clear, then persist the empty set.
			d.Flash().Clear()
			d.Flash().persist(d.Session())
		}
		// Headers must be set before WriteHeader.
		d.Response().Header().Set("Content-Type", rr.ContentType())
		if p, ok := data["pagination"].(paginable); ok {
			d.Response().Header().Set("X-Pagination", p.Paginate())
		}
		d.Response().WriteHeader(status)
		_, err = io.Copy(d.Response(), bb)
		if err != nil {
			return HTTPError{Status: 500, Cause: err}
		}
		return nil
	}
	d.Response().WriteHeader(status)
	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/easyjson.go#L5216-L5220
|
// UnmarshalJSON supports the json.Unmarshaler interface.
func (v *GetComputedStyleForNodeReturns) UnmarshalJSON(data []byte) error {
	var r jlexer.Lexer
	r.Data = data
	easyjsonC5a4559bDecodeGithubComChromedpCdprotoCss45(&r, v)
	return r.Error()
}
|
https://github.com/profitbricks/profitbricks-sdk-go/blob/1d2db5f00bf5dd0b6c29273541c71c60cdf4d4d4/server.go#L107-L117
|
// AttachCdrom attaches the CD-ROM image cdid to server srvid in data
// center dcid and returns the attached image as reported by the API.
func (c *Client) AttachCdrom(dcid string, srvid string, cdid string) (*Image, error) {
	body := struct {
		ID string `json:"id,omitempty"`
	}{ID: cdid}

	url := serverCdromColPath(dcid, srvid) +
		`?depth=` + c.client.depth +
		`&pretty=` + strconv.FormatBool(c.client.pretty)

	img := &Image{}
	err := c.client.Post(url, body, img, http.StatusAccepted)
	return img, err
}
|
https://github.com/janos/web/blob/0fb0203103deb84424510a8d5166ac00700f2b0e/client/http/http_client.go#L54-L58
|
// New returns an HTTP client whose transport is configured from options.
func New(options *Options) *http.Client {
	client := &http.Client{}
	client.Transport = Transport(options)
	return client
}
|
https://github.com/xwb1989/sqlparser/blob/120387863bf27d04bc07db8015110a6e96d0146c/ast.go#L680-L722
|
// Format formats the DDL statement into buf, dispatching on the action
// (create/drop/rename/alter/vindex operations). The emitted text must
// round-trip through the parser, so the exact wording matters.
func (node *DDL) Format(buf *TrackedBuffer) {
	switch node.Action {
	case CreateStr:
		if node.TableSpec == nil {
			buf.Myprintf("%s table %v", node.Action, node.NewName)
		} else {
			buf.Myprintf("%s table %v %v", node.Action, node.NewName, node.TableSpec)
		}
	case DropStr:
		exists := ""
		if node.IfExists {
			exists = " if exists"
		}
		buf.Myprintf("%s table%s %v", node.Action, exists, node.Table)
	case RenameStr:
		buf.Myprintf("%s table %v to %v", node.Action, node.Table, node.NewName)
	case AlterStr:
		if node.PartitionSpec != nil {
			buf.Myprintf("%s table %v %v", node.Action, node.Table, node.PartitionSpec)
		} else {
			buf.Myprintf("%s table %v", node.Action, node.Table)
		}
	case CreateVindexStr:
		buf.Myprintf("%s %v %v", node.Action, node.VindexSpec.Name, node.VindexSpec)
	case AddColVindexStr:
		// Emit the parenthesized, comma-separated vindex column list.
		buf.Myprintf("alter table %v %s %v (", node.Table, node.Action, node.VindexSpec.Name)
		for i, col := range node.VindexCols {
			if i != 0 {
				buf.Myprintf(", %v", col)
			} else {
				buf.Myprintf("%v", col)
			}
		}
		buf.Myprintf(")")
		if node.VindexSpec.Type.String() != "" {
			buf.Myprintf(" %v", node.VindexSpec)
		}
	case DropColVindexStr:
		buf.Myprintf("alter table %v %s %v", node.Table, node.Action, node.VindexSpec.Name)
	default:
		buf.Myprintf("%s table %v", node.Action, node.Table)
	}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domstorage/easyjson.go#L81-L85
|
// MarshalJSON supports the json.Marshaler interface.
func (v StorageID) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoDomstorage(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/aphistic/gomol/blob/1546845ba714699f76f484ad3af64cf0503064d1/log_adapter.go#L200-L202
|
// Warningm logs msg at the warning level with the given attributes and
// format arguments.
func (la *LogAdapter) Warningm(m *Attrs, msg string, a ...interface{}) error {
	level := LevelWarning
	return la.Log(level, m, msg, a...)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L401-L418
|
// NoIssueApprovers returns the approvals made with the no-issue flag by
// users who still own at least one file in the change.
func (ap Approvers) NoIssueApprovers() map[string]Approval {
	result := map[string]Approval{}
	reverseMap := ap.owners.GetReverseMap(ap.owners.GetApprovers())

	for login, approval := range ap.approvers {
		// Keep only no-issue approvals whose author owns something.
		if approval.NoIssue && len(reverseMap[login]) > 0 {
			result[login] = approval
		}
	}

	return result
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/http/request.go#L64-L85
|
// ReadRequest decodes an http.Request from a TChannel call: arg2 carries
// the method, URL, and headers in a length-prefixed binary encoding;
// arg3 becomes the request body. Fields must be read in wire order.
func ReadRequest(call tchannel.ArgReadable) (*http.Request, error) {
	var arg2 []byte
	if err := tchannel.NewArgReader(call.Arg2Reader()).Read(&arg2); err != nil {
		return nil, err
	}

	rb := typed.NewReadBuffer(arg2)
	method := rb.ReadLen8String()
	url := readVarintString(rb)
	r, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}

	readHeaders(rb, r.Header)
	// The read buffer accumulates errors; check once after all reads.
	if err := rb.Err(); err != nil {
		return nil, err
	}

	// The body is streamed directly from arg3 rather than buffered.
	r.Body, err = call.Arg3Reader()
	return r, err
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/gen/praxisgen/api_analyzer.go#L116-L121
|
// FinalizeTypeNames merges the registry's named types into its inline
// types and lets the descriptor compute the final type names.
func (reg *TypeRegistry) FinalizeTypeNames(d *gen.APIDescriptor) {
	for name, named := range reg.NamedTypes {
		reg.InlineTypes[name] = append(reg.InlineTypes[name], named)
	}
	d.FinalizeTypeNames(reg.InlineTypes)
}
|
https://github.com/bsm/sarama-cluster/blob/d5779253526cc8a3129a0e5d7cc429f4b4473ab4/consumer.go#L748-L790
|
// fetchOffsets retrieves the committed offsets for all subscribed
// topic/partitions from the group coordinator. Partitions with no
// committed offset default to -1. On any broker error the coordinator
// connection is closed so the next attempt re-resolves it.
func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
	offsets := make(map[string]map[int32]offsetInfo, len(subs))
	req := &sarama.OffsetFetchRequest{
		Version: 1,
		ConsumerGroup: c.groupID,
	}

	// Pre-populate every requested partition with the -1 sentinel and
	// add it to the fetch request.
	for topic, partitions := range subs {
		offsets[topic] = make(map[int32]offsetInfo, len(partitions))
		for _, partition := range partitions {
			offsets[topic][partition] = offsetInfo{Offset: -1}
			req.AddPartition(topic, partition)
		}
	}

	broker, err := c.client.Coordinator(c.groupID)
	if err != nil {
		c.closeCoordinator(broker, err)
		return nil, err
	}

	resp, err := broker.FetchOffset(req)
	if err != nil {
		c.closeCoordinator(broker, err)
		return nil, err
	}

	// A response missing any requested block is treated as incomplete.
	for topic, partitions := range subs {
		for _, partition := range partitions {
			block := resp.GetBlock(topic, partition)
			if block == nil {
				return nil, sarama.ErrIncompleteResponse
			}

			if block.Err == sarama.ErrNoError {
				offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
			} else {
				return nil, block.Err
			}
		}
	}

	return offsets, nil
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/pkg/compression/compression.go#L50-L73
|
// DetectCompression sniffs the first bytes of input against the known
// compression magic prefixes. It returns the matching decompressor (nil
// if none matched) and a reader that replays the sniffed bytes followed
// by the rest of input, so no data is lost.
func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
	buffer := [8]byte{}

	// Short inputs (EOF before 8 bytes) are fine; n tells us how much
	// we actually got.
	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
		return nil, nil, err
	}

	var decompressor DecompressorFunc
	for name, algo := range compressionAlgos {
		if bytes.HasPrefix(buffer[:n], algo.prefix) {
			logrus.Debugf("Detected compression format %s", name)
			decompressor = algo.decompressor
			break
		}
	}
	if decompressor == nil {
		logrus.Debugf("No compression detected")
	}

	return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/token-counter/influx.go#L45-L59
|
// CreateDatabaseClient connects to the configured InfluxDB host and
// returns a wrapper bound to the configured database.
func (config *InfluxConfig) CreateDatabaseClient() (*InfluxDB, error) {
	httpConfig := influxdb.HTTPConfig{
		Addr:     config.Host,
		Username: config.User,
		Password: config.Password,
	}
	client, err := influxdb.NewHTTPClient(httpConfig)
	if err != nil {
		return nil, err
	}

	db := &InfluxDB{
		client:   client,
		database: config.DB,
	}
	return db, nil
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L5161-L5168
|
// Locator returns a locator for this metric built from its "self" link,
// or nil if the resource has no self link.
func (r *MonitoringMetric) Locator(api *API) *MonitoringMetricLocator {
	for _, link := range r.Links {
		if link["rel"] != "self" {
			continue
		}
		return api.MonitoringMetricLocator(link["href"])
	}
	return nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/tide.go#L277-L374
|
// Sync runs one tide iteration: query PRs for every configured Tide
// query, gather ProwJobs and blocking issues, partition PRs into
// subpools, sync each subpool in parallel, and publish the resulting
// pools to the status controller and the serving state.
func (c *Controller) Sync() error {
	start := time.Now()
	defer func() {
		duration := time.Since(start)
		c.logger.WithField("duration", duration.String()).Info("Synced")
		tideMetrics.syncDuration.Set(duration.Seconds())
	}()
	defer c.changedFiles.prune()
	c.logger.Debug("Building tide pool.")
	// Deduplicate PRs across queries by key; partial query results are
	// tolerated as long as at least one result came back.
	prs := make(map[string]PullRequest)
	for _, query := range c.config().Tide.Queries {
		q := query.Query()
		results, err := search(c.ghc.Query, c.logger, q, time.Time{}, time.Now())
		if err != nil && len(results) == 0 {
			return fmt.Errorf("query %q, err: %v", q, err)
		}
		if err != nil {
			c.logger.WithError(err).WithField("query", q).Warning("found partial results")
		}
		for _, pr := range results {
			prs[prKey(&pr)] = pr
		}
	}
	c.logger.WithField(
		"duration", time.Since(start).String(),
	).Debugf("Found %d (unfiltered) pool PRs.", len(prs))
	var pjs []prowapi.ProwJob
	var blocks blockers.Blockers
	var err error
	// ProwJobs and blockers are only needed when there are PRs to sync.
	if len(prs) > 0 {
		start := time.Now()
		pjList, err := c.prowJobClient.List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
		if err != nil {
			c.logger.WithField("duration", time.Since(start).String()).Debug("Failed to list ProwJobs from the cluster.")
			return err
		}
		c.logger.WithField("duration", time.Since(start).String()).Debug("Listed ProwJobs from the cluster.")
		pjs = pjList.Items
		if label := c.config().Tide.BlockerLabel; label != "" {
			c.logger.Debugf("Searching for blocking issues (label %q).", label)
			orgExcepts, repos := c.config().Tide.Queries.OrgExceptionsAndRepos()
			orgs := make([]string, 0, len(orgExcepts))
			for org := range orgExcepts {
				orgs = append(orgs, org)
			}
			orgRepoQuery := orgRepoQueryString(orgs, repos.UnsortedList(), orgExcepts)
			blocks, err = blockers.FindAll(c.ghc, c.logger, label, orgRepoQuery)
			if err != nil {
				return err
			}
		}
	}
	// Partition PRs into subpools and filter out non-pool PRs.
	rawPools, err := c.dividePool(prs, pjs)
	if err != nil {
		return err
	}
	filteredPools := c.filterSubpools(c.config().Tide.MaxGoroutines, rawPools)
	// Notify statusController about the new pool.
	c.sc.Lock()
	c.sc.poolPRs = poolPRMap(filteredPools)
	// Non-blocking send: a pending notification is already enough.
	select {
	case c.sc.newPoolPending <- true:
	default:
	}
	c.sc.Unlock()
	// Sync subpools in parallel.
	poolChan := make(chan Pool, len(filteredPools))
	subpoolsInParallel(
		c.config().Tide.MaxGoroutines,
		filteredPools,
		func(sp *subpool) {
			pool, err := c.syncSubpool(*sp, blocks.GetApplicable(sp.org, sp.repo, sp.branch))
			if err != nil {
				sp.log.WithError(err).Errorf("Error syncing subpool.")
			}
			poolChan <- pool
		},
	)

	close(poolChan)
	pools := make([]Pool, 0, len(poolChan))
	for pool := range poolChan {
		pools = append(pools, pool)
	}
	sortPools(pools)
	c.m.Lock()
	c.pools = pools
	c.m.Unlock()

	c.History.Flush()
	return nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gerrit/adapter/adapter.go#L151-L173
|
// Sync queries all gerrit instances for changes updated since the last
// sync, processes each change, and persists the newest update timestamp
// as the checkpoint for the next run.
func (c *Controller) Sync() error {
	syncTime := c.lastUpdate

	for instance, changes := range c.gc.QueryChanges(c.lastUpdate, c.config().Gerrit.RateLimit) {
		for _, change := range changes {
			// A failed change is logged but does not abort the sync.
			if err := c.ProcessChange(instance, change); err != nil {
				logrus.WithError(err).Errorf("Failed process change %v", change.CurrentRevision)
			}
			// Advance the checkpoint to the newest change seen.
			if syncTime.Before(change.Updated.Time) {
				syncTime = change.Updated.Time
			}
		}

		logrus.Infof("Processed %d changes for instance %s", len(changes), instance)
	}

	c.lastUpdate = syncTime
	// Best effort: a failed save only loses resume progress.
	if err := c.SaveLastSync(syncTime); err != nil {
		logrus.WithError(err).Errorf("last sync %v, cannot save to path %v", syncTime, c.lastSyncFallback)
	}

	return nil
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/auth/cmds/cmds.go#L294-L337
|
// GetCmd returns the "auth get" cobra command: with one argument it
// prints the full ACL of a repo; with two it prints the scope one user
// has on a repo.
func GetCmd(noMetrics, noPortForwarding *bool) *cobra.Command {
	get := &cobra.Command{
		Use: "{{alias}} [<username>] <repo>",
		Short: "Get the ACL for 'repo' or the access that 'username' has to 'repo'",
		Long: "Get the ACL for 'repo' or the access that 'username' has to " +
			"'repo'. For example, 'pachctl auth get github-alice private-data' " +
			"prints \"reader\", \"writer\", \"owner\", or \"none\", depending on " +
			"the privileges that \"github-alice\" has in \"repo\". Currently all " +
			"Pachyderm authentication uses GitHub OAuth, so 'username' must be a " +
			"GitHub username",
		Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) error {
			c, err := client.NewOnUserMachine(!*noMetrics, !*noPortForwarding, "user")
			if err != nil {
				return fmt.Errorf("could not connect: %v", err)
			}
			defer c.Close()
			if len(args) == 1 {
				// Get ACL for a repo
				repo := args[0]
				resp, err := c.GetACL(c.Ctx(), &auth.GetACLRequest{
					Repo: repo,
				})
				if err != nil {
					return grpcutil.ScrubGRPC(err)
				}
				// Print one "user: scope" line per ACL entry.
				t := template.Must(template.New("ACLEntries").Parse(
					"{{range .}}{{.Username }}: {{.Scope}}\n{{end}}"))
				return t.Execute(os.Stdout, resp.Entries)
			}
			// Get User's scope on an acl
			username, repo := args[0], args[1]
			resp, err := c.GetScope(c.Ctx(), &auth.GetScopeRequest{
				Repos: []string{repo},
				Username: username,
			})
			if err != nil {
				return grpcutil.ScrubGRPC(err)
			}
			fmt.Println(resp.Scopes[0].String())
			return nil
		}),
	}
	return cmdutil.CreateAlias(get, "auth get")
}
|
https://github.com/bazelbuild/bazel-gazelle/blob/e3805aaca69a9deb949b47bfc45b9b1870712f4f/language/go/fileinfo.go#L339-L395
|
// saveCgo parses the "#cgo" directives in a cgo comment group and stores
// the compiler and linker options (with their build tags) on info.
// It rejects malformed lines, pkg-config directives, and unknown verbs.
func saveCgo(info *fileInfo, rel string, cg *ast.CommentGroup) error {
	text := cg.Text()
	for _, line := range strings.Split(text, "\n") {
		orig := line

		// Line is
		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
		//
		line = strings.TrimSpace(line)
		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
			continue
		}

		// Split at colon.
		line = strings.TrimSpace(line[4:])
		i := strings.Index(line, ":")
		if i < 0 {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		line, optstr := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])

		// Parse tags and verb.
		f := strings.Fields(line)
		if len(f) < 1 {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		// The verb is the last field; everything before it is build tags.
		verb := f[len(f)-1]
		tags := parseTagsInGroups(f[:len(f)-1])

		// Parse options.
		opts, err := splitQuoted(optstr)
		if err != nil {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		// Expand ${SRCDIR} in each option, rejecting unsafe expansions.
		var ok bool
		for i, opt := range opts {
			if opt, ok = expandSrcDir(opt, rel); !ok {
				return fmt.Errorf("%s: malformed #cgo argument: %s", info.path, orig)
			}
			opts[i] = opt
		}
		joinedStr := strings.Join(opts, optSeparator)

		// Add tags to appropriate list.
		switch verb {
		case "CFLAGS", "CPPFLAGS", "CXXFLAGS":
			info.copts = append(info.copts, taggedOpts{tags, joinedStr})
		case "LDFLAGS":
			info.clinkopts = append(info.clinkopts, taggedOpts{tags, joinedStr})
		case "pkg-config":
			return fmt.Errorf("%s: pkg-config not supported: %s", info.path, orig)
		default:
			return fmt.Errorf("%s: invalid #cgo verb: %s", info.path, orig)
		}
	}
	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L2426-L2430
|
// MarshalJSON supports the json.Marshaler interface.
func (v NavigateToHistoryEntryParams) MarshalJSON() ([]byte, error) {
	var w jwriter.Writer
	easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage25(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/crds/resource_crd.go#L97-L102
|
// DeepCopyObject returns a deep copy of the receiver as a
// runtime.Object. A nil copy is returned as a literal nil so callers do
// not receive a non-nil interface wrapping a nil pointer.
func (in *ResourceObject) DeepCopyObject() runtime.Object {
	c := in.deepCopy()
	if c == nil {
		return nil
	}
	return c
}
|
https://github.com/mrd0ll4r/tbotapi/blob/edc257282178bb5cebbfcc41260ec04c1ec7ac19/ctors.go#L255-L263
|
// NewOutgoingChatAction creates a chat-action notification (e.g.
// "typing") addressed to the given recipient.
func (api *TelegramBotAPI) NewOutgoingChatAction(recipient Recipient, action ChatAction) *OutgoingChatAction {
	base := outgoingBase{
		api:       api,
		Recipient: recipient,
	}
	return &OutgoingChatAction{
		outgoingBase: base,
		Action:       action,
	}
}
|
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/reflect.go#L81-L97
|
func derefPointersZero(rv reflect.Value) (drv reflect.Value, isPtr bool, isNilPtr bool) {
for rv.Kind() == reflect.Ptr {
isPtr = true
if rv.IsNil() {
isNilPtr = true
rt := rv.Type().Elem()
for rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
drv = reflect.New(rt).Elem()
return
}
rv = rv.Elem()
}
drv = rv
return
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/client/clientset/versioned/typed/prowjobs/v1/fake/fake_prowjob.go#L132-L140
|
// Patch applies the patch to the fake tracker and returns the patched
// ProwJob. This is generated fake-clientset code; regenerate rather than
// hand-editing.
func (c *FakeProwJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *prowjobsv1.ProwJob, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(prowjobsResource, c.ns, name, data, subresources...), &prowjobsv1.ProwJob{})

	// A nil object means the action produced no result; surface the error.
	if obj == nil {
		return nil, err
	}
	return obj.(*prowjobsv1.ProwJob), err
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L1175-L1183
|
func (u LedgerEntryData) MustData() DataEntry {
val, ok := u.GetData()
if !ok {
panic("arm Data is not set")
}
return val
}
|
https://github.com/t3rm1n4l/nitro/blob/937fe99f63a01a8bea7661c49e2f3f8af6541d7c/skiplist/builder.go#L36-L55
|
// Add appends an item to the segment: a node is allocated at a randomly
// chosen level, linked after the current tail on every level up to that
// level, and accounted for in the segment stats. The optional callback
// is invoked with the new node.
func (s *Segment) Add(itm unsafe.Pointer) {
	itemLevel := s.builder.store.NewLevel(s.rand.Float32)
	x := s.builder.store.newNode(itm, itemLevel)
	s.sts.AddInt64(&s.sts.nodeAllocs, 1)
	s.sts.AddInt64(&s.sts.levelNodesCount[itemLevel], 1)
	s.sts.AddInt64(&s.sts.usedBytes, int64(s.builder.store.Size(x)))

	// Link the new node as the tail on each level; a nil tail means this
	// is the first node on that level, so it becomes the head instead.
	for l := 0; l <= itemLevel; l++ {
		if s.tail[l] != nil {
			s.tail[l].setNext(l, x, false)
		} else {
			s.head[l] = x
		}

		s.tail[l] = x
	}

	if s.callb != nil {
		s.callb(x)
	}
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/ranch/ranch.go#L331-L366
|
// Metric returns the usage metric for the given resource type: counts of
// matching resources grouped by state and by owner. It returns
// ResourceNotFound when the resources cannot be listed or when no
// resource of that type exists.
func (r *Ranch) Metric(rtype string) (common.Metric, error) {
	metric := common.Metric{
		Type:    rtype,
		Current: map[string]int{},
		Owners:  map[string]int{},
	}

	resources, err := r.Storage.GetResources()
	if err != nil {
		logrus.WithError(err).Error("cannot find resources")
		return metric, &ResourceNotFound{rtype}
	}

	for _, res := range resources {
		if res.Type != rtype {
			continue
		}
		// Incrementing a missing map key starts from the zero value, so
		// the explicit zero-initialization the original did is redundant.
		metric.Current[res.State]++
		metric.Owners[res.Owner]++
	}

	// No matching resources at all means the type is unknown.
	if len(metric.Current) == 0 && len(metric.Owners) == 0 {
		return metric, &ResourceNotFound{rtype}
	}

	return metric, nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/utils.go#L30-L51
|
// archive moves the etcd log file and data directory into a timestamped
// failure-archive directory under baseDir. Files that no longer exist
// are silently skipped.
func archive(baseDir, etcdLogPath, dataDir string) error {
	archiveRoot := filepath.Join(baseDir, "etcd-failure-archive")
	dir := filepath.Join(archiveRoot, time.Now().Format(time.RFC3339))
	if existDir(dir) {
		// Avoid clobbering an archive created within the same second.
		dir = filepath.Join(archiveRoot, time.Now().Add(time.Second).Format(time.RFC3339))
	}
	if err := fileutil.TouchDirAll(dir); err != nil {
		return err
	}

	if err := os.Rename(etcdLogPath, filepath.Join(dir, "etcd.log")); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.Rename(dataDir, filepath.Join(dir, filepath.Base(dataDir))); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.