query
stringlengths
8
6.75k
document
stringlengths
9
1.89M
negatives
listlengths
19
19
metadata
dict
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile
// detectGoPackageForProject returns the value of the 'go_package' option of the
// first .proto file found in the same directory as projectFile.
// It returns an error if the directory walk fails, if a proto file cannot be
// read, or if no go_package statement is found in that directory.
func detectGoPackageForProject(projectFile string) (string, error) {
	var goPkg string
	projectDir := filepath.Dir(projectFile)
	// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16 in favour of
	// os.ReadFile — worth switching if the module's Go version allows it.
	if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error {
		// already set — skip remaining files once the first match is recorded
		if goPkg != "" {
			return nil
		}
		if !strings.HasSuffix(protoFile, ".proto") {
			return nil
		}
		// search for go_package on protos in the same dir as the project.json;
		// Walk recurses into subdirectories, so filter those out here
		if projectDir != filepath.Dir(protoFile) {
			return nil
		}
		content, err := ioutil.ReadFile(protoFile)
		if err != nil {
			return err
		}
		lines := strings.Split(string(content), "\n")
		for _, line := range lines {
			goPackage := goPackageStatementRegex.FindStringSubmatch(line)
			if len(goPackage) == 0 {
				continue
			}
			// a match must consist of exactly the full match plus one capture group
			if len(goPackage) != 2 {
				return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage)
			}
			goPkg = goPackage[1]
			break
		}
		return nil
	}); err != nil {
		return "", err
	}
	if goPkg == "" {
		return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile)
	}
	return goPkg, nil
}
[ "func (c *common) GetPackage() string { return c.file.GetPackage() }", "func (pkg *goPackage) firstGoFile() string {\n\tgoSrcs := []platformStringsBuilder{\n\t\tpkg.library.sources,\n\t\tpkg.binary.sources,\n\t\tpkg.test.sources,\n\t}\n\tfor _, sb := range goSrcs {\n\t\tif sb.strs != nil {\n\t\t\tfor s := range s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewQueueManager instantiates a new QueueManager object This constructor will assign default values to properties that have it defined, and makes sure properties required by API are set, but the set of arguments will change when the set of required properties is changed
func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager { this := QueueManager{} this.Name = name this.Clusters = clusters this.AliasQueues = aliasQueues this.RemoteQueues = remoteQueues this.ClusterQueues = clusterQueues return &this }
[ "func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}", "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewQueueManagerWithDefaults instantiates a new QueueManager object This constructor will only assign default values to properties that have it defined, but it doesn't guarantee that properties required by API are set
func NewQueueManagerWithDefaults() *QueueManager { this := QueueManager{} return &this }
[ "func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}", "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetClusters returns the Clusters field value
func (o *QueueManager) GetClusters() []string { if o == nil { var ret []string return ret } return o.Clusters }
[ "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.Clus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set.
// GetClustersOk returns a tuple with the Clusters field value
// and a boolean to check if the value has been set.
func (o *QueueManager) GetClustersOk() (*[]string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Clusters, true
}
[ "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func (o *VirtualizationVmwareVcenterAllOf) GetClusterCountOk() (*int64, bool) {\n\tif o == nil || o.ClusterCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterCount, true\n}", "func (o *ResourceLimits) GetK8sClustersProvisioned...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetClusters sets field value
// SetClusters sets field value.
func (o *QueueManager) SetClusters(v []string) {
	o.Clusters = v
}
[ "func (s *RaftDatabase) SetClusters(clusters int) {\n\ts.clusters = clusters\n}", "func setSomeClusterValues(ch chan error, manager ConfigManager) error {\n\t// prepare expected cluster config\n\tconf := new(ClusterConfig)\n\tconf.ClusterId = \"myClusterID\"\n\tconf.Description = \"myDescription\"\n\n\tif err := ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetAliasQueues returns the AliasQueues field value
func (o *QueueManager) GetAliasQueues() []AliasQueue { if o == nil { var ret []AliasQueue return ret } return o.AliasQueues }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set.
// GetAliasQueuesOk returns a tuple with the AliasQueues field value
// and a boolean to check if the value has been set.
func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {
	if o == nil {
		return nil, false
	}
	return &o.AliasQueues, true
}
[ "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetAliasQueues sets field value
// SetAliasQueues sets field value.
func (o *QueueManager) SetAliasQueues(v []AliasQueue) {
	o.AliasQueues = v
}
[ "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetRemoteQueues returns the RemoteQueues field value
func (o *QueueManager) GetRemoteQueues() []RemoteQueue { if o == nil { var ret []RemoteQueue return ret } return o.RemoteQueues }
[ "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *RemoteQueue) GetRemoteQueue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueue\n}", "func (o *QueueManage...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set.
// GetRemoteQueuesOk returns a tuple with the RemoteQueues field value
// and a boolean to check if the value has been set.
func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {
	if o == nil {
		return nil, false
	}
	return &o.RemoteQueues, true
}
[ "func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}", "func (o *RemoteQueue) GetRemoteQueueManagerOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueueManager, true\n}", "func (o *Remo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetRemoteQueues sets field value
// SetRemoteQueues sets field value.
func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {
	o.RemoteQueues = v
}
[ "func (o *RemoteQueue) SetRemoteQueue(v string) {\n\to.RemoteQueue = v\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetClusterQueues returns the ClusterQueues field value
func (o *QueueManager) GetClusterQueues() []ClusterQueue { if o == nil { var ret []ClusterQueue return ret } return o.ClusterQueues }
[ "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (client *Clie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set.
// GetClusterQueuesOk returns a tuple with the ClusterQueues field value
// and a boolean to check if the value has been set.
func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {
	if o == nil {
		return nil, false
	}
	return &o.ClusterQueues, true
}
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (o *QueueManager) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a tiploc to the result so that it will be included in the tiploc map
// addTiploc adds a tiploc to the result so that it will be included in the
// tiploc map. Empty strings are ignored.
func (bf *boardFilter) addTiploc(tiploc string) {
	if tiploc == "" {
		return
	}
	bf.tiplocs[tiploc] = nil
}
[ "func (r *LocationMap) Add(t *Location) {\n\tif _, ok := r.m[t.Tiploc]; !ok {\n\t\tr.m[t.Tiploc] = t\n\t}\n}", "func (bd *BlockDAG) updateTips(b *Block) {\n\tif bd.tips == nil {\n\t\tbd.tips = NewHashSet()\n\t\tbd.tips.AddPair(b.GetHash(), b)\n\t\treturn\n\t}\n\tfor k := range bd.tips.GetMap() {\n\t\tblock := bd....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process calling points so that we generate the appropriate via and include their tiplocs
func (bf *boardFilter) processCallingPoints(s ldb.Service) { if len(s.CallingPoints) > 0 { viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc) for _, cp := range s.CallingPoints { bf.addTiploc(cp.Tiploc) viaRequest.AppendTiploc(cp.Tiploc) } } }
[ "func TipCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(*Tip))(params[0].(*Tip))\n}", "func processCoords(gpspoints []GPSPoint) (points Points) {\n\tfor i := 0; i < len(gpspoints); i++ {\n\t\tpoints = append(points, Point{gpspoints[i].Lon, gpspoints[i].Lat, gpspoints[i].SignalDbm})\n\t}\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process any associations, pulling in their schedules
// processAssociations processes any associations on the service, pulling in
// their schedules, tiplocs, via requests and cancellation/late reasons.
func (bf *boardFilter) processAssociations(s ldb.Service) {
	for _, assoc := range s.Associations {
		assoc.AddTiplocs(bf.tiplocs)
		//if assoc.IsJoin() || assoc.IsSplit() {
		// pick the RID/location index of the "other" service in the association
		ar := assoc.Main.RID
		ai := assoc.Main.LocInd
		if ar == s.RID {
			ar = assoc.Assoc.RID
			ai = assoc.Assoc.LocInd
		}
		// Resolve the schedule if a split, join or if NP only if previous service & we are not yet running
		//if ar != s.RID {
		if assoc.Category != "NP" || (s.LastReport.Tiploc == "" && assoc.Assoc.RID == s.RID) {
			as := bf.d.ldb.GetSchedule(ar)
			if as != nil {
				assoc.Schedule = as
				as.AddTiplocs(bf.tiplocs)
				as.LastReport = as.GetLastReport()
				bf.processToc(as.Toc)
				// only build a via if the association point is not the last location
				if ai < (len(as.Locations) - 1) {
					// NOTE(review): this checks Origin but adds Destination's
					// tiploc — possibly intentional, but looks like a mismatch;
					// confirm against the original intent.
					if as.Origin != nil {
						bf.addTiploc(as.Destination.Tiploc)
					}
					// prefer the explicit Destination, falling back to the
					// last location in the schedule
					destination := as.Locations[len(as.Locations)-1].Tiploc
					if as.Destination != nil {
						destination = as.Destination.Tiploc
					}
					viaRequest := bf.addVia(ar, destination)
					for _, l := range as.Locations[ai:] {
						bf.addTiploc(l.Tiploc)
						viaRequest.AppendTiploc(l.Tiploc)
					}
				}
				bf.processReason(as.CancelReason, true)
				bf.processReason(as.LateReason, false)
			}
		}
	}
}
[ "func (s *candidate) Schedule() (constructedSchedule, error) {\n\tsch := constructedSchedule{\n\t\tearliest: s.earliest,\n\t\teventsByAttendee: make(map[AttendeeID]*attendeeEvents),\n\t}\n\tfor _, event := range s.order {\n\t\tif err := sch.Add(s.reqs[event]); err != nil {\n\t\t\treturn sch, err\n\t\t}\n\t}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
acceptService returns true if the service is to be accepted, false if it's to be ignored
func (bf *boardFilter) acceptService(service ldb.Service) bool { // Original requirement, must have an RID if service.RID == "" { return false } // remove terminating services if bf.terminated && bf.atStation(service.Destination) { return false } if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) { return false } return true }
[ "func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}", "func (r *RPCAcceptor) Accept(req *ChannelAcceptRequest) bool {\n\treturn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rowToRecord converts from pgx.Row to a store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { if err == sql.ErrNoRows { return record, store.ErrNotFound } return nil, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } return record, nil }
[ "func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rowsToRecords converts from pgx.Rows to []store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) { var records []*store.Record for rows.Next() { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { return records, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } records = append(records, record) } return records, nil }
[ "func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
findConflict finds the index of the conflict. It returns the first pair of conflicting entries between the existing entries and the given entries, if there are any. If there is no conflicting entries, and the existing entries contains all the given entries, zero will be returned. If there is no conflicting entries, but the given entries contains new entries, the index of the first new entry will be returned. An entry is considered to be conflicting if it has the same index but a different term. The first entry MUST have an index equal to the argument 'from'. The index of the given entries MUST be continuously increasing.
// findConflict finds the index of the conflict. It returns the first pair of
// conflicting entries between the existing entries and the given entries, if
// there are any. If there are no conflicting entries and the existing entries
// contain all the given entries, zero is returned. If there are no conflicting
// entries but the given entries contain new entries, the index of the first
// new entry is returned. An entry is considered conflicting if it has the same
// index but a different term. The index of the given entries MUST be
// continuously increasing.
func (l *LogStore) findConflict(entries []*pb.Entry) uint64 {
	// TODO: can there ever be a conflict at index 0?
	for _, ne := range entries {
		if !l.matchTerm(ne.Index, ne.Term) {
			// only log when the conflict overwrites an existing entry;
			// beyond lastIndex the entries are simply new
			if ne.Index <= l.lastIndex() {
				l.logger.Info("log found conflict", zap.Uint64("conflictIndex", ne.Index), zap.Uint64("conflictTerm", ne.Term), zap.Uint64("existTerm", l.termOrPanic(l.term(ne.Index))))
			}
			return ne.Index
		}
	}
	return 0
}
[ "func (l *raftLog) findConflict(from uint64, ents []pb.Entry) uint64 {\n\t// TODO(xiangli): validate the index of ents\n\tfor i, ne := range ents {\n\t\tif oe := l.at(from + uint64(i)); oe == nil || oe.Term != ne.Term {\n\t\t\treturn from + uint64(i)\n\t\t}\n\t}\n\treturn 0\n}", "func FindConflictsByUser(entries ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add adds one or more previously unadded urls to crawler to visit. source can be nil to indicate root. Returns a list of errors if any occured.
// Add adds one or more previously unadded urls to crawler to visit.
// source can be nil to indicate root.
// Returns a list of errors if any occured.
func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error {
	var errs []error
	for _, u := range uri {
		var err error
		u := u
		u.Fragment = "" // reset fragment, we don't want it messing our visited list
		if source != nil {
			u = source.ResolveReference(u)
		}
		if u.Scheme != "http" && u.Scheme != "https" {
			err = ErrUnsupportedScheme
		} else if err == nil && c.filter != nil && !c.filter(u) {
			// NOTE(review): err is always nil here, so the err == nil guard is redundant
			err = ErrFilteredOut
		}
		us := u.String()
		// For the already-visited test we need to clean up each URL a bit
		vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash
		if err == nil {
			c.toVisitMu.RLock()
			if _, ok := c.toVisit[vkey]; ok {
				err = ErrAlreadyInList
			}
			c.toVisitMu.RUnlock()
		}
		if err == nil {
			c.logger.Debugf("Add(%v %v): OK", source, us)
			atomic.AddUint64(&c.numQueued, 1)
		} else if err != nil {
			// NOTE(review): the err != nil condition here is redundant after
			// the preceding err == nil branch
			//c.logger.Warnf("Add(%v %v): %v", source, us, err)
			atomic.AddUint64(&c.numEncountered, 1)
			errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u))
			continue
		}
		// record the URL as queued-for-visit
		c.toVisitMu.Lock()
		c.toVisit[vkey] = struct{}{}
		c.toVisitMu.Unlock()
		{
			// record the edge in the site map, relativizing same-host targets
			uu := *u
			uu.Scheme = ""
			if source != nil && source.Host == uu.Host {
				uu.Host = ""
			}
			if source == nil {
				c.mapper.Add("<root>", uu.String())
			} else {
				c.mapper.Add(source.String(), uu.String())
			}
		}
		v := visit{
			source: source,
			target: u,
		}
		// hand the visit to the worker; abort on context cancellation
		select {
		case c.visitChan <- v:
		case <-c.ctx.Done():
			return append(errs, c.ctx.Err())
		}
	}
	return errs
}
[ "func add(url string, verbose bool, scrapeURLs *scrapeURL) {\n\tscrapeURLs.AddedURLsCount++\n\tscrapeURLs.AddedURLs = append(scrapeURLs.AddedURLs, url)\n\tif verbose {\n\t\tlog.Println(\"Added: \" + url)\n\t}\n}", "func (s *Sources) Add(src string) error {\n\tif src == \"\" {\n\t\treturn errors.New(\"src is an em...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance.
func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) { var result struct { Site struct { ProductVersion string } } ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result) if err != nil || !ok { return "", err } return result.Site.ProductVersion, err }
[ "func GetVersion() string {\n\treturn version\n}", "func (_ EntityAliases) SensuAgentVersion(p graphql.ResolveParams) (string, error) {\n\tval, err := graphql.DefaultResolver(p.Source, p.Info.FieldName)\n\tret, ok := val.(string)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif !ok {\n\t\treturn ret, errors.New...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DetermineFeatureFlags fetches the version of the configured Sourcegraph instance and then sets flags on the Service itself to use features available in that version, e.g. gzip compression.
// DetermineFeatureFlags fetches the version of the configured Sourcegraph
// instance and then sets flags on the Service itself to use features available
// in that version, e.g. gzip compression.
func (svc *Service) DetermineFeatureFlags(ctx context.Context) error {
	version, err := svc.getSourcegraphVersion(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query Sourcegraph version to check for available features")
	}
	return svc.features.setFromVersion(version)
}
[ "func InitFeatureFlags(flag *pflag.FlagSet) {\n\tflag.Bool(FeatureFlagAccessCode, false, \"Flag (bool) to enable requires-access-code\")\n\tflag.Bool(FeatureFlagRoleBasedAuth, false, \"Flag (bool) to enable role-based-auth\")\n\tflag.Bool(FeatureFlagConvertPPMsToGHC, false, \"Flag (bool) to enable convert-ppms-to-g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a zerolength section as the end of the input CAR file. For example, this can be useful to allow "null padding" after a CARv1 without knowing where the padding begins.
// ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a
// zero-length section as the end of the input CAR file. For example, this can
// be useful to allow "null padding" after a CARv1 without knowing where the
// padding begins.
func ZeroLengthSectionAsEOF(enable bool) Option {
	return func(o *Options) {
		o.ZeroLengthSectionAsEOF = enable
	}
}
[ "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func (h Header) LastFourZero() bool {\n\tfor i := 1; i < le...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize.
// UseDataPadding sets the padding to be added between CARv2 header and its
// data payload on Finalize.
func UseDataPadding(p uint64) Option {
	return func(o *Options) {
		o.DataPadding = p
	}
}
[ "func PaddingData(method string, params ...string) string {\n\tvar res string\n\tif !strings.HasPrefix(method, HexPrefix) {\n\t\tres = HexPrefix + method\n\t}\n\tfor _, item := range params {\n\t\tif strings.HasPrefix(item, HexPrefix) {\n\t\t\titem = item[2:]\n\t\t}\n\t\tpaddingString := paddingstr[:64-len(item)]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UseIndexPadding sets the padding between data payload and its index on Finalize.
// UseIndexPadding sets the padding between data payload and its index on
// Finalize.
func UseIndexPadding(p uint64) Option {
	return func(o *Options) {
		o.IndexPadding = p
	}
}
[ "func (s *BasePlSqlParserListener) ExitUsing_index_clause(ctx *Using_index_clauseContext) {}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func IndexFixer(ind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UseIndexCodec sets the codec used for index generation.
// UseIndexCodec sets the codec used for index generation.
func UseIndexCodec(c multicodec.Code) Option {
	return func(o *Options) {
		o.IndexCodec = c
	}
}
[ "func encodeIndex(d *Index) *internal.Index {\n\treturn &internal.Index{\n\t\tName: d.name,\n\t\tMeta: &internal.IndexMeta{\n\t\t\tColumnLabel: d.columnLabel,\n\t\t\tTimeQuantum: string(d.timeQuantum),\n\t\t},\n\t\tMaxSlice: d.MaxSlice(),\n\t\tFrames: encodeFrames(d.Frames()),\n\t}\n}", "func UseIndex(designDoc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithoutIndex flags that no index should be included in generation.
// WithoutIndex flags that no index should be included in generation.
func WithoutIndex() Option {
	return func(o *Options) {
		o.IndexCodec = index.CarIndexNone
	}
}
[ "func IndexOptionsNone() IndexOptions {\n\tresult := IndexOptions{}\n\n\treturn result\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func (_m *DirectRepositoryWriter) DisableIndexRefresh() {\n\t_m.Called()\n}", "func (r *Search) AllowNoIndices(allownoindice...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StoreIdentityCIDs sets whether to persist sections that are referenced by CIDs with multihash.IDENTITY digest. When writing CAR files with this option, Characteristics.IsFullyIndexed will be set. By default, the blockstore interface will always return true for Has() called with identity CIDs, but when this option is turned on, it will defer to the index. When creating an index (or loading a CARv1 as a blockstore), when this option is on, identity CIDs will be included in the index. This option is disabled by default.
// StoreIdentityCIDs sets whether to persist sections that are referenced by
// CIDs with multihash.IDENTITY digest. When writing CAR files with this
// option, Characteristics.IsFullyIndexed will be set. By default, the
// blockstore interface will always return true for Has() called with identity
// CIDs, but when this option is turned on, it will defer to the index. When
// creating an index (or loading a CARv1 as a blockstore), when this option is
// on, identity CIDs will be included in the index.
//
// This option is disabled by default.
func StoreIdentityCIDs(b bool) Option {
	return func(o *Options) {
		o.StoreIdentityCIDs = b
	}
}
[ "func (cosi *cosiAggregate) StoreIdentities(idents map[string]proto.Message) {\n\tfor k, v := range idents {\n\t\tpoint := suite.G2().Point()\n\t\terr := point.UnmarshalBinary(v.(*BdnIdentity).PublicKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcosi.skipchain.identities[k] = point\n\t}\n}", "func Stor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. Indexing a CID with larger than the allowed size results in ErrCidTooLarge error.
// MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in
// bytes. Indexing a CID larger than the allowed size results in an
// ErrCidTooLarge error.
func MaxIndexCidSize(s uint64) Option {
	return func(o *Options) {
		o.MaxIndexCidSize = s
	}
}
[ "func (r *CustomerResourceCollection) MaximumSize() int {\n\treturn len(r.ids)\n}", "func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {\n\tstat, err := os.Stat(p.indexFilenameByMessageId(fileId))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tentriesInIndex := uint64(stat.Si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithTraversalPrototypeChooser specifies the prototype chooser that should be used when performing traversals in writes from a linksystem.
// WithTraversalPrototypeChooser specifies the prototype chooser that should be
// used when performing traversals in writes from a linksystem.
func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option {
	return func(o *Options) {
		o.TraversalPrototypeChooser = t
	}
}
[ "func NewSocketsTraversalExtension() *SocketsTraversalExtension {\n\treturn &SocketsTraversalExtension{\n\t\tSocketsToken: traversalSocketsToken,\n\t}\n}", "func WithSortingByPathAscAndRevisionDesc() GetImplementationOption {\n\treturn func(options *ListImplementationRevisionsOptions) {\n\t\toptions.sortByPathAsc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithTrustedCAR specifies whether CIDs match the block data as they are read from the CAR files.
// WithTrustedCAR specifies whether CIDs match the block data as they are read
// from the CAR files.
func WithTrustedCAR(t bool) Option {
	return func(o *Options) {
		o.TrustedCAR = t
	}
}
[ "func isSpecTrustedCASet(proxyConfig *configv1.ProxySpec) bool {\n\treturn len(proxyConfig.TrustedCA.Name) > 0\n}", "func WithTrusted(trusted bool) Option {\n\treturn func(linter *Linter) {\n\t\tlinter.trusted = trusted\n\t}\n}", "func WithTrustedCertificatesFile(path string) Option {\n\treturn func(c *Client) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring.
// MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a
// CARv1 decode (including within a CARv2 container) will allow a header to be
// without erroring.
func MaxAllowedHeaderSize(max uint64) Option {
	return func(o *Options) {
		o.MaxAllowedHeaderSize = max
	}
}
[ "func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (e *endpoint) M...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring. Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless atypical data is expected, this should not be a large value.
// MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a
// CARv1 decode (including within a CARv2 container) will allow a section to be
// without erroring. Typically IPLD blocks should be under 2 MiB (ideally under
// 1 MiB), so unless atypical data is expected, this should not be a large
// value.
func MaxAllowedSectionSize(max uint64) Option {
	return func(o *Options) {
		o.MaxAllowedSectionSize = max
	}
}
[ "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (s *fseEncoder) m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WriteAsCarV1 is a write option which makes a CAR interface (blockstore or storage) write the output as a CARv1 only, with no CARv2 header or index. Indexing is used internally during write but is discarded upon finalization. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
// WriteAsCarV1 is a write option which makes a CAR interface (blockstore or
// storage) write the output as a CARv1 only, with no CARv2 header or index.
// Indexing is used internally during write but is discarded upon finalization.
//
// Note that this option only affects the storage interfaces (blockstore or
// storage), and is ignored by the root go-car/v2 package.
func WriteAsCarV1(asCarV1 bool) Option {
	return func(o *Options) {
		o.WriteAsCarV1 = asCarV1
	}
}
[ "func WriteV1Header(h *tar.Header, w io.Writer) {\n\tfor _, elem := range v1TarHeaderSelect(h) {\n\t\tw.Write([]byte(elem[0] + elem[1]))\n\t}\n}", "func (r *RAMOutputStream) WriteToV1(bytes []byte) error {\n\terr := r.flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tend := int(r.file.length)\n\tpos, buffer, b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AllowDuplicatePuts is a write option which makes a CAR interface (blockstore or storage) not deduplicate blocks in Put and PutMany. The default is to deduplicate, which matches the current semantics of goipfsblockstore v1. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
// AllowDuplicatePuts is a write option which makes a CAR interface (blockstore
// or storage) not deduplicate blocks in Put and PutMany. The default is to
// deduplicate, which matches the current semantics of go-ipfs-blockstore v1.
//
// Note that this option only affects the storage interfaces (blockstore or
// storage), and is ignored by the root go-car/v2 package.
func AllowDuplicatePuts(allow bool) Option {
	return func(o *Options) {
		o.BlockstoreAllowDuplicatePuts = allow
	}
}
[ "func AllowDuplicatePuts(allow bool) carv2.Option {\n\treturn func(o *carv2.Options) {\n\t\to.BlockstoreAllowDuplicatePuts = allow\n\t}\n}", "func DisallowDuplicateKey() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.disallowDuplicateKey = true\n\t\treturn nil\n\t}\n}", "func ChangeAllowDuplication(m ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Minutes returns the duration as a floating point number of minutes.
// Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 {
	return time.Duration(d).Minutes()
}
[ "func (period Period) Minutes() int {\n\treturn int(period.MinutesFloat())\n}", "func (f *Formatter) Minutes() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d minutes\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Minutes()))\n}", "func GetDurat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Nanoseconds returns the duration as an integer nanosecond count.
// Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 {
	return time.Duration(d).Nanoseconds()
}
[ "func nanoseconds(t uint64) time.Duration {\n\treturn time.Duration(t) * time.Nanosecond\n}", "func (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}", "func (ft *filetime) Nanoseconds() int64 {\n\t// 100-nanosecond intervals since January 1, 1601\n\tnsec := int64(ft.HighDateTime)<<32 + int64...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Seconds returns the duration as a floating point number of seconds.
// Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 {
	return time.Duration(d).Seconds()
}
[ "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (period Period) Seconds() int {\n\treturn int(period.SecondsFloat())\n}", "func (s *Sam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string representation of the approximate duration in russian language
func (d Duration) StringApproximate() (result string) { var seconds, minutes, hours, days, months, years int seconds = int(d.Seconds()) if seconds > 60 { minutes = int(d.Minutes()) } if minutes > 59 { hours = int(d.Hours()) minutes = minutes - hours*60 } if hours > 24 { days = (hours - hours%24) / 24 hours = hours - days*24 } if days > 365 { years = (days - days%365) / 365 days = days - years*365 } if days > 30 { months = (days - days%30) / 30 days = days - months*30 } if years > 0 { if months < 3 { result = numberInString(years, false) + " " + yearsTail(years) } else { result = "Более" if years > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(years, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(years, false)) + " " + strings.ToLower(yearsTailInGenitiveCase(years)) } } else if months > 0 { if days < 8 { result = numberInString(months, false) + " " + monthsTail(months) } else { result = "Более" if months > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(months, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(months, false)) + " " + strings.ToLower(monthsTailInGenitiveCase(months)) } } else if days > 0 { if hours < 5 { result = numberInString(days, false) + " " + daysTail(days) } else { result = "Более " if days == 1 { result += "суток" } else { result += strings.ToLower(numberStringInGenitiveCase(days, false)) + " суток" } } } else if hours > 0 { if minutes < 16 { result = numberInString(hours, false) + " " + hoursTail(hours) } else { result = "Более " if hours == 1 { result += "часа" } else { result += strings.ToLower(numberStringInGenitiveCase(hours, false)) result += " " + strings.ToLower(hoursTailInGenitiveCase(hours)) } } } else if minutes > 0 { if minutes == 1 { result = "Минуту" } else { result = numberInString(minutes, true) + " " + minutesTail(minutes) } } else { result = "Менее минуты" } result += " назад" return }
[ "func (d *Duration) String() string { return (*time.Duration)(d).String() }", "func (t Throughput) StringDuration() string {\n\treturn fmt.Sprintf(\"Duration: %v, starting %v\", time.Duration(t.MeasureDurationMillis)*time.Millisecond, t.StartTime.Format(\"15:04:05 MST\"))\n}", "func (d Duration) String() string...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetIssueLabels gets the current labels on the specified PR or issue
func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]github.Label, error) { var la []github.Label for _, l := range fc.labels { la = append(la, github.Label{Name: l}) } return la, nil }
[ "func (c *client) GetIssueLabels(org, repo string, number int) ([]Label, error) {\n\tdurationLogger := c.log(\"GetIssueLabels\", org, repo, number)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/issues/%d/labels\", org, repo, number), org)\n}", "func (c *client) GetIssueLabels(owner,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateComment adds and tracks a comment in the client
func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error { fc.commentsAdded[number] = append(fc.commentsAdded[number], comment) return nil }
[ "func CreateComment(w http.ResponseWriter, r *http.Request) {\n\tcomment := jsonToComment(w, r)\n\tcomment = CommentDAO.AddComment(comment)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(comment); err != nil {\n\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NumComments counts the number of tracked comments
func (fc *fakeClient) NumComments() int { n := 0 for _, comments := range fc.commentsAdded { n += len(comments) } return n }
[ "func NumberOfComments(c context.Context, id int64) (int, error) {\n\tcount, err := datastore.NewQuery(\"Post\").Filter(\"Parent =\", id).Count(c)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"NumberOfComments: could not collect posts: %v\", err)\n\t}\n\treturn count, nil\n}", "func (o IncidentAdditionalDataResp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewOutput instantiates a new output plugin instance publishing to elasticsearch.
func (f elasticsearchOutputPlugin) NewOutput( config *outputs.MothershipConfig, topologyExpire int, ) (outputs.Outputer, error) { // configure bulk size in config in case it is not set if config.BulkMaxSize == nil { bulkSize := defaultBulkSize config.BulkMaxSize = &bulkSize } output := &elasticsearchOutput{} err := output.init(*config, topologyExpire) if err != nil { return nil, err } return output, nil }
[ "func NewOutput(qs url.Values, fragment string) (plogd.OutputWriter, error) {\n\tvar err error\n\tw := defaultWriter\n\tesurl, err := url.Parse(qs.Get(\"url\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdelete(qs, \"url\")\n\n\tif err := w.configure(qs); err != nil {\n\t\treturn nil, err\n\t}\n\n\tesurl.RawQu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New returns a new PagerDuty notifier.
func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) { client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...) if err != nil { return nil, err } n := &Notifier{conf: c, tmpl: t, logger: l, client: client} if c.ServiceKey != "" || c.ServiceKeyFile != "" { n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/trigger-events n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusForbidden}, CustomDetailsFunc: errDetails} } else { // Retrying can solve the issue on 429 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}, CustomDetailsFunc: errDetails} } return n, nil }
[ "func New(c *config.DingTalkConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, \"dingtalk\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func Ne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewEndpoint creates a new endpoint. To keep things simple, the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint { // Create a new Endpoint with an empty list of handler funcs. return &Endpoint{ handler: map[string]HandleFunc{}, } }
[ "func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}", "func NewEndpoint(resource, httpMethod, route string) *Endpoint {\n\treturn &Endpoint{\n\t\tResource:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) { e.mutex.Lock() e.handler[name] = f e.mutex.Unlock() }
[ "func (p *ServerParams) AddHandleFunc(handlerFunc GrpcHandler, grpcProxyHandler GrpcProxyHandler) {\n\tif handlerFunc != nil {\n\t\tp.handlersForGrpc = append(p.handlersForGrpc, handlerFunc)\n\t}\n\tif grpcProxyHandler != nil {\n\t\tp.handlersForGrpcProxy = append(p.handlersForGrpcProxy, grpcProxyHandler)\n\t}\n}",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listen starts listening on the endpoint port on all interfaces. At least one handler function must have been added through AddHandleFunc() before.
func (e *Endpoint) Listen() error { var err error e.listener, err = net.Listen("tcp", Port) if err != nil { return errors.Wrapf(err, "Unable to listen on port %s\n", Port) } log.Println("Listen on", e.listener.Addr().String()) for { log.Println("Accept a connection request.") conn, err := e.listener.Accept() if err != nil { log.Println("Failed accepting a connection request:", err) continue } log.Println("Handle incoming messages.") go e.handleMessages(conn) } }
[ "func (r *EndpointRegistry) Listen(listener Listener) {\n\tif !r.OnCloseAlways(func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: closing listener OnClose: %v\", err)\n\t\t}\n\t}) {\n\t\treturn\n\t}\n\n\t// Start listener and accept all incoming peer connections,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
handleMessages reads the connection up to the first newline. Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) { // Wrap the connection into a buffered reader for easier reading. rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) defer conn.Close() // Read from the connection until EOF. Expect a command name as the // next input. Call the handler that is registered for this command. for { log.Print("Receive command '") cmd, err := rw.ReadString('\n') switch { case err == io.EOF: log.Println("Reached EOF - close this connection.\n ---") return case err != nil: log.Println("\nError reading command. Got: '"+cmd+"'\n", err) return } // Trim the request string - ReadString does not strip any newlines. cmd = strings.Trim(cmd, "\n ") log.Println(cmd + "'") // Fetch the appropriate handler function from the 'handler' map and call it. e.mutex.RLock() handleCommand, ok := e.handler[cmd] e.mutex.RUnlock() if !ok { log.Println("Command '" + cmd + "' is not registered.") return } handleCommand(rw) } }
[ "func (cli *Client) HandleIncomingMessages(writeCh chan<- IncomingMessage) {\n\tfor {\n\t\tline, err := cli.r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[%d] Client error: %s\\n\", cli.id, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(line[:len(line)-1], \" \", 2)\n\t\tswitch part...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/ Now let's create two handler functions. The easiest case is where our adhoc protocol only sends string data. The second handler receives and processes a struct that was sent as GOB data. handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) { // Receive a string. log.Print("Receive STRING message:") s, err := rw.ReadString('\n') if err != nil { log.Println("Cannot read from connection.\n", err) } s = strings.Trim(s, "\n ") log.Println(s) _, err = rw.WriteString("Thank you.\n") if err != nil { log.Println("Cannot write to connection.\n", err) } err = rw.Flush() if err != nil { log.Println("Flush failed.", err) } }
[ "func (serv *Server) handleText(conn int, payload []byte) {\n\tvar (\n\t\tlogp = `handleText`\n\n\t\thandler RouteHandler\n\t\terr error\n\t\tctx context.Context\n\t\treq *Request\n\t\tres *Response\n\t\tok bool\n\t)\n\n\tres = _resPool.Get().(*Response)\n\tres.reset()\n\n\tctx, ok = serv.Clien...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
handleGob handles the "GOB" request. It decodes the received GOB data into a struct.
func handleGob(rw *bufio.ReadWriter) { log.Print("Receive GOB data:") var data complexData // Create a decoder that decodes directly into a struct variable. dec := gob.NewDecoder(rw) err := dec.Decode(&data) if err != nil { log.Println("Error decoding GOB data:", err) return } // Print the complexData struct and the nested one, too, to prove // that both travelled across the wire. log.Printf("Outer complexData struct: \n%#v\n", data) log.Printf("Inner complexData struct: \n%#v\n", data.C) }
[ "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func GobDecode(data []byte, obj interface{}) error {\n\treturn g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
server listens for incoming requests and dispatches them to registered handler functions.
func server() error { endpoint := NewEndpoint() // Add the handle funcs. endpoint.AddHandleFunc("STRING", handleStrings) endpoint.AddHandleFunc("GOB", handleGob) // Start listening. return endpoint.Listen() }
[ "func handlerServer(w http.ResponseWriter, r *http.Request) {\n\tsetHeader(w, r)\n\treadCookies(r)\n\tserver := r.URL.Query().Get(\"server\")\n\taction := r.URL.Query().Get(\"action\")\n\tswitch action {\n\tcase \"reloader\":\n\t\t_, _ = io.WriteString(w, getServerTubes(server))\n\t\treturn\n\tcase \"clearTubes\":\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/ Main Main starts either a client or a server, depending on whether the `connect` flag is set. Without the flag, the process starts as a server, listening for incoming requests. With the flag the process starts as a client and connects to the host specified by the flag value. Try "localhost" or "127.0.0.1" when running both processes on the same machine. main
func main() { connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.") flag.Parse() // If the connect flag is set, go into client mode. if *connect != "" { err := client(*connect) if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Client done.") return } // Else go into server mode. err := server() if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Server done.") }
[ "func main() {\n\tserver.New().Start()\n}", "func ServerMain() {\n\tRunServer(ParseCommandLine())\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fatal(\"Usage: ./server-go [server port]\")\n\t}\n\tserverPort := os.Args[1]\n\tserver(serverPort)\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Lshortfile flag includes file name and line number in log messages.
func init() { log.SetFlags(log.Lshortfile) }
[ "func (f *FileDetail) ShortenSourceFile(n int) string {\n\tx := len(f.SourceFile) - n - 1\n\tif x <= 0 {\n\t\treturn f.SourceFile\n\t}\n\n\tidx := strings.Index(f.SourceFile[x:], string(filepath.Separator))\n\tif idx >= 0 {\n\t\tx = x + idx\n\t}\n\treturn fmt.Sprintf(\"...%s\", f.SourceFile[x:])\n}", "func Status...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewHealthController creates a health controller.
func NewHealthController(service *goa.Service) *HealthController { return &HealthController{Controller: service.NewController("HealthController")} }
[ "func NewHealthController(router *mux.Router, r *render.Render) *HealthController {\n\tctrl := &HealthController{router, r}\n\tctrl.Register()\n\treturn ctrl\n}", "func NewController(w http.ResponseWriter, r *http.Request, name string) *Controller {\n\treturn &Controller{w, r, name, make(map[string]interface{})}\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Health runs the health action.
func (c *HealthController) Health(ctx *app.HealthHealthContext) error { // HealthController_Health: start_implement ver := "unknown" semVer, err := semver.Make(MajorMinorPatch + "-" + ReleaseType + "+git.sha." + GitCommit) if err == nil { ver = semVer.String() } return ctx.OK([]byte("Health OK: " + time.Now().String() + ", semVer: " + ver + "\n")) // HealthController_Health: end_implement }
[ "func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/api/_ah/health\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGenerator starts foreground goroutine which generates sequence of unsigned ints and puts them in input channel, also it returnes stop channel which need to be triggered when generator need to be stopped
func NewGenerator(input chan<- uint) chan<- bool { stop := make(chan bool) go func() { var current uint = 1 for { select { case input <- current: current++ case <-stop: close(input) return } } }() return stop }
[ "func generator(ctx context.Context) <-chan int {\n\tch := make(chan int)\n\tn := 0\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tfmt.Println(\"Got cancel signal.\")\n\t\t\t\treturn\n\t\t\tcase ch <- n:\n\t\t\t\tfmt.Println(\"Pushing number to channel\")\n\t\t\t\tn++\n\t\t\t}\n\t\t}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genFields generates fields config for given AST
func genFields(fs []*ast.FieldDefinition) *jen.Statement { // // Generate config for fields // // == Example input SDL // // type Dog { // name(style: NameComponentsStyle = SHORT): String! // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // graphql.Fields{ // "name": graphql.Field{ ... }, // "givenName": graphql.Field{ ... }, // } // return jen.Qual(defsPkg, "Fields").Values(jen.DictFunc(func(d jen.Dict) { for _, f := range fs { d[jen.Lit(f.Name.Value)] = genField(f) } })) }
[ "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genField generates field config for given AST
func genField(field *ast.FieldDefinition) *jen.Statement { // // Generate config for field // // == Example input SDL // // interface Pet { // "name of the pet" // name(style: NameComponentsStyle = SHORT): String! // """ // givenName of the pet ★ // """ // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // &graphql.Field{ // Name: "name", // Type: graphql.NonNull(graphql.String), // Description: "name of the pet", // DeprecationReason: "", // Args: FieldConfigArgument{ ... }, // } // // &graphql.Field{ // Name: "givenName", // Type: graphql.String, // Description: "givenName of the pet", // DeprecationReason: "No longer supported; please use name field.", // Args: FieldConfigArgument{ ... }, // } // return jen.Op("&").Qual(defsPkg, "Field").Values(jen.Dict{ jen.Id("Args"): genArguments(field.Arguments), jen.Id("DeprecationReason"): genDeprecationReason(field.Directives), jen.Id("Description"): genDescription(field), jen.Id("Name"): jen.Lit(field.Name.Value), jen.Id("Type"): genOutputTypeReference(field.Type), }) }
[ "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genArguments generates argument field config for given AST
func genArguments(args []*ast.InputValueDefinition) *jen.Statement { // // Generate config for arguments // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // FieldConfigArgument{ // "style": &ArgumentConfig{ ... } // }, // return jen.Qual(defsPkg, "FieldConfigArgument").Values( jen.DictFunc(func(d jen.Dict) { for _, arg := range args { d[jen.Lit(arg.Name.Value)] = genArgument(arg) } }), ) }
[ "func genArgument(arg *ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for argument\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Ex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genArgument generates argument config for given AST
func genArgument(arg *ast.InputValueDefinition) *jen.Statement { // // Generate config for argument // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // &ArgumentConfig{ // Type: graphql.NonNull(graphql.String), // DefaultValue: "SHORT", // TODO: ??? // Description: "style is stylish", // } // return jen.Op("&").Qual(defsPkg, "ArgumentConfig").Values(jen.Dict{ jen.Id("DefaultValue"): genValue(arg.DefaultValue), jen.Id("Description"): genDescription(arg), jen.Id("Type"): genInputTypeReference(arg.Type), }) }
[ "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddReceipt adds receipt for user.
func (client Client) AddReceipt(userId string, text string) error { addReceiptUrl := client.backendUrl + "/internal/receipt" request := addReceiptRequest{ReceiptString: text, UserId: userId} reader, err := getReader(request) if err != nil { return err } response, err := http.Post(addReceiptUrl, "text/javascript", reader) if err != nil { return err } switch response.StatusCode { case http.StatusOK: return nil default: return errors.New(response.Status) } return nil }
[ "func (ptu *PaymentTypeUpdate) AddReceipt(r ...*Receipt) *PaymentTypeUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn ptu.AddReceiptIDs(ids...)\n}", "func (cc *CustomerCreate) AddReceipt(r ...*Receipt) *CustomerCreate {\n\tids := make([]int, len(r))\n\tfor i := ran...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MergeUnique Merges `source` string slice into `dest` and returns result. Inserts from `source` only when `dest` does not `Contain` given string.
func MergeUnique(dest, source []string) []string { for _, str := range source { if !Contain(dest, str) { dest = append(dest, str) } } return dest }
[ "func MergeAndDeduplicateSlice(src []string, target []string) []string {\n\tm := make(map[string]bool)\n\tfor i := range src {\n\t\tm[src[i]] = true\n\t}\n\n\tfor i := range target {\n\t\tif _, ok := m[target[i]]; !ok {\n\t\t\tsrc = append(src, target[i])\n\t\t}\n\t}\n\n\treturn src\n}", "func mergeStrings(tgt *[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Empty indicates if kv is not set
func (kv BatchKeyRotateKV) Empty() bool { return kv.Key == "" && kv.Value == "" }
[ "func hasNonEmptyKV(kvMap map[string]string) bool {\n\tfor k, v := range kvMap {\n\t\tif strings.TrimSpace(k) == \"\" && strings.TrimSpace(v) == \"\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (kv BatchJobReplicateKV) Empty() bool {\n\treturn kv.Key == \"\" && kv.Value == \"\"\n}", "func (k Kin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Match matches input kv with kv, value will be wildcard matched depending on the user input
func (kv BatchKeyRotateKV) Match(ikv BatchKeyRotateKV) bool { if kv.Empty() { return true } if strings.EqualFold(kv.Key, ikv.Key) { return wildcard.Match(kv.Value, ikv.Value) } return false }
[ "func Match(goos, kv, key string) (value string, ok bool) {\n\tif len(kv) <= len(key) || kv[len(key)] != '=' {\n\t\treturn \"\", false\n\t}\n\n\tif goos == \"windows\" {\n\t\t// Case insensitive.\n\t\tif !strings.EqualFold(kv[:len(key)], key) {\n\t\t\treturn \"\", false\n\t\t}\n\t} else {\n\t\t// Case sensitive.\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate validates input replicate retries.
func (r BatchKeyRotateRetry) Validate() error { if r.Attempts < 0 { return errInvalidArgument } if r.Delay < 0 { return errInvalidArgument } return nil }
[ "func (r BatchReplicateRetry) Validate() error {\n\tif r.Attempts < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Delay < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\treturn nil\n}", "func (o *RetryOptions) Validate() {\n\tif o.MaxAttempts <= 0 {\n\t\to.MaxAttempts = 1\n\t}\n\n\tconst floor = 100 * time.Milli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate validates input key rotation encryption options.
func (e BatchJobKeyRotateEncryption) Validate() error { if e.Type != sses3 && e.Type != ssekms { return errInvalidArgument } spaces := strings.HasPrefix(e.Key, " ") || strings.HasSuffix(e.Key, " ") if e.Type == ssekms && spaces { return crypto.ErrInvalidEncryptionKeyID } if e.Type == ssekms && GlobalKMS != nil { ctx := kms.Context{} if e.Context != "" { b, err := base64.StdEncoding.DecodeString(e.Context) if err != nil { return err } json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(b, &ctx); err != nil { return err } } e.kmsContext = kms.Context{} for k, v := range ctx { e.kmsContext[k] = v } ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil { return err } } return nil }
[ "func (k EncryptionKeyDerived) Validate() error {\n\tif k.Length == 0 {\n\t\treturn ErrMissingField(\"length\")\n\t}\n\tif k.Length <= 0 {\n\t\treturn ErrInvalidKeyLength\n\t}\n\tif k.Algorithm == \"\" {\n\t\treturn ErrMissingField(\"algorithm\")\n\t}\n\tif k.Algorithm != KeyDerivationAlgorithmScrypt {\n\t\treturn ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KeyRotate rotates encryption key of an object
func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, objInfo ObjectInfo) error { srcBucket := r.Bucket srcObject := objInfo.Name if objInfo.DeleteMarker || !objInfo.VersionPurgeStatus.Empty() { return nil } sseKMS := crypto.S3KMS.IsEncrypted(objInfo.UserDefined) sseS3 := crypto.S3.IsEncrypted(objInfo.UserDefined) if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed return errInvalidEncryptionParameters } if sseKMS && r.Encryption.Type == sses3 { // previously encrypted with sse-kms, now sse-s3 disallowed return errInvalidEncryptionParameters } versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject) lock := api.NewNSLock(r.Bucket, objInfo.Name) lkctx, err := lock.GetLock(ctx, globalOperationTimeout) if err != nil { return err } ctx = lkctx.Context() defer lock.Unlock(lkctx) opts := ObjectOptions{ VersionID: objInfo.VersionID, Versioned: versioned, VersionSuspended: versionSuspended, NoLock: true, } obj, err := api.GetObjectInfo(ctx, r.Bucket, objInfo.Name, opts) if err != nil { return err } oi := obj.Clone() var ( newKeyID string newKeyContext kms.Context ) encMetadata := make(map[string]string) for k, v := range oi.UserDefined { if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { encMetadata[k] = v } } if (sseKMS || sseS3) && r.Encryption.Type == ssekms { if err = r.Encryption.Validate(); err != nil { return err } newKeyID = strings.TrimPrefix(r.Encryption.Key, crypto.ARNPrefix) newKeyContext = r.Encryption.kmsContext } if err = rotateKey(ctx, []byte{}, newKeyID, []byte{}, r.Bucket, oi.Name, encMetadata, newKeyContext); err != nil { return err } // Since we are rotating the keys, make sure to update the metadata. 
oi.metadataOnly = true oi.keyRotation = true for k, v := range encMetadata { oi.UserDefined[k] = v } if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{ VersionID: oi.VersionID, }, ObjectOptions{ VersionID: oi.VersionID, NoLock: true, }); err != nil { return err } return nil }
[ "func rotateKey(oldKey []byte, newKey []byte, metadata map[string]string) error {\n\tdelete(metadata, SSECustomerKey) // make sure we do not save the key by accident\n\n\tif metadata[ServerSideEncryptionSealAlgorithm] != SSESealAlgorithmDareSha256 { // currently DARE-SHA256 is the only option\n\t\treturn errObjectT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the batch key rottion job, resumes if there was a pending job via "job.ID"
// Start runs the batch key-rotation job, resuming a previously interrupted
// run identified by job.ID (the walk restarts from the last persisted
// object marker). It walks r.Bucket/r.Prefix, filters objects via the job's
// filter flags, and rotates each matching encrypted object's key with a
// bounded worker pool, retrying per-object up to the configured attempts.
// Progress is periodically persisted and a final notification is sent.
func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
	ri := &batchJobInfo{
		JobID:     job.ID,
		JobType:   string(job.Type()),
		StartTime: job.Started,
	}
	// Load any previously persisted state so the job resumes where it left off.
	if err := ri.load(ctx, api, job); err != nil {
		return err
	}
	globalBatchJobsMetrics.save(job.ID, ri)
	lastObject := ri.Object

	delay := job.KeyRotate.Flags.Retry.Delay
	if delay == 0 {
		delay = batchKeyRotateJobDefaultRetryDelay
	}
	// Used only to jitter the backoff sleep after a failed run.
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	// skip reports whether an object passes the job's filter flags
	// (true = process the object, false = skip it).
	skip := func(info FileInfo) (ok bool) {
		if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
			// skip all objects that are newer than specified older duration
			return false
		}
		if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
			// skip all objects that are older than specified newer duration
			return false
		}
		if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
			// skip all objects that are created before the specified time.
			return false
		}
		if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
			// skip all objects that are created after the specified time.
			return false
		}
		if len(r.Flags.Filter.Tags) > 0 {
			// Only parse object tags if tags filter is specified.
			tagMap := map[string]string{}
			tagStr := info.Metadata[xhttp.AmzObjectTagging]
			if len(tagStr) != 0 {
				t, err := tags.ParseObjectTags(tagStr)
				if err != nil {
					return false
				}
				tagMap = t.ToMap()
			}
			for _, kv := range r.Flags.Filter.Tags {
				for t, v := range tagMap {
					if kv.Match(BatchKeyRotateKV{Key: t, Value: v}) {
						return true
					}
				}
			}
			// None of the provided tags filter match skip the object
			return false
		}
		if len(r.Flags.Filter.Metadata) > 0 {
			for _, kv := range r.Flags.Filter.Metadata {
				for k, v := range info.Metadata {
					if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
						continue
					}
					// We only need to match x-amz-meta or standardHeaders
					if kv.Match(BatchKeyRotateKV{Key: k, Value: v}) {
						return true
					}
				}
			}
			// None of the provided metadata filters match skip the object.
			return false
		}
		if r.Flags.Filter.KMSKeyID != "" {
			if v, ok := info.Metadata[xhttp.AmzServerSideEncryptionKmsID]; ok && strings.TrimPrefix(v, crypto.ARNPrefix) != r.Flags.Filter.KMSKeyID {
				return false
			}
		}
		return true
	}

	// Worker-pool size: overridable by env, defaults to half the CPUs.
	workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
	if err != nil {
		return err
	}

	wk, err := workers.New(workerSize)
	if err != nil {
		// invalid worker size.
		return err
	}

	retryAttempts := ri.RetryAttempts
	ctx, cancel := context.WithCancel(ctx)

	results := make(chan ObjectInfo, 100)
	if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
		WalkMarker: lastObject,
		WalkFilter: skip,
	}); err != nil {
		cancel()
		// Do not need to retry if we can't list objects on source.
		return err
	}

	for result := range results {
		result := result
		sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
		sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
		if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
			continue
		}
		wk.Take()
		go func() {
			defer wk.Give()
			// Retry each object up to retryAttempts times before giving up.
			for attempts := 1; attempts <= retryAttempts; attempts++ {
				attempts := attempts
				stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
				success := true
				if err := r.KeyRotate(ctx, api, result); err != nil {
					stopFn(err)
					logger.LogIf(ctx, err)
					success = false
				} else {
					stopFn(nil)
				}
				ri.trackCurrentBucketObject(r.Bucket, result, success)
				ri.RetryAttempts = attempts
				globalBatchJobsMetrics.save(job.ID, ri)
				// persist in-memory state to disk after every 10secs.
				logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
				if success {
					break
				}
			}
		}()
	}
	wk.Wait()

	ri.Complete = ri.ObjectsFailed == 0
	ri.Failed = ri.ObjectsFailed > 0
	globalBatchJobsMetrics.save(job.ID, ri)
	// persist in-memory state to disk.
	logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))

	// Best-effort completion notification; failure is only logged.
	buf, _ := json.Marshal(ri)
	if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
		logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
	}

	cancel()
	if ri.Failed {
		// Reset progress counters and back off (with jitter) before the
		// scheduler retries the job.
		ri.ObjectsFailed = 0
		ri.Bucket = ""
		ri.Object = ""
		ri.Objects = 0
		time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
	}

	return nil
}
[ "func (r *JobRunner) startJob(ctx context.Context, startedAt time.Time) error {\n\tr.conf.Job.StartedAt = startedAt.UTC().Format(time.RFC3339Nano)\n\n\treturn roko.NewRetrier(\n\t\troko.WithMaxAttempts(7),\n\t\troko.WithStrategy(roko.Exponential(2*time.Second, 0)),\n\t).DoWithContext(ctx, func(rtr *roko.Retrier) er...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
toGA is a utility method to return the baseInstance data as a GA Instance object
func (bi *baseInstance) toGA() *ga.Instance { inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}} if bi.aliasRange != "" { inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{ {IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName}, } } return inst }
[ "func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
toBeta is a utility method to return the baseInstance data as a beta Instance object
func (bi *baseInstance) toBeta() *beta.Instance { inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}} if bi.aliasRange != "" { inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{ {IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName}, } } return inst }
[ "func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newBaseInstanceList is the baseInstanceList constructor
func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList { cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize) return &baseInstanceList{ allocateCIDR: allocateCIDR, clusterCIDR: clusterCIDR, subnetMaskSize: subnetMaskSize, cidrSet: cidrSet, instances: make(map[meta.Key]*baseInstance), } }
[ "func newList(e exec.Executor) *chunk.List {\n\tbase := e.Base()\n\treturn chunk.NewList(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize())\n}", "func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseIns...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getOrCreateBaseInstance lazily creates a new base instance, assigning if allocateCIDR is true
func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance { bil.lock.Lock() defer bil.lock.Unlock() inst, found := bil.instances[*key] if !found { inst = &baseInstance{name: key.Name, zone: key.Zone} if bil.allocateCIDR { nextRange, _ := bil.cidrSet.AllocateNext() inst.aliasRange = nextRange.String() } bil.instances[*key] = inst } return inst }
[ "func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tci...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook
func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) { return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) { m.Lock.Lock() defer m.Lock.Unlock() if _, found := m.Objects[*key]; !found { m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()} } return false, nil, nil } }
[ "func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook
func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) { return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) { m.Lock.Lock() defer m.Lock.Unlock() if _, found := m.Objects[*key]; !found { m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()} } return false, nil, nil } }
[ "func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.O...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newMockCloud returns a mock GCE cloud with the appropriate handler hooks installed
func (bil *baseInstanceList) newMockCloud() cloud.Cloud { c := cloud.NewMockGCE(nil) // insert hooks to lazy create a instance when needed c.MockInstances.GetHook = bil.newGAGetHook() c.MockBetaInstances.GetHook = bil.newBetaGetHook() return c }
[ "func NewCloudMock() *CloudMock {\n\taddress, grpcServer, mockTrace := startMockServer()\n\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t}\n\n\ttraceClient := cloudtrace.NewTraceServiceClient(conn)\n\tmetricClient := monitoring.NewMetricS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetTask returns a new task for the action
func GetTask(name, action string, conf *config.MountConfig) (iface.Task, error) { switch action { case "", "create": return NewCreateTask(name, conf), nil case "remove", "rm": return NewRemoveTask(name, conf), nil default: return nil, fmt.Errorf("Invalid mount action %q for task %q", action, name) } }
[ "func GetTask(name, action string, conf *config.ComposeConfig) (iface.Task, error) {\n\tcomposeAction, err := getAction(action, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewTask(name, conf, composeAction), nil\n}", "func GetTask(r *http.Request) *task.Task {\n\tif rv := r.Context().Value(model....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewCommonTime returns a simple 4/4 meter at the specified tempo
func NewCommonTime(bpm float64) *Meter { return &Meter{ BeatsPerMinute: bpm, BeatsPerBar: 4, BeatValue: notes.Quarter, } }
[ "func monotime() int64", "func (t Time) Nanosecond() int {}", "func newFakeTime() {\n\tfakeCurrentTime = fakeTime().Add(time.Hour * 24 * 2)\n}", "func NanoTime() int64", "func (t Time) Clock() (hour, min, sec int) {}", "func New(h, m int) Time {\n\treturn minToTime(h * 60 + m)\n}", "func newTime(year in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New returns a new meter with the specified parameters
func New(bpm, beatsPerBar float64, beatValue notes.Duration) *Meter { return &Meter{ BeatsPerMinute: bpm, BeatsPerBar: beatsPerBar, BeatValue: beatValue, } }
[ "func NewMeter(t time.Time, interval int, staleThreshold int) Meter {\n\tm := &StandardMeter{\n\t\t0,\n\t\tNewEWMA1(t),\n\t\tNewEWMA5(t),\n\t\tNewEWMA15(t),\n\t\tt,\n\t\tt,\n\t\tinterval,\n\t\tstaleThreshold,\n\t}\n\n\treturn m\n}", "func NewMeter(name string, options ...Option) Meter {\n\treturn newMeter(name, o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NoteToTime converts a notes.Duration to a time.Duration based on the meter
func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration { return time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute)) }
[ "func ToDuration(tm uint64, hz uint32) time.Duration {\n\treturn time.Duration(tm * uint64(time.Second) / uint64(hz))\n}", "func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}", "func (e *Exact) convertToDuration() time.Duration {\n\tif isValidUnitOfTime(e.Unit) {\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NoteToFreq converts a notes.Duration into a frequency with period equal to that note length
func (m Meter) NoteToFreq(noteVal notes.Duration) float64 { duration := m.NoteToTime(noteVal) return 1 / float64(duration.Seconds()) }
[ "func midi_to_frequency(note uint) float32 {\n\treturn float32(A5_FREQUENCY * math.Pow(2.0, float64(int(note)-MIDI_NOTE_A5)/NOTES_IN_OCTAVE))\n}", "func getFreq(beatLen int) float64 {\n\treturn float64(fss) / float64(beatLen)\n}", "func toFreq(s semi, tonic freq) freq {\n\treturn tonic * freq(math.Pow(root12, f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GenerateJWTToken generates a JWT token with the username, signed by the given secret key
func GenerateJWTToken(userName, jwtAccSecretKey string) (string, error) { claims := jwt.MapClaims{ "username": userName, "ExpiresAt": jwt.TimeFunc().Add(1 * time.Minute).Unix(), "IssuedAt": jwt.TimeFunc().Unix(), } token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) return token.SignedString([]byte(jwtAccSecretKey)) }
[ "func GenerateJWTToken(parent context.Context, secretKey string, userId int64) (string, error) {\n\t_, cancel := context.WithTimeout(parent, time.Duration(1)*time.Second)\n\tdefer cancel()\n\n\t// Timestamp the beginning.\n\tnow := time.Now()\n\n\tuserIdStr := fmt.Sprintf(\"%d\", userId)\n\n\t// Define a signer.\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WriteCloserWithContext converts a WriteContextCloser to an io.WriteCloser; whenever the Close method is called, the ctx will be passed to it
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser { return &closerWithContext{ WriteContextCloser: closer, ctx: ctx, } }
[ "func (c *Closer) Ctx() context.Context { return (*closerCtx)(c) }", "func (fw *FileWriter) CloseWithContext(ctx context.Context, opts ...FlushRowGroupOption) error {\n\tif fw.schemaWriter.rowGroupNumRecords() > 0 {\n\t\tif err := fw.FlushRowGroup(opts...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkv := ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NilCloser returns closer if it's not nil otherwise returns a nop closer
func NilCloser(r io.Closer) io.Closer { if r == nil { return &nilCloser{} } return r }
[ "func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}", "func noopCloser(r io.Reader, err error) io.ReadCloser {\n\treturn &nopCloser{\n\t\tReader: r,\n\t\terr: err,\n\t}\n}", "func NopCloser() error { return nil }", "func (noopCloser) Close() error {\n\treturn nil\n}", "func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NopWriteCloser returns a WriteCloser with a noop Close method wrapping the provided Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser { return nopWriteCloser{r} }
[ "func NopCloser(r io.Writer) io.WriteCloser { return nopCloser{r} }", "func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}", "func NopCloser() error { return nil }", "func noopCloser(r io.Reader, err error) io.ReadCloser {\n\treturn &nopCloser{\n\t\tReader: r,\n\t\terr: err,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewTracer returns a new tracer
func NewTracer(description string) *Tracer { return &Tracer{Started: time.Now().UTC(), Description: description} }
[ "func NewTracer(name string) *Tracer {\n\tname = fmt.Sprintf(namePattern, name)\n\treturn &Tracer{\n\t\tname: name,\n\t}\n}", "func NewTracer(region string) Shooter {\n\treturn &Tracer{\n\t\tName: fmt.Sprintf(\"Request from %s\", region),\n\t\tAttacker: &http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ThisFunction returns calling function name
func ThisFunction() string { var pc [32]uintptr runtime.Callers(2, pc[:]) return runtime.FuncForPC(pc[0]).Name() }
[ "func CurrentFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\treturn funcname\n}", "func ThisFunc() *runtime.Func {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc)\n}", "func myCaller() string {\n\t// Skip GetCallerFunction...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Value returns value of the string
func (s *SyncString) Value() string { s.Lock() defer s.Unlock() return s.string }
[ "func (s *String) GetValue() string {\n\treturn s.value\n}", "func (sval *ScalarValue) Value() string {\n\tswitch {\n\tcase strings.HasPrefix(sval.Raw, `\"\"\"`):\n\t\treturn parseBlockString(sval.Raw)\n\tcase strings.HasPrefix(sval.Raw, `\"`):\n\t\treturn parseString(sval.Raw)\n\tdefault:\n\t\treturn sval.Raw\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ClickableURL fixes address in url to make sure it's clickable, e.g. it replaces "undefined" address like 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string { out, err := url.Parse(in) if err != nil { return in } host, port, err := net.SplitHostPort(out.Host) if err != nil { return in } ip := net.ParseIP(host) // if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast, // replace with localhost that is clickable if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() { out.Host = fmt.Sprintf("127.0.0.1:%v", port) return out.String() } return out.String() }
[ "func makeHTMLhref(url string) string {\n\treturn \"<a target=_blank href=\" + url + \">URL</a>\"\n}", "func URLButton(text, url string) Button {\n\treturn button{\n\t\tType: \"web_url\",\n\t\tTitle: text,\n\t\tURL: url,\n\t\tShareButton: \"hide\",\n\t\tExtensions: true,\n\t}\n}", "func Sa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AsBool converts string to bool, in case of the value is empty or unknown, defaults to false
func AsBool(v string) bool { if v == "" { return false } out, _ := apiutils.ParseBool(v) return out }
[ "func (s *Value) asBool() (bool, error) {\n\t// A missing value is considered false\n\tif s == nil {\n\t\treturn false, nil\n\t}\n\tswitch s.Name {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"invalid boolean: %s\", s.Name)\n\t}\n}", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ParseAdvertiseAddr validates advertise address, makes sure it's not an unreachable or multicast address returns address split into host and port, port could be empty if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) { advertiseIP = strings.TrimSpace(advertiseIP) host := advertiseIP port := "" if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") { var err error host, port, err = net.SplitHostPort(advertiseIP) if err != nil { return "", "", trace.BadParameter("failed to parse address %q", advertiseIP) } if _, err := strconv.Atoi(port); err != nil { return "", "", trace.BadParameter("bad port %q, expected integer", port) } if host == "" { return "", "", trace.BadParameter("missing host parameter") } } ip := net.ParseIP(host) if len(ip) != 0 { if ip.IsUnspecified() || ip.IsMulticast() { return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP) } } return host, port, nil }
[ "func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string { if in == nil { return nil } out := make([]string, 0, len(in)) for key := range in { out = append(out, key) } sort.Strings(out) return out }
[ "func (set StringSet) ToSlice() []string {\n\tkeys := make([]string, 0, len(set))\n\tfor k := range set {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func stringSliceFromGolangSet(sset map[string]struct{}) []string {\n\treturn tpgresource.StringSliceFromGolangSet(sset)\n}", "func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ParseOnOff parses whether value is "on" or "off", parameterName is passed for error reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) { switch val { case teleport.On: return true, nil case teleport.Off: return false, nil case "": return defaultValue, nil default: return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val) } }
[ "func parseBool(v interface{}, def bool) bool {\n\tswitch b := v.(type) {\n\tcase string:\n\t\tswitch strings.ToLower(b) {\n\t\tcase \"t\", \"y\", \"true\", \"yes\":\n\t\t\treturn true\n\t\tcase \"f\", \"n\", \"false\", \"no\":\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn def\n\t\t}\n\tcase bool:\n\t\treturn b\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) { groups, err := os.Getgroups() if err != nil { return false, trace.ConvertSystemError(err) } for _, group := range groups { if group == gid { return true, nil } } return false, nil }
[ "func (userser *UserService) IsGroupMember(userid , groupid string ) bool {\n\ter := userser.UserRepo.IsGroupMember(userid , groupid )\n\tif er != nil {\n\t\treturn false \n\t}\n\treturn true \n}", "func IsMemberOfGroup(group, userName string) (bool, error) {\n\treturn isMemberOfGroup(group, userName)\n}", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) { host, err := Host(hostport) if err != nil { return "", trace.Wrap(err) } if ip := net.ParseIP(host); len(ip) != 0 { return "", trace.BadParameter("%v is an IP address", host) } return host, nil }
[ "func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}", "func GetHostname(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}", "func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MultiCloser implements io.Closer; its Close sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer { return &multiCloser{ closers: closers, } }
[ "func (mc *MultiCloser) Close() error {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\tresult := &multierror.Error{ErrorFormat: utils.SingleLineErrorFormatter}\n\n\tfor _, closer := range mc.closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tresult = multierror.Append(result, err)\n\t\t}\n\t}\n\n\tmc.closers ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OpaqueAccessDenied returns a generic NotFound instead of AccessDenied so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error { if trace.IsAccessDenied(err) { return trace.NotFound("not found") } return trace.Wrap(err) }
[ "func (aee *ActiveEndpointsError) Forbidden() {}", "func AccessDeny(msg string) Access {\n\treturn Access{Allow: false, Message: msg, StatusCode: http.StatusBadRequest}\n}", "func Forbidden(err error) Response {\n\tmessage := \"not authorized\"\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\treturn &error...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PopInt returns a value from the list; it panics if not enough values were allocated
func (p *PortList) PopInt() int { i, err := strconv.Atoi(p.Pop()) if err != nil { panic(err) } return i }
[ "func (s *SliceOfInt) Pop() int {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *SliceOfInt32) Pop() int32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *SliceOfInt64) Pop() ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) { list := make([]string, 0, n) start := PortStartingNumber if len(offset) != 0 { start = offset[0] } for i := start; i < start+n; i++ { list = append(list, strconv.Itoa(i)) } return PortList{ports: list}, nil }
[ "func getFreePorts(t *testing.T, n int) ports {\n\tports := make(ports, n)\n\tfor i := 0; i < n; i++ {\n\t\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := lis.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }