| _id (string, 86-170 chars) | text (string, 54-39.3k chars) |
|---|---|
https://github.com/kubicorn/kubicorn/blob/c4a4b80994b4333709c0f8164faabd801866b986/cloud/openstack/openstackSdk/sdk.go#L44-L83
|
func NewSdk(region string) (*Sdk, error) {
	sdk := &Sdk{}
	authOpts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		return nil, err
	}
	// By default, enable re-authenticating when the token expires. This may be
	// useful when the operator policy enforces a short token validity period
	// and you run into a long deployment.
	authOpts.AllowReauth = true
	client, err := openstack.AuthenticatedClient(authOpts)
	if err != nil {
		return nil, err
	}
	//----------------------------
	//
	// Openstack Client Resources
	//
	//----------------------------
	endpointOpts := gophercloud.EndpointOpts{
		Region: region,
	}
	// Compute [Nova]
	if sdk.Compute, err = openstack.NewComputeV2(client, endpointOpts); err != nil {
		return nil, err
	}
	// Network [Neutron]
	if sdk.Network, err = openstack.NewNetworkV2(client, endpointOpts); err != nil {
		return nil, err
	}
	// Object Storage [Swift]
	if sdk.ObjectStorage, err = openstack.NewObjectStorageV1(client, endpointOpts); err != nil {
		return nil, err
	}
	return sdk, nil
}
|
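A minimal usage sketch for the snippet above (illustrative only, not taken from the repository); it assumes the usual OS_* OpenStack environment variables are exported so that openstack.AuthOptionsFromEnv can succeed:

sdk, err := NewSdk("RegionOne") // region name is a placeholder
if err != nil {
	log.Fatalf("building OpenStack SDK: %v", err)
}
// sdk.Compute, sdk.Network and sdk.ObjectStorage are now initialized service clients.
_ = sdk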
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_mappings.go#L24-L26
|
func (a *App) GET(p string, h Handler) *RouteInfo {
	return a.addRoute("GET", p, h)
}
|
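A hedged sketch of how an application would typically register a route through this method (handler body and options are placeholders, not from the repository):

app := buffalo.New(buffalo.Options{})
app.GET("/ping", func(c buffalo.Context) error {
	// a real handler would render a response here
	return nil
})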
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/runtime.go#L779-L788
|
func (p *RunScriptParams) Do(ctx context.Context) (result *RemoteObject, exceptionDetails *ExceptionDetails, err error) {
// execute
var res RunScriptReturns
err = cdp.Execute(ctx, CommandRunScript, p, &res)
if err != nil {
return nil, nil, err
}
return res.Result, res.ExceptionDetails, nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/network/easyjson.go#L3843-L3847
|
func (v *Request) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdprotoNetwork25(&r, v)
return r.Error()
}
|
https://github.com/mgutz/str/blob/968bf66e3da857419e4f6e71b2d5c9ae95682dc4/funcsPZ.go#L175-L186
|
func SliceIndexOf(slice []string, val string) int {
	if slice == nil {
		return -1
	}
	for i, it := range slice {
		if it == val {
			return i
		}
	}
	return -1
}
|
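A quick illustrative use of the helper above:

names := []string{"alpha", "beta", "gamma"}
fmt.Println(SliceIndexOf(names, "beta"))  // 1
fmt.Println(SliceIndexOf(names, "delta")) // -1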
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/types.go#L469-L511
|
func (t *APIType) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch APIType(in.String()) {
case APITypeLog:
*t = APITypeLog
case APITypeDebug:
*t = APITypeDebug
case APITypeInfo:
*t = APITypeInfo
case APITypeError:
*t = APITypeError
case APITypeWarning:
*t = APITypeWarning
case APITypeDir:
*t = APITypeDir
case APITypeDirxml:
*t = APITypeDirxml
case APITypeTable:
*t = APITypeTable
case APITypeTrace:
*t = APITypeTrace
case APITypeClear:
*t = APITypeClear
case APITypeStartGroup:
*t = APITypeStartGroup
case APITypeStartGroupCollapsed:
*t = APITypeStartGroupCollapsed
case APITypeEndGroup:
*t = APITypeEndGroup
case APITypeAssert:
*t = APITypeAssert
case APITypeProfile:
*t = APITypeProfile
case APITypeProfileEnd:
*t = APITypeProfileEnd
case APITypeCount:
*t = APITypeCount
case APITypeTimeEnd:
*t = APITypeTimeEnd
default:
in.AddError(errors.New("unknown APIType value"))
}
}
|
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/router.go#L310-L312
|
func (d *topologyGossipData) Encode() [][]byte {
return [][]byte{d.peers.encodePeers(d.update)}
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/docker/docker_image_dest.go#L274-L343
|
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
// First, check whether the blob happens to already exist at the destination.
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, types.BlobInfo{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// Then try reusing blobs from other locations.
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}
// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
// so, be a nice client and don't create unnecessary upload sessions on the server.
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
if !exists {
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
continue // logrus.Debug() already happened in blobExists
}
if candidateRepo.Name() != d.ref.ref.Name() {
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
logrus.Debugf("... Mount failed: %v", err)
continue
}
}
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
}
return false, types.BlobInfo{}, nil
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pfs/server/driver.go#L3064-L3102
|
func (d *driver) upsertPutFileRecords(pachClient *client.APIClient, file *pfs.File, newRecords *pfs.PutFileRecords) error {
prefix, err := d.scratchFilePrefix(file)
if err != nil {
return err
}
ctx := pachClient.Ctx()
_, err = col.NewSTM(ctx, d.etcdClient, func(stm col.STM) error {
commitsCol := d.openCommits.ReadOnly(ctx)
var commit pfs.Commit
err := commitsCol.Get(file.Commit.ID, &commit)
if err != nil {
return err
}
// Dumb check to make sure the unmarshalled value exists (and matches the current ID)
// to denote that the current commit is indeed open
if commit.ID != file.Commit.ID {
return fmt.Errorf("commit %v is not open", file.Commit.ID)
}
recordsCol := d.putFileRecords.ReadWrite(stm)
var existingRecords pfs.PutFileRecords
return recordsCol.Upsert(prefix, &existingRecords, func() error {
if newRecords.Tombstone {
existingRecords.Tombstone = true
existingRecords.Records = nil
}
existingRecords.Split = newRecords.Split
existingRecords.Records = append(existingRecords.Records, newRecords.Records...)
existingRecords.Header = newRecords.Header
existingRecords.Footer = newRecords.Footer
return nil
})
})
if err != nil {
return err
}
return err
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/containers.go#L795-L811
|
func (c *Cluster) ContainerSetState(id int, state string) error {
	err := c.Transaction(func(tx *ClusterTx) error {
		// Set the new value
		str := fmt.Sprintf("INSERT OR REPLACE INTO containers_config (container_id, key, value) VALUES (?, 'volatile.last_state.power', ?)")
		stmt, err := tx.tx.Prepare(str)
		if err != nil {
			return err
		}
		defer stmt.Close()
		if _, err = stmt.Exec(id, state); err != nil {
			return err
		}
		return nil
	})
	return err
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/client.go#L583-L593
|
func (c *Client) CreateConfigMap(content ConfigMap) (ConfigMap, error) {
c.log("CreateConfigMap")
var retConfigMap ConfigMap
err := c.request(&request{
method: http.MethodPost,
path: fmt.Sprintf("/api/v1/namespaces/%s/configmaps", c.namespace),
requestBody: &content,
}, &retConfigMap)
return retConfigMap, err
}
|
https://github.com/kubicorn/kubicorn/blob/c4a4b80994b4333709c0f8164faabd801866b986/cmd/explain.go#L39-L73
|
func ExplainCmd() *cobra.Command {
var exo = &cli.ExplainOptions{}
var cmd = &cobra.Command{
Use: "explain",
Short: "Explain cluster",
Long: `Output expected and actual state of the given cluster`,
Run: func(cmd *cobra.Command, args []string) {
switch len(args) {
case 0:
exo.Name = viper.GetString(keyKubicornName)
case 1:
exo.Name = args[0]
default:
logger.Critical("Too many arguments.")
os.Exit(1)
}
if err := runExplain(exo); err != nil {
logger.Critical(err.Error())
os.Exit(1)
}
},
}
fs := cmd.Flags()
bindCommonStateStoreFlags(&exo.StateStoreOptions, fs)
bindCommonAwsFlags(&exo.AwsOptions, fs)
fs.StringVarP(&exo.Output, keyOutput, "o", viper.GetString(keyOutput), descOutput)
fs.StringVar(&exo.GitRemote, keyGitConfig, viper.GetString(keyGitConfig), descGitConfig)
return cmd
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/snapshot_merge.go#L31-L62
|
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
// get a snapshot of v2 store as []byte
clone := s.v2store.Clone()
d, err := clone.SaveNoCopy()
if err != nil {
if lg := s.getLogger(); lg != nil {
lg.Panic("failed to save v2 store data", zap.Error(err))
} else {
plog.Panicf("store save should never fail: %v", err)
}
}
// commit kv to write metadata(for example: consistent index).
s.KV().Commit()
dbsnap := s.be.Snapshot()
// get a snapshot of v3 KV as readCloser
rc := newSnapshotReaderCloser(s.getLogger(), dbsnap)
// put the []byte snapshot of store into raft snapshot and return the merged snapshot with
// KV readCloser snapshot.
snapshot := raftpb.Snapshot{
Metadata: raftpb.SnapshotMetadata{
Index: snapi,
Term: snapt,
ConfState: confState,
},
Data: d,
}
m.Snapshot = snapshot
return *snap.NewMessage(m, rc, dbsnap.Size())
}
|
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/api/app.go#L653-L685
|
func setUnitStatus(w http.ResponseWriter, r *http.Request, t auth.Token) error {
unitName := r.URL.Query().Get(":unit")
if unitName == "" {
return &errors.HTTP{
Code: http.StatusBadRequest,
Message: "missing unit",
}
}
postStatus := InputValue(r, "status")
status, err := provision.ParseStatus(postStatus)
if err != nil {
return &errors.HTTP{
Code: http.StatusBadRequest,
Message: err.Error(),
}
}
appName := r.URL.Query().Get(":app")
a, err := app.GetByName(appName)
if err != nil {
return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
}
allowed := permission.Check(t, permission.PermAppUpdateUnitStatus,
contextsForApp(a)...,
)
if !allowed {
return permission.ErrUnauthorized
}
err = a.SetUnitStatus(unitName, status)
if _, ok := err.(*provision.UnitNotFoundError); ok {
return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
}
return err
}
|
https://github.com/glycerine/rbuf/blob/75b78581bebe959bc9a3df4c5f64e82c187d7531/atomic_rbuf.go#L132-L136
|
func (b *AtomicFixedSizeRingBuf) BytesTwo() TwoBuffers {
b.tex.Lock()
defer b.tex.Unlock()
return b.unatomic_BytesTwo()
}
|
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/compiler/compiler.go#L24-L40
|
func (c *BasicCompiler) Compile(ast *parser.AST) (*vm.ByteCode, error) {
ctx := &context{
ByteCode: vm.NewByteCode(),
}
for _, n := range ast.Root.Nodes {
compile(ctx, n)
}
// When we're done compiling, always append an END op
ctx.ByteCode.AppendOp(vm.TXOPEnd)
opt := &NaiveOptimizer{}
opt.Optimize(ctx.ByteCode)
ctx.ByteCode.Name = ast.Name
return ctx.ByteCode, nil
}
|
https://github.com/google/acme/blob/7c6dfc908d68ed254a16c126f6770f4d9d9352da/config.go#L168-L183
|
func printAccount(w io.Writer, a *acme.Account, kp string) {
	tw := tabwriter.NewWriter(w, 0, 8, 0, '\t', 0)
	fmt.Fprintln(tw, "URI:\t", a.URI)
	fmt.Fprintln(tw, "Key:\t", kp)
	fmt.Fprintln(tw, "Contact:\t", strings.Join(a.Contact, ", "))
	fmt.Fprintln(tw, "Terms:\t", a.CurrentTerms)
	agreed := a.AgreedTerms
	if a.AgreedTerms == "" {
		agreed = "no"
	} else if a.AgreedTerms == a.CurrentTerms {
		agreed = "yes"
	}
	fmt.Fprintln(tw, "Accepted:\t", agreed)
	// TODO: print authorization and certificates
	tw.Flush()
}
|
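An illustrative call of printAccount; the acme.Account literal is a made-up fixture, not data from the repository:

acct := &acme.Account{
	URI:          "https://example.com/acme/acct/1",
	Contact:      []string{"mailto:ops@example.com"},
	CurrentTerms: "https://example.com/terms/v2",
	AgreedTerms:  "https://example.com/terms/v1",
}
printAccount(os.Stdout, acct, "/home/user/.config/acme/account.key")
// Since AgreedTerms differs from CurrentTerms, the "Accepted:" row prints the agreed-terms URL.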
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/response.go#L419-L424
|
func ForwardedOperationResponse(project string, op *api.Operation) Response {
return &forwardedOperationResponse{
op: op,
project: project,
}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/dom.go#L753-L758
|
func MoveTo(nodeID cdp.NodeID, targetNodeID cdp.NodeID) *MoveToParams {
return &MoveToParams{
NodeID: nodeID,
TargetNodeID: targetNodeID,
}
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/deck/job_history.go#L207-L240
|
func (bucket gcsBucket) listBuildIDs(root string) ([]int64, error) {
	ids := []int64{}
	if strings.HasPrefix(root, logsPrefix) {
		dirs, err := bucket.listSubDirs(root)
		if err != nil {
			return ids, fmt.Errorf("failed to list GCS directories: %v", err)
		}
		for _, dir := range dirs {
			i, err := strconv.ParseInt(path.Base(dir), 10, 64)
			if err == nil {
				ids = append(ids, i)
			} else {
				logrus.Warningf("unrecognized directory name (expected int64): %s", dir)
			}
		}
	} else {
		keys, err := bucket.listAll(root)
		if err != nil {
			return ids, fmt.Errorf("failed to list GCS keys: %v", err)
		}
		for _, key := range keys {
			matches := linkRe.FindStringSubmatch(key)
			if len(matches) == 2 {
				i, err := strconv.ParseInt(matches[1], 10, 64)
				if err == nil {
					ids = append(ids, i)
				} else {
					logrus.Warningf("unrecognized file name (expected <int64>.txt): %s", key)
				}
			}
		}
	}
	return ids, nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/watch.go#L483-L647
|
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
// substreams marked to close but goroutine still running; needed for
// avoiding double-closing recvc on grpc stream teardown
closing := make(map[*watcherStream]struct{})
defer func() {
w.closeErr = closeErr
// shutdown substreams and resuming substreams
for _, ws := range w.substreams {
if _, ok := closing[ws]; !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
for _, ws := range w.resuming {
if _, ok := closing[ws]; ws != nil && !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
w.joinSubstreams()
for range closing {
w.closeSubstream(<-w.closingc)
}
w.wg.Wait()
w.owner.closeStream(w)
}()
// start a stream with the etcd grpc server
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
for {
select {
// Watch() requested
case req := <-w.reqc:
switch wreq := req.(type) {
case *watchRequest:
outc := make(chan WatchResponse, 1)
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)
// queue up for watcher creation/resume
w.resuming = append(w.resuming, ws)
if len(w.resuming) == 1 {
// head of resume queue, can register a new watcher
wc.Send(ws.initReq.toPB())
}
case *progressRequest:
wc.Send(wreq.toPB())
}
// new events from the watch client
case pbresp := <-w.respc:
if cur == nil || pbresp.Created || pbresp.Canceled {
cur = pbresp
} else if cur != nil && cur.WatchId == pbresp.WatchId {
// merge new events
cur.Events = append(cur.Events, pbresp.Events...)
// update "Fragment" field; last response with "Fragment" == false
cur.Fragment = pbresp.Fragment
}
switch {
case pbresp.Created:
// response to head of queue creation
if ws := w.resuming[0]; ws != nil {
w.addSubstream(pbresp, ws)
w.dispatchEvent(pbresp)
w.resuming[0] = nil
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
// reset for next iteration
cur = nil
case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc
close(ws.recvc)
closing[ws] = struct{}{}
}
// reset for next iteration
cur = nil
case cur.Fragment:
// watch response events are still fragmented
// continue to fetch next fragmented event arrival
continue
default:
// dispatch to appropriate watch stream
ok := w.dispatchEvent(cur)
// reset for next iteration
cur = nil
if ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
cancelSet = make(map[int64]struct{})
case <-w.ctx.Done():
return
case ws := <-w.closingc:
w.closeSubstream(ws)
delete(closing, ws)
// no more watchers on this stream, shutdown
if len(w.substreams)+len(w.resuming) == 0 {
return
}
}
}
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3rpc/header.go#L39-L49
|
func (h *header) fill(rh *pb.ResponseHeader) {
if rh == nil {
plog.Panic("unexpected nil resp.Header")
}
rh.ClusterId = uint64(h.clusterID)
rh.MemberId = uint64(h.memberID)
rh.RaftTerm = h.sg.Term()
if rh.Revision == 0 {
rh.Revision = h.rev()
}
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/gopherage/pkg/cov/filter.go#L29-L46
|
func FilterProfilePaths(profile []*cover.Profile, paths []string, include bool) ([]*cover.Profile, error) {
	parenPaths := make([]string, len(paths))
	for i, path := range paths {
		parenPaths[i] = "(" + path + ")"
	}
	joined := strings.Join(parenPaths, "|")
	re, err := regexp.Compile(joined)
	if err != nil {
		return nil, err
	}
	result := make([]*cover.Profile, 0, len(profile))
	for _, p := range profile {
		if re.MatchString(p.FileName) == include {
			result = append(result, p)
		}
	}
	return result, nil
}
|
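A hedged usage sketch for the filter above, given a parsed []*cover.Profile named profiles; the path patterns are examples, not from the repository:

filtered, err := FilterProfilePaths(profiles, []string{"vendor/", `\.pb\.go$`}, false)
if err != nil {
	log.Fatalf("bad filter pattern: %v", err)
}
// With include=false, filtered keeps only profiles whose FileName does NOT match any pattern.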
https://github.com/tsuru/tsuru/blob/2f7fd515c5dc25a58aec80f0e497c49e49581b3e/git/git.go#L54-L64
|
func OpenRepository(p string) (*Repository, error) {
	if !strings.HasSuffix(p, ".git") && !strings.HasSuffix(p, ".git/") {
		p = filepath.Join(p, ".git")
	}
	p = strings.TrimRight(p, "/")
	fi, err := os.Stat(filepath.Join(p, "config"))
	if err == nil && !fi.IsDir() {
		return &Repository{path: p}, nil
	}
	return nil, ErrRepositoryNotFound
}
|
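An illustrative call (the path is a placeholder); the function accepts either a working tree, to which it appends ".git", or a bare repository path ending in ".git":

repo, err := OpenRepository("/var/repositories/myapp")
if err != nil {
	log.Fatalf("open repository: %v", err) // ErrRepositoryNotFound when no .git/config file exists
}
_ = repo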
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/json-decode.go#L358-L412
|
func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
if !rv.CanAddr() {
panic("rv not addressable")
}
if printLog {
fmt.Println("(d) decodeReflectJSONStruct")
defer func() {
fmt.Printf("(d) -> err: %v\n", err)
}()
}
// Map all the fields(keys) to their blobs/bytes.
// NOTE: In decodeReflectBinaryStruct, we don't need to do this,
// since fields are encoded in order.
var rawMap = make(map[string]json.RawMessage)
err = json.Unmarshal(bz, &rawMap)
if err != nil {
return
}
for _, field := range info.Fields {
// Get field rv and info.
var frv = rv.Field(field.Index)
var finfo *TypeInfo
finfo, err = cdc.getTypeInfo_wlock(field.Type)
if err != nil {
return
}
// Get value from rawMap.
var valueBytes = rawMap[field.JSONName]
if len(valueBytes) == 0 {
// TODO: Since the Go stdlib's JSON codec allows case-insensitive
// keys perhaps we need to also do case-insensitive lookups here.
// So "Vanilla" and "vanilla" would both match to the same field.
// It is actually a security flaw with encoding/json library
// - See https://github.com/golang/go/issues/14750
// but perhaps we are aiming for as much compatibility here.
// JAE: I vote we depart from encoding/json, than carry a vuln.
// Set nil/zero on frv.
frv.Set(reflect.Zero(frv.Type()))
continue
}
// Decode into field rv.
err = cdc.decodeReflectJSON(valueBytes, finfo, frv, fopts)
if err != nil {
return
}
}
return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L2521-L2525
|
func (v NavigateReturns) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage26(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/goreleaser/archive/blob/9c6b0c177751034bab579499b81c69993ddfe563/archive.go#L22-L27
|
func New(file *os.File) Archive {
if filepath.Ext(file.Name()) == ".zip" {
return zip.New(file)
}
return tar.New(file)
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L4338-L4347
|
func (u OperationResult) GetTr() (result OperationResultTr, ok bool) {
armName, _ := u.ArmForSwitch(int32(u.Code))
if armName == "Tr" {
result = *u.Tr
ok = true
}
return
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/meta/bundle.go#L109-L137
|
func (b *Bundle) changes(target xdr.LedgerKey, maxOp int) (ret []xdr.LedgerEntryChange) {
	for _, change := range b.FeeMeta {
		key := change.LedgerKey()
		if !key.Equals(target) {
			continue
		}
		ret = append(ret, change)
	}
	for i, op := range b.TransactionMeta.MustOperations() {
		if i > maxOp {
			break
		}
		for _, change := range op.Changes {
			key := change.LedgerKey()
			if !key.Equals(target) {
				continue
			}
			ret = append(ret, change)
		}
	}
	return
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/codegen_client.go#L8004-L8006
|
func (api *API) PublicationLineageLocator(href string) *PublicationLineageLocator {
return &PublicationLineageLocator{Href(href), api}
}
|
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/cm15/examples/auditail/main.go#L72-L85
|
func fetchAuditEntries(client *cm15.API, filterEmail string) ([]*cm15.AuditEntry, error) {
auditLocator := client.AuditEntryLocator("/api/audit_entries")
var apiParams = rsapi.APIParams{"filter": []string{"user_email==" + filterEmail}}
auditEntries, err := auditLocator.Index(
tomorrow(), // End date
"100", // Limit
yesterday(), // Start date
apiParams,
)
if err != nil {
return auditEntries, err
}
return auditEntries, nil
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/cert/logging_conn.go#L94-L96
|
func (l *loggingConn) Write(b []byte) (n int, err error) {
return l.w.Write(b)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/dom.go#L944-L953
|
func (p *QuerySelectorParams) Do(ctx context.Context) (nodeID cdp.NodeID, err error) {
	// execute
	var res QuerySelectorReturns
	err = cdp.Execute(ctx, CommandQuerySelector, p, &res)
	if err != nil {
		return 0, err
	}
	return res.NodeID, nil
}
|
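A hedged sketch of driving this command from a chromedp action; the selector and surrounding setup are assumptions, not from the repository:

action := chromedp.ActionFunc(func(ctx context.Context) error {
	root, err := dom.GetDocument().Do(ctx)
	if err != nil {
		return err
	}
	nodeID, err := dom.QuerySelector(root.NodeID, "a.first-link").Do(ctx)
	if err != nil {
		return err
	}
	_ = nodeID // use the node ID with further dom commands
	return nil
})
// action would then be passed to chromedp.Run alongside navigation steps.
_ = action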
https://github.com/rightscale/rsc/blob/96079a1ee7238dae9cbb7efa77dd94a479d217bd/ss/ssm/codegen_client.go#L1142-L1144
|
func (r *ScheduledAction) Locator(api *API) *ScheduledActionLocator {
return api.ScheduledActionLocator(r.Href)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/storage/easyjson.go#L164-L168
|
func (v *UntrackIndexedDBForOriginParams) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdprotoStorage1(&r, v)
return r.Error()
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/enterprise/server/api_server.go#L255-L281
|
func (a *apiServer) GetState(ctx context.Context, req *ec.GetStateRequest) (resp *ec.GetStateResponse, retErr error) {
a.LogReq(req)
defer func(start time.Time) { a.pachLogger.Log(req, resp, retErr, time.Since(start)) }(time.Now())
expiration, ok := a.enterpriseExpiration.Load().(time.Time)
if !ok {
return nil, fmt.Errorf("could not retrieve enterprise expiration time")
}
if expiration.IsZero() {
return &ec.GetStateResponse{State: ec.State_NONE}, nil
}
expirationProto, err := types.TimestampProto(expiration)
if err != nil {
return nil, fmt.Errorf("could not convert expiration time \"%s\" to response proto: %s", expiration.String(), err.Error())
}
resp = &ec.GetStateResponse{
Info: &ec.TokenInfo{
Expires: expirationProto,
},
}
if time.Now().After(expiration) {
resp.State = ec.State_EXPIRED
} else {
resp.State = ec.State_ACTIVE
}
return resp, nil
}
|
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/errors.go#L70-L97
|
func (a *App) PanicHandler(next Handler) Handler {
	return func(c Context) error {
		defer func() { //catch or finally
			r := recover()
			var err error
			if r != nil { //catch
				switch t := r.(type) {
				case error:
					err = t
				case string:
					err = errors.New(t)
				default:
					err = errors.New(fmt.Sprint(t))
				}
				events.EmitError(events.ErrPanic, err,
					map[string]interface{}{
						"context": c,
						"app":     a,
					},
				)
				eh := a.ErrorHandlers.Get(500)
				eh(500, err, c)
			}
		}()
		return next(c)
	}
}
|
https://github.com/brankas/sentinel/blob/0ff081867c31a45cb71f5976ea6144fd06a557b5/opts.go#L106-L111
|
func Errorf(f func(string, ...interface{})) Option {
return func(s *Sentinel) error {
s.errf = f
return nil
}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L4888-L4892
|
func (v EventShadowRootPushed) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoDom55(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/read_only.go#L52-L59
|
func (ro *readOnly) addRequest(index uint64, m pb.Message) {
	ctx := string(m.Entries[0].Data)
	if _, ok := ro.pendingReadIndex[ctx]; ok {
		return
	}
	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/page/easyjson.go#L3425-L3429
|
func (v GetLayoutMetricsReturns) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoPage36(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/lessor.go#L613-L631
|
func (le *lessor) checkpointScheduledLeases() {
	var cps []*pb.LeaseCheckpoint
	// rate limit
	for i := 0; i < leaseCheckpointRate/2; i++ {
		le.mu.Lock()
		if le.isPrimary() {
			cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize)
		}
		le.mu.Unlock()
		if len(cps) != 0 {
			le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps})
		}
		if len(cps) < maxLeaseCheckpointBatchSize {
			return
		}
	}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/css.go#L531-L537
|
func SetKeyframeKey(styleSheetID StyleSheetID, rangeVal *SourceRange, keyText string) *SetKeyframeKeyParams {
return &SetKeyframeKeyParams{
StyleSheetID: styleSheetID,
Range: rangeVal,
KeyText: keyText,
}
}
|
https://github.com/uber/tchannel-go/blob/3c9ced6d946fe2fec6c915703a533e966c09e07a/thrift/headers.go#L32-L65
|
func WriteHeaders(w io.Writer, headers map[string]string) error {
	// TODO(prashant): Since we are not writing length-prefixed data here,
	// we can write out to the buffer, and if it fills up, flush it.
	// Right now, we calculate the size of the required buffer and write it out.
	// Calculate the size of the buffer that we need.
	size := 2
	for k, v := range headers {
		size += 4 /* size of key/value lengths */
		size += len(k) + len(v)
	}
	buf := make([]byte, size)
	writeBuffer := typed.NewWriteBuffer(buf)
	writeBuffer.WriteUint16(uint16(len(headers)))
	for k, v := range headers {
		writeBuffer.WriteLen16String(k)
		writeBuffer.WriteLen16String(v)
	}
	if err := writeBuffer.Err(); err != nil {
		return err
	}
	// Safety check to ensure the bytes written calculation is correct.
	if writeBuffer.BytesWritten() != size {
		return fmt.Errorf(
			"writeHeaders size calculation wrong, expected to write %v bytes, only wrote %v bytes",
			size, writeBuffer.BytesWritten())
	}
	_, err := writeBuffer.FlushTo(w)
	return err
}
|
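A small illustrative call writing a header map into a bytes.Buffer (keys and values are placeholders):

var buf bytes.Buffer
if err := WriteHeaders(&buf, map[string]string{"as": "thrift", "cn": "example"}); err != nil {
	log.Fatalf("write headers: %v", err)
}
// buf now holds a uint16 pair count followed by len16-prefixed key/value strings.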
https://github.com/256dpi/fire/blob/fa66e74352b30b9a4c730f7b8dc773302941b0fb/coal/stream.go#L52-L78
|
func OpenStream(store *Store, model Model, token []byte, receiver Receiver, opened func(), manager func(error) bool) *Stream {
// prepare resume token
var resumeToken *bson.Raw
// create resume token if available
if token != nil {
resumeToken = &bson.Raw{
Kind: bson.ElementDocument,
Data: token,
}
}
// create stream
s := &Stream{
store: store,
model: model,
token: resumeToken,
receiver: receiver,
opened: opened,
manager: manager,
}
// open stream
go s.open()
return s
}
|
https://github.com/jhillyerd/enmime/blob/874cc30e023f36bd1df525716196887b0f04851b/envelope.go#L350-L406
|
func ensureCommaDelimitedAddresses(s string) string {
	// This normalizes the whitespace, but may interfere with CFWS (comments with folding whitespace)
	// RFC-5322 3.4.0:
	// because some legacy implementations interpret the comment,
	// comments generally SHOULD NOT be used in address fields
	// to avoid confusing such implementations.
	s = strings.Join(strings.Fields(s), " ")
	inQuotes := false
	inDomain := false
	escapeSequence := false
	sb := strings.Builder{}
	for _, r := range s {
		if escapeSequence {
			escapeSequence = false
			sb.WriteRune(r)
			continue
		}
		if r == '"' {
			inQuotes = !inQuotes
			sb.WriteRune(r)
			continue
		}
		if inQuotes {
			if r == '\\' {
				escapeSequence = true
				sb.WriteRune(r)
				continue
			}
		} else {
			if r == '@' {
				inDomain = true
				sb.WriteRune(r)
				continue
			}
			if inDomain {
				if r == ';' {
					sb.WriteRune(r)
					break
				}
				if r == ',' {
					inDomain = false
					sb.WriteRune(r)
					continue
				}
				if r == ' ' {
					inDomain = false
					sb.WriteRune(',')
					sb.WriteRune(r)
					continue
				}
			}
		}
		sb.WriteRune(r)
	}
	return sb.String()
}
|
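Tracing the loop above on simple inputs: addresses separated only by spaces gain the missing commas, while already comma-delimited input passes through unchanged. Illustrative expectation, not a test from the repository:

fmt.Println(ensureCommaDelimitedAddresses("alice@example.com bob@example.org"))
// expected: alice@example.com, bob@example.org
fmt.Println(ensureCommaDelimitedAddresses("alice@example.com, bob@example.org"))
// expected: alice@example.com, bob@example.org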
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/cmd/aefix/fix.go#L352-L355
|
func isCall(t ast.Expr, pkg, name string) bool {
call, ok := t.(*ast.CallExpr)
return ok && isPkgDot(call.Fun, pkg, name)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/animation/easyjson.go#L858-L862
|
func (v KeyframeStyle) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoAnimation8(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/llgcode/draw2d/blob/f52c8a71aff06ab8df41843d33ab167b36c971cd/draw2dbase/stack_gc.go#L54-L68
|
func NewStackGraphicContext() *StackGraphicContext {
gc := &StackGraphicContext{}
gc.Current = new(ContextStack)
gc.Current.Tr = draw2d.NewIdentityMatrix()
gc.Current.Path = new(draw2d.Path)
gc.Current.LineWidth = 1.0
gc.Current.StrokeColor = image.Black
gc.Current.FillColor = image.White
gc.Current.Cap = draw2d.RoundCap
gc.Current.FillRule = draw2d.FillRuleEvenOdd
gc.Current.Join = draw2d.RoundJoin
gc.Current.FontSize = 10
gc.Current.FontData = DefaultFontData
return gc
}
|
https://github.com/blacklabeldata/namedtuple/blob/c341f1db44f30b8164294aa8605ede42be604aba/schema/parser.go#L28-L79
|
func LoadDirectory(dir string, parser Parser) (err error) {
	// Open dir for reading
	d, err := os.Open(dir)
	if err != nil {
		return
	}
	// Iterate over all the files in the directory.
	for {
		// Only read 128 files at a time.
		if fis, err := d.Readdir(128); err == nil {
			// Read each entry
			for _, fi := range fis {
				// fmt.Println("%#v", fi)
				// If the FileInfo is a directory, read the directory.
				// Otherwise, read the file.
				switch fi.IsDir() {
				case true:
					// return error if there is one
					if err := LoadDirectory(fi.Name(), parser); err != nil {
						return err
					}
				case false:
					// All schema files should end with .nt
					if !strings.HasSuffix(fi.Name(), ".ent") {
						break
					}
					// Read the file
					if _, err := LoadFile(filepath.Join(dir, fi.Name()), parser); err != nil {
						return err
					}
				}
			}
		} else if err == io.EOF {
			// If there are no more files in the directory, break.
			break
		} else {
			// If there is any other error, return it.
			return err
		}
	}
	// If you have reached this far, you are done.
	return nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/debugger.go#L473-L476
|
func (p SearchInContentParams) WithCaseSensitive(caseSensitive bool) *SearchInContentParams {
p.CaseSensitive = caseSensitive
return &p
}
|
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L434-L445
|
func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator {
iters := make([]*Iterator, len(tbls))
for i := 0; i < len(tbls); i++ {
iters[i] = tbls[i].NewIterator(reversed)
}
return &ConcatIterator{
reversed: reversed,
iters: iters,
tables: tbls,
idx: -1, // Not really necessary because s.it.Valid()=false, but good to have.
}
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/util.go#L133-L142
|
func compact(c *v3.Client, rev int64) {
fmt.Printf("Compacting with revision %d\n", rev)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err := c.Compact(ctx, rev, v3.WithCompactPhysical())
cancel()
if err != nil {
ExitWithError(ExitError, err)
}
fmt.Printf("Compacted with revision %d\n", rev)
}
|
https://github.com/tendermint/go-amino/blob/dc14acf9ef15f85828bfbc561ed9dd9d2a284885/reflect.go#L181-L192
|
func constructConcreteType(cinfo *TypeInfo) (crv, irvSet reflect.Value) {
// Construct new concrete type.
if cinfo.PointerPreferred {
cPtrRv := reflect.New(cinfo.Type)
crv = cPtrRv.Elem()
irvSet = cPtrRv
} else {
crv = reflect.New(cinfo.Type).Elem()
irvSet = crv
}
return
}
|
https://github.com/nwaples/rardecode/blob/197ef08ef68c4454ae5970a9c2692d6056ceb8d7/ppm_model.go#L262-L291
|
func (c *context) expandStates() []state {
states := c.states()
ns := len(states)
if ns == 1 {
s := states[0]
n := c.a.allocUnits(1)
if n == 0 {
return nil
}
c.setStatesIndex(n)
states = c.a.states[n:]
states[0] = s
} else if ns&0x1 == 0 {
u := ns >> 1
i1 := units2Index[u]
i2 := units2Index[u+1]
if i1 != i2 {
n := c.a.allocUnits(i2)
if n == 0 {
return nil
}
copy(c.a.states[n:], states)
c.a.addFreeBlock(c.statesIndex(), i1)
c.setStatesIndex(n)
states = c.a.states[n:]
}
}
c.setNumStates(ns + 1)
return states[:ns+1]
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/image/unparsed.go#L86-L95
|
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
i.cachedSignatures = sigs
}
return i.cachedSignatures, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/spyglass/lenses/buildlog/lens.go#L147-L170
|
func (lens Lens) Callback(artifacts []lenses.Artifact, resourceDir string, data string) string {
var request LineRequest
err := json.Unmarshal([]byte(data), &request)
if err != nil {
return "failed to unmarshal request"
}
artifact, ok := artifactByName(artifacts, request.Artifact)
if !ok {
return "no artifact named " + request.Artifact
}
var lines []string
if request.Offset == 0 && request.Length == -1 {
lines, err = logLinesAll(artifact)
} else {
lines, err = logLines(artifact, request.Offset, request.Length)
}
if err != nil {
return fmt.Sprintf("failed to retrieve log lines: %v", err)
}
logLines := highlightLines(lines, request.StartLine)
return executeTemplate(resourceDir, "line group", logLines)
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pkg/obj/obj.go#L183-L202
|
func NewGoogleClientFromSecret(bucket string) (Client, error) {
var err error
if bucket == "" {
bucket, err = readSecretFile("/google-bucket")
if err != nil {
return nil, fmt.Errorf("google-bucket not found")
}
}
cred, err := readSecretFile("/google-cred")
if err != nil {
return nil, fmt.Errorf("google-cred not found")
}
var opts []option.ClientOption
if cred != "" {
opts = append(opts, option.WithCredentialsFile(secretFile("/google-cred")))
} else {
opts = append(opts, option.WithTokenSource(google.ComputeTokenSource("")))
}
return NewGoogleClient(bucket, opts)
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/openshift/openshift-copies.go#L756-L781
|
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
	if host == "" {
		return nil, errors.Errorf("host must be a URL or a host:port pair")
	}
	base := host
	hostURL, err := url.Parse(base)
	if err != nil {
		return nil, err
	}
	if hostURL.Scheme == "" {
		scheme := "http://"
		if defaultTLS {
			scheme = "https://"
		}
		hostURL, err = url.Parse(scheme + base)
		if err != nil {
			return nil, err
		}
		if hostURL.Path != "" && hostURL.Path != "/" {
			return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
		}
	}
	// REMOVED: versionedAPIPath computation.
	return hostURL, nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/debugger/easyjson.go#L1152-L1156
|
func (v *SetBreakpointOnFunctionCallReturns) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdprotoDebugger12(&r, v)
return r.Error()
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/shared/logger/log.go#L69-L73
|
func Infof(format string, args ...interface{}) {
if Log != nil {
Log.Info(fmt.Sprintf(format, args...))
}
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L681-L704
|
func (r *raft) tickHeartbeat() {
	r.heartbeatElapsed++
	r.electionElapsed++
	if r.electionElapsed >= r.electionTimeout {
		r.electionElapsed = 0
		if r.checkQuorum {
			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
		}
		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
		if r.state == StateLeader && r.leadTransferee != None {
			r.abortLeaderTransfer()
		}
	}
	if r.state != StateLeader {
		return
	}
	if r.heartbeatElapsed >= r.heartbeatTimeout {
		r.heartbeatElapsed = 0
		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
	}
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/enterprise/server/api_server.go#L72-L88
|
func NewEnterpriseServer(env *serviceenv.ServiceEnv, etcdPrefix string) (ec.APIServer, error) {
s := &apiServer{
pachLogger: log.NewLogger("enterprise.API"),
env: env,
enterpriseToken: col.NewCollection(
env.GetEtcdClient(),
etcdPrefix, // only one collection--no extra prefix needed
nil,
&ec.EnterpriseRecord{},
nil,
nil,
),
}
s.enterpriseExpiration.Store(time.Time{})
go s.watchEnterpriseToken(etcdPrefix)
return s, nil
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/comment_counter.go#L62-L75
|
func (c *CommentCounterPlugin) ReceiveComment(comment sql.Comment) []Point {
points := []Point{}
for _, matcher := range c.matcher {
if matcher.MatchString(comment.Body) {
points = append(points, Point{
Values: map[string]interface{}{
"comment": 1,
},
Date: comment.CommentCreatedAt,
})
}
}
return points
}
|
https://github.com/lestrrat-go/xslate/blob/6a6eb0fce8ab7407a3e0460af60758e5d6f2b9f8/vm/state.go#L82-L88
|
func (st *State) CurrentFrame() *frame.Frame {
x, err := st.frames.Top()
if err != nil {
return nil
}
return x.(*frame.Frame)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/dom/easyjson.go#L4639-L4643
|
func (v *GetAttributesReturns) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdprotoDom52(&r, v)
return r.Error()
}
|
https://github.com/weaveworks/mesh/blob/512bdb7b3cb7b2c939fcd0ee434d48b6732ecc39/connection.go#L146-L156
|
func (conn *LocalConnection) shutdown(err error) {
	// err should always be a real error, even if only io.EOF
	if err == nil {
		panic("nil error")
	}
	select {
	case conn.errorChan <- err:
	default:
	}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/easyjson.go#L4179-L4183
|
func (v GetStyleSheetTextParams) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoCss36(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/naoina/genmai/blob/78583835e1e41e3938e1ddfffd7101f8ad27fae0/genmai.go#L1174-L1196
|
func (c *Condition) OrderBy(table, col interface{}, order ...interface{}) *Condition {
	order = append([]interface{}{table, col}, order...)
	orderbys := make([]orderBy, 0, 1)
	for len(order) > 0 {
		o, rest := order[0], order[1:]
		if _, ok := o.(string); ok {
			if len(rest) < 1 {
				panic(fmt.Errorf("OrderBy: few arguments"))
			}
			// OrderBy("column", genmai.DESC)
			orderbys = append(orderbys, c.orderBy(nil, o, rest[0]))
			order = rest[1:]
			continue
		}
		if len(rest) < 2 {
			panic(fmt.Errorf("OrderBy: few arguments"))
		}
		// OrderBy(tbl{}, "column", genmai.DESC)
		orderbys = append(orderbys, c.orderBy(o, rest[0], rest[1]))
		order = rest[2:]
	}
	return c.appendQuery(300, OrderBy, orderbys)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/types.go#L46-L48
|
func (t StyleSheetOrigin) MarshalEasyJSON(out *jwriter.Writer) {
out.String(string(t))
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/tide.go#L1090-L1133
|
func (c *changedFilesAgent) prChanges(pr *PullRequest) config.ChangedFilesProvider {
return func() ([]string, error) {
cacheKey := changeCacheKey{
org: string(pr.Repository.Owner.Login),
repo: string(pr.Repository.Name),
number: int(pr.Number),
sha: string(pr.HeadRefOID),
}
c.RLock()
changedFiles, ok := c.changeCache[cacheKey]
if ok {
c.RUnlock()
c.Lock()
c.nextChangeCache[cacheKey] = changedFiles
c.Unlock()
return changedFiles, nil
}
if changedFiles, ok = c.nextChangeCache[cacheKey]; ok {
c.RUnlock()
return changedFiles, nil
}
c.RUnlock()
// We need to query the changes from GitHub.
changes, err := c.ghc.GetPullRequestChanges(
string(pr.Repository.Owner.Login),
string(pr.Repository.Name),
int(pr.Number),
)
if err != nil {
return nil, fmt.Errorf("error getting PR changes for #%d: %v", int(pr.Number), err)
}
changedFiles = make([]string, 0, len(changes))
for _, change := range changes {
changedFiles = append(changedFiles, change.Filename)
}
c.Lock()
c.nextChangeCache[cacheKey] = changedFiles
c.Unlock()
return changedFiles, nil
}
}
|
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L337-L343
|
func decodeConfiguration(buf []byte) Configuration {
var configuration Configuration
if err := decodeMsgPack(buf, &configuration); err != nil {
panic(fmt.Errorf("failed to decode configuration: %v", err))
}
return configuration
}
|
https://github.com/opentracing/opentracing-go/blob/659c90643e714681897ec2521c60567dd21da733/log/field.go#L108-L114
|
func Float32(key string, val float32) Field {
	return Field{
		key:        key,
		fieldType:  float32Type,
		numericVal: int64(math.Float32bits(val)),
	}
}
|
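A hedged example of how such a field is typically consumed when logging to an OpenTracing span (span acquisition omitted; assumes an active span named span, and placeholder keys/values):

span.LogFields(
	log.Float32("cache.hit_ratio", 0.87),
	log.String("cache.name", "sessions"),
)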
https://github.com/aphistic/gomol/blob/1546845ba714699f76f484ad3af64cf0503064d1/base.go#L405-L407
|
func (b *Base) Info(msg string) error {
return b.Log(LevelInfo, nil, msg)
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/overlay/easyjson.go#L1661-L1665
|
func (v GetHighlightObjectForTestParams) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoOverlay16(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/containers/image/blob/da9ab3561ad2031aeb5e036b7cf2755d4e246fec/manifest/docker_schema2.go#L225-L246
|
func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
}
s2 := &Schema2Image{}
if err := json.Unmarshal(config, s2); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
Tag: "",
Created: &s2.Created,
DockerVersion: s2.DockerVersion,
Architecture: s2.Architecture,
Os: s2.OS,
Layers: layerInfosToStrings(m.LayerInfos()),
}
if s2.Config != nil {
i.Labels = s2.Config.Labels
}
return i, nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/browser/browser.go#L382-L387
|
func SetWindowBounds(windowID WindowID, bounds *Bounds) *SetWindowBoundsParams {
return &SetWindowBoundsParams{
WindowID: windowID,
Bounds: bounds,
}
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/runtime/easyjson.go#L1970-L1974
|
func (v ObjectPreview) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoRuntime17(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L136-L140
|
func snapshotName(term, index uint64) string {
	now := time.Now()
	msec := now.UnixNano() / int64(time.Millisecond)
	return fmt.Sprintf("%d-%d-%d", term, index, msec)
}
|
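Illustrative output of the helper above (the millisecond timestamp is time-dependent; presumably the string is used to name the snapshot on disk):

name := snapshotName(7, 1204)
fmt.Println(name) // e.g. "7-1204-1565123456789" -> term-index-milliseconds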
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/shared/generate/db/lex.go#L30-L32
|
func entityPost(entity string) string {
return fmt.Sprintf("%sPost", lex.Capital(lex.Plural(entity)))
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pfs.go#L1443-L1462
|
func (w *PutObjectWriteCloserAsync) Write(p []byte) (int, error) {
select {
case err := <-w.errChan:
if err != nil {
return 0, grpcutil.ScrubGRPC(err)
}
default:
for len(w.buf)+len(p) > cap(w.buf) {
// Write the bytes that fit into w.buf, then
// remove those bytes from p.
i := cap(w.buf) - len(w.buf)
w.buf = append(w.buf, p[:i]...)
p = p[i:]
w.writeChan <- w.buf
w.buf = grpcutil.GetBuffer()[:0]
}
w.buf = append(w.buf, p...)
}
return len(p), nil
}
|
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/mutex.go#L42-L78
|
func (m *Mutex) Lock(ctx context.Context) error {
	s := m.s
	client := m.s.Client()
	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
	// put self in lock waiters via myKey; oldest waiter holds lock
	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
	// reuse key in case this session already holds the lock
	get := v3.OpGet(m.myKey)
	// fetch current holder to complete uncontended path with only one RPC
	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
	if err != nil {
		return err
	}
	m.myRev = resp.Header.Revision
	if !resp.Succeeded {
		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
	}
	// if no key on prefix / the minimum rev is key, already hold the lock
	ownerKey := resp.Responses[1].GetResponseRange().Kvs
	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
		m.hdr = resp.Header
		return nil
	}
	// wait for deletion revisions prior to myKey
	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
	// release lock key if wait failed
	if werr != nil {
		m.Unlock(client.Ctx())
	} else {
		m.hdr = hdr
	}
	return werr
}
|
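A hedged sketch of the typical client-side usage of this mutex via the public concurrency package (endpoint and key prefix are placeholders, not from the repository):

cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
if err != nil {
	log.Fatal(err)
}
defer cli.Close()
sess, err := concurrency.NewSession(cli)
if err != nil {
	log.Fatal(err)
}
defer sess.Close()
mu := concurrency.NewMutex(sess, "/locks/my-job")
if err := mu.Lock(context.TODO()); err != nil {
	log.Fatal(err)
}
defer mu.Unlock(context.TODO())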
https://github.com/qor/render/blob/63566e46f01b134ae9882a59a06518e82a903231/template.go#L41-L44
|
func (tmpl *Template) Funcs(funcMap template.FuncMap) *Template {
tmpl.funcMap = funcMap
return tmpl
}
|
https://github.com/codemodus/kace/blob/e3ecf78ee2a5e58652cb2be34d3159ad9c89acf8/kace.go#L115-L117
|
func (k *Kace) KebabUpper(s string) string {
return delimitedCase(s, kebabDelim, true)
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/pkg/io/opener.go#L55-L69
|
func NewOpener(ctx context.Context, creds string) (Opener, error) {
var options []option.ClientOption
if creds != "" {
options = append(options, option.WithCredentialsFile(creds))
}
client, err := storage.NewClient(ctx, options...)
if err != nil {
if creds != "" {
return nil, err
}
logrus.WithError(err).Debug("Cannot load application default gcp credentials")
client = nil
}
return opener{gcs: client}, nil
}
|
https://github.com/aphistic/gomol/blob/1546845ba714699f76f484ad3af64cf0503064d1/base.go#L474-L476
|
func (b *Base) Errf(msg string, a ...interface{}) error {
return b.Errorf(msg, a...)
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/server/pfs/server/driver.go#L800-L973
|
func (d *driver) propagateCommit(stm col.STM, branch *pfs.Branch) error {
if branch == nil {
return fmt.Errorf("cannot propagate nil branch")
}
// 'subvBranchInfos' is the collection of downstream branches that may get a
// new commit. Populate subvBranchInfo
var subvBranchInfos []*pfs.BranchInfo
branchInfo := &pfs.BranchInfo{}
if err := d.branches(branch.Repo.Name).ReadWrite(stm).Get(branch.Name, branchInfo); err != nil {
return err
}
subvBranchInfos = append(subvBranchInfos, branchInfo) // add 'branch' itself
for _, subvBranch := range branchInfo.Subvenance {
subvBranchInfo := &pfs.BranchInfo{}
if err := d.branches(subvBranch.Repo.Name).ReadWrite(stm).Get(subvBranch.Name, subvBranchInfo); err != nil {
return err
}
subvBranchInfos = append(subvBranchInfos, subvBranchInfo)
}
var head *pfs.CommitInfo
if branchInfo.Head != nil {
head = &pfs.CommitInfo{}
if err := d.commits(branch.Repo.Name).ReadWrite(stm).Get(branchInfo.Head.ID, head); err != nil {
return err
}
}
// Sort subvBranchInfos so that upstream branches are processed before their
// descendants. This guarantees that if branch B is provenant on branch A, we
// create a new commit in A before creating a new commit in B provenant on the
// (new) HEAD of A.
sort.Slice(subvBranchInfos, func(i, j int) bool { return len(subvBranchInfos[i].Provenance) < len(subvBranchInfos[j].Provenance) })
// Iterate through downstream branches and determine which need a new commit.
nextSubvBranch:
for _, branchInfo := range subvBranchInfos {
branch := branchInfo.Branch
repo := branch.Repo
commits := d.commits(repo.Name).ReadWrite(stm)
branches := d.branches(repo.Name).ReadWrite(stm)
// Compute the full provenance of hypothetical new output commit to decide
// if we need it
key := path.Join
commitProvMap := make(map[string]*pfs.CommitProvenance)
for _, provBranch := range branchInfo.Provenance {
provBranchInfo := &pfs.BranchInfo{}
if err := d.branches(provBranch.Repo.Name).ReadWrite(stm).Get(provBranch.Name, provBranchInfo); err != nil && !col.IsErrNotFound(err) {
return fmt.Errorf("could not read branch %s/%s: %v", provBranch.Repo.Name, provBranch.Name, err)
}
if provBranchInfo.Head == nil {
continue
}
// We need to key on both the commit id and the branch name, so that branches with a shared commit are both represented in the provenance
commitProvMap[key(provBranchInfo.Head.ID, provBranch.Name)] = &pfs.CommitProvenance{
Commit: provBranchInfo.Head,
Branch: provBranch,
}
// Because provenance is stored as a transitive closure, we don't
// need to inspect provBranchInfo.HEAD's provenance. Every commit
// in there will be the HEAD of some other provBranchInfo.
}
if head != nil {
for _, commitProv := range head.Provenance {
commitProvMap[key(commitProv.Commit.ID, commitProv.Branch.Name)] = commitProv
}
}
if len(commitProvMap) == 0 {
// no input commits to process; don't create a new output commit
continue nextSubvBranch
}
// 'branch' may already have a HEAD commit, so compute whether the new
// output commit would have the same provenance as the existing HEAD
// commit. If so, a new output commit would be a duplicate, so don't create
// it.
if branchInfo.Head != nil {
branchHeadInfo := &pfs.CommitInfo{}
if err := commits.Get(branchInfo.Head.ID, branchHeadInfo); err != nil {
return pfsserver.ErrCommitNotFound{branchInfo.Head}
}
headIsSubset := false
for _, v := range commitProvMap {
matched := false
for _, c := range branchHeadInfo.Provenance {
if c.Commit.ID == v.Commit.ID {
matched = true
}
}
headIsSubset = matched
if !headIsSubset {
break
}
}
if len(branchHeadInfo.Provenance) >= len(commitProvMap) && headIsSubset {
// existing HEAD commit is the same new output commit would be; don't
// create new commit
continue nextSubvBranch
}
}
// If the only branches in the hypothetical output commit's provenance are
// in the 'spec' repo, creating it would mean creating a confusing
// "dummy" job with no non-spec input data. If this is the case, don't
// create a new output commit
allSpec := true
for _, p := range commitProvMap {
if p.Branch.Repo.Name != ppsconsts.SpecRepo {
allSpec = false
break
}
}
if allSpec {
// Only input data is PipelineInfo; don't create new output commit
continue nextSubvBranch
}
// *All checks passed* start a new output commit in 'subvBranch'
newCommit := &pfs.Commit{
Repo: branch.Repo,
ID: uuid.NewWithoutDashes(),
}
newCommitInfo := &pfs.CommitInfo{
Commit: newCommit,
Started: now(),
}
// Set 'newCommit's ParentCommit, 'branch.Head's ChildCommits and 'branch.Head'
newCommitInfo.ParentCommit = branchInfo.Head
if branchInfo.Head != nil {
parentCommitInfo := &pfs.CommitInfo{}
if err := commits.Update(newCommitInfo.ParentCommit.ID, parentCommitInfo, func() error {
parentCommitInfo.ChildCommits = append(parentCommitInfo.ChildCommits, newCommit)
return nil
}); err != nil {
return err
}
}
branchInfo.Head = newCommit
branchInfo.Name = branch.Name // set in case 'branch' is new
branchInfo.Branch = branch // set in case 'branch' is new
newCommitInfo.Branch = branch
if err := branches.Put(branch.Name, branchInfo); err != nil {
return err
}
// Set provenance and upstream subvenance (appendSubvenance needs
// newCommitInfo.ParentCommit to extend the correct subvenance range)
for _, prov := range commitProvMap {
// set provenance of 'newCommit'
newCommitInfo.Provenance = append(newCommitInfo.Provenance, prov)
// update subvenance of 'prov'
provCommitInfo := &pfs.CommitInfo{}
if err := d.commits(prov.Commit.Repo.Name).ReadWrite(stm).Update(prov.Commit.ID, provCommitInfo, func() error {
appendSubvenance(provCommitInfo, newCommitInfo)
return nil
}); err != nil {
return err
}
}
// finally create open 'commit'
if err := commits.Create(newCommit.ID, newCommitInfo); err != nil {
return err
}
if err := d.openCommits.ReadWrite(stm).Put(newCommit.ID, newCommit); err != nil {
return err
}
}
return nil
}
|
https://github.com/piotrkowalczuk/mnemosyne/blob/66d59c3c5b886e8e869915bb76257bcba4a47250/internal/service/logger/logger.go#L252-L258
|
func (rl reportLocation) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("filePath", rl.FilePath)
enc.AddInt("lineNumber", rl.LineNumber)
enc.AddString("functionName", rl.FunctionName)
return nil
}
|
https://github.com/stellar/go-stellar-base/blob/79c570612c0b461db178aa8949d9f13cafc2a7c9/xdr/xdr_generated.go#L2193-L2207
|
func (u Memo) ArmForSwitch(sw int32) (string, bool) {
	switch MemoType(sw) {
	case MemoTypeMemoNone:
		return "", true
	case MemoTypeMemoText:
		return "Text", true
	case MemoTypeMemoId:
		return "Id", true
	case MemoTypeMemoHash:
		return "Hash", true
	case MemoTypeMemoReturn:
		return "RetHash", true
	}
	return "-", false
}
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/container_lxc.go#L7534-L7636
|
func (c *containerLXC) createNetworkDevice(name string, m types.Device) (string, error) {
var dev, n1 string
if shared.StringInSlice(m["nictype"], []string{"bridged", "p2p", "macvlan"}) {
// Host Virtual NIC name
if m["host_name"] != "" {
n1 = m["host_name"]
} else {
n1 = deviceNextVeth()
}
}
if m["nictype"] == "sriov" {
dev = m["host_name"]
}
// Handle bridged and p2p
if shared.StringInSlice(m["nictype"], []string{"bridged", "p2p"}) {
n2 := deviceNextVeth()
_, err := shared.RunCommand("ip", "link", "add", "dev", n1, "type", "veth", "peer", "name", n2)
if err != nil {
return "", fmt.Errorf("Failed to create the veth interface: %s", err)
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n1, "up")
if err != nil {
return "", fmt.Errorf("Failed to bring up the veth interface %s: %s", n1, err)
}
if m["nictype"] == "bridged" {
err = networkAttachInterface(m["parent"], n1)
if err != nil {
deviceRemoveInterface(n2)
return "", fmt.Errorf("Failed to add interface to bridge: %s", err)
}
// Attempt to disable router advertisement acceptance
networkSysctlSet(fmt.Sprintf("ipv6/conf/%s/accept_ra", n1), "0")
}
dev = n2
}
// Handle physical and macvlan
if shared.StringInSlice(m["nictype"], []string{"macvlan", "physical"}) {
// Deal with VLAN
device := m["parent"]
if m["vlan"] != "" {
device = networkGetHostDevice(m["parent"], m["vlan"])
if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", device)) {
_, err := shared.RunCommand("ip", "link", "add", "link", m["parent"], "name", device, "up", "type", "vlan", "id", m["vlan"])
if err != nil {
return "", err
}
// Attempt to disable IPv6 router advertisement acceptance
networkSysctlSet(fmt.Sprintf("ipv6/conf/%s/accept_ra", device), "0")
}
}
// Handle physical
if m["nictype"] == "physical" {
dev = device
}
// Handle macvlan
if m["nictype"] == "macvlan" {
_, err := shared.RunCommand("ip", "link", "add", "dev", n1, "link", device, "type", "macvlan", "mode", "bridge")
if err != nil {
return "", fmt.Errorf("Failed to create the new macvlan interface: %s", err)
}
dev = n1
}
}
// Set the MAC address
if m["hwaddr"] != "" {
_, err := shared.RunCommand("ip", "link", "set", "dev", dev, "address", m["hwaddr"])
if err != nil {
deviceRemoveInterface(dev)
return "", fmt.Errorf("Failed to set the MAC address: %s", err)
}
}
// Bring the interface up
_, err := shared.RunCommand("ip", "link", "set", "dev", dev, "up")
if err != nil {
deviceRemoveInterface(dev)
return "", fmt.Errorf("Failed to bring up the interface: %s", err)
}
// Set the filter
if m["nictype"] == "bridged" && shared.IsTrue(m["security.mac_filtering"]) {
err = c.createNetworkFilter(dev, m["parent"], m["hwaddr"])
if err != nil {
return "", err
}
}
return dev, nil
}
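The bridged/p2p branch above boils down to creating a veth pair and bringing the host side up. A standalone sketch of just that step, using os/exec directly instead of LXD's shared.RunCommand and deviceNextVeth helpers; the interface names are illustrative, and no bridge attachment or cleanup is attempted:
package main

import (
	"fmt"
	"os/exec"
)

// run is a thin stand-in for LXD's shared.RunCommand helper: execute a
// command and fold its combined output into the returned error on failure.
func run(name string, args ...string) error {
	out, err := exec.Command(name, args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s %v: %v: %s", name, args, err, out)
	}
	return nil
}

// createVethPair mirrors the bridged/p2p path above: create a veth pair,
// bring the host side up, and return the container-side interface name.
func createVethPair(hostName, peerName string) (string, error) {
	if err := run("ip", "link", "add", "dev", hostName, "type", "veth", "peer", "name", peerName); err != nil {
		return "", fmt.Errorf("failed to create the veth interface: %v", err)
	}
	if err := run("ip", "link", "set", "dev", hostName, "up"); err != nil {
		return "", fmt.Errorf("failed to bring up the veth interface %s: %v", hostName, err)
	}
	return peerName, nil
}

func main() {
	dev, err := createVethPair("veth-host0", "veth-peer0")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("container-side device:", dev)
}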
|
https://github.com/t3rm1n4l/nitro/blob/937fe99f63a01a8bea7661c49e2f3f8af6541d7c/skiplist/skiplist.go#L153-L155
|
func (s *Skiplist) Size(n *Node) int {
return s.ItemSize(n.Item()) + n.Size()
}
|
https://github.com/pachyderm/pachyderm/blob/94fb2d536cb6852a77a49e8f777dc9c1bde2c723/src/client/pfs.go#L744-L754
|
func (c APIClient) GetTagReader(tag string) (io.ReadCloser, error) {
ctx, cancel := context.WithCancel(c.Ctx())
getTagClient, err := c.ObjectAPIClient.GetTag(
ctx,
&pfs.Tag{Name: tag},
)
if err != nil {
return nil, grpcutil.ScrubGRPC(err)
}
return grpcutil.NewStreamingBytesReader(getTagClient, cancel), nil
}
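GetTagReader returns a streaming io.ReadCloser whose Close also cancels the RPC context it set up, so callers should always close it. A hedged helper built on an already-connected APIClient; the constructor and address are assumptions for illustration:
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pachyderm/pachyderm/src/client"
)

// readTag drains the tag's contents into memory; closing the reader both
// releases the stream and cancels the context created by GetTagReader.
func readTag(c *client.APIClient, tag string) ([]byte, error) {
	r, err := c.GetTagReader(tag)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}

func main() {
	// Assumption: pachd is reachable at this address via client.NewFromAddress.
	c, err := client.NewFromAddress("localhost:30650")
	if err != nil {
		panic(err)
	}
	data, err := readTag(c, "example-tag")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}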
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/backgroundservice/easyjson.go#L744-L748
|
func (v ClearEventsParams) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdprotoBackgroundservice7(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/label_sync/main.go#L251-L278
|
func (c Configuration) validate(orgs string) error {
// Check default labels
seen, err := validate(c.Default.Labels, "default", make(map[string]string))
if err != nil {
return fmt.Errorf("invalid config: %v", err)
}
// Generate list of orgs
sortedOrgs := strings.Split(orgs, ",")
sort.Strings(sortedOrgs)
// Check other repos labels
for repo, repoconfig := range c.Repos {
// Will complain if a label is both in default and repo
if _, err := validate(repoconfig.Labels, repo, seen); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
// If orgs have been specified, warn if repo isn't under orgs
if len(orgs) != 0 {
data := strings.Split(repo, "/")
if len(data) == 2 {
if !stringInSortedSlice(data[0], sortedOrgs) {
logrus.WithField("orgs", orgs).WithField("org", data[0]).WithField("repo", repo).Warn("Repo isn't inside orgs")
}
}
}
}
return nil
}
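The org check above depends on stringInSortedSlice, which isn't shown; a plausible stand-in is a binary search over the pre-sorted org list. This helper is an assumption, not the repository's actual implementation:
package main

import (
	"fmt"
	"sort"
	"strings"
)

// stringInSortedSlice reports whether needle is present in a sorted slice,
// using binary search; sketched here because the original helper isn't shown.
func stringInSortedSlice(needle string, haystack []string) bool {
	i := sort.SearchStrings(haystack, needle)
	return i < len(haystack) && haystack[i] == needle
}

func main() {
	orgs := strings.Split("kubernetes,kubernetes-sigs", ",")
	sort.Strings(orgs)
	fmt.Println(stringInSortedSlice("kubernetes", orgs))  // true
	fmt.Println(stringInSortedSlice("unknown-org", orgs)) // false
}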
|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/mason/storage.go#L72-L87
|
func (s *Storage) GetConfigs() ([]common.ResourcesConfig, error) {
var configs []common.ResourcesConfig
items, err := s.configs.List()
if err != nil {
return configs, err
}
for _, i := range items {
var conf common.ResourcesConfig
conf, err = common.ItemToResourcesConfig(i)
if err != nil {
return nil, err
}
configs = append(configs, conf)
}
return configs, nil
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/domstorage/easyjson.go#L511-L515
|
func (v *GetDOMStorageItemsParams) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdprotoDomstorage4(&r, v)
return r.Error()
}
|
https://github.com/chromedp/cdproto/blob/d40c70bcdf242660a32f2eadf323662dd75378b5/css/css.go#L398-L407
|
func (p *GetMediaQueriesParams) Do(ctx context.Context) (medias []*Media, err error) {
// execute
var res GetMediaQueriesReturns
err = cdp.Execute(ctx, CommandGetMediaQueries, nil, &res)
if err != nil {
return nil, err
}
return res.Medias, nil
}
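A hedged usage sketch with chromedp: the zero-parameter command is normally obtained from the generated css.GetMediaQueries() constructor and executed inside an ActionFunc. Enabling the DOM and CSS domains first is assumed to be required, as in the DevTools protocol:
package main

import (
	"context"
	"log"

	"github.com/chromedp/cdproto/css"
	"github.com/chromedp/cdproto/dom"
	"github.com/chromedp/chromedp"
)

func main() {
	ctx, cancel := chromedp.NewContext(context.Background())
	defer cancel()

	err := chromedp.Run(ctx,
		chromedp.Navigate("https://example.com"),
		chromedp.ActionFunc(func(ctx context.Context) error {
			// CSS.getMediaQueries needs the DOM and CSS domains enabled first.
			if err := dom.Enable().Do(ctx); err != nil {
				return err
			}
			if err := css.Enable().Do(ctx); err != nil {
				return err
			}
			medias, err := css.GetMediaQueries().Do(ctx)
			if err != nil {
				return err
			}
			log.Printf("found %d media queries", len(medias))
			return nil
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
}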
|
https://github.com/lxc/lxd/blob/7a41d14e4c1a6bc25918aca91004d594774dcdd3/lxd/db/raft.go#L46-L62
|
func (n *NodeTx) RaftNodeAddress(id int64) (string, error) {
stmt := "SELECT address FROM raft_nodes WHERE id=?"
addresses, err := query.SelectStrings(n.tx, stmt, id)
if err != nil {
return "", err
}
switch len(addresses) {
case 0:
return "", ErrNoSuchObject
case 1:
return addresses[0], nil
default:
// This should never happen since we have a UNIQUE constraint
// on the raft_nodes.id column.
return "", fmt.Errorf("more than one match found")
}
}
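The same "exactly one row or a typed error" pattern, written against database/sql directly rather than LXD's query.SelectStrings helper; the sentinel error and table name are carried over purely for illustration:
package raftsketch

import (
	"database/sql"
	"errors"
	"fmt"
)

// ErrNoSuchObject stands in for LXD's ErrNoSuchObject sentinel.
var ErrNoSuchObject = errors.New("no such object")

// RaftNodeAddress fetches the address of the raft node with the given ID,
// failing if zero or more than one row matches.
func RaftNodeAddress(tx *sql.Tx, id int64) (string, error) {
	rows, err := tx.Query("SELECT address FROM raft_nodes WHERE id=?", id)
	if err != nil {
		return "", err
	}
	defer rows.Close()

	var addresses []string
	for rows.Next() {
		var addr string
		if err := rows.Scan(&addr); err != nil {
			return "", err
		}
		addresses = append(addresses, addr)
	}
	if err := rows.Err(); err != nil {
		return "", err
	}

	switch len(addresses) {
	case 0:
		return "", ErrNoSuchObject
	case 1:
		return addresses[0], nil
	default:
		// The UNIQUE constraint on raft_nodes.id should make this unreachable.
		return "", fmt.Errorf("more than one match found")
	}
}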
|
https://github.com/naoina/genmai/blob/78583835e1e41e3938e1ddfffd7101f8ad27fae0/log.go#L51-L76
|
func (l *templateLogger) Print(start time.Time, query string, args ...interface{}) error {
if len(args) > 0 {
values := make([]string, len(args))
for i, arg := range args {
values[i] = fmt.Sprintf("%#v", arg)
}
query = fmt.Sprintf("%v; [%v]", query, strings.Join(values, ", "))
} else {
query = fmt.Sprintf("%s;", query)
}
data := map[string]interface{}{
"time": start,
"duration": fmt.Sprintf("%.2fms", now().Sub(start).Seconds()*float64(time.Microsecond)),
"query": query,
}
var buf bytes.Buffer
if err := l.t.Execute(&buf, data); err != nil {
return err
}
l.m.Lock()
defer l.m.Unlock()
if _, err := fmt.Fprintln(l.w, strings.TrimSuffix(buf.String(), "\n")); err != nil {
return err
}
return nil
}
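The duration expression above is easy to misread: float64(time.Microsecond) is just the constant 1000, so Seconds()*1000 yields milliseconds, which is what the "%.2fms" format expects. A tiny standalone check of the equivalence:
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(3 * time.Millisecond)
	elapsed := time.Since(start)

	// Original form: seconds * 1000 == milliseconds.
	a := elapsed.Seconds() * float64(time.Microsecond)
	// Arguably clearer equivalent.
	b := float64(elapsed) / float64(time.Millisecond)

	fmt.Printf("%.2fms %.2fms\n", a, b)
}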
|
https://github.com/golang/appengine/blob/54a98f90d1c46b7731eb8fb305d2a321c30ef610/log/log.go#L193-L230
|
func protoToRecord(rl *pb.RequestLog) *Record {
offset, err := proto.Marshal(rl.Offset)
if err != nil {
offset = nil
}
return &Record{
AppID: *rl.AppId,
ModuleID: rl.GetModuleId(),
VersionID: *rl.VersionId,
RequestID: rl.RequestId,
Offset: offset,
IP: *rl.Ip,
Nickname: rl.GetNickname(),
AppEngineRelease: string(rl.GetAppEngineRelease()),
StartTime: time.Unix(0, *rl.StartTime*1e3),
EndTime: time.Unix(0, *rl.EndTime*1e3),
Latency: time.Duration(*rl.Latency) * time.Microsecond,
MCycles: *rl.Mcycles,
Method: *rl.Method,
Resource: *rl.Resource,
HTTPVersion: *rl.HttpVersion,
Status: *rl.Status,
ResponseSize: *rl.ResponseSize,
Referrer: rl.GetReferrer(),
UserAgent: rl.GetUserAgent(),
URLMapEntry: *rl.UrlMapEntry,
Combined: *rl.Combined,
Host: rl.GetHost(),
Cost: rl.GetCost(),
TaskQueueName: rl.GetTaskQueueName(),
TaskName: rl.GetTaskName(),
WasLoadingRequest: rl.GetWasLoadingRequest(),
PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
Finished: rl.GetFinished(),
AppLogs: protoToAppLogs(rl.Line),
InstanceID: string(rl.GetCloneKey()),
}
}
|