| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q19200
|
SlabDict.pop
|
train
|
def pop(self, name, defval=None):
'''
Pop a name from the SlabDict.
Args:
name (str): The name to remove.
defval (obj): The default value to return if the name is not present.
Returns:
object: The object stored in the SlabDict, or defval if the object was not present.
'''
valu = self.info.pop(name, defval)
lkey = self.pref + name.encode('utf8')
self.slab.pop(lkey, db=self.db)
return valu
|
python
|
{
"resource": ""
}
|
q19201
|
Slab.copydb
|
train
|
def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
'''
Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method returns an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab.
'''
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):
ccount, acount = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)
if ccount != len(chunk) or acount != len(chunk):
raise s_exc.BadCoreStore(mesg='Unexpected number of values written') # pragma: no cover
rowcount += len(chunk)
if progresscb is not None and 0 == (rowcount % PROGRESS_PERIOD):
progresscb(rowcount)
return rowcount
|
python
|
{
"resource": ""
}
|
q19202
|
Slab.replace
|
train
|
def replace(self, lkey, lval, db=None):
'''
Like put, but returns the previous value if one existed
'''
return self._xact_action(self.replace, lmdb.Transaction.replace, lkey, lval, db=db)
|
python
|
{
"resource": ""
}
|
q19203
|
Trigger._match_idens
|
train
|
async def _match_idens(self, core, prefix):
'''
Returns the iden that starts with prefix. Prints an error and returns None if it doesn't match
exactly one.
'''
idens = [iden for iden, trig in await core.listTriggers()]
matches = [iden for iden in idens if iden.startswith(prefix)]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
self.printf('Error: provided iden does not match any valid authorized triggers')
else:
self.printf('Error: provided iden matches more than one trigger')
return None
|
python
|
{
"resource": ""
}
|
q19204
|
decode
|
train
|
def decode(name, byts, **opts):
'''
Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer.
'''
for name in name.split(','):
if name.startswith('+'):
byts = encode(name[1:], byts, **opts)
continue
func = decoders.get(name)
if func is None:
raise s_exc.NoSuchDecoder(name=name)
byts = func(byts, **opts)
return byts
|
python
|
{
"resource": ""
}
|
q19205
|
addFormat
|
train
|
def addFormat(name, fn, opts):
'''
Add an additional ingest file format
'''
fmtyielders[name] = fn
fmtopts[name] = opts
|
python
|
{
"resource": ""
}
|
q19206
|
iterdata
|
train
|
def iterdata(fd, close_fd=True, **opts):
'''
Iterate through the data provided by a file like object.
Optional parameters may be used to control how the data
is deserialized.
Examples:
The following example show use of the iterdata function.::
with open('foo.csv','rb') as fd:
for row in iterdata(fd, format='csv', encoding='utf8'):
dostuff(row)
Args:
fd (file) : File like object to iterate over.
close_fd (bool) : Default behavior is to close the fd object.
If this is not true, the fd will not be closed.
**opts (dict): Ingest open directive. Causes the data in the fd
to be parsed according to the 'format' key and any
additional arguments.
Yields:
An item to process. The type of the item is dependent on the format
parameters.
'''
fmt = opts.get('format', 'lines')
fopts = fmtopts.get(fmt, {})
# set default options for format
for opt, val in fopts.items():
opts.setdefault(opt, val)
ncod = opts.get('encoding')
if ncod is not None:
fd = codecs.getreader(ncod)(fd)
fmtr = fmtyielders.get(fmt)
if fmtr is None:
raise s_exc.NoSuchImpl(name=fmt, knowns=fmtyielders.keys())
for item in fmtr(fd, opts):
yield item
if close_fd:
fd.close()
|
python
|
{
"resource": ""
}
|
q19207
|
overlap
|
train
|
def overlap(ival0, ival1):
'''
Determine if two interval tuples have overlap.
Args:
ival0 ((int,int)): An interval tuple
ival1 ((int,int)): An interval tuple
Returns:
(bool): True if the intervals overlap, otherwise False
'''
min0, max0 = ival0
min1, max1 = ival1
return max(0, min(max0, max1) - max(min0, min1)) > 0
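# Worked examples (not part of the original source): the intervals below
# share five units of overlap, while intervals that merely touch at an
# endpoint do not count as overlapping.
assert overlap((0, 10), (5, 20)) is True   # min(10, 20) - max(0, 5) == 5
assert overlap((0, 10), (10, 20)) is False  # min(10, 20) - max(0, 10) == 0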
|
python
|
{
"resource": ""
}
|
q19208
|
getTempCortex
|
train
|
async def getTempCortex(mods=None):
'''
Get a proxy to a cortex backed by a temporary directory.
Args:
mods (list): A list of modules which are loaded into the cortex.
Notes:
The cortex and temporary directory are torn down on exit.
This should only be called from synchronous code.
Returns:
Proxy to the cortex.
'''
with s_common.getTempDir() as dirn:
async with await Cortex.anit(dirn) as core:
if mods:
for mod in mods:
await core.loadCoreModule(mod)
async with core.getLocalProxy() as prox:
yield prox
|
python
|
{
"resource": ""
}
|
q19209
|
CoreApi.addTrigger
|
train
|
async def addTrigger(self, condition, query, info):
'''
Adds a trigger to the cortex
'''
iden = self.cell.triggers.add(self.user.iden, condition, query, info=info)
return iden
|
python
|
{
"resource": ""
}
|
q19210
|
CoreApi._trig_auth_check
|
train
|
def _trig_auth_check(self, useriden):
''' Check that a non-admin user only manipulates resources created by them. '''
if not self.user.admin and useriden != self.user.iden:
raise s_exc.AuthDeny(user=self.user.name, mesg='As non-admin, may only manipulate triggers created by you')
|
python
|
{
"resource": ""
}
|
q19211
|
CoreApi.delTrigger
|
train
|
async def delTrigger(self, iden):
'''
Deletes a trigger from the cortex
'''
trig = self.cell.triggers.get(iden)
self._trig_auth_check(trig.get('useriden'))
self.cell.triggers.delete(iden)
|
python
|
{
"resource": ""
}
|
q19212
|
CoreApi.updateTrigger
|
train
|
async def updateTrigger(self, iden, query):
'''
Change an existing trigger's query
'''
trig = self.cell.triggers.get(iden)
self._trig_auth_check(trig.get('useriden'))
self.cell.triggers.mod(iden, query)
|
python
|
{
"resource": ""
}
|
q19213
|
CoreApi.listTriggers
|
train
|
async def listTriggers(self):
'''
Lists all the triggers that the current user is authorized to access
'''
trigs = []
for (iden, trig) in self.cell.triggers.list():
useriden = trig['useriden']
if not (self.user.admin or useriden == self.user.iden):
continue
user = self.cell.auth.user(useriden)
trig['username'] = '<unknown>' if user is None else user.name
trigs.append((iden, trig))
return trigs
|
python
|
{
"resource": ""
}
|
q19214
|
CoreApi.addCronJob
|
train
|
async def addCronJob(self, query, reqs, incunit=None, incval=1):
'''
Add a cron job to the cortex
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'). The values must be positive integers, except
for the key 'dayofmonth', which may also be a negative integer representing the number of days from
the end of the month, with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If it is None (the default), the
appointment is one-shot and will not recur.
incval (Union[int, List[int]]):
An integer, or a list of integers, giving the number of increment units.
Returns (bytes):
An iden that can be used to later modify, query, and delete the job.
Notes:
reqs must have fields present or incunit must not be None (or both).
If incunit is not None, it must be larger in unit size than all the keys in all reqs elements.
'''
def _convert_reqdict(reqdict):
return {s_agenda.TimeUnit.fromString(k): v for (k, v) in reqdict.items()}
try:
if incunit is not None:
if isinstance(incunit, (list, tuple)):
incunit = [s_agenda.TimeUnit.fromString(i) for i in incunit]
else:
incunit = s_agenda.TimeUnit.fromString(incunit)
if isinstance(reqs, Mapping):
newreqs = _convert_reqdict(reqs)
else:
newreqs = [_convert_reqdict(req) for req in reqs]
except KeyError:
raise s_exc.BadConfValu('Unrecognized time unit')
return await self.cell.agenda.add(self.user.iden, query, newreqs, incunit, incval)
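# A usage sketch (assumes a connected CoreApi proxy named `core`; the storm
# queries are illustrative): the first call recurs daily at 09:30, the second
# is a one-shot on the last day of the month, per the reqs/incunit spec above.
#
# iden = await core.addCronJob('inet:ipv4', {'hour': 9, 'minute': 30}, incunit='day')
# iden = await core.addCronJob('inet:ipv4 | count', {'dayofmonth': -1, 'hour': 23})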
|
python
|
{
"resource": ""
}
|
q19215
|
CoreApi.delCronJob
|
train
|
async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
cron = self.cell.agenda.appts.get(iden)
if cron is None:
raise s_exc.NoSuchIden()
self._trig_auth_check(cron.useriden)
await self.cell.agenda.delete(iden)
|
python
|
{
"resource": ""
}
|
q19216
|
CoreApi.updateCronJob
|
train
|
async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
'''
cron = self.cell.agenda.appts.get(iden)
if cron is None:
raise s_exc.NoSuchIden()
self._trig_auth_check(cron.useriden)
await self.cell.agenda.mod(iden, query)
|
python
|
{
"resource": ""
}
|
q19217
|
CoreApi.listCronJobs
|
train
|
async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
crons = []
for iden, cron in self.cell.agenda.list():
useriden = cron['useriden']
if not (self.user.admin or useriden == self.user.iden):
continue
user = self.cell.auth.user(useriden)
cron['username'] = '<unknown>' if user is None else user.name
crons.append((iden, cron))
return crons
|
python
|
{
"resource": ""
}
|
q19218
|
CoreApi.addNodeTag
|
train
|
async def addNodeTag(self, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
buid = s_common.uhex(iden)
parts = tag.split('.')
self._reqUserAllowed('tag:add', *parts)
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
await node.addTag(tag, valu=valu)
return node.pack()
|
python
|
{
"resource": ""
}
|
q19219
|
CoreApi.addNodes
|
train
|
async def addNodes(self, nodes):
'''
Add a list of packed nodes to the cortex.
Args:
nodes (list): [ ( (form, valu), {'props':{}, 'tags':{}}), ... ]
Yields:
(tuple): Packed node tuples ((form,valu), {'props': {}, 'tags':{}})
'''
# First check that the user may add each form
done = {}
for node in nodes:
formname = node[0][0]
if done.get(formname):
continue
self._reqUserAllowed('node:add', formname)
done[formname] = True
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
snap.strict = False
async for node in snap.addNodes(nodes):
if node is not None:
node = node.pack()
yield node
|
python
|
{
"resource": ""
}
|
q19220
|
CoreApi.count
|
train
|
async def count(self, text, opts=None):
'''
Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
(int): The number of nodes resulting from the query.
'''
i = 0
async for _ in self.cell.eval(text, opts=opts, user=self.user):
i += 1
return i
|
python
|
{
"resource": ""
}
|
q19221
|
CoreApi.eval
|
train
|
async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
'''
async for pode in self.cell.iterStormPodes(text, opts=opts, user=self.user):
yield pode
|
python
|
{
"resource": ""
}
|
q19222
|
CoreApi.splices
|
train
|
async def splices(self, offs, size):
'''
Return the list of splices at the given offset.
'''
count = 0
async for mesg in self.cell.view.layers[0].splices(offs, size):
count += 1
if not count % 1000:
await asyncio.sleep(0)
yield mesg
|
python
|
{
"resource": ""
}
|
q19223
|
CoreApi.getProvStack
|
train
|
async def getProvStack(self, iden: str):
'''
Return the provenance stack associated with the given iden.
Args:
iden (str): the iden from splice
Note: the iden appears on each splice entry as the 'prov' property
'''
return self.cell.provstor.getProvStack(s_common.uhex(iden))
|
python
|
{
"resource": ""
}
|
q19224
|
Cortex._initStormCmds
|
train
|
def _initStormCmds(self):
'''
Registration for built-in Storm commands.
'''
self.addStormCmd(s_storm.MaxCmd)
self.addStormCmd(s_storm.MinCmd)
self.addStormCmd(s_storm.HelpCmd)
self.addStormCmd(s_storm.IdenCmd)
self.addStormCmd(s_storm.SpinCmd)
self.addStormCmd(s_storm.SudoCmd)
self.addStormCmd(s_storm.UniqCmd)
self.addStormCmd(s_storm.CountCmd)
self.addStormCmd(s_storm.GraphCmd)
self.addStormCmd(s_storm.LimitCmd)
self.addStormCmd(s_storm.SleepCmd)
self.addStormCmd(s_storm.DelNodeCmd)
self.addStormCmd(s_storm.MoveTagCmd)
self.addStormCmd(s_storm.ReIndexCmd)
|
python
|
{
"resource": ""
}
|
q19225
|
Cortex._initStormLibs
|
train
|
def _initStormLibs(self):
'''
Registration for built-in Storm Libraries
'''
self.addStormLib(('str',), s_stormtypes.LibStr)
self.addStormLib(('time',), s_stormtypes.LibTime)
|
python
|
{
"resource": ""
}
|
q19226
|
Cortex._initSplicers
|
train
|
def _initSplicers(self):
'''
Registration for splice handlers.
'''
splicers = {
'tag:add': self._onFeedTagAdd,
'tag:del': self._onFeedTagDel,
'node:add': self._onFeedNodeAdd,
'node:del': self._onFeedNodeDel,
'prop:set': self._onFeedPropSet,
'prop:del': self._onFeedPropDel,
}
self.splicers.update(**splicers)
|
python
|
{
"resource": ""
}
|
q19227
|
Cortex._initLayerCtors
|
train
|
def _initLayerCtors(self):
'''
Registration for built-in Layer ctors
'''
ctors = {
'lmdb': s_lmdblayer.LmdbLayer,
'remote': s_remotelayer.RemoteLayer,
}
self.layrctors.update(**ctors)
|
python
|
{
"resource": ""
}
|
q19228
|
Cortex._initFeedFuncs
|
train
|
def _initFeedFuncs(self):
'''
Registration for built-in Cortex feed functions.
'''
self.setFeedFunc('syn.nodes', self._addSynNodes)
self.setFeedFunc('syn.splice', self._addSynSplice)
self.setFeedFunc('syn.ingest', self._addSynIngest)
|
python
|
{
"resource": ""
}
|
q19229
|
Cortex._initCortexHttpApi
|
train
|
def _initCortexHttpApi(self):
'''
Registration for built-in Cortex httpapi endpoints
'''
self.addHttpApi('/api/v1/storm', s_httpapi.StormV1, {'cell': self})
self.addHttpApi('/api/v1/storm/nodes', s_httpapi.StormNodesV1, {'cell': self})
self.addHttpApi('/api/v1/model/norm', s_httpapi.ModelNormV1, {'cell': self})
|
python
|
{
"resource": ""
}
|
q19230
|
Cortex._calcFormCounts
|
train
|
async def _calcFormCounts(self):
'''
Recalculate form counts from scratch.
'''
logger.info('Calculating form counts from scratch.')
self.counts.clear()
nameforms = list(self.model.forms.items())
fairiter = 5
tcount = 0
for i, (name, form) in enumerate(nameforms, 1):
logger.info('Calculating form counts for [%s] [%s/%s]',
name, i, len(nameforms))
count = 0
async for buid, valu in self.view.layers[0].iterFormRows(name):
count += 1
tcount += 1
if count % fairiter == 0:
await asyncio.sleep(0)
# after 100k total rows, yield to the loop less often
if fairiter == 5 and tcount > 100000:
fairiter = 1000
self.counts[name] = count
for name, valu in self.counts.items():
byts = s_common.int64en(valu)
self.slab.put(name.encode('utf8'), byts, db=self.formcountdb)
logger.info('Done calculating form counts.')
|
python
|
{
"resource": ""
}
|
q19231
|
Cortex.onTagAdd
|
train
|
def onTagAdd(self, name, func):
'''
Register a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
# TODO allow name wild cards
if '*' in name:
self.ontagaddglobs.add(name, func)
else:
self.ontagadds[name].append(func)
|
python
|
{
"resource": ""
}
|
q19232
|
Cortex.offTagAdd
|
train
|
def offTagAdd(self, name, func):
'''
Unregister a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagaddglobs.rem(name, func)
return
cblist = self.ontagadds.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
|
python
|
{
"resource": ""
}
|
q19233
|
Cortex.onTagDel
|
train
|
def onTagDel(self, name, func):
'''
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.add(name, func)
else:
self.ontagdels[name].append(func)
|
python
|
{
"resource": ""
}
|
q19234
|
Cortex.offTagDel
|
train
|
def offTagDel(self, name, func):
'''
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.rem(name, func)
return
cblist = self.ontagdels.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
|
python
|
{
"resource": ""
}
|
q19235
|
Cortex.runRuntLift
|
train
|
async def runRuntLift(self, full, valu=None, cmpr=None):
'''
Execute a runt lift function.
Args:
full (str): Property to lift by.
valu:
cmpr:
Returns:
bytes, list: Yields bytes, list tuples where the list contains a series of
key/value pairs which are used to construct a Node object.
'''
func = self._runtLiftFuncs.get(full)
if func is None:
raise s_exc.NoSuchLift(mesg='No runt lift implemented for requested property.',
full=full, valu=valu, cmpr=cmpr)
async for buid, rows in func(full, valu, cmpr):
yield buid, rows
|
python
|
{
"resource": ""
}
|
q19236
|
Cortex.delView
|
train
|
async def delView(self, iden):
'''
Delete a cortex view by iden.
'''
if iden == self.iden:
raise s_exc.SynErr(mesg='cannot delete the main view')
view = self.views.pop(iden, None)
if view is None:
raise s_exc.NoSuchView(iden=iden)
await self.hive.pop(('cortex', 'views', iden))
await view.fini()
|
python
|
{
"resource": ""
}
|
q19237
|
Cortex.addLayer
|
train
|
async def addLayer(self, **info):
'''
Add a Layer to the cortex.
Notes:
The addLayer ``**info`` arg is expected to be shaped like the following::
info = {
'iden': <str>, ( optional iden. default guid() )
'type': <str>, ( optional type. default lmdb )
'owner': <str>, ( optional owner. default root )
'config': {}, # type specific config options.
}
'''
iden = info.pop('iden', None)
if iden is None:
iden = s_common.guid()
node = await self.hive.open(('cortex', 'layers', iden))
layrinfo = await node.dict()
layrconf = await (await node.open(('config',))).dict()
await layrinfo.set('type', info.get('type', 'lmdb'))
await layrinfo.set('owner', info.get('owner', 'root'))
await layrinfo.set('name', info.get('name', '??'))
for name, valu in info.get('config', {}).items():
await layrconf.set(name, valu)
return await self._layrFromNode(node)
|
python
|
{
"resource": ""
}
|
q19238
|
Cortex.joinTeleLayer
|
train
|
async def joinTeleLayer(self, url, indx=None):
'''
Convenience function to join a remote telepath layer
into this cortex and default view.
'''
info = {
'type': 'remote',
'owner': 'root',
'config': {
'url': url
}
}
layr = await self.addLayer(**info)
await self.view.addLayer(layr, indx=indx)
return layr.iden
|
python
|
{
"resource": ""
}
|
q19239
|
Cortex.addStormCmd
|
train
|
def addStormCmd(self, ctor):
'''
Add a synapse.lib.storm.Cmd class to the cortex.
'''
if not s_syntax.isCmdName(ctor.name):
raise s_exc.BadCmdName(name=ctor.name)
self.stormcmds[ctor.name] = ctor
|
python
|
{
"resource": ""
}
|
q19240
|
Cortex._getSynIngestNodes
|
train
|
def _getSynIngestNodes(self, item):
'''
Get a list of packed nodes from an ingest definition.
'''
pnodes = []
seen = item.get('seen')
# Track all the ndefs we make so we can make sources
ndefs = []
# Make the form nodes
tags = item.get('tags', {})
forms = item.get('forms', {})
for form, valus in forms.items():
for valu in valus:
ndef = [form, valu]
ndefs.append(ndef)
obj = [ndef, {'tags': tags}]
if seen:
obj[1]['props'] = {'.seen': seen}
pnodes.append(obj)
# Make the packed nodes
nodes = item.get('nodes', ())
for pnode in nodes:
ndefs.append(pnode[0])
pnode[1].setdefault('tags', {})
for tag, valu in tags.items():
# Tag in the packed node has a higher precedence
# than the tag in the whole ingest set of data.
pnode[1]['tags'].setdefault(tag, valu)
if seen:
pnode[1].setdefault('props', {})
pnode[1]['props'].setdefault('.seen', seen)
pnodes.append(pnode)
# Make edges
for srcdef, etyp, destndefs in item.get('edges', ()):
for destndef in destndefs:
ndef = [etyp, [srcdef, destndef]]
ndefs.append(ndef)
obj = [ndef, {}]
if seen:
obj[1]['props'] = {'.seen': seen}
if tags:
obj[1]['tags'] = tags.copy()
pnodes.append(obj)
# Make time based edges
for srcdef, etyp, destndefs in item.get('time:edges', ()):
for destndef, time in destndefs:
ndef = [etyp, [srcdef, destndef, time]]
ndefs.append(ndef)
obj = [ndef, {}]
if seen:
obj[1]['props'] = {'.seen': seen}
if tags:
obj[1]['tags'] = tags.copy()
pnodes.append(obj)
# Make the source node and links
source = item.get('source')
if source:
# Base object
obj = [['meta:source', source], {}]
pnodes.append(obj)
# Subsequent links
for ndef in ndefs:
obj = [['meta:seen', (source, ndef)],
{'props': {'.seen': seen}}]
pnodes.append(obj)
return pnodes
|
python
|
{
"resource": ""
}
|
q19241
|
Cortex.eval
|
train
|
async def eval(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield Nodes only.
'''
if user is None:
user = self.auth.getUserByName('root')
await self.boss.promote('storm', user=user, info={'query': text})
async with await self.snap(user=user) as snap:
async for node in snap.eval(text, opts=opts, user=user):
yield node
|
python
|
{
"resource": ""
}
|
q19242
|
Cortex.nodes
|
train
|
async def nodes(self, text, opts=None, user=None):
'''
A simple non-streaming way to return a list of nodes.
'''
return [n async for n in self.eval(text, opts=opts, user=user)]
|
python
|
{
"resource": ""
}
|
q19243
|
Cortex.getStormQuery
|
train
|
def getStormQuery(self, text):
'''
Parse storm query text and return a Query object.
'''
query = s_syntax.Parser(text).query()
query.init(self)
return query
|
python
|
{
"resource": ""
}
|
q19244
|
Cortex._logStormQuery
|
train
|
def _logStormQuery(self, text, user):
'''
Log a storm query.
'''
if self.conf.get('storm:log'):
lvl = self.conf.get('storm:log:level')
logger.log(lvl, 'Executing storm query {%s} as [%s]', text, user.name)
|
python
|
{
"resource": ""
}
|
q19245
|
Cortex.getNodesBy
|
train
|
async def getNodesBy(self, full, valu, cmpr='='):
'''
Get nodes by a property value or lift syntax.
Args:
full (str): The full name of a property <form>:<prop>.
valu (obj): A value that the type knows how to lift by.
cmpr (str): The comparison operator you are lifting by.
Some node property types allow special syntax here.
Examples:
# simple lift by property equality
core.getNodesBy('file:bytes:size', 20)
# The inet:ipv4 type knows about cidr syntax
core.getNodesBy('inet:ipv4', '1.2.3.0/24')
'''
async with await self.snap() as snap:
async for node in snap.getNodesBy(full, valu, cmpr=cmpr):
yield node
|
python
|
{
"resource": ""
}
|
q19246
|
Cortex.snap
|
train
|
async def snap(self, user=None, view=None):
'''
Return a transaction object for the default view.
Args:
user: The user for the snap. Defaults to the root user.
view: The view to snapshot. Defaults to the cortex's default view.
Returns:
(synapse.lib.snap.Snap)
NOTE: This must be used in a with block.
'''
if view is None:
view = self.view
if user is None:
user = self.auth.getUserByName('root')
snap = await view.snap(user)
return snap
|
python
|
{
"resource": ""
}
|
q19247
|
Cortex.loadCoreModule
|
train
|
async def loadCoreModule(self, ctor, conf=None):
'''
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict): Config dictionary for the module
'''
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
await self.fire('core:module:load', module=ctor)
return modu
|
python
|
{
"resource": ""
}
|
q19248
|
Cortex.getPropNorm
|
train
|
async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
pobj = self.model.prop(prop)
if pobj is None:
raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.',
prop=prop)
norm, info = pobj.type.norm(valu)
return norm, info
|
python
|
{
"resource": ""
}
|
q19249
|
Cortex.getTypeNorm
|
train
|
async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
tobj = self.model.type(name)
if tobj is None:
raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.',
name=name)
norm, info = tobj.norm(valu)
return norm, info
|
python
|
{
"resource": ""
}
|
q19250
|
LmdbLayer.stor
|
train
|
async def stor(self, sops, splices=None):
'''
Execute a series of storage operations.
Overrides implementation in layer.py to avoid unnecessary async calls.
'''
for oper in sops:
func = self._stor_funcs.get(oper[0])
if func is None: # pragma: no cover
raise s_exc.NoSuchStor(name=oper[0])
func(oper)
if splices:
self._storSplicesSync(splices)
self.spliced.set()
self.spliced.clear()
|
python
|
{
"resource": ""
}
|
q19251
|
LmdbLayer._migrate_db_pre010
|
train
|
def _migrate_db_pre010(self, dbname, newslab):
'''
Check for any pre-010 entries in 'dbname' in my slab and migrate those to the new slab.
Once complete, drop the database from me with the name 'dbname'
Returns (bool): True if a migration occurred, else False
'''
donekey = f'migrdone:{dbname}'
if self.metadict.get(donekey, False):
return False
if not self.layrslab.dbexists(dbname):
self.metadict.set(donekey, True)
return False
oldslab = self.layrslab
olddb = oldslab.initdb(dbname)
entries = oldslab.stat(olddb)['entries']
if not entries:
self.metadict.set(donekey, True)
return False
if newslab.dbexists(dbname):
logger.warning('Incomplete migration detected. Dropping new splices to restart.')
newslab.dropdb(dbname)
logger.info('New splice dropping complete.')
logger.info('Pre-010 %s migration starting. Total rows: %d...', dbname, entries)
def progfunc(count):
logger.info('Progress %d/%d (%2.2f%%)', count, entries, count / entries * 100)
oldslab.copydb(olddb, newslab, destdbname=dbname, progresscb=progfunc)
logger.info('Pre-010 %s migration copying done. Deleting from old location...', dbname)
oldslab.dropdb(dbname)
logger.info('Pre-010 %s migration completed.', dbname)
self.metadict.set(donekey, True)
return True
|
python
|
{
"resource": ""
}
|
q19252
|
LmdbLayer.migrateProvPre010
|
train
|
def migrateProvPre010(self, newslab):
'''
Check for any pre-010 provstacks and migrate those to the new slab.
'''
did_migrate = self._migrate_db_pre010('prov', newslab)
if not did_migrate:
return
self._migrate_db_pre010('provs', newslab)
|
python
|
{
"resource": ""
}
|
q19253
|
LmdbLayer.storPropSet
|
train
|
async def storPropSet(self, buid, prop, valu):
'''
Migration-only function
'''
assert self.buidcache.disabled
indx = prop.type.indx(valu)
if indx is not None and len(indx) > MAX_INDEX_LEN:
mesg = 'index bytes are too large'
raise s_exc.BadIndxValu(mesg=mesg, prop=prop, valu=valu)
univ = prop.utf8name[0] in (46, 35) # leading . or #
bpkey = buid + prop.utf8name
self._storPropSetCommon(buid, prop.utf8name, bpkey, prop.pref, univ, valu, indx)
|
python
|
{
"resource": ""
}
|
q19254
|
openurl
|
train
|
async def openurl(url, **opts):
'''
Open a URL to a remote telepath object.
Args:
url (str): A telepath URL.
**opts (dict): Telepath connect options.
Returns:
(synapse.telepath.Proxy): A telepath proxy object.
The telepath proxy may then be used for sync or async calls:
proxy = openurl(url)
value = proxy.getFooThing()
... or ...
proxy = await openurl(url)
valu = await proxy.getFooThing()
... or ...
async with await openurl(url) as proxy:
valu = await proxy.getFooThing()
'''
if url.find('://') == -1:
newurl = alias(url)
if newurl is None:
raise s_exc.BadUrl(f':// not found in [{url}] and no alias found!')
url = newurl
info = s_urlhelp.chopurl(url)
info.update(opts)
host = info.get('host')
port = info.get('port')
auth = None
user = info.get('user')
if user is not None:
passwd = info.get('passwd')
auth = (user, {'passwd': passwd})
scheme = info.get('scheme')
if scheme == 'cell':
# cell:///path/to/celldir:share
# cell://rel/path/to/celldir:share
path = info.get('path')
name = info.get('name', '*')
# support cell://<relpath>/<to>/<cell>
# by detecting host...
host = info.get('host')
if host:
path = path.strip('/')
path = os.path.join(host, path)
if ':' in path:
path, name = path.split(':')
full = os.path.join(path, 'sock')
link = await s_link.unixconnect(full)
elif scheme == 'unix':
# unix:///path/to/sock:share
path, name = info.get('path').split(':')
link = await s_link.unixconnect(path)
else:
path = info.get('path')
name = info.get('name', path[1:])
sslctx = None
if scheme == 'ssl':
certpath = info.get('certdir')
certdir = s_certdir.CertDir(certpath)
sslctx = certdir.getClientSSLContext()
link = await s_link.connect(host, port, ssl=sslctx)
prox = await Proxy.anit(link, name)
prox.onfini(link)
try:
await prox.handshake(auth=auth)
except Exception:
await prox.fini()
raise
return prox
|
python
|
{
"resource": ""
}
|
q19255
|
Proxy.call
|
train
|
async def call(self, methname, *args, **kwargs):
'''
Call a remote method by name.
Args:
methname (str): The name of the remote method.
*args: Arguments to the method call.
**kwargs: Keyword arguments to the method call.
Most use cases will likely use the proxy methods directly:
The following two are effectively the same:
valu = proxy.getFooBar(x, y)
valu = proxy.call('getFooBar', x, y)
'''
todo = (methname, args, kwargs)
return await self.task(todo)
|
python
|
{
"resource": ""
}
|
q19256
|
RateLimit.allows
|
train
|
def allows(self):
'''
Returns True if the rate limit has not been reached.
Example:
if not rlimit.allows():
raise RateExceeded()
# ok to go...
'''
tick = time.time()
passed = tick - self.lasttick
self.allowance = min(self.rate, self.allowance + (passed * self.persec))
self.lasttick = tick
if self.allowance < 1.0:
return False
self.allowance -= 1.0
return True
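# A usage sketch (assumes a RateLimit instance named `rlimit` constructed
# elsewhere, and a `handle`/`todo` pair; all names here are illustrative):
# poll allows() before each unit of rate-limited work.
#
# while todo:
#     if not rlimit.allows():
#         time.sleep(0.01)  # wait for the allowance to refill
#         continue
#     handle(todo.pop())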
|
python
|
{
"resource": ""
}
|
q19257
|
Layer.disablingBuidCache
|
train
|
@contextlib.contextmanager  # needed for the generator to act as a with-block (assumes `import contextlib`)
def disablingBuidCache(self):
'''
Disable and invalidate the layer buid cache for migration
'''
self.buidcache = s_cache.LruDict(0)
yield
self.buidcache = s_cache.LruDict(BUID_CACHE_SIZE)
|
python
|
{
"resource": ""
}
|
q19258
|
getClsNames
|
train
|
def getClsNames(item):
'''
Return a list of "fully qualified" class names for an instance.
Example:
for name in getClsNames(foo):
print(name)
'''
mro = inspect.getmro(item.__class__)
mro = [c for c in mro if c not in clsskip]
return ['%s.%s' % (c.__module__, c.__name__) for c in mro]
|
python
|
{
"resource": ""
}
|
q19259
|
getShareInfo
|
train
|
def getShareInfo(item):
'''
Get a dictionary of special annotations for a Telepath Proxy.
Args:
item: Item to inspect.
Notes:
This will set a per-class ``_syn_sharinfo_*`` attribute on the item
and the item's class, so this data is only computed once.
Returns:
dict: A dictionary of methods requiring special handling by the proxy.
'''
key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'
info = getattr(item, key, None)
if info is not None:
return info
meths = {}
info = {'meths': meths}
for name in dir(item):
if name.startswith('_'):
continue
attr = getattr(item, name, None)
if not callable(attr):
continue
# We know we can cleanly unwrap these functions
# for asyncgenerator inspection.
wrapped = getattr(attr, '__syn_wrapped__', None)
if wrapped in unwraps:
real = inspect.unwrap(attr)
if inspect.isasyncgenfunction(real):
meths[name] = {'genr': True}
continue
if inspect.isasyncgenfunction(attr):
meths[name] = {'genr': True}
try:
setattr(item, key, info)
except Exception as e: # pragma: no cover
logger.exception(f'Failed to set magic on {item}')
try:
setattr(item.__class__, key, info)
except Exception as e: # pragma: no cover
logger.exception(f'Failed to set magic on {item.__class__}')
return info
|
python
|
{
"resource": ""
}
|
q19260
|
CellApi.getHiveKey
|
train
|
async def getHiveKey(self, path):
''' Get the value of a key in the cell default hive '''
perm = ('hive:get',) + path
self.user.allowed(perm)
return await self.cell.hive.get(path)
|
python
|
{
"resource": ""
}
|
q19261
|
CellApi.setHiveKey
|
train
|
async def setHiveKey(self, path, value):
''' Set or change the value of a key in the cell default hive '''
perm = ('hive:set',) + path
self.user.allowed(perm)
return await self.cell.hive.set(path, value)
|
python
|
{
"resource": ""
}
|
q19262
|
CellApi.popHiveKey
|
train
|
async def popHiveKey(self, path):
''' Remove and return the value of a key in the cell default hive '''
perm = ('hive:pop',) + path
self.user.allowed(perm)
return await self.cell.hive.pop(path)
|
python
|
{
"resource": ""
}
|
q19263
|
CellApi.getAuthInfo
|
train
|
async def getAuthInfo(self, name):
'''
An admin only API endpoint for getting user info.
'''
item = self._getAuthItem(name)
pack = item.pack()
# translate role guids to names for back compat
if pack.get('type') == 'user':
pack['roles'] = [self.cell.auth.role(r).name for r in pack['roles']]
return (name, pack)
|
python
|
{
"resource": ""
}
|
q19264
|
_asynciostacks
|
train
|
def _asynciostacks(*args, **kwargs): # pragma: no cover
'''
A signal handler used to print asyncio task stacks and thread stacks.
'''
print(80 * '*')
print('Asyncio tasks stacks:')
tasks = asyncio.all_tasks(_glob_loop)
for task in tasks:
task.print_stack()
print(80 * '*')
print('Faulthandler stack frames per thread:')
faulthandler.dump_traceback()
print(80 * '*')
|
python
|
{
"resource": ""
}
|
q19265
|
sync
|
train
|
def sync(coro, timeout=None):
'''
Schedule a coroutine to run on the global loop and return its result.
Args:
coro (coroutine): The coroutine instance.
Notes:
This API is thread safe and should only be called by non-loop threads.
'''
loop = initloop()
return asyncio.run_coroutine_threadsafe(coro, loop).result(timeout)
|
python
|
{
"resource": ""
}
|
q19266
|
synchelp
|
train
|
def synchelp(f):
'''
The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y)
'''
def wrap(*args, **kwargs):
coro = f(*args, **kwargs)
if not iAmLoop():
return sync(coro)
return coro
return wrap
|
python
|
{
"resource": ""
}
|
q19267
|
Cron._parse_weekday
|
train
|
def _parse_weekday(val):
''' Try to match a day-of-week abbreviation, then try a day-of-week full name '''
val = val.title()
try:
return list(calendar.day_abbr).index(val)
except ValueError:
try:
return list(calendar.day_name).index(val)
except ValueError:
return None
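# Worked examples (based on calendar.day_abbr/day_name with the default
# English locale, where Monday == 0):
assert _parse_weekday('mon') == 0
assert _parse_weekday('Sunday') == 6
assert _parse_weekday('noday') is None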
|
python
|
{
"resource": ""
}
|
q19268
|
Cron._parse_incval
|
train
|
def _parse_incval(incunit, incval):
''' Parse a non-day increment value. Should be an integer or a comma-separated integer list. '''
try:
retn = [int(val) for val in incval.split(',')]
except ValueError:
return None
return retn[0] if len(retn) == 1 else retn
|
python
|
{
"resource": ""
}
|
q19269
|
Cron._parse_req
|
train
|
def _parse_req(requnit, reqval):
''' Parse a non-day fixed value '''
assert reqval[0] != '='
try:
retn = []
for val in reqval.split(','):
if requnit == 'month':
if val[0].isdigit():
retn.append(int(val))  # must be a month (1-12)
else:
try:
retn.append(list(calendar.month_abbr).index(val.title()))
except ValueError:
retn.append(list(calendar.month_name).index(val.title()))
else:
retn.append(int(val))
except ValueError:
return None
if not retn:
return None
return retn[0] if len(retn) == 1 else retn
|
python
|
{
"resource": ""
}
|
q19270
|
Cron._handle_stat
|
train
|
async def _handle_stat(self, core, opts):
''' Prints details about a particular cron job. Not actually a different API call '''
prefix = opts.prefix
crons = await core.listCronJobs()
idens = [cron[0] for cron in crons]
matches = [iden for iden in idens if iden.startswith(prefix)]
if len(matches) == 0:
self.printf('Error: provided iden does not match any valid authorized cron job')
return
elif len(matches) > 1:
self.printf('Error: provided iden matches more than one cron job')
return
iden = matches[0]
cron = [cron[1] for cron in crons if cron[0] == iden][0]
user = cron.get('username') or '<None>'
query = cron.get('query') or '<missing>'
isrecur = 'Yes' if cron.get('recur') else 'No'
startcount = cron.get('startcount') or 0
recs = cron.get('recs', [])
laststart = cron.get('laststarttime')
lastend = cron.get('lastfinishtime')
laststart = 'Never' if laststart is None else self._format_timestamp(laststart)
lastend = 'Never' if lastend is None else self._format_timestamp(lastend)
lastresult = cron.get('lastresult') or '<None>'
self.printf(f'iden: {iden}')
self.printf(f'user: {user}')
self.printf(f'recurring: {isrecur}')
self.printf(f'# starts: {startcount}')
self.printf(f'last start time: {laststart}')
self.printf(f'last end time: {lastend}')
self.printf(f'last result: {lastresult}')
self.printf(f'query: {query}')
if not recs:
self.printf(f'entries: <None>')
else:
self.printf(f'entries: {"incunit":10} {"incval":6} {"required"}')
for reqdict, incunit, incval in recs:
reqdict = reqdict or '<None>'
incunit = incunit or '<None>'
incval = incval or '<None>'
self.printf(f' {incunit:10} {incval:6} {reqdict}')
|
python
|
{
"resource": ""
}
|
q19271
|
Boss.promote
|
train
|
async def promote(self, name, user, info=None):
'''
Promote the currently running task.
'''
task = asyncio.current_task()
synt = getattr(task, '_syn_task', None)
if synt is not None:
if synt.root is None:
return synt
synt.root.kids.pop(synt.iden)
synt.root = None
return synt
return await s_task.Task.anit(self, task, name, user, info=info)
|
python
|
{
"resource": ""
}
|
q19272
|
Boss.execute
|
train
|
async def execute(self, coro, name, user, info=None):
'''
Create a synapse task from the given coroutine.
'''
task = self.schedCoro(coro)
return await s_task.Task.anit(self, task, name, user, info=info)
|
python
|
{
"resource": ""
}
|
q19273
|
Base.on
|
train
|
def on(self, evnt, func, base=None):
'''
Add a base function callback for a specific event with optional filtering. If the function returns a
coroutine, it will be awaited.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
Examples:
Add a callback function and fire it:
async def baz(event):
x = event[1].get('x')
y = event[1].get('y')
return x + y
d.on('foo', baz)
# this fire triggers baz...
await d.fire('foo', x=10, y=20)
Returns:
None:
'''
funcs = self._syn_funcs[evnt]
if func in funcs:
return
funcs.append(func)
if base is not None:
def fini():
self.off(evnt, func)
base.onfini(fini)
|
python
|
{
"resource": ""
}
|
q19274
|
Base.off
|
train
|
def off(self, evnt, func):
'''
Remove a previously registered event handler function.
Example:
base.off( 'foo', onFooFunc )
'''
funcs = self._syn_funcs.get(evnt)
if funcs is None:
return
try:
funcs.remove(func)
except ValueError:
pass
|
python
|
{
"resource": ""
}
|
q19275
|
Base.fire
|
train
|
async def fire(self, evtname, **info):
'''
Fire the given event name on the Base.
Returns the fired event tuple ((evtname, info)).
Example:
event = await d.fire('woot', foo='asdf')
print('got: %r' % (event,))
'''
event = (evtname, info)
if self.isfini:
return event
await self.dist(event)
return event
|
python
|
{
"resource": ""
}
|
q19276
|
Base.dist
|
train
|
async def dist(self, mesg):
'''
Distribute an existing event tuple.
Args:
mesg ((str,dict)): An event tuple.
Example:
await base.dist( ('foo',{'bar':'baz'}) )
'''
if self.isfini:
return ()
ret = []
for func in self._syn_funcs.get(mesg[0], ()):
try:
ret.append(await s_coro.ornot(func, mesg))
except asyncio.CancelledError:
raise
except Exception:
logger.exception('base %s error with mesg %s', self, mesg)
for func in self._syn_links:
try:
ret.append(await func(mesg))
except asyncio.CancelledError:
raise
except Exception:
logger.exception('base %s error with mesg %s', self, mesg)
return ret
|
python
|
{
"resource": ""
}
|
q19277
|
Base.onWith
|
train
|
@contextlib.contextmanager  # needed for the generator to act as a with-block (assumes `import contextlib`)
def onWith(self, evnt, func):
'''
A context manager which can be used to add a callback and remove it when
using a ``with`` statement.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
'''
self.on(evnt, func)
# Allow exceptions to propagate during the context manager
# but ensure we cleanup our temporary callback
try:
yield self
finally:
self.off(evnt, func)
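# A usage sketch (`onEvent` is an assumed callback name): the handler is
# only registered for the duration of the with block.
#
# with base.onWith('node:add', onEvent):
#     await base.fire('node:add', ndef=('inet:ipv4', 0x01020304))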
|
python
|
{
"resource": ""
}
|
q19278
|
Base.schedCoro
|
train
|
def schedCoro(self, coro):
'''
Schedules a free-running coroutine to run on this base's event loop. Kills the coroutine if Base is fini'd.
It does not pend on coroutine completion.
Precondition:
This function is *not* threadsafe and must be run on the Base's event loop
Returns:
asyncio.Task: An asyncio.Task object.
'''
import synapse.lib.provenance as s_provenance # avoid import cycle
if __debug__:
assert s_coro.iscoro(coro)
import synapse.lib.threads as s_threads # avoid import cycle
assert s_threads.iden() == self.tid
task = self.loop.create_task(coro)
# In rare cases, (Like this function being triggered from call_soon_threadsafe), there's no task context
if asyncio.current_task():
s_provenance.dupstack(task)
def taskDone(task):
self._active_tasks.remove(task)
try:
task.result()
except asyncio.CancelledError:
pass
except Exception:
logger.exception('Task scheduled through Base.schedCoro raised exception')
self._active_tasks.add(task)
task.add_done_callback(taskDone)
return task
|
python
|
{
"resource": ""
}
|
q19279
|
Base.schedCoroSafePend
|
train
|
def schedCoroSafePend(self, coro):
'''
Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on
Note:
This method may *not* be run inside an event loop
'''
if __debug__:
import synapse.lib.threads as s_threads # avoid import cycle
assert s_threads.iden() != self.tid
task = asyncio.run_coroutine_threadsafe(coro, self.loop)
return task.result()
|
python
|
{
"resource": ""
}
|
q19280
|
Base.waiter
|
train
|
def waiter(self, count, *names):
'''
Construct and return a new Waiter for events on this base.
Example:
# wait up to 3 seconds for 10 foo:bar events...
waiter = base.waiter(10,'foo:bar')
# .. fire thread that will cause foo:bar events
events = await waiter.wait(timeout=3)
if events is None:
# handle the timeout case...
return
for event in events:
# parse the events if you need...
NOTE: use with caution... it's easy to accidentally construct
race conditions with this mechanism ;)
'''
return Waiter(self, count, self.loop, *names)
|
python
|
{
"resource": ""
}
|
q19281
|
Waiter.wait
|
train
|
async def wait(self, timeout=None):
'''
Wait for the required number of events and return them or None on timeout.
Example:
evnts = await waiter.wait(timeout=30)
if evnts is None:
handleTimedOut()
return
for evnt in evnts:
doStuff(evnt)
'''
try:
retn = await s_coro.event_wait(self.event, timeout)
if not retn:
return None
return self.events
finally:
self.fini()
|
python
|
{
"resource": ""
}
|
q19282
|
guid
|
train
|
def guid(valu=None):
'''
Get a 16 byte guid value.
By default, this is a random guid value.
Args:
valu: Object used to construct the guid valu from. This must be able
to be msgpack'd.
Returns:
str: 32 character, lowercase ascii string.
'''
if valu is None:
return binascii.hexlify(os.urandom(16)).decode('utf8')
# Generate a "stable" guid from the given item
byts = s_msgpack.en(valu)
return hashlib.md5(byts).hexdigest()
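# A short usage sketch: with no argument the guid is random, while guids
# built from the same msgpack-able value are stable across calls.
assert len(guid()) == 32
assert guid(('foo', 'bar')) == guid(('foo', 'bar'))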
|
python
|
{
"resource": ""
}
|
q19283
|
buid
|
train
|
def buid(valu=None):
'''
A binary GUID like sequence of 32 bytes.
Args:
valu (object): Optional, if provided, the hash of the msgpack
encoded form of the object is returned. This can be used to
create stable buids.
Notes:
By default, this returns a random 32 byte value.
Returns:
bytes: A 32 byte value.
'''
if valu is None:
return os.urandom(32)
byts = s_msgpack.en(valu)
return hashlib.sha256(byts).digest()
|
python
|
{
"resource": ""
}
|
q19284
|
lockfile
|
train
|
@contextlib.contextmanager  # needed for the generator to act as a with-block (assumes `import contextlib`)
def lockfile(path):
'''
A file lock with-block helper.
Args:
path (str): A path to a lock file.
Examples:
Get the lock on a file and dostuff while having the lock::
path = '/hehe/haha.lock'
with lockfile(path):
dostuff()
Notes:
This is currently based on fcntl.lockf(), and as such, it is purely
advisory locking. If multiple processes are attempting to obtain a
lock on the same file, this will block until the process which has
the current lock releases it.
Yields:
None
'''
with genfile(path) as fd:
fcntl.lockf(fd, fcntl.LOCK_EX)
yield None
|
python
|
{
"resource": ""
}
|
q19285
|
getexcfo
|
train
|
def getexcfo(e):
'''
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
'''
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
path, line, name, src = tbinfo[-1]
retd = {
'msg': str(e),
'file': path,
'line': line,
'name': name,
'src': src
}
if isinstance(e, s_exc.SynErr):
retd['syn:err'] = e.errinfo
return (e.__class__.__name__, retd)
|
python
|
{
"resource": ""
}
|
q19286
|
excinfo
|
train
|
def excinfo(e):
'''
Populate err,errmsg,errtrace info from exc.
'''
tb = sys.exc_info()[2]
path, line, name, sorc = traceback.extract_tb(tb)[-1]
ret = {
'err': e.__class__.__name__,
'errmsg': str(e),
'errfile': path,
'errline': line,
}
if isinstance(e, s_exc.SynErr):
ret['errinfo'] = e.errinfo
return ret
|
python
|
{
"resource": ""
}
|
q19287
|
chunks
|
train
|
def chunks(item, size):
'''
Divide an iterable into chunks.
Args:
item: Item to slice
size (int): Maximum chunk size.
Notes:
This supports Generator objects and objects which support calling
the __getitem__() method with a slice object.
Yields:
Slices of the item containing up to "size" number of items.
'''
# use islice if it's a generator
if isinstance(item, types.GeneratorType):
while True:
chunk = tuple(itertools.islice(item, size))
if not chunk:
return
yield chunk
# The sequence item is empty, yield an empty slice from it.
# This will also catch mapping objects since a slice should
# be an unhashable type for a mapping and the __getitem__
# method would not be present on a set object
if not item:
yield item[0:0]
return
# otherwise, use normal slicing
off = 0
while True:
chunk = item[off:off + size]
if not chunk:
return
yield chunk
off += size
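# Worked examples: sliceable sequences yield slices of their own type,
# while generators are consumed with itertools.islice and yield tuples.
assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
assert list(chunks((i for i in range(5)), 2)) == [(0, 1), (2, 3), (4,)]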
|
python
|
{
"resource": ""
}
|
q19288
|
iterfd
|
train
|
def iterfd(fd, size=10000000):
'''
Generator which yields bytes from a file descriptor.
Args:
fd (file): A file-like object to read bytes from.
size (int): Size, in bytes, of the number of bytes to read from the
fd at a given time.
Notes:
If the first read call on the file descriptor is an empty bytestring,
that zero length bytestring will be yielded and the generator will
then be exhausted. This behavior is intended to allow the yielding of
contents of a zero byte file.
Yields:
bytes: Bytes from the file descriptor.
'''
fd.seek(0)
byts = fd.read(size)
# Fast path to yield b''
if len(byts) == 0:
yield byts
return
while byts:
yield byts
byts = fd.read(size)
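# A usage sketch (the file name and hasher are illustrative): stream a
# file through iterfd in 64KB reads rather than loading it whole.
#
# import hashlib
# with open('blob.bin', 'rb') as fd:
#     h = hashlib.sha256()
#     for byts in iterfd(fd, size=65536):
#         h.update(byts)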
|
python
|
{
"resource": ""
}
|
q19289
|
firethread
|
train
|
def firethread(f):
'''
A decorator for making a function fire a thread.
'''
@functools.wraps(f)
def callmeth(*args, **kwargs):
thr = worker(f, *args, **kwargs)
return thr
return callmeth
|
python
|
{
"resource": ""
}
|
q19290
|
setlogging
|
train
|
def setlogging(mlogger, defval=None):
'''
Configure synapse logging.
Args:
mlogger (logging.Logger): Reference to a logging.Logger()
defval (str): Default log level
Notes:
This calls logging.basicConfig and should only be called once per process.
Returns:
None
'''
log_level = os.getenv('SYN_LOG_LEVEL',
defval)
if log_level: # pragma: no cover
log_level = log_level.upper()
if log_level not in s_const.LOG_LEVEL_CHOICES:
raise ValueError('Invalid log level provided: {}'.format(log_level))
logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
mlogger.info('log level set to %s', log_level)
|
python
|
{
"resource": ""
}
|
q19291
|
result
|
train
|
def result(retn):
'''
Return a value or raise an exception from a retn tuple.
'''
ok, valu = retn
if ok:
return valu
name, info = valu
ctor = getattr(s_exc, name, None)
if ctor is not None:
raise ctor(**info)
info['errx'] = name
raise s_exc.SynErr(**info)
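# Worked examples of the (ok, valu) retn convention; NoSuchProp is used
# as an illustrative s_exc exception name.
assert result((True, 'woot')) == 'woot'
# result((False, ('NoSuchProp', {'prop': 'x'}))) raises s_exc.NoSuchProp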
|
python
|
{
"resource": ""
}
|
q19292
|
config
|
train
|
def config(conf, confdefs):
'''
Initialize a config dict using the given confdef tuples.
'''
conf = conf.copy()
# for now just populate defval
for name, info in confdefs:
conf.setdefault(name, info.get('defval'))
return conf
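# A worked example (the option names are illustrative): defaults are only
# applied for keys the caller did not provide.
confdefs = (
    ('cache:size', {'defval': 1000}),
    ('storm:log', {'defval': False}),
)
assert config({'storm:log': True}, confdefs) == {'storm:log': True, 'cache:size': 1000}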
|
python
|
{
"resource": ""
}
|
q19293
|
Prop.getDelOps
|
train
|
def getDelOps(self, buid):
'''
Get a list of storage operations to delete this property from the buid.
Args:
buid (bytes): The node buid.
Returns:
(tuple): The storage operations
'''
return (
('prop:del', (buid, self.form.name, self.name, self.storinfo)),
)
|
python
|
{
"resource": ""
}
|
q19294
|
Form.getLiftOps
|
train
|
def getLiftOps(self, valu, cmpr='='):
'''
Get a set of lift operations for use with an Xact.
'''
if valu is None:
iops = (('pref', b''),)
return (
('indx', ('byprop', self.pref, iops)),
)
# TODO: In an ideal world, this would get smashed down into the self.type.getLiftOps
# but since doing so breaks existing types, and fixing those could cause a cascade
# of fun failures, we'll put this off until another flag day
if cmpr == '~=':
return (
('form:re', (self.name, valu, {})),
)
lops = self.type.getLiftOps('form', cmpr, (None, self.name, valu))
if lops is not None:
return lops
iops = self.type.getIndxOps(valu, cmpr)
return (
('indx', ('byprop', self.pref, iops)),
)
|
python
|
{
"resource": ""
}
|
q19295
|
Model.addBaseType
|
train
|
def addBaseType(self, item):
'''
Add a Type instance to the data model.
'''
ctor = '.'.join([item.__class__.__module__, item.__class__.__qualname__])
self._modeldef['ctors'].append(((item.name, ctor, dict(item.opts), dict(item.info))))
self.types[item.name] = item
|
python
|
{
"resource": ""
}
|
q19296
|
Daemon.share
|
train
|
def share(self, name, item):
'''
Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath.
'''
try:
if isinstance(item, s_telepath.Aware):
item.onTeleShare(self, name)
self.shared[name] = item
except Exception:
logger.exception(f'onTeleShare() error for: {name}')
|
python
|
{
"resource": ""
}
|
q19297
|
_dayofmonth
|
train
|
def _dayofmonth(hardday, month, year):
'''
Returns a valid day of the month given the desired value.
Negative values are interpreted as offset backwards from the last day of the month, with -1 representing the
last day of the month. Out-of-range values are clamped to the first or last day of the month.
'''
newday = hardday
daysinmonth = calendar.monthrange(year, month)[1]
if newday < 0:
newday = daysinmonth + hardday + 1
newday = max(1, min(newday, daysinmonth))
return newday
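# Worked examples (February 2021 has 28 days): negative values count back
# from the end of the month, and out-of-range values clamp to the
# first or last day.
assert _dayofmonth(-1, 2, 2021) == 28
assert _dayofmonth(31, 2, 2021) == 28
assert _dayofmonth(-40, 2, 2021) == 1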
|
python
|
{
"resource": ""
}
|
q19298
|
_Appt.updateNexttime
|
train
|
def updateNexttime(self, now):
'''
Find the next time this appointment should be scheduled.
Delete any nonrecurring record that just happened.
'''
if self._recidxnexttime is not None and not self.recur:
del self.recs[self._recidxnexttime]
while self.recs and self.nexttime <= now:
lowtime = 999999999999.9
# Find the lowest next time of all of our recs (backwards, so we can delete)
for i in range(len(self.recs) - 1, -1, -1):
rec = self.recs[i]
nexttime = rec.nexttime(self.nexttime)
if nexttime == 0.0:
# We blew by and missed a fixed-year appointment, either due to clock shenanigans, this query going
# really long, or the initial requirement being in the past
logger.warning(f'Missed an appointment: {rec}')
del self.recs[i]
continue
if nexttime < lowtime:
lowtime = nexttime
lowidx = i
if not self.recs:
break
self._recidxnexttime = lowidx
self.nexttime = lowtime
if not self.recs:
self._recidxnexttime = None
self.nexttime = None
return
|
python
|
{
"resource": ""
}
|
q19299
|
Agenda.enable
|
train
|
async def enable(self):
'''
Enable cron jobs to start running, start the scheduler loop
Go through all the appointments, making sure the query is valid, and disable the ones that aren't. (We can't
evaluate queries until enabled because not all the modules are loaded yet.)
'''
if self.enabled:
return
to_delete = []
for iden, appt in self.appts.items():
try:
self.core.getStormQuery(appt.query)
except Exception as e:
logger.warning('Invalid appointment %r found in storage: %r. Disabling.', iden, e)
appt.enabled = False
for iden in to_delete:
await self.delete(iden)
self._schedtask = self.schedCoro(self._scheduleLoop())
self.enabled = True
|
python
|
{
"resource": ""
}
|