| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def _create_tensor_summary(
    name,
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
    num_thresholds=None,
    display_name=None,
    description=None,
    collections=None):
  """Build the tensor summary op backing a PR-curve summary.

  Kept separate from `op` so that the scope of `raw_data_op` is not
  embedded within `op`. Arguments are the same as for `raw_data_op`.

  Returns:
    A tensor summary that collects data for PR curves.
  """
  # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
  import tensorflow.compat.v1 as tf

  # num_thresholds is constant for every PR-curve summary written under the
  # same tag, so it lives in the summary metadata, not the tensor payload.
  summary_metadata = metadata.create_summary_metadata(
      display_name=name if display_name is None else display_name,
      description=description or '',
      num_thresholds=num_thresholds)

  # Pack all six statistics into a single float32 tensor, in the fixed
  # order: TP, FP, TN, FN, precision, recall.
  parts = (
      true_positive_counts,
      false_positive_counts,
      true_negative_counts,
      false_negative_counts,
      precision,
      recall,
  )
  combined_data = tf.stack([tf.cast(p, tf.float32) for p in parts])
  return tf.summary.tensor_summary(
      name='pr_curves',
      tensor=combined_data,
      collections=collections,
      summary_metadata=summary_metadata)
def is_sparse_vector(x):
    """Return True when ``x`` is a 2D sparse matrix whose first dimension is 1."""
    if not sp.issparse(x):
        return False
    return len(x.shape) == 2 and x.shape[0] == 1
def create_config(case=None, Exp='Dummy', Type='Tor',
                  Lim=None, Bump_posextent=None,
                  R=2.4, r=1., elong=0., Dshape=0.,
                  divlow=True, divup=True, nP=200,
                  out='object', SavePath='./'):
    """ Create easily a tofu.geom.Config object

    In tofu, a Config (short for geometrical configuration) refers to the 3D
    geometry of a fusion device.
    It includes, at least, a simple 2D polygon describing the first wall of the
    fusion chamber, and can also include other structural elements (tiles,
    limiters...) that can be non-axisymmetric.

    To create a simple Config, provide either the name of a reference test
    case, or a set of geometrical parameters (major radius, elongation...).

    This is just a tool for fast testing, if you want to create a custom
    config, use directly tofu.geom.Config and provide the parameters you want.

    Parameters
    ----------
    case : str
        The name of a reference test case, if provided, this argument is
        sufficient, the others are ignored
    Exp : str
        The name of the experiment
    Type : str
        The type of configuration (toroidal 'Tor' or linear 'Lin')
    Bump_posextent : list
        The angular (poloidal) position and extent, in the cross-section, of
        the outer bumper; defaults to [pi/4, pi/4]
    R : float
        The major radius of the center of the cross-section
    r : float
        The minor radius of the cross-section
    elong: float
        An elongation parameter (in [-1;1])
    Dshape: float
        A parameter specifying the D-shape of the cross-section (in [-1;1])
    divlow: bool
        A flag specifying whether to include a lower divertor-like shape
    divup: bool
        A flag specifying whether to include an upper divertor-like shape
    nP: int
        Number of points used to describe the cross-section polygon
    out: str
        Flag indicating whether to return:
            - 'dict'  : the polygons as a dictionary of np.ndarrays
            - 'object': the configuration as a tofu.geom.Config instance

    Return
    ------
    conf: tofu.geom.Config / dict
        Depending on the value of parameter out, either:
            - the tofu.geom.Config object created
            - a dictionary of the polygons and their pos/extent (if any)
    """
    # Use None as the default to avoid a mutable default argument shared
    # across calls; the effective default is [pi/4, pi/4].
    if Bump_posextent is None:
        Bump_posextent = [np.pi/4., np.pi/4.]

    if case is not None:
        conf = _create_config_testcase(config=case, out=out)
    else:
        poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r, elong=elong,
                                                Dshape=Dshape, divlow=divlow,
                                                divup=divup, nP=nP)
        if out == 'dict':
            conf = {'Ves': {'Poly': poly},
                    'Baffle': {'Poly': pbaffle},
                    'Bumper': {'Poly': pbump,
                               'pos': Bump_posextent[0],
                               'extent': Bump_posextent[1]}}
        else:
            ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp,
                            Name='Ves', SavePath=SavePath)
            baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim,
                            Exp=Exp, Name='Baffle', color='b',
                            SavePath=SavePath)
            bump = _core.PFC(Poly=pbump, Type=Type,
                             pos=Bump_posextent[0],
                             extent=Bump_posextent[1],
                             Exp=Exp, Name='Bumper', color='g',
                             SavePath=SavePath)
            conf = _core.Config(Name='Dummy', Exp=Exp,
                                lStruct=[ves, baf, bump],
                                SavePath=SavePath)
    return conf
def remove_arrays(code, count=1):
    """Replace array literals in ``code`` with ARRAY_LVAL placeholders.

    Returns the transformed code, a dict mapping placeholder name to the
    original bracketed text, and the next free placeholder number.
    *NOTE* has to be called AFTER remove objects.
    """
    out = []
    prev = ''
    replacements = {}
    for chunk in bracket_split(code, ['[]']):
        if chunk[0] != '[':
            out.append(chunk)
        elif is_array(prev):
            # A true array literal: swap it for a fresh placeholder name.
            placeholder = ARRAY_LVAL % count
            out.append(' ' + placeholder)
            replacements[placeholder] = chunk
            count += 1
        else:
            # Indexing brackets ("pseudo array"). They can still contain a
            # genuine array literal, e.g. a[['d'][3]] — recurse inside.
            inner, inner_repl, count = remove_arrays(chunk[1:-1], count)
            out.append('[%s]' % inner)
            replacements.update(inner_repl)
        prev = chunk
    return ''.join(out), replacements, count
def _parse(self, infile):
    """Actually parse the config file (``infile`` is a list of lines).

    Populates self (and nested Section objects) with parsed keys, sections
    and their associated comments.
    """
    temp_list_values = self.list_values
    if self.unrepr:
        self.list_values = False

    comment_list = []
    done_start = False
    this_section = self
    maxline = len(infile) - 1
    cur_index = -1
    reset_comment = False

    while cur_index < maxline:
        if reset_comment:
            comment_list = []
        cur_index += 1
        line = infile[cur_index]
        sline = line.strip()
        # do we have anything on the line ?
        if not sline or sline.startswith('#'):
            reset_comment = False
            comment_list.append(line)
            continue

        if not done_start:
            # preserve initial comment
            self.initial_comment = comment_list
            comment_list = []
            done_start = True

        reset_comment = True
        # first we check if it's a section marker
        mat = self._sectionmarker.match(line)
        if mat is not None:
            # is a section line
            (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            cur_depth = sect_open.count('[')
            if cur_depth != sect_close.count(']'):
                self._handle_error("Cannot compute the section depth at line %s.",
                                   NestingError, infile, cur_index)
                continue

            if cur_depth < this_section.depth:
                # the new section is dropping back to a previous level
                try:
                    parent = self._match_depth(this_section,
                                               cur_depth).parent
                except SyntaxError:
                    self._handle_error("Cannot compute nesting level at line %s.",
                                       NestingError, infile, cur_index)
                    continue
            elif cur_depth == this_section.depth:
                # the new section is a sibling of the current section
                parent = this_section.parent
            elif cur_depth == this_section.depth + 1:
                # the new section is a child the current section
                parent = this_section
            else:
                self._handle_error("Section too nested at line %s.",
                                   NestingError, infile, cur_index)

            sect_name = self._unquote(sect_name)
            if sect_name in parent:
                self._handle_error('Duplicate section name at line %s.',
                                   DuplicateError, infile, cur_index)
                continue

            # create the new section
            this_section = Section(
                parent,
                cur_depth,
                self,
                name=sect_name)
            parent[sect_name] = this_section
            parent.inline_comments[sect_name] = comment
            parent.comments[sect_name] = comment_list
            continue
        #
        # it's not a section marker,
        # so it should be a valid ``key = value`` line
        mat = self._keyword.match(line)
        if mat is None:
            # it neither matched as a keyword
            # or a section marker
            self._handle_error(
                'Invalid line at line "%s".',
                ParseError, infile, cur_index)
        else:
            # is a keyword value
            # value will include any inline comment
            (indent, key, value) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            # check for a multiline value
            if value[:3] in ['"""', "'''"]:
                try:
                    value, comment, cur_index = self._multiline(
                        value, infile, cur_index, maxline)
                except SyntaxError:
                    self._handle_error(
                        'Parse error in value at line %s.',
                        ParseError, infile, cur_index)
                    continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception as e:
                            # FIX: use isinstance (was ``type(e) ==``) to
                            # match the single-line branch below and to
                            # honour UnknownType subclasses.
                            if isinstance(e, UnknownType):
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                               cur_index)
                            continue
            else:
                if self.unrepr:
                    comment = ''
                    try:
                        value = unrepr(value)
                    except Exception as e:
                        if isinstance(e, UnknownType):
                            msg = 'Unknown name or type in value at line %s.'
                        else:
                            msg = 'Parse error in value at line %s.'
                        self._handle_error(msg, UnreprError, infile,
                                           cur_index)
                        continue
                else:
                    # extract comment and lists
                    try:
                        (value, comment) = self._handle_value(value)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
            #
            key = self._unquote(key)
            if key in this_section:
                self._handle_error(
                    'Duplicate keyword name at line %s.',
                    DuplicateError, infile, cur_index)
                continue
            # add the key.
            # we set unrepr because if we have got this far we will never
            # be creating a new section
            this_section.__setitem__(key, value, unrepr=True)
            this_section.inline_comments[key] = comment
            this_section.comments[key] = comment_list
            continue
    #
    if self.indent_type is None:
        # no indentation used, set the type accordingly
        self.indent_type = ''

    # preserve the final comment
    if not self and not self.initial_comment:
        self.initial_comment = comment_list
    elif not reset_comment:
        self.final_comment = comment_list
    self.list_values = temp_list_values
def _format_issue(issue):
'''
Helper function to format API return information into a more manageable
and useful dictionary for issue information.
issue
The issue to format.
'''
ret = {'id': issue.get('id'),
'issue_number': issue.get('number'),
'state': issue.get('state'),
'title': issue.get('title'),
'user': issue.get('user').get('login'),
'html_url': issue.get('html_url')}
assignee = issue.get('assignee')
if assignee:
assignee = assignee.get('login')
labels = issue.get('labels')
label_names = []
for label in labels:
label_names.append(label.get('name'))
milestone = issue.get('milestone')
if milestone:
milestone = milestone.get('title')
ret['assignee'] = assignee
ret['labels'] = label_names
ret['milestone'] = milestone
return ret | Helper function to format API return information into a more manageable
and useful dictionary for issue information.
issue
The issue to format. |
def set_seeds(self, seeds):
    """
    Function for manual seed setting. Sets variable seeds and prepares
    voxels for density model.

    :param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
        3 - object just hard constraints, no model training, 4 - background
        just hard constraints, no model training)
    :raise ValueError: if ``seeds`` shape does not match the input image
    """
    if self.img.shape != seeds.shape:
        # Raise the specific ValueError instead of the generic Exception;
        # callers catching Exception still catch this.
        raise ValueError("Seeds must be same size as input image")
    self.seeds = seeds.astype("int8")
    # Pre-extract intensity samples for the object (1) and background (2)
    # labels; these feed the density-model training.
    self.voxels1 = self.img[self.seeds == 1]
    self.voxels2 = self.img[self.seeds == 2]
def list_files(
        client, fileshare, prefix, recursive, timeout=None, snapshot=None):
    # type: (azure.storage.file.FileService, str, str, bool, int, str) ->
    #       azure.storage.file.models.File
    """List files in path
    :param azure.storage.file.FileService client: file client
    :param str fileshare: file share
    :param str prefix: path prefix
    :param bool recursive: recursive
    :param int timeout: timeout
    :param str snapshot: snapshot
    :rtype: azure.storage.file.models.File
    :return: generator of files
    """
    # if single file, then yield file and return
    _check = check_if_single_file(client, fileshare, prefix, timeout)
    if _check[0]:
        yield _check[1]
        return
    # get snapshot from fileshare
    if snapshot is None:
        fileshare, snapshot = \
            blobxfer.util.parse_fileshare_or_file_snapshot_parameter(fileshare)
    # get snapshot from prefix
    if snapshot is None:
        prefix, snapshot = \
            blobxfer.util.parse_fileshare_or_file_snapshot_parameter(
                prefix)
    # else recursively list from prefix path
    # (iterative walk using an explicit directory stack)
    dirs = [prefix]
    while len(dirs) > 0:
        dir = dirs.pop()
        files = client.list_directories_and_files(
            share_name=fileshare,
            directory_name=dir,
            timeout=timeout,
            snapshot=snapshot,
        )
        for file in files:
            fspath = str(
                pathlib.Path(dir if dir is not None else '') / file.name)
            # the listing mixes File and Directory entries; the exact type
            # check separates files from directories
            if type(file) == azure.storage.file.models.File:
                fsprop = client.get_file_properties(
                    share_name=fileshare,
                    directory_name=None,
                    file_name=fspath,
                    timeout=timeout,
                    snapshot=snapshot,
                )
                yield fsprop
            else:
                # a directory: descend only when a recursive listing was asked for
                if recursive:
                    dirs.append(fspath)
def plotplanarPotentials(Pot, *args, **kwargs):
    """
    NAME:

       plotplanarPotentials

    PURPOSE:

       plot a planar potential

    INPUT:

       Rrange - range (can be Quantity)

       xrange, yrange - if relevant (can be Quantity)

       grid, gridx, gridy - number of points to plot

       savefilename - save to or restore from this savefile (pickle)

       ncontours - number of contours to plot (if applicable)

       +bovy_plot(*args,**kwargs) or bovy_dens2d(**kwargs)

    OUTPUT:

       plot to output device

    HISTORY:

       2010-07-13 - Written - Bovy (NYU)
    """
    Pot = flatten(Pot)
    Rrange = kwargs.pop('Rrange', [0.01, 5.])
    xrange = kwargs.pop('xrange', [-5., 5.])
    yrange = kwargs.pop('yrange', [-5., 5.])
    if _APY_LOADED:
        # Convert any astropy Quantity range bounds to internal units.
        if hasattr(Pot, '_ro'):
            tro = Pot._ro
        else:
            tro = Pot[0]._ro
        if isinstance(Rrange[0], units.Quantity):
            Rrange[0] = Rrange[0].to(units.kpc).value / tro
        if isinstance(Rrange[1], units.Quantity):
            Rrange[1] = Rrange[1].to(units.kpc).value / tro
        if isinstance(xrange[0], units.Quantity):
            xrange[0] = xrange[0].to(units.kpc).value / tro
        if isinstance(xrange[1], units.Quantity):
            xrange[1] = xrange[1].to(units.kpc).value / tro
        if isinstance(yrange[0], units.Quantity):
            yrange[0] = yrange[0].to(units.kpc).value / tro
        if isinstance(yrange[1], units.Quantity):
            yrange[1] = yrange[1].to(units.kpc).value / tro
    grid = kwargs.pop('grid', 100)
    gridx = kwargs.pop('gridx', 100)
    gridy = kwargs.pop('gridy', gridx)
    savefilename = kwargs.pop('savefilename', None)
    isList = isinstance(Pot, list)
    nonAxi = ((isList and Pot[0].isNonAxi) or (not isList and Pot.isNonAxi))
    if savefilename is not None and os.path.exists(savefilename):
        # Restore a previously computed potential grid from the pickle.
        print("Restoring savefile " + savefilename + " ...")
        savefile = open(savefilename, 'rb')
        potR = pickle.load(savefile)
        if nonAxi:
            xs = pickle.load(savefile)
            ys = pickle.load(savefile)
        else:
            Rs = pickle.load(savefile)
        savefile.close()
    else:
        if nonAxi:
            xs = nu.linspace(xrange[0], xrange[1], gridx)
            ys = nu.linspace(yrange[0], yrange[1], gridy)
            potR = nu.zeros((gridx, gridy))
            for ii in range(gridx):
                for jj in range(gridy):
                    thisR = nu.sqrt(xs[ii]**2. + ys[jj]**2.)
                    if xs[ii] >= 0.:
                        thisphi = nu.arcsin(ys[jj] / thisR)
                    else:
                        thisphi = -nu.arcsin(ys[jj] / thisR) + nu.pi
                    potR[ii, jj] = evaluateplanarPotentials(Pot, thisR,
                                                            phi=thisphi,
                                                            use_physical=False)
        else:
            Rs = nu.linspace(Rrange[0], Rrange[1], grid)
            potR = nu.zeros(grid)
            for ii in range(grid):
                potR[ii] = evaluateplanarPotentials(Pot, Rs[ii],
                                                    use_physical=False)
        if savefilename is not None:
            print("Writing planar savefile " + savefilename + " ...")
            savefile = open(savefilename, 'wb')
            pickle.dump(potR, savefile)
            if nonAxi:
                pickle.dump(xs, savefile)
                pickle.dump(ys, savefile)
            else:
                pickle.dump(Rs, savefile)
            savefile.close()
    if nonAxi:
        # BUGFIX: the key tested used to be misspelled ('orogin'), so the
        # 'lower' default silently overwrote any caller-supplied 'origin'.
        if 'origin' not in kwargs:
            kwargs['origin'] = 'lower'
        if 'cmap' not in kwargs:
            kwargs['cmap'] = 'gist_yarg'
        if 'contours' not in kwargs:
            kwargs['contours'] = True
        if 'xlabel' not in kwargs:
            kwargs['xlabel'] = r"$x / R_0$"
        if 'ylabel' not in kwargs:
            kwargs['ylabel'] = "$y / R_0$"
        if 'aspect' not in kwargs:
            kwargs['aspect'] = 1.
        if 'cntrls' not in kwargs:
            kwargs['cntrls'] = '-'
        ncontours = kwargs.pop('ncontours', 10)
        if 'levels' not in kwargs:
            kwargs['levels'] = nu.linspace(nu.nanmin(potR), nu.nanmax(potR),
                                           ncontours)
        return plot.bovy_dens2d(potR.T,
                                xrange=xrange,
                                yrange=yrange, **kwargs)
    else:
        kwargs['xlabel'] = r"$R/R_0$"
        kwargs['ylabel'] = r"$\Phi(R)$"
        kwargs['xrange'] = Rrange
        return plot.bovy_plot(Rs, potR, *args, **kwargs)
def sbo_case_insensitive(self):
    """Matching packages distinguish between uppercase and
    lowercase for sbo repository.

    When the "--case-ins" flag is set, rewrites ``self.name`` with the
    repository's canonical casing for the package.
    """
    if "--case-ins" in self.flag:
        data = SBoGrep(name="").names()
        data_dict = Utils().case_sensitive(data)
        # .items() instead of the Python 2-only .iteritems() so this also
        # runs on Python 3.
        for key, value in data_dict.items():
            if key == self.name.lower():
                self.name = value
def __get_pending_revisions(self):
    """
    Get all the pending revisions after the current time
    :return: A list of revisions
    :rtype: list
    """
    # Current wall-clock time as a POSIX timestamp; revisions whose
    # time-of-arrival ("toa") precedes it are due for processing.
    dttime = time.mktime(datetime.datetime.now().timetuple())
    # NOTE(review): generator-style coroutine (yield on the query,
    # ``raise Return(...)`` for the result) — presumably decorated with
    # @gen.coroutine in the enclosing class; confirm.
    changes = yield self.revisions.find({
        "toa" : {
            "$lt" : dttime,
        },
        "processed": False,
        "inProcess": None
    })
    if len(changes) > 0:
        # Mark the revisions we are about to hand out as in-process so a
        # concurrent poll does not pick up the same ones.
        yield self.set_all_revisions_to_in_process([change.get("id") for change in changes])
    raise Return(changes)
def intranges_from_list(list_):
    """Represent a list of integers as a sequence of ranges:
    ((start_0, end_0), (start_1, end_1), ...), such that the original
    integers are exactly those x such that start_i <= x < end_i for some i.
    Ranges are encoded as single integers (start << 32 | end), not as tuples.
    """
    values = sorted(list_)
    ranges = []
    run_start = 0
    for idx, value in enumerate(values):
        # Keep extending the current run while the next value is contiguous.
        if idx + 1 < len(values) and value == values[idx + 1] - 1:
            continue
        run = values[run_start:idx + 1]
        ranges.append(_encode_range(run[0], run[-1] + 1))
        run_start = idx + 1
    return tuple(ranges)
def remove_non_ascii(input_string):
    """Strip every non-ASCII character from ``input_string``.

    Source: http://stackoverflow.com/a/1342373
    """
    return "".join(ch for ch in input_string if ord(ch) < 128)
def _generate_base_anchors(base_size, scales, ratios):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """
    # Reference window in (x1, y1, x2, y2) form; for base_size == 16 this
    # is (0, 0, 15, 15).
    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)
    per_ratio = [
        AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)
        for i in range(ratio_anchors.shape[0])
    ]
    return np.vstack(per_ratio)
def _get_firewall_policy(kwargs):
    '''
    Construct FirewallPolicy and FirewallPolicyRule instances from passed
    arguments
    '''
    firewallPolicy = FirewallPolicy(
        name=kwargs.get('name', None),
        description=kwargs.get('description', None)
    )

    # 'rules' arrives as a JSON string describing a list of rule dicts.
    jdata = json.loads(kwargs.get('rules', None))

    # Copy only the recognized fields of each rule into its rule_set;
    # replaces the previous repetitive if-chain.
    rule_fields = ('protocol', 'port_from', 'port_to', 'source',
                   'action', 'description', 'port')
    rules = []
    for fwpr in jdata:
        firewallPolicyRule = FirewallPolicyRule()
        for field in rule_fields:
            if field in fwpr:
                firewallPolicyRule.rule_set[field] = fwpr[field]
        rules.append(firewallPolicyRule)

    return {'firewall_policy': firewallPolicy, 'firewall_policy_rules': rules}
def init_properties(self) -> 'PygalleBaseClass':
    """ Initialize the Pigalle properties.

    Creates the per-instance ``_pigalle`` store with two empty namespaces:
    one for internal state and one for public state.

    # Returns:
        PygalleBaseClass: The current instance (enables call chaining).
    """
    # NOTE(review): ``__KEYS`` is name-mangled, so this method must live
    # inside the PygalleBaseClass body — confirm when re-embedding.
    self._pigalle = {
        PygalleBaseClass.__KEYS.INTERNALS: dict(),
        PygalleBaseClass.__KEYS.PUBLIC: dict()
    }
    return self
def propagate_paths_and_modules(self, context, paths, modules):
    """
    One size fits all method to ensure a target context has been preloaded
    with a set of small files and Python modules.
    """
    # Push each small file individually, then forward the modules in bulk.
    for file_path in paths:
        self.propagate_to(context, mitogen.core.to_text(file_path))
    self.router.responder.forward_modules(context, modules)
def dict_deep_merge(tgt, src):
    """
    Utility function to merge the source dictionary `src` to the target
    dictionary recursively

    Note:
        The type of the values in the dictionary can only be `dict` or `list`

    Parameters:
        tgt (dict): The target dictionary
        src (dict): The source dictionary
    """
    for key, src_val in src.items():
        if key not in tgt:
            # New key: take a deep copy so tgt never aliases src's data.
            tgt[key] = deepcopy(src_val)
        elif isinstance(tgt[key], dict) and isinstance(src_val, dict):
            # Both sides are dicts: merge one level deeper.
            dict_deep_merge(tgt[key], src_val)
        else:
            # Otherwise both sides are lists: append copies of src's items.
            tgt[key].extend(deepcopy(src_val))
def _ParseComment(self, structure):
"""Parses a comment.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file.
"""
if structure[1] == 'Date:':
self._year, self._month, self._day_of_month, _, _, _ = structure.date_time
elif structure[1] == 'Fields:':
self._ParseFieldsMetadata(structure) | Parses a comment.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file. |
def get_proxy_session(self):
    """Gets a ``ProxySession`` which is responsible for acquiring authentication credentials on behalf of a service client.

    :return: a proxy session for this service
    :rtype: ``osid.proxy.ProxySession``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proxy()`` is ``false``

    *compliance: optional -- This method must be implemented if ``supports_proxy()`` is ``true``.*
    """
    if not self.supports_proxy():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # re-raised as-is; a strictly OSID-compliant implementation would
        # wrap this in OperationFailed
        raise  # OperationFailed()
    try:
        session = sessions.ProxySession()
    except AttributeError:
        # sessions module lacks ProxySession; same re-raise convention
        raise  # OperationFailed()
    return session
def element_wise(self, func, *args, **kwargs):
    """Apply a function to each matrix element and return the result in a
    new operator matrix of the same shape.

    Args:
        func (FunctionType): A function to be applied to each element. It
            must take the element as its first argument.
        args: Additional positional arguments to be passed to `func`
        kwargs: Additional keyword arguments to be passed to `func`

    Returns:
        Matrix: Matrix with results of `func`, applied element-wise.
    """
    original_shape = self.shape
    # Flatten, apply func to every element, then restore the shape.
    mapped = [func(elem, *args, **kwargs) for elem in self.matrix.ravel()]
    return Matrix(np_array(mapped).reshape(original_shape))
def parse_set(string):
    """Parse set from comma separated string."""
    stripped = string.strip()
    # An all-whitespace input yields the empty set rather than {''}.
    return set(stripped.split(",")) if stripped else set()
def pack_column_flat(self, value, components=None, offset=False):
    """
    Flatten per-component arrays into a single stacked array.

    :param value: dict mapping component name -> array, or a list of arrays
        ordered like ``self._dict`` keys
    :param components: component name or list of names selecting (and
        ordering) which entries of ``value`` to pack; defaults to all
    :param offset: when True, add a running offset of vertex counts
        (``len(self[c]['vertices'])``) to each component's values
    :return: ``np.hstack`` of the selected arrays (``np.vstack`` when the
        per-component arrays have more than one dimension)
    :raises TypeError: if ``components`` is neither a list nor a string
    """
    if components:
        if isinstance(components, str):
            components = [components]
        elif isinstance(components, list):
            components = components
        else:
            raise TypeError("components should be list or string, not {}".format(type(components)))
    elif isinstance(value, dict):
        # list() is required on Python 3: dict.keys() returns a view,
        # which does not support indexing (components[0] below).
        components = list(value.keys())
    elif isinstance(value, list):
        components = list(self._dict.keys())
        value = {c: v for c, v in zip(components, value)}

    if offset:
        values = []
        offsetN = 0
        for c in components:
            values.append(value[c] + offsetN)
            offsetN += len(self[c]['vertices'])
    else:
        values = [value[c] for c in components]

    if len(value[components[0]].shape) > 1:
        return np.vstack(values)
    else:
        return np.hstack(values)
def ilxSearches(self,
                ilx_ids=None,
                LIMIT=25,
                _print=True,
                crawl=False):
    """parameters( data = "list of ilx_ids" )"""
    # Build one search URL per ILX id, converting the curie prefix
    # 'ILX:' into the API's 'ilx_' form.
    template = self.base_url + "/api/1/ilx/search/identifier/{identifier}?key={APIKEY}"
    urls = [
        template.format(identifier=ilx_id.replace('ILX:', 'ilx_'),
                        APIKEY=self.api_key)
        for ilx_id in ilx_ids
    ]
    return self.get(
        urls=urls,
        LIMIT=LIMIT,
        action='Searching For Terms',
        crawl=crawl,
        _print=_print)
def show_xys(self, xs, ys)->None:
    "Show the `xs` (inputs) and `ys` (targets)."
    from IPython.display import display, HTML
    # Column headers: the input's field names plus a final 'target' column.
    items,names = [], xs[0].names + ['target']
    for i, (x,y) in enumerate(zip(xs,ys)):
        res = []
        # Categorical / continuous parts may be empty tensors.
        cats = x.cats if len(x.cats.size()) > 0 else []
        conts = x.conts if len(x.conts.size()) > 0 else []
        # Map each categorical code back to its human-readable class label.
        for c, n in zip(cats, x.names[:len(cats)]):
            res.append(x.classes[n][c])
        res += [f'{c:.4f}' for c in conts] + [y]
        items.append(res)
    items = np.array(items)
    df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
    # max_colwidth of -1 disables truncation so full values render in HTML.
    with pd.option_context('display.max_colwidth', -1):
        display(HTML(df.to_html(index=False)))
def findLowest(self, symorders):
    """Find the original position of the first lowest tie in
    ``symorders``, or -1 if there are no ties.

    A tie is a pair of equal values; for the smallest tied value, the
    index returned is that of the earlier entry in the stable
    (value, original-index) ordering.
    """
    # Pair each value with its original index so equal values sort stably
    # by position. Replaces the Python 2-only ``map(None, ...)`` zip idiom.
    stable_symorders = sorted(zip(symorders, range(len(symorders))))
    previous_value = None
    for index in range(len(stable_symorders)):
        if stable_symorders[index][0] == previous_value:
            # Duplicate of the previous (smaller-indexed) entry: report
            # that earlier entry's original position.
            return stable_symorders[index - 1][1]
        previous_value = stable_symorders[index][0]
    return -1
def easeOutElastic(n, amplitude=1, period=0.3):
    """An elastic tween function that overshoots the destination and then
    "rubber bands" into the destination.

    Args:
      n (float): The time progress, starting at 0.0 and ending at 1.0.

    Returns:
      (float) The line progress, starting at 0.0 and ending at 1.0. Suitable
      for passing to getPointOnLine().
    """
    _checkRange(n)

    if amplitude < 1:
        # Amplitudes below 1 cannot overshoot; clamp and use a fixed phase.
        amplitude = 1
        phase_shift = period / 4
    else:
        phase_shift = period / (2 * math.pi) * math.asin(1 / amplitude)

    decay = amplitude * 2 ** (-10 * n)
    return decay * math.sin((n - phase_shift) * (2 * math.pi / period)) + 1
def _remove_mapper_from_plotter(plotter, actor, reset_camera):
    """Remove this actor's mapper from the given plotter's
    _scalar_bar_mappers, tearing down any scalar bar left without mappers."""
    try:
        mapper = actor.GetMapper()
    except AttributeError:
        # actor exposes no mapper accessor; nothing to clean up
        return
    # iterate over a copied key list: entries may be popped mid-loop
    for name in list(plotter._scalar_bar_mappers.keys()):
        try:
            plotter._scalar_bar_mappers[name].remove(mapper)
        except ValueError:
            # mapper was not registered under this scalar bar name
            pass
        if len(plotter._scalar_bar_mappers[name]) < 1:
            # last mapper gone: release the slot and drop the scalar bar's
            # bookkeeping plus its on-screen actor
            slot = plotter._scalar_bar_slot_lookup.pop(name)
            plotter._scalar_bar_mappers.pop(name)
            plotter._scalar_bar_ranges.pop(name)
            plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera)
            plotter._scalar_bar_slots.add(slot)
    return
def plot(self, *args, **kwargs):
    """
    Plot latent space X in 1D:
    See GPy.plotting.matplot_dep.variational_plots
    """
    import sys
    # matplotlib must already have been imported by the caller; it is not
    # imported here so the plotting backend stays an optional dependency.
    assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
    from ...plotting.matplot_dep import variational_plots
    return variational_plots.plot_SpikeSlab(self,*args, **kwargs)
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    All matching entries are processed, including sub-path entries for zip
    archives embedded in other zip archives. ``updater`` is a callable taking
    a cache entry key and the original entry (which has already been removed
    from the cache); it may return a replacement entry to insert, or None to
    leave the entry removed. With no updater, entries are simply removed.
    """
    for entry_key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # pypy's custom zipimport._zip_directory_cache does not implement the
        # complete dict interface (e.g. no dict.pop()), so stick to plain
        # get/del-style access here. Background:
        # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        removed_entry = cache[entry_key]
        del cache[entry_key]
        replacement = updater(entry_key, removed_entry) if updater else None
        if replacement is not None:
            cache[entry_key] = replacement
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None. |
def required_max_memory(cls, id, memory):
    """
    Recommend a max_memory setting for this vm given memory.

    Returns None when the VM is not running or already has the recommended
    setting. The recommendation is capped from below at 2048 and rounded up
    to a power of two, because fixing max_memory too high would make page
    table allocation too costly for small memory profiles.
    """
    # Next power of two >= memory, but never below 2048.
    recommended = int(max(2 ** math.ceil(math.log(memory, 2)), 2048))
    vm_info = cls.info(id)
    is_running = vm_info['state'] == 'running'
    if is_running and vm_info['vm_max_memory'] != recommended:
        return recommended
    return None
VM already has a nice setting, return None. The max_memory
param cannot be fixed too high, because page table allocation
would cost too much for small memory profile. Use a range as below. |
def average_sources(source_encoded: mx.sym.Symbol, source_encoded_length: mx.sym.Symbol) -> mx.nd.NDArray:
    """
    Calculate the average of encoded sources taking into account their lengths.
    :param source_encoded: Encoder representation for n elements. Shape: (n, source_encoded_length, hidden_size).
    :param source_encoded_length: A vector of encoded sequence lengths. Shape: (n,).
    :return: Average vectors. Shape(n, hidden_size).
    """
    # Zero out positions beyond each sequence's true length so that padding
    # does not contribute to the sum below.
    # source_masked: (n, source_encoded_length, hidden_size)
    source_masked = mx.sym.SequenceMask(data=source_encoded,
                                        axis=1,
                                        sequence_length=source_encoded_length,
                                        use_sequence_length=True,
                                        value=0.)
    # calculate the proper means of encoded sources
    # Sum over the time axis, then divide each row by its true length
    # (reshaped to (n, 1) so the division broadcasts over hidden_size).
    averaged = mx.sym.broadcast_div(mx.sym.sum(source_masked, axis=1, keepdims=False),
                                    mx.sym.reshape(source_encoded_length, shape=(-1, 1)))
    return averaged | Calculate the average of encoded sources taking into account their lengths.
:param source_encoded: Encoder representation for n elements. Shape: (n, source_encoded_length, hidden_size).
:param source_encoded_length: A vector of encoded sequence lengths. Shape: (n,).
:return: Average vectors. Shape(n, hidden_size). |
def get_header(self, service_id, version_number, name):
    """Retrieves a Header object by name."""
    url = "/service/%s/version/%d/header/%s" % (service_id, version_number, name)
    return FastlyHeader(self, self._fetch(url))
def u2i(uint32):
    """
    Converts a 32 bit unsigned number to signed.
    uint32:= an unsigned 32 bit number
    ...
    print(u2i(4294967272))
    -24
    print(u2i(37))
    37
    ...
    """
    mask = (2 ** 32) - 1
    # High bit set -> negative value in two's complement; otherwise just
    # truncate to 32 bits.
    if uint32 & (1 << 31):
        return uint32 | ~mask
    return uint32 & mask
uint32:= an unsigned 32 bit number
...
print(u2i(4294967272))
-24
print(u2i(37))
37
... |
def value_equality(cls: type = None,
                   *,
                   unhashable: bool = False,
                   distinct_child_types: bool = False,
                   manual_cls: bool = False,
                   approximate: bool = False
                   ) -> Union[Callable[[type], type], type]:
    """Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.
    _value_equality_values_ is a method that the decorated class must implement.
    _value_equality_approximate_values_ is a method that the decorated class
    might implement if special support for approximate equality is required.
    This is only used when approximate argument is set. When approximate
    argument is set and _value_equality_approximate_values_ is not defined,
    _value_equality_values_ values are used for approximate equality.
    For example, this can be used to compare periodic values like angles: the
    angle value can be wrapped with `PeriodicValue`. When returned as part of
    approximate values a special normalization will be done automatically to
    guarantee correctness.
    Note that the type of the decorated value is included as part of the value
    equality values. This is so that completely separate classes with identical
    equality values (e.g. a Point2D and a Vector2D) don't compare as equal.
    Further note that this means that child types of the decorated type will be
    considered equal to each other, though this behavior can be changed via
    the 'distinct_child_types` argument. The type logic is implemented behind
    the scenes by a `_value_equality_values_cls_` method added to the class.
    Args:
        cls: The type to decorate. Automatically passed in by python when using
            the @cirq.value_equality decorator notation on a class.
        unhashable: When set, the __hash__ method will be set to None instead of
            to a hash of the equality class and equality values. Useful for
            mutable types such as dictionaries.
        distinct_child_types: When set, classes that inherit from the decorated
            class will not be considered equal to it. Also, different child
            classes will not be considered equal to each other. Useful for when
            the decorated class is an abstract class or trait that is helping to
            define equality for many conceptually distinct concrete classes.
        manual_cls: When set, the method '_value_equality_values_cls_' must be
            implemented. This allows a new class to compare as equal to another
            existing class that is also using value equality, by having the new
            class return the existing class' type.
            Incompatible with `distinct_child_types`.
        approximate: When set, the decorated class will be enhanced with
            `_approx_eq_` implementation and thus start to support the
            `SupportsApproximateEquality` protocol.
    """
    # If keyword arguments were specified, python invokes the decorator method
    # without a `cls` argument, then passes `cls` into the result.
    if cls is None:
        return lambda deferred_cls: value_equality(deferred_cls,
                                                   unhashable=unhashable,
                                                   manual_cls=manual_cls,
                                                   distinct_child_types=
                                                   distinct_child_types,
                                                   approximate=approximate)
    if distinct_child_types and manual_cls:
        raise ValueError("'distinct_child_types' is "
                         "incompatible with 'manual_cls")
    values_getter = getattr(cls, '_value_equality_values_', None)
    if values_getter is None:
        raise TypeError('The @cirq.value_equality decorator requires a '
                        '_value_equality_values_ method to be defined.')
    # Install _value_equality_values_cls_ according to the chosen policy:
    # per-instance type (distinct_child_types), user-supplied (manual_cls),
    # or the decorated class itself (default).
    if distinct_child_types:
        setattr(cls, '_value_equality_values_cls_', lambda self: type(self))
    elif manual_cls:
        cls_getter = getattr(cls, '_value_equality_values_cls_', None)
        if cls_getter is None:
            raise TypeError('The @cirq.value_equality decorator requires a '
                            '_value_equality_values_cls_ method to be defined '
                            'when "manual_cls" is set.')
    else:
        setattr(cls, '_value_equality_values_cls_', lambda self: cls)
    # __hash__ = None marks the class unhashable (matching mutable types).
    setattr(cls, '__hash__', None if unhashable else _value_equality_hash)
    setattr(cls, '__eq__', _value_equality_eq)
    setattr(cls, '__ne__', _value_equality_ne)
    if approximate:
        # Fall back to the exact equality values when no dedicated
        # approximate-values method was provided by the class.
        if not hasattr(cls, '_value_equality_approximate_values_'):
            setattr(cls, '_value_equality_approximate_values_', values_getter)
        setattr(cls, '_approx_eq_', _value_equality_approx_eq)
    return cls | Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.
_value_equality_values_ is a method that the decorated class must implement.
_value_equality_approximate_values_ is a method that the decorated class
might implement if special support for approximate equality is required.
This is only used when approximate argument is set. When approximate
argument is set and _value_equality_approximate_values_ is not defined,
_value_equality_values_ values are used for approximate equality.
For example, this can be used to compare periodic values like angles: the
angle value can be wrapped with `PeriodicValue`. When returned as part of
approximate values a special normalization will be done automatically to
guarantee correctness.
Note that the type of the decorated value is included as part of the value
equality values. This is so that completely separate classes with identical
equality values (e.g. a Point2D and a Vector2D) don't compare as equal.
Further note that this means that child types of the decorated type will be
considered equal to each other, though this behavior can be changed via
the 'distinct_child_types` argument. The type logic is implemented behind
the scenes by a `_value_equality_values_cls_` method added to the class.
Args:
cls: The type to decorate. Automatically passed in by python when using
the @cirq.value_equality decorator notation on a class.
unhashable: When set, the __hash__ method will be set to None instead of
to a hash of the equality class and equality values. Useful for
mutable types such as dictionaries.
distinct_child_types: When set, classes that inherit from the decorated
class will not be considered equal to it. Also, different child
classes will not be considered equal to each other. Useful for when
the decorated class is an abstract class or trait that is helping to
define equality for many conceptually distinct concrete classes.
manual_cls: When set, the method '_value_equality_values_cls_' must be
implemented. This allows a new class to compare as equal to another
existing class that is also using value equality, by having the new
class return the existing class' type.
Incompatible with `distinct_child_types`.
approximate: When set, the decorated class will be enhanced with
`_approx_eq_` implementation and thus start to support the
`SupportsApproximateEquality` protocol. |
def _output_format(cls, func, override=None):
""" Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
"""
@wraps(func)
def _format_wrapper(self, *args, **kwargs):
call_response, data_key, meta_data_key = func(
self, *args, **kwargs)
if 'json' in self.output_format.lower() or 'pandas' \
in self.output_format.lower():
data = call_response[data_key]
if meta_data_key is not None:
meta_data = call_response[meta_data_key]
else:
meta_data = None
# Allow to override the output parameter in the call
if override is None:
output_format = self.output_format.lower()
elif 'json' or 'pandas' in override.lower():
output_format = override.lower()
# Choose output format
if output_format == 'json':
return data, meta_data
elif output_format == 'pandas':
if isinstance(data, list):
# If the call returns a list, then we will append them
# in the resulting data frame. If in the future
# alphavantage decides to do more with returning arrays
# this might become buggy. For now will do the trick.
data_array = []
for val in data:
data_array.append([v for _, v in val.items()])
data_pandas = pandas.DataFrame(data_array, columns=[
k for k, _ in data[0].items()])
else:
data_pandas = pandas.DataFrame.from_dict(data,
orient='index',
dtype=float)
data_pandas.index.name = 'date'
if 'integer' in self.indexing_type:
# Set Date as an actual column so a new numerical index
# will be created, but only when specified by the user.
data_pandas.reset_index(level=0, inplace=True)
return data_pandas, meta_data
elif 'csv' in self.output_format.lower():
return call_response, None
else:
raise ValueError('Format: {} is not supported'.format(
self.output_format))
return _format_wrapper | Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None |
def build_from_node(package, node):
    """
    Compile a Quilt data package from an existing package node.

    :param package: package spec string; subpaths are allowed.
    :param node: root node to compile; must be a GroupNode unless a
        subpath was given in ``package``.
    :raises CommandException: on invalid metadata or store failures.
    """
    team, owner, pkg, subpath = parse_package(package, allow_subpath=True)
    _check_team_id(team)
    store = PackageStore()
    pkg_root = get_or_create_package(store, team, owner, pkg, subpath)
    if not subpath and not isinstance(node, nodes.GroupNode):
        raise CommandException("Top-level node must be a group")
    # Recursively walk the node tree, registering each group/data node
    # with the store under its dotted path.
    def _process_node(node, path):
        if not isinstance(node._meta, dict):
            raise CommandException(
                "Error in %s: value must be a dictionary" % '.'.join(path + ['_meta'])
            )
        meta = dict(node._meta)
        # System metadata (filepath/transform) is reserved; it is stripped
        # from the user metadata and handled separately below.
        system_meta = meta.pop(SYSTEM_METADATA, {})
        if not isinstance(system_meta, dict):
            raise CommandException(
                "Error in %s: %s overwritten. %s is a reserved metadata key. Try a different key." %
                ('.'.join(path + ['_meta']), SYSTEM_METADATA, SYSTEM_METADATA)
            )
        if isinstance(node, nodes.GroupNode):
            store.add_to_package_group(pkg_root, path, meta)
            for key, child in node._items():
                _process_node(child, path + [key])
        elif isinstance(node, nodes.DataNode):
            # TODO: Reuse existing fragments if we have them.
            data = node._data()
            filepath = system_meta.get('filepath')
            transform = system_meta.get('transform')
            # Dispatch on the node's underlying data type.
            if isinstance(data, pd.DataFrame):
                store.add_to_package_df(pkg_root, data, path, TargetType.PANDAS, filepath, transform, meta)
            elif isinstance(data, np.ndarray):
                store.add_to_package_numpy(pkg_root, data, path, TargetType.NUMPY, filepath, transform, meta)
            elif isinstance(data, string_types):
                store.add_to_package_file(pkg_root, data, path, TargetType.FILE, filepath, transform, meta)
            else:
                assert False, "Unexpected data type: %r" % data
        else:
            assert False, "Unexpected node type: %r" % node
    try:
        _process_node(node, subpath)
    except StoreException as ex:
        raise CommandException("Failed to build the package: %s" % ex)
    store.save_package_contents(pkg_root, team, owner, pkg) | Compile a Quilt data package from an existing package node. |
def _start_server(self, *args):
    """Run the node local server.

    Binds a Server to (self.host, self.port) and registers this component
    with it. TLS is enabled whenever a certificate is configured.
    """
    self.log("Starting server", args)
    # A configured certificate implies a secure (SSL) server.
    secure = self.certificate is not None
    if secure:
        self.log("Running SSL server with cert:", self.certificate)
    else:
        self.log("Running insecure server without SSL. Do not use without SSL proxy in production!", lvl=warn)
    try:
        self.server = Server(
            (self.host, self.port),
            secure=secure,
            certfile=self.certificate # ,
            # inherit=True
        ).register(self)
    except PermissionError:
        # Typically raised when binding a privileged (<1024) port without
        # sufficient permissions.
        self.log('Could not open (privileged?) port, check '
                 'permissions!', lvl=critical) | Run the node local server |
def add_albumart(albumart, song_title):
    '''
    Adds the album art to the song.

    :param albumart: URL of the album art image (PNG data expected).
    :param song_title: path of the MP3 file to tag.
    :returns: None when the album art could not be fetched.
    '''
    try:
        # Fetch the artwork and make sure the HTTP response is closed even
        # on a partial-read failure (the response was previously leaked and
        # read errors escaped the handler).
        response = urlopen(albumart)  # Gets album art from url
        try:
            albumart_data = response.read()
        finally:
            response.close()
    except Exception:
        log.log_error("* Could not add album art", indented=True)
        return None
    audio = EasyMP3(song_title, ID3=ID3)
    try:
        audio.add_tags()
    except _util.error:
        # Tags already exist; nothing to do.
        pass
    audio.tags.add(
        APIC(
            encoding=3,  # UTF-8
            mime='image/png',
            type=3,  # 3 is for album art
            desc='Cover',
            data=albumart_data  # the fetched artwork bytes
        )
    )
    audio.save()
    log.log("> Added album art")
def fill_luis_event_properties(
    self,
    recognizer_result: RecognizerResult,
    turn_context: TurnContext,
    telemetry_properties: Dict[str, str] = None,
) -> Dict[str, str]:
    """Fills the event properties for LuisResult event for telemetry.
    These properties are logged when the recognizer is called.
    :param recognizer_result: Last activity sent from user.
    :type recognizer_result: RecognizerResult
    :param turn_context: Context object containing information for a single turn of conversation with a user.
    :type turn_context: TurnContext
    :param telemetry_properties: Additional properties to be logged to telemetry with the LuisResult event, defaults to None
    :type telemetry_properties: Dict[str, str], optional
    :return: A dictionary that is sent as "Properties" to IBotTelemetryClient.TrackEvent method for the BotMessageSend event.
    :rtype: Dict[str, str]
    """
    intents = recognizer_result.intents
    # Rank intent names by score (descending) and keep the top two.
    top_two_intents = (
        sorted(intents.keys(), key=lambda k: intents[k].score, reverse=True)[:2]
        if intents
        else []
    )
    intent_name, intent_score = LuisRecognizer._get_top_k_intent_score(
        top_two_intents, intents, index=0
    )
    intent2_name, intent2_score = LuisRecognizer._get_top_k_intent_score(
        top_two_intents, intents, index=1
    )
    # Add the intent score and conversation id properties
    properties: Dict[str, str] = {
        LuisTelemetryConstants.application_id_property: self._application.application_id,
        LuisTelemetryConstants.intent_property: intent_name,
        LuisTelemetryConstants.intent_score_property: intent_score,
        LuisTelemetryConstants.intent2_property: intent2_name,
        LuisTelemetryConstants.intent_score2_property: intent2_score,
        LuisTelemetryConstants.from_id_property: turn_context.activity.from_property.id,
    }
    # Attach sentiment label/score when the LUIS response included them.
    sentiment = recognizer_result.properties.get("sentiment")
    if sentiment is not None and isinstance(sentiment, Dict):
        label = sentiment.get("label")
        if label is not None:
            properties[LuisTelemetryConstants.sentiment_label_property] = str(label)
        score = sentiment.get("score")
        if score is not None:
            properties[LuisTelemetryConstants.sentiment_score_property] = str(score)
    entities = None
    if recognizer_result.entities is not None:
        entities = json.dumps(recognizer_result.entities)
    properties[LuisTelemetryConstants.entities_property] = entities
    # Use the LogPersonalInformation flag to toggle logging PII data, text is a common example
    if self.log_personal_information and turn_context.activity.text:
        properties[
            LuisTelemetryConstants.question_property
        ] = turn_context.activity.text
    # Additional Properties can override "stock" properties.
    if telemetry_properties is not None:
        for key in telemetry_properties:
            properties[key] = telemetry_properties[key]
    return properties | Fills the event properties for LuisResult event for telemetry.
These properties are logged when the recognizer is called.
:param recognizer_result: Last activity sent from user.
:type recognizer_result: RecognizerResult
:param turn_context: Context object containing information for a single turn of conversation with a user.
:type turn_context: TurnContext
:param telemetry_properties: Additional properties to be logged to telemetry with the LuisResult event, defaults to None
:param telemetry_properties: Dict[str, str], optional
:return: A dictionary that is sent as "Properties" to IBotTelemetryClient.TrackEvent method for the BotMessageSend event.
:rtype: Dict[str, str] |
def default_metadata_db_path():
    """Helper to get the default path for the metadata file.

    :returns: Absolute path to the default metadata database location,
        i.e. ~/.inasafe/metadata.db
    :rtype: str
    """
    return os.path.abspath(
        os.path.join(expanduser("~"), '.inasafe', 'metadata.db'))
:returns: The path to where the default location of the metadata
database is. Maps to which is ~/.inasafe.metadata35.db
:rtype: str |
def human_filesize(i):
    """
    'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc).
    """
    size = float(i)
    kb = 1024.0
    if size < kb:
        plural = u"s" if size != 1 else u""
        return u"%d Byte%s" % (size, plural)
    if size < kb ** 2:
        return u"%.1f KB" % (size / kb)
    if size < kb ** 3:
        return u"%.1f MB" % (size / kb ** 2)
    return u"%.1f GB" % (size / kb ** 3)
def getParameterByName(self, name):
    """Searchs a parameter by name and returns it (None when not found)."""
    for candidate in self.getParameters():
        if candidate.getName() == name:
            return candidate
    return None
def meff_lh_110(self, **kwargs):
    '''
    Returns the light-hole band effective mass in the [110] direction,
    meff_lh_110, in units of electron mass.
    '''
    gamma1 = self.luttinger1(**kwargs)
    gamma2 = self.luttinger2(**kwargs)
    gamma3 = self.luttinger3(**kwargs)
    # m_lh[110]/m0 = 2 / (2*gamma1 + gamma2 + 3*gamma3)
    return 2. / (2 * gamma1 + gamma2 + 3 * gamma3)
meff_lh_110, in units of electron mass. |
def _load_relation(self, models, name, constraints):
"""
Eagerly load the relationship on a set of models.
:rtype: list
"""
relation = self.get_relation(name)
relation.add_eager_constraints(models)
if callable(constraints):
constraints(relation)
else:
relation.merge_query(constraints)
models = relation.init_relation(models, name)
results = relation.get_eager()
return relation.match(models, results, name) | Eagerly load the relationship on a set of models.
:rtype: list |
def intrinsics_multi_constructor(loader, tag_prefix, node):
    """
    YAML constructor to parse CloudFormation intrinsics.
    This will return a dictionary with key being the instrinsic name

    :param loader: the YAML loader performing construction.
    :param tag_prefix: the registered tag prefix (unused here).
    :param node: the YAML node carrying the intrinsic tag (e.g. !GetAtt).
    :return: dict mapping the full intrinsic name to its parsed value.
    """
    # Get the actual tag name excluding the first exclamation
    tag = node.tag[1:]
    # Some intrinsic functions doesn't support prefix "Fn::"
    prefix = "Fn::"
    if tag in ["Ref", "Condition"]:
        prefix = ""
    cfntag = prefix + tag
    if tag == "GetAtt" and isinstance(node.value, six.string_types):
        # ShortHand notation for !GetAtt accepts Resource.Attribute format
        # while the standard notation is to use an array
        # [Resource, Attribute]. Convert shorthand to standard format
        value = node.value.split(".", 1)
    elif isinstance(node, ScalarNode):
        # Value of this node is scalar
        value = loader.construct_scalar(node)
    elif isinstance(node, SequenceNode):
        # Value of this node is an array (Ex: [1,2])
        value = loader.construct_sequence(node)
    else:
        # Value of this node is an mapping (ex: {foo: bar})
        value = loader.construct_mapping(node)
    return {cfntag: value} | YAML constructor to parse CloudFormation intrinsics.
This will return a dictionary with key being the instrinsic name |
def unArrayify(self, gene):
    """
    Copies gene bias values and weights to network bias values and
    weights.
    """
    pos = 0
    # An IndexError raised by gene[pos] below means the gene is too short.
    for layer in self.layers:
        if layer.type != 'Input':
            for i in range(layer.size):
                layer.weight[i] = float(gene[pos])
                pos += 1
    for connection in self.connections:
        for i in range(connection.fromLayer.size):
            for j in range(connection.toLayer.size):
                connection.weight[i][j] = gene[pos]
                pos += 1
    # A gene longer than the network's parameter count is an error too.
    if len(gene) > pos:
        raise IndexError('Argument to unArrayify is too long.', len(gene))
weights. |
def age_simulants(self, event: Event):
    """Updates simulant age on every time step.
    Parameters
    ----------
    event :
        An event object emitted by the simulation containing an index
        representing the simulants affected by the event and timing
        information.
    """
    # Only living simulants are aged.
    population = self.population_view.get(event.index, query="alive == 'alive'")
    # Convert the step size (a Timedelta) into fractional years.
    # NOTE(review): uses 365 days/year, i.e. leap days are ignored.
    population['age'] += event.step_size / pd.Timedelta(days=365)
    self.population_view.update(population) | Updates simulant age on every time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information. |
def multiple_outputs_from_file(cls, filename, keep_sub_files=True):
    """
    Parses a QChem output file with multiple calculations.

    1.) Separates the output into sub-files
        e.g. qcout -> qcout.0, qcout.1, qcout.2 ... qcout.N
        a.) Find delimiter for multiple calculations
        b.) Make separate output sub-files
    2.) Creates separate QCCalcs for each one from the sub-files

    :param filename: path of the multi-job QChem output file.
    :param keep_sub_files: when False, each generated sub-file is removed
        after it has been parsed.
    :return: list of parsed outputs, one per job.
    """
    to_return = []
    with zopen(filename, 'rt') as f:
        # Split on the "Job N of M" banner separating consecutive jobs.
        text = re.split(r'\s*(?:Running\s+)*Job\s+\d+\s+of\s+\d+\s+',
                        f.read())
    if text[0] == '':
        text = text[1:]
    for i, sub_text in enumerate(text):
        sub_filename = filename + '.' + str(i)
        # Use a context manager so the sub-file is closed even on error
        # (it was previously opened and closed manually).
        with open(sub_filename, 'w') as temp:
            temp.write(sub_text)
        to_return.append(cls(sub_filename))
        if not keep_sub_files:
            os.remove(sub_filename)
    return to_return
1.) Seperates the output into sub-files
e.g. qcout -> qcout.0, qcout.1, qcout.2 ... qcout.N
a.) Find delimeter for multiple calcualtions
b.) Make seperate output sub-files
2.) Creates seperate QCCalcs for each one from the sub-files |
def p_x_commalist(self,t):
    """commalist : commalist ',' expression
    | expression
    """
    # NOTE: the docstring above is a PLY grammar rule, not documentation;
    # rewording it would change the generated parser tables.
    # len(t) == 2: single-expression production; len(t) == 4: the recursive
    # "commalist ',' expression" production (t[2] is the comma token).
    if len(t) == 2: t[0] = CommaX([t[1]])
    elif len(t) == 4: t[0] = CommaX(t[1].children+[t[3]])
    else: raise NotImplementedError('unk_len',len(t)) # pragma: no cover | commalist : commalist ',' expression
| expression |
def _assert_refspec(self):
    """Turns out we can't deal with remotes if the refspec is missing"""
    config = self.config_reader
    # Sentinel default: distinguishes "option missing" from any real value.
    unset = 'placeholder'
    try:
        if config.get_value('fetch', default=unset) is unset:
            msg = "Remote '%s' has no refspec set.\n"
            msg += "You can set it as follows:"
            msg += " 'git config --add \"remote.%s.fetch +refs/heads/*:refs/heads/*\"'."
            raise AssertionError(msg % (self.name, self.name))
    finally:
        # Always release the reader so the underlying config file is unlocked.
        config.release() | Turns out we can't deal with remotes if the refspec is missing |
def get_overlapping_ranges(self, collection_link, sorted_ranges):
    '''
    Given the sorted ranges and a collection,
    Returns the list of overlapping partition key ranges
    :param str collection_link:
        The collection link.
    :param (list of routing_range._Range) sorted_ranges: The sorted list of non-overlapping ranges.
    :return:
        List of partition key ranges.
    :rtype: list of dict
    :raises ValueError: If two ranges in sorted_ranges overlap or if the list is not sorted
    '''
    # validate if the list is non-overlapping and sorted
    if not self._is_sorted_and_non_overlapping(sorted_ranges):
        raise ValueError("the list of ranges is not a non-overlapping sorted ranges")
    target_partition_key_ranges = []
    it = iter(sorted_ranges)
    try:
        currentProvidedRange = next(it)
        while True:
            if (currentProvidedRange.isEmpty()):
                # skip and go to the next item
                currentProvidedRange = next(it)
                continue
            # Avoid re-querying the part of the range already covered by the
            # last target range found in a previous iteration.
            if len(target_partition_key_ranges):
                queryRange = self._subtract_range(currentProvidedRange, target_partition_key_ranges[-1])
            else:
                queryRange = currentProvidedRange
            overlappingRanges = _PartitionKeyRangeCache.get_overlapping_ranges(self, collection_link, queryRange)
            assert len(overlappingRanges), ("code bug: returned overlapping ranges for queryRange {} is empty".format(queryRange))
            target_partition_key_ranges.extend(overlappingRanges)
            lastKnownTargetRange = routing_range._Range.PartitionKeyRangeToRange(target_partition_key_ranges[-1])
            # the overlapping ranges must contain the requested range
            assert currentProvidedRange.max <= lastKnownTargetRange.max, "code bug: returned overlapping ranges {} does not contain the requested range {}".format(overlappingRanges, queryRange)
            # the current range is contained in target_partition_key_ranges just move forward
            currentProvidedRange = next(it)
            while currentProvidedRange.max <= lastKnownTargetRange.max:
                # the current range is covered too. just move forward
                currentProvidedRange = next(it)
    except StopIteration:
        # when the iteration is exhausted we get here. There is nothing else to be done
        pass
    return target_partition_key_ranges | Given the sorted ranges and a collection,
Returns the list of overlapping partition key ranges
:param str collection_link:
The collection link.
:param (list of routing_range._Range) sorted_ranges: The sorted list of non-overlapping ranges.
:return:
List of partition key ranges.
:rtype: list of dict
:raises ValueError: If two ranges in sorted_ranges overlap or if the list is not sorted |
def fail(self, err='MockupDB query failure', *args, **kwargs):
    """Reply to a query with the QueryFailure flag and an '$err' key.

    Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
    """
    # Merge the QueryFailure bit into whatever flags the caller supplied.
    kwargs['flags'] = kwargs.get('flags', 0) | REPLY_FLAGS['QueryFailure']
    kwargs['$err'] = err
    self.replies(*args, **kwargs)
    return True
Returns True so it is suitable as an `~MockupDB.autoresponds` handler. |
def save_current_nb_as_html(info=False):
    """
    Save the current notebook as html file in the same directory
    """
    assert in_ipynb()
    path, filename = os.path.split(get_notebook_name())
    cmd = 'jupyter nbconvert --to html "{}"'.format(filename)
    # Convert from inside the notebook's directory, restoring the cwd after.
    saved_wd = os.getcwd()
    os.chdir(path)
    os.system(cmd)
    os.chdir(saved_wd)
    if info:
        print("target dir: ", path)
        print("cmd: ", cmd)
        print("working dir: ", saved_wd)
def convert_command_output(*command):
    """
    Command line interface for ``coloredlogs --to-html``.

    Takes a command (and its arguments) and runs the program under ``script``
    (emulating an interactive terminal), intercepts the output of the command
    and converts ANSI escape sequences in the output to HTML. When connected
    to a terminal the HTML is written to a temporary file and opened in a web
    browser; otherwise it is written to standard output (unless the captured
    output was empty/whitespace).
    """
    captured_output = capture(command)
    converted_output = convert(captured_output)
    if connected_to_terminal():
        # Previously tempfile.mkstemp() was used and its file descriptor was
        # never closed (the file was re-opened by name); NamedTemporaryFile
        # avoids leaking the descriptor. delete=False keeps the file around
        # for the browser to load.
        with tempfile.NamedTemporaryFile(
                mode='w', suffix='.html', delete=False) as handle:
            handle.write(converted_output)
        webbrowser.open(handle.name)
    elif captured_output and not captured_output.isspace():
        output(converted_output)
Takes a command (and its arguments) and runs the program under ``script``
(emulating an interactive terminal), intercepts the output of the command
and converts ANSI escape sequences in the output to HTML. |
def __send_static_file(self, path=None):
    """
    Send apidoc files from the apidoc folder to the browser.
    :param path: the apidoc file. Defaults to 'index.html' when falsy.
    """
    if not path:
        path = 'index.html'
    file_name = join(self.folder_path, path)
    # the api_project.js has the absolute url
    # hard coded so we replace them by the current url.
    if self.dynamic_url and path == 'api_project.js':
        return self.__send_api_file(file_name)
    if self.allow_absolute_url and path == 'main.js':
        return self.__send_main_file(file_name)
    # Any other apidoc file is treated as a normal static file
    return self.app.send_static_file(file_name) | Send apidoc files from the apidoc folder to the browser.
:param path: the apidoc file. |
def parse_declaration_expressn_fncall_SUBparams(self, params):
    """
    Reorders WDL sub() arguments into Python re.sub() argument order.

    Needs rearrangement:
                       0      1        2
    WDL native params: sub(input, pattern, replace)
                              1        2      0
    Python's re.sub() params: sub(pattern, replace, input)
    :param params: AST list holding the three sub() arguments.
    :return: comma-joined argument string in re.sub() order.
    """
    # arguments passed to the function
    if isinstance(params, wdl_parser.Terminal):
        raise NotImplementedError
    elif isinstance(params, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(params, wdl_parser.AstList):
        assert len(params) == 3, ('sub() function requires exactly 3 arguments.')
        es_params0 = self.parse_declaration_expressn(params[0], es='')
        es_params1 = self.parse_declaration_expressn(params[1], es='')
        es_params2 = self.parse_declaration_expressn(params[2], es='')
        # Emit (pattern, replace, input) - the order re.sub() expects.
        return es_params1 + ', ' + es_params2 + ', ' + es_params0 | Needs rearrangement:
0 1 2
WDL native params: sub(input, pattern, replace)
1 2 0
Python's re.sub() params: sub(pattern, replace, input)
:param params:
:param es:
:return: |
def auth_stage2(self,stanza):
    """Handle the first stage authentication response (result of the <iq
    type="get"/>).

    Collects which legacy ``jabber:iq:auth`` mechanisms the server
    offers (digest and/or plain), stores a copy of the offer stanza
    and proceeds with the actual authentication attempt.

    :param stanza: the server's reply to the initial auth query.

    [client only]"""
    self.lock.acquire()
    try:
        self.__logger.debug("Procesing auth response...")
        self.available_auth_methods=[]
        # Digest auth needs the stream id (the digest is computed over
        # it), so only offer it when a stream id is known.
        if (stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"}) and self.stream_id):
            self.available_auth_methods.append("digest")
        if (stanza.xpath_eval("a:query/a:password",{"a":"jabber:iq:auth"})):
            self.available_auth_methods.append("plain")
        # Keep a copy of the offer so _try_auth() can inspect it later.
        self.auth_stanza=stanza.copy()
        self._try_auth()
    finally:
        self.lock.release()
type="get"/>).
[client only] |
def get_field(hologram, sideband=+1, filter_name="disk", filter_size=1 / 3,
              subtract_mean=True, zero_pad=True, copy=True):
    """Compute the complex field from a hologram using Fourier analysis

    Parameters
    ----------
    hologram: real-valued 2d ndarray
        hologram data
    sideband: +1, -1, or tuple of (float, float)
        specifies the location of the sideband:

        - +1: sideband in the upper half in Fourier space,
          exact location is found automatically
        - -1: sideband in the lower half in Fourier space,
          exact location is found automatically
        - (float, float): sideband coordinates in
          frequencies in interval [1/"axes size", .5]
    filter_name: str
        specifies the filter to use, one of

        - "disk": binary disk with radius `filter_size`
        - "smooth disk": disk with radius `filter_size` convolved
          with a radial gaussian (`sigma=filter_size/5`)
        - "gauss": radial gaussian (`sigma=0.6*filter_size`)
        - "square": binary square with side length `filter_size`
        - "smooth square": square with side length `filter_size`
          convolved with square gaussian (`sigma=filter_size/5`)
        - "tukey": a square tukey window of width `2*filter_size` and
          `alpha=0.1`
    filter_size: float
        Size of the filter in Fourier space in fractions of the
        distance between central band and sideband. Must be < 1.
    subtract_mean: bool
        If True, remove the mean of the hologram before performing
        the Fourier transform. This setting is recommended as it
        can reduce artifacts from frequencies around the central
        band.
    zero_pad: bool
        Perform zero-padding before applying the FFT. Setting
        `zero_pad` to `False` increases speed but might
        introduce image distortions such as tilts in the phase
        and amplitude data or dark borders in the amplitude data.
    copy: bool
        If set to True, input `hologram` is not edited.

    Returns
    -------
    field: complex-valued 2d ndarray
        the reconstructed complex field, same shape as `hologram`

    Raises
    ------
    ValueError
        if `filter_size` >= 1 or `filter_name` is unknown

    Notes
    -----
    Even though the size of the "gauss" filter approximately matches
    the frequencies of the "disk" filter, it takes into account
    higher frequencies as well and thus suppresses ringing artifacts
    for data that contain jumps in the phase image.
    """
    if copy:
        # Use the builtin `float` dtype; `np.float` was deprecated in
        # NumPy 1.20 and removed in 1.24.
        hologram = hologram.astype(dtype=float, copy=True)
    if subtract_mean:
        # remove contributions of the central band
        # (this affects more than one pixel in the FFT
        # because of zero-padding)
        if issubclass(hologram.dtype.type, np.integer):
            hologram = hologram.astype(float)
        hologram -= hologram.mean()
    # Fourier transform (optionally zero-padded)
    fft = fourier2dpad(hologram, zero_pad=zero_pad)
    if sideband in [+1, -1]:
        fsx, fsy = find_sideband(fft, which=sideband)
    else:
        fsx, fsy = sideband
    # shift fft to sideband location
    shifted = np.roll(np.roll(fft, -int(fsx * fft.shape[0]), axis=0),
                      -int(fsy * fft.shape[1]), axis=1)
    # coordinates in Fourier space
    fx = np.fft.fftshift(np.fft.fftfreq(fft.shape[0])).reshape(-1, 1)
    fy = np.fft.fftshift(np.fft.fftfreq(fft.shape[1])).reshape(1, -1)
    # filter size based on central band - sideband - distance
    if filter_size >= 1:
        raise ValueError("`filter_size` must be < 1!")
    fsize = np.sqrt(fsx**2 + fsy**2) * filter_size
    if filter_name == "disk":
        afilter = (fx**2 + fy**2) < fsize**2
    elif filter_name == "smooth disk":
        sigma = fsize / 5
        tau = 2 * sigma**2
        radsq = fx**2 + fy**2
        disk = radsq < fsize**2
        gauss = np.exp(-radsq / tau)
        afilter = signal.convolve(gauss, disk, mode="same")
        afilter /= afilter.max()
    elif filter_name == "gauss":
        sigma = fsize * .6
        tau = 2 * sigma**2
        afilter = np.exp(-(fx**2 + fy**2) / tau)
        afilter /= afilter.max()
    elif filter_name == "square":
        afilter = (np.abs(fx) < fsize) * (np.abs(fy) < fsize)
    elif filter_name == "smooth square":
        blur = fsize / 5
        tau = 2 * blur**2
        square = (np.abs(fx) < fsize) * (np.abs(fy) < fsize)
        # BUGFIX: the gaussian kernel must be separable in fx AND fy;
        # previously `fy` was used twice, collapsing the kernel to 1D.
        gauss = np.exp(-(fx**2) / tau) * np.exp(-(fy**2) / tau)
        afilter = signal.convolve(square, gauss, mode="same")
        afilter /= afilter.max()
    elif filter_name == "tukey":
        alpha = 0.1
        # builtin `int`; `np.int` was removed in NumPy 1.24
        rsize = int(min(fx.size, fy.size) * fsize) * 2
        tukey_window_x = signal.tukey(rsize, alpha=alpha).reshape(-1, 1)
        tukey_window_y = signal.tukey(rsize, alpha=alpha).reshape(1, -1)
        tukey = tukey_window_x * tukey_window_y
        afilter = np.zeros(shifted.shape)
        s1 = (np.array(shifted.shape) - rsize)//2
        s2 = (np.array(shifted.shape) + rsize)//2
        afilter[s1[0]:s2[0], s1[1]:s2[1]] = tukey
    else:
        raise ValueError("Unknown filter: {}".format(filter_name))
    # apply filter in Fourier space
    fft_filt = afilter * shifted
    # inverse Fourier transform; crop away the zero-padding
    field = np.fft.ifft2(np.fft.ifftshift(fft_filt))
    return field[:hologram.shape[0], :hologram.shape[1]]
Parameters
----------
hologram: real-valued 2d ndarray
hologram data
sideband: +1, -1, or tuple of (float, float)
specifies the location of the sideband:
- +1: sideband in the upper half in Fourier space,
exact location is found automatically
- -1: sideband in the lower half in Fourier space,
exact location is found automatically
- (float, float): sideband coordinates in
frequencies in interval [1/"axes size", .5]
filter_name: str
specifies the filter to use, one of
- "disk": binary disk with radius `filter_size`
- "smooth disk": disk with radius `filter_size` convolved
with a radial gaussian (`sigma=filter_size/5`)
- "gauss": radial gaussian (`sigma=0.6*filter_size`)
- "square": binary square with side length `filter_size`
- "smooth square": square with side length `filter_size`
convolved with square gaussian (`sigma=filter_size/5`)
- "tukey": a square tukey window of width `2*filter_size` and
`alpha=0.1`
filter_size: float
Size of the filter in Fourier space in fractions of the
distance between central band and sideband.
See `filter_shape` for interpretation of `filter_size`.
subtract_mean: bool
If True, remove the mean of the hologram before performing
the Fourier transform. This setting is recommended as it
can reduce artifacts from frequencies around the central
band.
zero_pad: bool
Perform zero-padding before applying the FFT. Setting
`zero_pad` to `False` increases speed but might
introduce image distortions such as tilts in the phase
and amplitude data or dark borders in the amplitude data.
copy: bool
If set to True, input `data` is not edited.
x0 and y0 are center of the filter
R is factor for "radius" of filter (sqrt(x0² + y0²)/np.pi)
filter_type can be "disk" or "gauss"
Notes
-----
Even though the size of the "gauss" filter approximately matches
the frequencies of the "disk" filter, it takes into account
higher frequencies as well and thus suppresses ringing artifacts
for data that contain jumps in the phase image. |
def get_as_string(self, s3_path, encoding='utf-8'):
    """Download an S3 object and decode its contents to a string.

    :param s3_path: URL for target S3 location
    :param encoding: Encoding used to decode the raw bytes
    :return: File contents as a string
    """
    raw = self.get_as_bytes(s3_path)
    return raw.decode(encoding)
:param s3_path: URL for target S3 location
:param encoding: Encoding to decode bytes to string
:return: File contents as a string |
def query_struct(self, name):
    """Query a struct by name and print its members.

    Looks up every ``code_items`` row matching *name*, then prints the
    defining file and the child rows (members) whose ``parent_id``
    points at it. Structs without members are silently skipped.

    :param name: name of the struct to look up.
    """
    sql = 'select id, file_id, name from code_items '\
        'where name = ?'
    self.cursor.execute(sql, (name,))
    for i in self.cursor.fetchall():
        # i = (id, file_id, name); fetch direct children of this item.
        sql = 'select id, type, name from code_items ' \
            'where parent_id = ?'
        self.cursor.execute(sql, (i[0],))
        members = self.cursor.fetchall()
        if members:
            print(self.file_id_to_name(i[1]), i[2])
            print(members)
def _check_pending(self, tag, match_func=None):
    """Scan the cached pending events for the first one matching ``tag``.

    The first match is removed from the cache and returned. Later
    matches and events still covered by another pending subscription
    are kept; everything else is discarded.

    :param tag: The tag to search for
    :type tag: str
    :param match_func: optional matcher ``f(event_tag, tag) -> bool``;
        defaults to the instance's configured match function.
    :return: the first matching cached event, or None
    """
    if match_func is None:
        match_func = self._get_match_func()
    cached, self.pending_events = self.pending_events, []
    found = None
    for evt in cached:
        if match_func(evt['tag'], tag):
            if found is None:
                found = evt
                log.trace('get_event() returning cached event = %s', found)
            else:
                self.pending_events.append(evt)
        elif any(pmatch_func(evt['tag'], ptag)
                 for ptag, pmatch_func in self.pending_tags):
            # Still wanted by another subscription: keep it cached.
            self.pending_events.append(evt)
        else:
            log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt)
    return found
:param tag: The tag to search for
:type tag: str
:param tags_regex: List of re expressions to search for also
:type tags_regex: list[re.compile()]
:return: |
def _obj_index(self, uri, base_path, marked_path, headers, spr=False):
    """Return an index of objects from within the container.

    Repeatedly GETs pages of the container listing, following the
    marker returned by the previous page, until a page yields no new
    last object name.

    :param uri: parsed container URI (result of ``urlparse``)
    :param base_path: base path used when building marker URLs
    :param marked_path: initial (possibly marker-suffixed) listing path
    :param headers: HTTP headers to send with each request
    :param spr: "single page return" Limit the returned data to one page
    :type spr: ``bool``
    :return: list of object dicts (or the raw first page when ``spr``)
    """
    object_list = list()
    l_obj = None
    container_uri = uri.geturl()
    while True:
        marked_uri = urlparse.urljoin(container_uri, marked_path)
        resp = self.http.get(url=marked_uri, headers=headers)
        self._resp_exception(resp=resp)
        return_list = resp.json()
        if spr:
            # Single-page mode: hand back the raw page unfiltered.
            return return_list
        time_offset = self.job_args.get('time_offset')
        for obj in return_list:
            if time_offset:
                # Get the last_modified data from the Object.
                # NOTE(review): the offset is passed as `last_modified`
                # and the object's own timestamp is never consulted —
                # looks suspicious; confirm against TimeDelta's API.
                time_delta = cloud_utils.TimeDelta(
                    job_args=self.job_args,
                    last_modified=time_offset
                )
                if time_delta:
                    object_list.append(obj)
            else:
                object_list.append(obj)
        if object_list:
            last_obj_in_list = object_list[-1].get('name')
        else:
            last_obj_in_list = None
        # No progress since the last page means the listing is complete.
        if l_obj == last_obj_in_list:
            return object_list
        else:
            l_obj = last_obj_in_list
            marked_path = self._last_marker(
                base_path=base_path,
                last_object=l_obj
            )
:param uri:
:param base_path:
:param marked_path:
:param headers:
:param spr: "single page return" Limit the returned data to one page
:type spr: ``bol``
:return: |
def _deserialization_helper(self, state, ray_forking):
    """This is defined in order to make pickling work.

    Rebuilds the actor handle in-place (via ``__init__``) from the
    serialized state, choosing the actor handle ID depending on
    whether the handle was forked by Ray or pickled by the user.

    Args:
        state: The serialized state of the actor handle (a dict).
        ray_forking: True if this is being called because Ray is forking
            the actor handle and false if it is being called by pickling.
    """
    worker = ray.worker.get_global_worker()
    worker.check_connected()
    if state["ray_forking"]:
        # Forked handles carry a pre-computed handle ID.
        actor_handle_id = state["actor_handle_id"]
    else:
        # Right now, if the actor handle has been pickled, we create a
        # temporary actor handle id for invocations.
        # TODO(pcm): This still leads to a lot of actor handles being
        # created, there should be a better way to handle pickled
        # actor handles.
        # TODO(swang): Accessing the worker's current task ID is not
        # thread-safe.
        # TODO(swang): Unpickling the same actor handle twice in the same
        # task will break the application, and unpickling it twice in the
        # same actor is likely a performance bug. We should consider
        # logging a warning in these cases.
        actor_handle_id = compute_actor_handle_id_non_forked(
            state["actor_handle_id"], worker.current_task_id)
    # Re-run the constructor with the restored fields.
    self.__init__(
        state["actor_id"],
        state["module_name"],
        state["class_name"],
        state["actor_cursor"],
        state["actor_method_names"],
        state["method_signatures"],
        state["method_num_return_vals"],
        state["actor_creation_dummy_object_id"],
        state["actor_method_cpus"],
        # This is the driver ID of the driver that owns the actor, not
        # necessarily the driver that owns this actor handle.
        state["actor_driver_id"],
        actor_handle_id=actor_handle_id)
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling. |
def main(pub_port=None, sub_port=None):
    '''main of forwarder

    Runs a ZeroMQ FORWARDER device relaying messages from publishers
    (frontend SUB socket) to subscribers (backend PUB socket). Blocks
    until interrupted.

    :param sub_port: port for subscribers (resolved via get_sub_port()
        when None)
    :param pub_port: port for publishers (resolved via get_pub_port()
        when None)
    '''
    # Pre-bind to None so the cleanup in `finally` cannot raise
    # NameError when context/socket creation itself fails (previously
    # an early exception was masked by an unbound-local error).
    context = frontend = backend = None
    try:
        if sub_port is None:
            sub_port = get_sub_port()
        if pub_port is None:
            pub_port = get_pub_port()
        context = zmq.Context(1)
        frontend = context.socket(zmq.SUB)
        backend = context.socket(zmq.PUB)
        frontend.bind('tcp://*:{pub_port}'.format(pub_port=pub_port))
        # Subscribe to everything so all messages are forwarded.
        frontend.setsockopt(zmq.SUBSCRIBE, b'')
        backend.bind('tcp://*:{sub_port}'.format(sub_port=sub_port))
        # Blocks here, shuttling messages frontend -> backend.
        zmq.device(zmq.FORWARDER, frontend, backend)
    except KeyboardInterrupt:
        pass
    finally:
        if frontend is not None:
            frontend.close()
        if backend is not None:
            backend.close()
        if context is not None:
            context.term()
:param sub_port: port for subscribers
:param pub_port: port for publishers |
def build_sector_fundamentals(sector):
    '''
    In this method, for the given sector, we'll get the data we need for each stock
    in the sector from IEX. Once we have the data, we'll check that the earnings
    reports meet our criteria with `eps_good()`. We'll put stocks that meet those
    requirements into a dataframe along with all the data about them we'll need.

    :param sector: sector name understood by `get_sector()`
    :return: pandas DataFrame, one row per passing symbol
    :raises ValueError: if `sector` resolves to no stocks
    '''
    stocks = get_sector(sector)
    if len(stocks) == 0:
        raise ValueError("Invalid sector name: {}".format(sector))
    # If we can't see its PE here, we're probably not interested in a stock.
    # Omit it from batch queries.
    stocks = [s for s in stocks if s['peRatio'] is not None]
    # IEX doesn't like batch queries for more than 100 symbols at a time.
    # We need to build our fundamentals info iteratively.
    batch_idx = 0
    batch_size = 99
    fundamentals_dict = {}
    while batch_idx < len(stocks):
        symbol_batch = [s['symbol']
                        for s in stocks[batch_idx:batch_idx + batch_size]]
        stock_batch = Stock(symbol_batch)
        # Pull all the data we'll need from IEX.
        financials_json = stock_batch.get_financials()
        quote_json = stock_batch.get_quote()
        stats_json = stock_batch.get_key_stats()
        earnings_json = stock_batch.get_earnings()
        for symbol in symbol_batch:
            # We'll filter based on earnings first to keep our fundamentals
            # info a bit cleaner.
            if not eps_good(earnings_json[symbol]):
                continue
            # Make sure we have all the data we'll need for our filters for
            # this stock.
            if not data_quality_good(
                    symbol,
                    financials_json,
                    quote_json,
                    stats_json):
                continue
            fundamentals_dict[symbol] = get_fundamental_data_for_symbol(
                symbol,
                financials_json,
                quote_json,
                stats_json
            )
        batch_idx += batch_size
    # Transform all our data into a more filterable form - a dataframe - with
    # a bit of pandas magic.
    return pd.DataFrame.from_dict(fundamentals_dict).T
in the sector from IEX. Once we have the data, we'll check that the earnings
reports meet our criteria with `eps_good()`. We'll put stocks that meet those
requirements into a dataframe along with all the data about them we'll need. |
def tags(self):
    """The tags property.

    Lazily initialized with a deep copy of the configured default.

    Returns:
        (hash). the property value. (defaults to: {})
    """
    try:
        return self._values['tags']
    except KeyError:
        self._values['tags'] = copy.deepcopy(self._defaults['tags'])
        return self._values['tags']
Returns:
(hash). the property value. (defaults to: {}) |
def submit(self, command='sleep 1', blocksize=1, job_name="parsl.auto"):
    """Submit command to an Azure instance.

    Submit returns an ID that corresponds to the task that was just submitted.

    Parameters
    ----------
    command : str
        Command to be invoked on the remote side.
    blocksize : int
        Number of blocks requested.
    job_name : str
        Prefix for job name.

    Returns
    -------
    None or str
        If at capacity (no more can be provisioned), None is returned. Otherwise,
        an identifier for the job is returned.
    """
    # Honour the documented contract: previously both `job_name` and
    # `blocksize` were silently discarded (the name was overwritten with
    # a hard-coded prefix and blocksize was pinned to 1).
    job_name = "{0}.{1}".format(job_name, time.time())
    [instance, *rest] = self.deployer.deploy(
        command=command, job_name=job_name, blocksize=blocksize)
    if not instance:
        logger.error("Failed to submit request to Azure")
        return None
    logger.debug("Started instance_id: {0}".format(instance.instance_id))
    state = translate_table.get(instance.state['Name'], "PENDING")
    self.resources[instance.instance_id] = {"job_id": instance.instance_id, "instance": instance, "status": state}
    return instance.instance_id
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
job_name : str
Prefix for job name.
Returns
-------
None or str
If at capacity (no more can be provisioned), None is returned. Otherwise,
an identifier for the job is returned. |
def _children(self):
    """Yield all direct children of this object: the condition (when it
    is a CodeExpression) followed by the children of both branches."""
    if isinstance(self.condition, CodeExpression):
        yield self.condition
    for child in self.body._children():
        yield child
    for child in self.else_body._children():
        yield child
def is_false(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return True if e can be easily found to be False.

    Results for AST inputs are memoized in ``self._false_cache`` keyed
    by the AST's cache key; non-AST inputs bypass the cache.

    :param e: The AST
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A boolean.
    """
    #if self._solver_required and solver is None:
    #    raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    if not isinstance(e, Base):
        # Non-AST values have no cache key; evaluate directly.
        return self._is_false(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
    try:
        return self._false_cache[e.cache_key]
    except KeyError:
        f = self._is_false(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
        self._false_cache[e.cache_key] = f
        if f is True:
            # A definitely-false expression is definitely not true, so
            # seed the sibling cache to save a future solver call.
            self._true_cache[e.cache_key] = False
        return f
:param e: The AST
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param solver: A solver, for backends that require it
:param model_callback: a function that will be executed with recovered models (if any)
:return: A boolean. |
def setup_simulation(components: List, input_config: Mapping=None,
                     plugin_config: Mapping=None) -> InteractiveContext:
    """Construct a simulation from a list of components and call its setup
    method.

    Parameters
    ----------
    components
        A list of initialized simulation components. Corresponds to the
        components block of a model specification.
    input_config
        A nested dictionary with any additional simulation configuration
        information needed. Corresponds to the configuration block of a model
        specification.
    plugin_config
        A dictionary containing a description of any simulation plugins to
        include in the simulation. If you're using this argument, you're either
        deep in the process of simulation development or the maintainers have
        done something wrong. Corresponds to the plugins block of a model
        specification.

    Returns
    -------
    A simulation context that is setup and ready to run.
    """
    context = initialize_simulation(components, input_config, plugin_config)
    context.setup()
    return context
method.
Parameters
----------
components
A list of initialized simulation components. Corresponds to the
components block of a model specification.
input_config
A nested dictionary with any additional simulation configuration
information needed. Corresponds to the configuration block of a model
specification.
plugin_config
A dictionary containing a description of any simulation plugins to
include in the simulation. If you're using this argument, you're either
deep in the process of simulation development or the maintainers have
done something wrong. Corresponds to the plugins block of a model
specification.
Returns
-------
A simulation context that is setup and ready to run. |
def stdout():
    """
    Returns the stdout as a byte stream in a Py2/PY3 compatible manner

    Returns
    -------
    io.BytesIO
        Byte stream of Stdout
    """
    # Python 3 only accepts bytes on the underlying ``.buffer`` stream,
    # while Python 2's stdout accepts bytes directly. Return whichever
    # object can be written to with bytes on this interpreter.
    if sys.version_info.major > 2:
        return sys.stdout.buffer  # pylint: disable=no-member
    return sys.stdout
Returns
-------
io.BytesIO
Byte stream of Stdout |
def get_unique_named_object(root, name):
    """
    retrieves a unique named object (no fully qualified name)

    Args:
        root: start of search
        name: name of object

    Returns:
        the object (if not unique, raises an error)
    """
    matches = get_children(
        lambda x: hasattr(x, 'name') and x.name == name, root)
    assert len(matches) == 1
    return matches[0]
Args:
root: start of search
name: name of object
Returns:
the object (if not unique, raises an error) |
def to_browser_mode(self):
    """ Write all the messages to files and open them in the browser """
    for message_no, _message in enumerate(self.messages):
        self.__to_browser(message_no)
def fetch_chain(self, certr, max_length=10):
    """
    Fetch the intermediary chain for a certificate.

    Recursively follows each issuer's ``cert_chain_uri`` until either a
    certificate with no chain URI is reached (returns ``[]``) or the
    depth budget is exhausted (fails with ``ClientError``).

    :param acme.messages.CertificateResource certr: The certificate to
        fetch the chain for.
    :param int max_length: The maximum length of the chain that will be
        fetched.

    :rtype: Deferred[List[`acme.messages.CertificateResource`]]
    :return: The issuer certificate chain, ordered with the trust anchor
        last.
    """
    action = LOG_ACME_FETCH_CHAIN()
    with action.context():
        # Base case: nothing more to fetch.
        if certr.cert_chain_uri is None:
            return succeed([])
        elif max_length < 1:
            raise errors.ClientError('chain too long')
        return (
            DeferredContext(
                self._client.get(
                    certr.cert_chain_uri,
                    content_type=DER_CONTENT_TYPE,
                    headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
            .addCallback(self._parse_certificate)
            # Recurse on the fetched issuer, prepending it so the trust
            # anchor ends up last in the returned list.
            .addCallback(
                lambda issuer:
                    self.fetch_chain(issuer, max_length=max_length - 1)
                    .addCallback(lambda chain: [issuer] + chain))
            .addActionFinish())
:param acme.messages.CertificateResource certr: The certificate to
fetch the chain for.
:param int max_length: The maximum length of the chain that will be
fetched.
:rtype: Deferred[List[`acme.messages.CertificateResource`]]
:return: The issuer certificate chain, ordered with the trust anchor
last. |
def Nu_Xu(Re, Pr, rho_w=None, rho_b=None, mu_w=None, mu_b=None):
    r'''Calculates internal convection Nusselt number for turbulent vertical
    upward flow in a pipe under supercritical conditions according to [1]_.

    .. math::
        Nu_b = 0.02269 Re_b^{0.8079} \bar{Pr}_b^{0.9213}
        \left(\frac{\rho_w}{\rho_b}\right)^{0.6638}
        \left(\frac{\mu_w}{\mu_b}\right)^{0.8687}

        \bar{Cp} = \frac{H_w-H_b}{T_w-T_b}

    Parameters
    ----------
    Re : float
        Reynolds number with bulk fluid properties, [-]
    Pr : float
        Prandtl number with bulk fluid properties and an average heat capacity
        between the wall and bulk temperatures [-]
    rho_w : float, optional
        Density at the wall temperature, [kg/m^3]
    rho_b : float, optional
        Density at the bulk temperature, [kg/m^3]
    mu_w : float, optional
        Viscosity at the wall temperature, [Pa*s]
    mu_b : float, optional
        Viscosity at the bulk temperature, [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number with bulk fluid properties, [-]

    Notes
    -----
    For the data used to develop the correlation, P varied from 23 to 30 MPa,
    and D was 12 mm. G varied from 600-1200 kg/m^2/s and q varied from 100 to
    600 kW/m^2.

    Cp used in the calculation of Prandtl number should be the average value
    of those at the wall and the bulk temperatures.

    For deteriorated heat transfer, this was the third most accurate
    correlation in [2]_ with a MAD of 20.5%.

    If the extra density and viscosity information is not provided, it will
    not be used.

    Examples
    --------
    >>> Nu_Xu(1E5, 1.2, 330, 290., 8e-4, 9e-4)
    289.133054256742

    References
    ----------
    .. [1] Xu, F., Guo, L.J., Mao, Y.F., Jiang, X.E., 2005. "Experimental
       investigation to the heat transfer characteristics of water in vertical
       pipes under supercritical pressure". J. Xi'an Jiaotong University 39,
       468-471.
    .. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
       Correlations of Forced Convection Heat Transfer to Water at
       Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
       451-60. doi:10.1016/j.anucene.2014.10.027.
    '''
    # Base correlation with bulk-property Re and Pr.
    nusselt = 0.02269 * Re**0.8079 * Pr**0.9213
    # Optional wall/bulk property-ratio corrections.
    if rho_w and rho_b:
        nusselt *= (rho_w / rho_b)**0.6638
    if mu_w and mu_b:
        nusselt *= (mu_w / mu_b)**0.8687
    return nusselt
upward flow in a pipe under supercritical conditions according to [1]_.
.. math::
Nu_b = 0.02269 Re_b^{0.8079} \bar{Pr}_b^{0.9213}
\left(\frac{\rho_w}{\rho_b}\right)^{0.6638}
\left(\frac{\mu_w}{\mu_b}\right)^{0.8687}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
mu_w : float, optional
Viscosity at the wall temperature, [Pa*s]
mu_b : float, optional
Viscosity at the bulk temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P varied from 23 to 30 MPa,
and D was 12 mm. G varied from 600-1200 kg/m^2/s and q varied from 100 to
600 kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the third most accurate
correlation in [2]_ with a MAD of 20.5%.
If the extra density and viscosity information is not provided, it will
not be used.
Examples
--------
>>> Nu_Xu(1E5, 1.2, 330, 290., 8e-4, 9e-4)
289.133054256742
References
----------
.. [1] Xu, F., Guo, L.J., Mao, Y.F., Jiang, X.E., 2005. "Experimental
investigation to the heat transfer characteristics of water in vertical
pipes under supercritical pressure". J. Xi'an Jiaotong University 39,
468-471.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027. |
def get_end_date_metadata(self):
    """Gets the metadata for an end date.

    return: (osid.Metadata) - metadata for the date
    *compliance: mandatory -- This method must be implemented.*
    """
    # Copy the template so the stored defaults are never mutated.
    md = dict(self._mdata['end_date'])
    md['existing_date_time_values'] = self._my_map['endDate']
    return Metadata(**md)
return: (osid.Metadata) - metadata for the date
*compliance: mandatory -- This method must be implemented.* |
def get_authors_by_keyword(keyword: str, graph=None, authors=None) -> Set[str]:
    """Get authors for whom the search term is a substring.

    :param pybel.BELGraph graph: A BEL graph; only consulted when no
        pre-cached ``authors`` set is supplied
    :param keyword: The keyword to search the author strings for
    :param set[str] authors: An optional set of pre-cached authors calculated from the graph
    :return: A set of authors with the keyword as a substring
    :raises ValueError: if neither ``authors`` nor ``graph`` is given
    """
    needle = keyword.lower()
    if authors is None:
        if graph is None:
            raise ValueError('Graph not supplied')
        authors = get_authors(graph)
    return {
        author
        for author in authors
        if needle in author.lower()
    }
:param pybel.BELGraph graph: A BEL graph
:param keyword: The keyword to search the author strings for
:param set[str] authors: An optional set of pre-cached authors calculated from the graph
:return: A set of authors with the keyword as a substring |
def recent_update_frequencies(self):
    """ Returns the recent update frequencies, most recent first.

    The given frequencies are computed as short-term frequencies: the
    reciprocal of each gap between consecutive timestamps stored in
    ``self._recent_updates``.
    The 0th element of the list corresponds to the most recent frequency.

    NOTE(review): the original docstring claimed "the 10 most recent"
    frequencies, but no truncation to 10 happens here — presumably
    ``_recent_updates`` itself is capped elsewhere; confirm.
    """
    return list(reversed([(1.0 / p) for p in numpy.diff(self._recent_updates)]))
The given frequencies are computed as short-term frequencies!
The 0th element of the list corresponds to the most recent frequency. |
def execute(sql, args=None, key='default'):
    """Run an insert/update/delete statement against a configured database.

    :param sql string: the sql statement, like 'select * from %s'
    :param args list: when None, the dbi runs execute(sql); otherwise
        execute(sql, args) — args follows the dbi's usual rules and
        should be a tuple or a list of tuples
    :param key: key selecting which configured database to use

    eg::

        execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
        execute('delete from users')
    """
    return __db[key].execute(sql, args)
:param sql string: the sql stamtement like 'select * from %s'
:param args list: Wen set None, will use dbi execute(sql), else
dbi execute(sql, args), the args keep the original rules, it shuld be tuple or list of list
:param key: a key for your dabtabase you wanna use
eg::
execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
execute('delete from users') |
def mtf_bitransformer_all_layers_tiny():
    """Test out all the layers on local CPU."""
    hparams = mtf_bitransformer_tiny()
    # Mixture-of-experts settings small enough for a local CPU run.
    for name, value in (("moe_num_experts", 4),
                        ("moe_expert_x", 4),
                        ("moe_expert_y", 4),
                        ("moe_hidden_size", 512)):
        setattr(hparams, name, value)
    hparams.encoder_layers = [
        "self_att", "local_self_att", "moe_1d", "moe_2d", "drd"]
    hparams.decoder_layers = [
        "self_att", "local_self_att", "enc_att", "moe_1d", "moe_2d", "drd"]
    return hparams
def add_tar_opts(cmdlist, compression, verbosity):
    """Add tar options to cmdlist.

    Appends the flag matching ``compression``, a verbosity flag when
    ``verbosity > 1``, and ``--force-local`` for GNU tar.
    """
    progname = os.path.basename(cmdlist[0])
    short_flags = {'gzip': '-z', 'compress': '-Z', 'bzip2': '-j'}
    if compression in short_flags:
        cmdlist.append(short_flags[compression])
    elif compression in ('lzma', 'xz') and progname == 'bsdtar':
        cmdlist.append('--%s' % compression)
    elif compression in ('lzma', 'xz', 'lzip'):
        # use the compression name as program name since
        # tar is picky which programs it can use
        cmdlist.extend(['--use-compress-program', compression])
    if verbosity > 1:
        cmdlist.append('--verbose')
    if progname == 'tar':
        cmdlist.append('--force-local')
def _modules_to_main(modList):
"""Force every module in modList to be placed into main"""
if not modList:
return
main = sys.modules['__main__']
for modname in modList:
if isinstance(modname, str):
try:
mod = __import__(modname)
except Exception:
sys.stderr.write(
'warning: could not import %s\n. '
'Your function may unexpectedly error due to this import failing;'
'A version mismatch is likely. Specific error was:\n' % modname)
print_exec(sys.stderr)
else:
setattr(main, mod.__name__, mod) | Force every module in modList to be placed into main |
def put(request, obj_id=None):
    """Edits an existing tag or adds tags to objects resolved from guids.

    With ``obj_id`` the named tag's name/artist fields are updated;
    otherwise comma-separated ``tags`` are applied to the objects
    behind the comma-separated ``guids``.

    :param obj_id: primary key of a Tag to edit, or None to add tags
    :param tags: Tags to add (comma-separated, in the request body)
    :type tags: list
    :param guids: Guids to add tags from (comma-separated)
    :type guids: list
    :returns: json
    """
    res = Result()
    # PUT bodies are not parsed by Django; fall back to the raw body.
    data = request.PUT or json.loads(request.body)['body']
    if obj_id:
        # -- Edit the tag
        tag = Tag.objects.get(pk=obj_id)
        tag.name = data.get('name', tag.name)
        tag.artist = data.get('artist', tag.artist)
        tag.save()
    else:
        # Drop empty entries produced by splitting trailing commas.
        tags = [_ for _ in data.get('tags', '').split(',') if _]
        guids = [_ for _ in data.get('guids', '').split(',') if _]
        _manageTags(tags, guids)
    return JsonResponse(res.asDict())
:param tags: Tags to add
:type tags: list
:param guids: Guids to add tags from
:type guids: list
:returns: json |
def _scaleTo8bit(self, img):
    '''
    Scale *img* down to an 8-bit array, since the pattern comparator
    needs 8-bit images: determine the signal range, remember it in
    ``self.signal_ranges``, and rescale the image into uint8.
    '''
    signal_range = scaleSignalCutParams(img, 0.02)
    self.signal_ranges.append(signal_range)
    return toUIntArray(img, dtype=np.uint8, range=signal_range)
-> find the range of the signal and scale the image |
def loadedfields(self):
    '''Yield the field instances that were loaded from the database.'''
    if self._loadedfields is None:
        # No explicit field list: every scalar field was loaded.
        for field in self._meta.scalarfields:
            yield field
        return
    all_fields = self._meta.dfields
    seen = set()
    for name in self._loadedfields:
        if name in seen:
            continue
        if name in all_fields:
            seen.add(name)
            yield all_fields[name]
            continue
        # Composite name (e.g. "data<sep>key"): fall back to its prefix,
        # which is only yielded when it is a json object field.
        prefix = name.split(JSPLITTER)[0]
        if prefix in all_fields and prefix not in seen:
            field = all_fields[prefix]
            if field.type == 'json object':
                seen.add(prefix)
                yield field
def _add_timedelta(self, delta):
    """
    Add timedelta duration to the instance.

    :param delta: The timedelta instance
    :type delta: pendulum.Duration or datetime.timedelta

    :rtype: DateTime
    """
    # NOTE(review): Period is tested before Duration — presumably because
    # pendulum.Period subclasses Duration and must win the dispatch here;
    # confirm before reordering these branches.
    if isinstance(delta, pendulum.Period):
        # A Period keeps its calendar components, so add each one
        # individually to remain calendar-correct.
        return self.add(
            years=delta.years,
            months=delta.months,
            weeks=delta.weeks,
            days=delta.remaining_days,
            hours=delta.hours,
            minutes=delta.minutes,
            seconds=delta.remaining_seconds,
            microseconds=delta.microseconds,
        )
    elif isinstance(delta, pendulum.Duration):
        # A Duration carries years/months plus an absolute span in seconds.
        return self.add(
            years=delta.years, months=delta.months, seconds=delta.total_seconds()
        )
    # Plain datetime.timedelta: a fixed number of seconds.
    return self.add(seconds=delta.total_seconds())
:param delta: The timedelta instance
:type delta: pendulum.Duration or datetime.timedelta
:rtype: DateTime |
def write_to(self, group, append=False):
    """Write this data to *group*.

    :param h5py.Group group: Destination group; assumed to exist and to
        have been initialized for h5features data (``Data.init_group``).
    :param bool append: When False, existing data in the group is
        overwritten. When True, the data is appended at the end of the
        group; ``Data.is_appendable_to`` is assumed True for this group.
    """
    write_index(self, group, append)
    entries = self._entries
    entries['items'].write_to(group)
    entries['features'].write_to(group, append)
    entries['labels'].write_to(group)
    if self.has_properties():
        entries['properties'].write_to(group, append=append)
:param h5py.Group group: The group to write the data on. It is
assumed that the group is already existing or initialized
to store h5features data (i.e. the method
``Data.init_group`` have been called.
:param bool append: If False, any existing data in the group
is overwritten. If True, the data is appended to the end of
the group and we assume ``Data.is_appendable_to`` is True
for this group. |
def add_error(self, txt):
    """Record a configuration error and mark the configuration invalid.

    The message is collected so all errors can be printed in one place.

    :param txt: error message
    :type txt: str
    :return: None
    """
    self.conf_is_correct = False
    self.configuration_errors.append(txt)
all in one place
Set the object configuration as not correct
:param txt: error message
:type txt: str
:return: None |
def get_did_providers(self, did):
    """
    Return the list of providers registered on-chain for the given did.

    :param did: hex str, the id of an asset on-chain
    :return: list of addresses, or None when the asset has no
        registered providers (i.e. the register record is missing or
        not a 5-field tuple)
    """
    values = self.contract_concise.getDIDRegister(did)
    if not values or len(values) != 5:
        return None
    return DIDRegisterValues(*values).providers
:param did: hex str the id of an asset on-chain
:return:
list of addresses
None if the asset has no registered providers
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
    """Version of conv1d that works on TPU (as of 11/2017).

    Implemented as kernel_size shifted copies of the input, each run
    through its own dense projection, summed and rescaled.

    Args:
      inputs: a Tensor with shape [batch, length, input_depth].
      filters: an integer.
      kernel_size: an integer.
      padding: a string - "SAME" or "LEFT".
      name: a string.

    Returns:
      a Tensor with shape [batch, length, filters].
    """
    # Pointwise convolution: a single dense layer suffices.
    if kernel_size == 1:
        return dense(inputs, filters, name=name, use_bias=True)
    if padding == "SAME":
        # SAME needs an odd kernel so the window is centred on each position.
        assert kernel_size % 2 == 1
        first_offset = -((kernel_size - 1) // 2)
    else:
        assert padding == "LEFT"
        # Causal ("LEFT") padding: every tap looks backwards.
        first_offset = -(kernel_size - 1)
    # Relative offset of the last kernel tap.
    last_offset = first_offset + kernel_size - 1
    results = []
    # Zero-pad the length axis so each shifted slice stays in range.
    padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
    for i in range(kernel_size):
        # Tap i of the kernel: a slice of the padded input at offset i.
        # NOTE(review): for i == 0 the unshifted `inputs` is used (relative
        # offset 0) rather than slicing `padded` at offset `first_offset`;
        # confirm this matches the intended receptive field.
        shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
        shifted.set_shape(inputs.get_shape())
        # Each tap has its own projection weights; only tap 0 carries a bias.
        results.append(
            dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
    ret = tf.add_n(results)
    # Scale by 1/sqrt(kernel_size) — presumably to keep the output variance
    # independent of the kernel size; confirm against the calling model.
    ret *= kernel_size**-0.5
    return ret
Args:
inputs: a Tensor with shape [batch, length, input_depth].
filters: an integer.
kernel_size: an integer.
padding: a string - "SAME" or "LEFT".
name: a string.
Returns:
a Tensor with shape [batch, length, filters]. |
def get_address(name, hash, db, target=None):
    '''
    Fetch the contract address of a deployment from the database.

    :param name: the contract name
    :param hash: the contract file hash
    :param db: the database handle
    :param target: optional deployment target
    :return: (lowercased address or None, error or None)
    '''
    key = DB.pkey([EZO.DEPLOYED, name, target, hash])
    record, err = db.get(key)
    if err:
        return None, err
    if not record:
        # No deployment recorded for this key — not an error.
        return None, None
    return record['address'].lower(), None
:param hash: the contract file hash
:return: (string) address of the contract
error, if any |
def decode_body(cls, header, f):
    """Build a `MqttPingreq` from a `MqttFixedHeader`.

    Asserts that ``header.packet_type`` is `pingreq`; a PINGREQ packet
    has no body, so nothing is actually read from ``f``.

    Parameters
    ----------
    header: MqttFixedHeader
    f: file
        Object with a read method.

    Raises
    ------
    DecodeError
        When there are extra bytes at the end of the packet.

    Returns
    -------
    int
        Number of bytes consumed from ``f``.
    MqttPingreq
        Object extracted from ``f``.
    """
    assert header.packet_type == MqttControlPacketType.pingreq
    remaining = header.remaining_len
    if remaining != 0:
        raise DecodeError('Extra bytes at end of packet.')
    return 0, MqttPingreq()
`MqttFixedHeader`. This method asserts that header.packet_type
is `pingreq`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttPingreq
Object extracted from ``f``. |
def list(self, *kinds, **kwargs):
    """Returns a list of inputs that are in the :class:`Inputs` collection.
    You can also filter by one or more input kinds.

    This function iterates over all possible inputs, regardless of any arguments you
    specify. Because the :class:`Inputs` collection is the union of all the inputs of each
    kind, this method implements parameters such as "count", "search", and so
    on at the Python level once all the data has been fetched. The exception
    is when you specify a single input kind, and then this method makes a single request
    with the usual semantics for parameters.

    :param kinds: The input kinds to return (optional).

        - "ad": Active Directory
        - "monitor": Files and directories
        - "registry": Windows Registry
        - "script": Scripts
        - "splunktcp": TCP, processed
        - "tcp": TCP, unprocessed
        - "udp": UDP
        - "win-event-log-collections": Windows event log
        - "win-perfmon": Performance monitoring
        - "win-wmi-collections": WMI

    :type kinds: ``string``
    :param kwargs: Additional arguments (optional):

        - "count" (``integer``): The maximum number of items to return.
        - "offset" (``integer``): The offset of the first item to return.
        - "search" (``string``): The search query to filter responses.
        - "sort_dir" (``string``): The direction to sort returned items:
          "asc" or "desc".
        - "sort_field" (``string``): The field to use for sorting (optional).
        - "sort_mode" (``string``): The collating sequence for sorting
          returned items: "auto", "alpha", "alpha_case", or "num".

    :type kwargs: ``dict``
    :return: A list of input kinds.
    :rtype: ``list``
    """
    if len(kinds) == 0:
        kinds = self.kinds
    if len(kinds) == 1:
        # Single kind: one request with the usual REST parameter semantics.
        kind = kinds[0]
        logging.debug("Inputs.list taking short circuit branch for single kind.")
        path = self.kindpath(kind)
        logging.debug("Path for inputs: %s", path)

        try:
            path = UrlEncoded(path, skip_encode=True)
            response = self.get(path, **kwargs)
        except HTTPError as he:
            if he.status == 404:  # No inputs of this kind
                return []
            # BUG FIX: a non-404 HTTPError used to fall through here and
            # blow up with an UnboundLocalError on `response`; re-raise it
            # instead, matching the multi-kind branch below.
            raise
        entities = []
        entries = _load_atom_entries(response)
        if entries is None:
            return []  # No inputs in a collection comes back with no feed or entry in the XML
        for entry in entries:
            state = _parse_atom_entry(entry)
            # Unquote the URL, since all URL encoded in the SDK
            # should be of type UrlEncoded, and all str should not
            # be URL encoded.
            path = urllib.parse.unquote(state.links.alternate)
            entity = Input(self.service, path, kind, state=state)
            entities.append(entity)
        return entities

    # Multiple kinds: fetch everything, then apply the listing parameters
    # (offset/count/sorting) at the Python level.
    search = kwargs.get('search', '*')

    entities = []
    for kind in kinds:
        response = None
        try:
            kind = UrlEncoded(kind, skip_encode=True)
            response = self.get(self.kindpath(kind), search=search)
        except HTTPError as e:
            if e.status == 404:
                continue  # No inputs of this kind
            else:
                raise

        entries = _load_atom_entries(response)
        if entries is None: continue  # No inputs to process
        for entry in entries:
            state = _parse_atom_entry(entry)
            # Unquote the URL, since all URL encoded in the SDK
            # should be of type UrlEncoded, and all str should not
            # be URL encoded.
            path = urllib.parse.unquote(state.links.alternate)
            entity = Input(self.service, path, kind, state=state)
            entities.append(entity)
    if 'offset' in kwargs:
        entities = entities[kwargs['offset']:]
    if 'count' in kwargs:
        entities = entities[:kwargs['count']]
    if kwargs.get('sort_mode', None) == 'alpha':
        # Case-insensitive sort on 'sort_field' (default: entity name).
        sort_field = kwargs.get('sort_field', 'name')
        if sort_field == 'name':
            f = lambda x: x.name.lower()
        else:
            f = lambda x: x[sort_field].lower()
        entities = sorted(entities, key=f)
    if kwargs.get('sort_mode', None) == 'alpha_case':
        # Case-sensitive sort on 'sort_field' (default: entity name).
        sort_field = kwargs.get('sort_field', 'name')
        if sort_field == 'name':
            f = lambda x: x.name
        else:
            f = lambda x: x[sort_field]
        entities = sorted(entities, key=f)
    if kwargs.get('sort_dir', 'asc') == 'desc':
        entities = list(reversed(entities))
    return entities
You can also filter by one or more input kinds.
This function iterates over all possible inputs, regardless of any arguments you
specify. Because the :class:`Inputs` collection is the union of all the inputs of each
kind, this method implements parameters such as "count", "search", and so
on at the Python level once all the data has been fetched. The exception
is when you specify a single input kind, and then this method makes a single request
with the usual semantics for parameters.
:param kinds: The input kinds to return (optional).
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kinds: ``string``
:param kwargs: Additional arguments (optional):
- "count" (``integer``): The maximum number of items to return.
- "offset" (``integer``): The offset of the first item to return.
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_field" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
:return: A list of input kinds.
:rtype: ``list`` |
def wunique_(self, col):
    """
    Weight unique values: return a dataframe with one "Number" column
    counting the occurrences of each unique value in *col*, sorted by
    descending count. Returns None (after reporting via ``self.err``)
    on failure.
    """
    try:
        # BUG FIX: pd.value_counts() is deprecated/removed in modern
        # pandas, and DataFrame(series, columns=["Number"]) does not
        # rename an unnamed Series; use the Series API instead.
        counts = self.df[col].value_counts()
        return counts.to_frame(name="Number")
    except Exception as e:
        self.err(e, "Can not weight unique data")
of unique values |
def all_phrase_translations(phrase):
    '''
    Yield the translations for all possible words in a full phrase.
    Chinese is sometimes ambiguous. We do not attempt to disambiguate,
    or handle unknown letters especially well. Full parsing is left to
    upstream logic.
    '''
    if not trees:
        init()
    # BUG FIX: phrase.split(string.whitespace) splits on the literal
    # six-character whitespace sequence (almost never present), so
    # multi-word phrases were never split. split() with no argument
    # splits on any run of whitespace.
    words = phrase.split()
    for word in words:
        for x in range(len(word)):
            # Look up every word starting at character x of this word.
            for translation in _words_at_the_beginning(
                    word[x+1:],
                    trees['simplified'][word[x]],
                    prefix=word[x]):
                yield translation
phrase. Chinese is sometimes ambiguous. We do not attempt to
disambiguate, or handle unknown letters especially well. Full
parsing is left to upstream logic. |
def _hue(color, **kwargs):
    """ Return the hue component (in degrees, 0-360) of an HSL color. """
    red, green, blue = [channel / 255.0 for channel in color.value[:3]]
    hue, _lightness, _saturation = colorsys.rgb_to_hls(red, green, blue)
    return NumberValue(hue * 360.0)
def dependent_on_composite_state(self):
    """ Compile the composite state of every node this node depends on.

    Iterates over all trees providing a dependency to the current node
    and enlists each counterpart node into a single composite state.

    :return instance of <NodesCompositeState>
    """
    composite_state = NodesCompositeState()
    for dependency_tree in self.tree.dependent_on:
        counterpart = self.find_counterpart_in(dependency_tree)
        if counterpart is not None:
            composite_state.enlist(counterpart)
        # When the counterpart tree has no process with a corresponding
        # time_qualifier (e.g. Financial Monthly has no counterpart in a
        # Third-party Daily Report), the node is assumed not blocked.
    return composite_state
and compile composite state of them all
:return instance of <NodesCompositeState> |
def get_year_start(day=None):
    """Return January 1 of the year of *day* (today when omitted)."""
    anchor = add_timezone(day or datetime.date.today())
    # Jumping to month=1 first is always valid because January has 31 days.
    return anchor.replace(month=1, day=1)
def _construct_key(self, rule_id: str, spacy_rule_id:int) -> int:
"""
Use a mapping to store the information about rule_id for each matches, create the mapping key here
Args:
rule_id: str
spacy_rule_id:int
Returns: int
"""
hash_key = (rule_id, spacy_rule_id)
hash_v = hash(hash_key) + sys.maxsize + 1
self._hash_map[hash_v] = hash_key
return hash_v | Use a mapping to store the information about rule_id for each matches, create the mapping key here
Args:
rule_id: str
spacy_rule_id:int
Returns: int |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.