| code | docstring |
|---|---|
def get_field_def(
schema: GraphQLSchema, parent_type: GraphQLType, field_node: FieldNode
) -> Optional[GraphQLField]:
"""Get field definition.
Not exactly the same as the executor's definition of `get_field_def()`: in this
statically evaluated environment we do not always have an Object type and need
to handle Interface and Union types.
"""
name = field_node.name.value
if name == "__schema" and schema.query_type is parent_type:
return SchemaMetaFieldDef
if name == "__type" and schema.query_type is parent_type:
return TypeMetaFieldDef
if name == "__typename" and is_composite_type(parent_type):
return TypeNameMetaFieldDef
if is_object_type(parent_type) or is_interface_type(parent_type):
parent_type = cast(Union[GraphQLObjectType, GraphQLInterfaceType], parent_type)
return parent_type.fields.get(name)
return None
|
Get field definition.
Not exactly the same as the executor's definition of `get_field_def()`: in this
statically evaluated environment we do not always have an Object type and need
to handle Interface and Union types.
|
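A minimal usage sketch (assumes graphql-core 3; the schema, query, and AST navigation below are illustrative, not taken from the surrounding code):

```python
from graphql import build_schema, parse

schema = build_schema("""
type Query {
  hero: String
}
""")

# Pull the FieldNode for `hero` out of a parsed operation by hand.
document = parse("{ hero }")
operation = document.definitions[0]
field_node = operation.selection_set.selections[0]

field_def = get_field_def(schema, schema.query_type, field_node)
print(field_def)  # the GraphQLField for Query.hero, or None if the name is unknown
```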
def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True,
envelope=False, iSpeedTuning=False, enforceDale=True):
"""
Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputE: The feedforward input to excitatory cells.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param envelope: Whether or not an envelope should be applied.
:param iSpeedTuning: Whether or not inhibitory cells should also have their
activations partially depend on current movement speed. This is
necessary for periodic training, serving a role similar to that of
the envelope.
:param enforceDale: Whether or not Dale's law should be enforced locally. Helps with
training with recurrent weights active, but can slow down training.
"""
self.instantaneousI.fill(0)
self.instantaneousEL.fill(0)
self.instantaneousER.fill(0)
self.instantaneousI += feedforwardInputI
self.instantaneousEL += feedforwardInputE
self.instantaneousER += feedforwardInputE
if enforceDale:
weightsII = np.minimum(self.weightsII, 0)
weightsIER = np.minimum(self.weightsIER, 0)
weightsIEL = np.minimum(self.weightsIEL, 0)
weightsELI = np.maximum(self.weightsELI, 0)
weightsERI = np.maximum(self.weightsERI, 0)
else:
weightsII = self.weightsII
weightsIER = self.weightsIER
weightsIEL = self.weightsIEL
weightsELI = self.weightsELI
weightsERI = self.weightsERI
if recurrent:
self.instantaneousI += (np.matmul(self.activationsEL, weightsELI) +\
np.matmul(self.activationsER, weightsERI) +\
np.matmul(self.activationsI, weightsII))
self.instantaneousEL += np.matmul(self.activationsI, weightsIEL)
self.instantaneousER += np.matmul(self.activationsI, weightsIER)
self.instantaneousEL *= max((1 - self.velocityGain*v), 0)
self.instantaneousER *= max((1 + self.velocityGain*v), 0)
if iSpeedTuning:
self.instantaneousI *= min(self.velocityGain*np.abs(v), 1)
self.instantaneousI += self.constantTonicMagnitude
self.instantaneousEL += self.constantTonicMagnitude
self.instantaneousER += self.constantTonicMagnitude
if envelope:
self.instantaneousI *= self.envelopeI
self.instantaneousER *= self.envelopeE
self.instantaneousEL *= self.envelopeE
# Input must be positive.
np.maximum(self.instantaneousI, 0., self.instantaneousI)
np.maximum(self.instantaneousEL, 0., self.instantaneousEL)
np.maximum(self.instantaneousER, 0., self.instantaneousER)
# Activity decay and timestep adjustment
self.activationsI += (self.instantaneousI - self.activationsI/self.decayConstant)*self.dt
self.activationsEL += (self.instantaneousEL - self.activationsEL/self.decayConstant)*self.dt
self.activationsER += (self.instantaneousER - self.activationsER/self.decayConstant)*self.dt
# Finally, clip activations for stability
np.minimum(self.activationsI, self.clip, self.activationsI)
np.minimum(self.activationsEL, self.clip, self.activationsEL)
np.minimum(self.activationsER, self.clip, self.activationsER)
|
Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputE: The feedforward input to excitatory cells.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param envelope: Whether or not an envelope should be applied.
:param iSpeedTuning: Whether or not inhibitory cells should also have their
activations partially depend on current movement speed. This is
necessary for periodic training, serving a role similar to that of
the envelope.
:param enforceDale: Whether or not Dale's law should be enforced locally. Helps with
training with recurrent weights active, but can slow down training.
|
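At its core each population is a leaky integrator of its instantaneous input. A stripped-down NumPy sketch of that step, with made-up sizes and constants (not values from the class):

```python
import numpy as np

dt, decay_constant, clip = 0.001, 0.03, 10.0
activations = np.zeros(128)
instantaneous = np.random.rand(128)   # stands in for feedforward + recurrent + tonic drive

instantaneous = np.maximum(instantaneous, 0.0)                      # input must be positive
activations += (instantaneous - activations / decay_constant) * dt  # decay + timestep
activations = np.minimum(activations, clip)                         # clip for stability
```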
def cat_top_keywords(self, session, cat, up=True, offset=0, offsets=[]):
'''Get top keywords in a specific category'''
print 'CAT:%s, level:%s'%(str(cat), str(cat.level))
print 'OFFSET: %d'%offset
response = []
if not offsets or offset==0:
url = 'http://top.taobao.com/level3.php?cat=%s&level3=%s&show=focus&up=%s&offset=%d'%(cat.parent.cid, '' if cat.level==2 else str(cat.cid), 'true' if up else '', offset)
print url
rs = self.fetch(url)
if not rs: return response
soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage)
response = self.parse_cat_top_keywords(soup, offset)
if offset==0:
offsets = self.get_cat_top_keywords_pages(soup, offset)
print 'OFFSETS: %s'%offsets
if offsets:
rs = []
threadPool = ThreadPool(len(offsets) if len(offsets)<=5 else 5)
for idx, page_offset in enumerate(offsets):
page_url = 'http://top.taobao.com/level3.php?cat=%s&level3=%s&show=focus&up=%s&offset=%d'%(cat.parent.cid, '' if cat.level==2 else str(cat.cid), 'true' if up else '', page_offset)
next_page = 'True' if idx == (len(offsets)-1) else 'False'
threadPool.run(self.fetch, callback=None, url=page_url, config=dict(get_next=next_page, offset=page_offset))
pages = threadPool.killAllWorkers(None)
#print 'RESPONSES: %s'%pages
for p in pages:
if not p: continue
soup2 = BeautifulSoup(p.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage)
offset2 = int(p.config['offset'])
response += self.parse_cat_top_keywords(soup2, offset2)
print 'GOT: %d'%offset2
if p.config['get_next'] != 'True': continue
offsets = self.get_cat_top_keywords_pages(soup2, offset2)
print offsets
if not offsets: continue
response += self.cat_top_keywords(session, cat, up, offset2, offsets)
#return sorted(response, key=itemgetter('pos')) if response else []
#print "RETURN:%d"%offset
for k in response:
new_keyword = models.Keyword(k['name'].decode('utf-8'))
new_keyword.categories.append(cat)
session.add(new_keyword)
try:
session.commit()
except IntegrityError:
session.rollback()
new_keyword = session.query(models.Keyword).filter(models.Keyword.name == k['name']).first()
new_keyword.categories.append(cat)
session.commit()
print 'Duplicate %s'%new_keyword
return response
|
Get top keywords in a specific category
|
def add_namespaces(spec_dict):
"""Add namespace convenience keys, list, list_{short|long}, to_{short|long}"""
for ns in spec_dict["namespaces"]:
spec_dict["namespaces"][ns]["list"] = []
spec_dict["namespaces"][ns]["list_long"] = []
spec_dict["namespaces"][ns]["list_short"] = []
spec_dict["namespaces"][ns]["to_short"] = {}
spec_dict["namespaces"][ns]["to_long"] = {}
for obj in spec_dict["namespaces"][ns]["info"]:
spec_dict["namespaces"][ns]["list"].extend([obj["name"], obj["abbreviation"]])
spec_dict["namespaces"][ns]["list_short"].append(obj["abbreviation"])
spec_dict["namespaces"][ns]["list_long"].append(obj["name"])
spec_dict["namespaces"][ns]["to_short"][obj["abbreviation"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_short"][obj["name"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbreviation"]] = obj["name"]
spec_dict["namespaces"][ns]["to_long"][obj["name"]] = obj["name"]
# For AminoAcid namespace
if "abbrev1" in obj:
spec_dict["namespaces"][ns]["to_short"][obj["abbrev1"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbrev1"]] = obj["name"]
|
Add namespace convenience keys, list, list_{short|long}, to_{short|long}
|
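A small worked example of the convenience keys this produces (the namespace and entry are made up):

```python
spec_dict = {
    "namespaces": {
        "AminoAcid": {
            "info": [
                {"name": "Alanine", "abbreviation": "Ala", "abbrev1": "A"},
            ]
        }
    }
}

add_namespaces(spec_dict)
ns = spec_dict["namespaces"]["AminoAcid"]
assert ns["list"] == ["Alanine", "Ala"]
assert ns["to_short"] == {"Ala": "Ala", "Alanine": "Ala", "A": "Ala"}
assert ns["to_long"] == {"Ala": "Alanine", "Alanine": "Alanine", "A": "Alanine"}
```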
def parse_info_frags(info_frags):
"""Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
"""
new_scaffolds = {}
with open(info_frags, "r") as info_frags_handle:
current_new_contig = None
for line in info_frags_handle:
if line.startswith(">"):
current_new_contig = str(line[1:-1])
new_scaffolds[current_new_contig] = []
elif line.startswith("init_contig"):
pass
else:
(init_contig, id_frag, orientation, pos_start, pos_end) = str(
line[:-1]
).split("\t")
start = int(pos_start)
end = int(pos_end)
ori = int(orientation)
fragid = int(id_frag)
assert start < end
assert ori in {-1, 1}
new_scaffolds[current_new_contig].append(
[init_contig, fragid, start, end, ori]
)
return new_scaffolds
|
Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
|
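For reference, a hypothetical info_frags.txt and the dictionary it parses into:

```python
from textwrap import dedent

with open("info_frags.txt", "w") as handle:
    handle.write(dedent("""\
        >scaffold_1
        init_contig\tid_frag\torientation\tpos_start\tpos_end
        contig_A\t0\t1\t0\t15000
        contig_B\t3\t-1\t2000\t9000
        """))

scaffolds = parse_info_frags("info_frags.txt")
# {'scaffold_1': [['contig_A', 0, 0, 15000, 1], ['contig_B', 3, 2000, 9000, -1]]}
```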
def __connect(self):
"""
Connect to the database.
"""
self.__methods = _get_methods_by_uri(self.sqluri)
uri_connect_method = self.__methods[METHOD_CONNECT]
self.__dbapi2_conn = uri_connect_method(self.sqluri)
|
Connect to the database.
|
def assign(self, object_type, object_uuid, overwrite=False):
"""Assign this persistent identifier to a given object.
Note, the persistent identifier must first have been reserved. Also,
if an existing object is already assigned to the pid, it will raise an
exception unless overwrite=True.
:param object_type: The object type is a string that identifies its type.
:param object_uuid: The object UUID.
:param overwrite: Force PID overwrite in case it was previously assigned.
:raises invenio_pidstore.errors.PIDInvalidAction: If the PID was
previously deleted.
:raises invenio_pidstore.errors.PIDObjectAlreadyAssigned: If the PID
was previously assigned with a different type/uuid.
:returns: `True` if the PID is successfully assigned.
"""
if self.is_deleted():
raise PIDInvalidAction(
"You cannot assign objects to a deleted/redirected persistent"
" identifier."
)
if not isinstance(object_uuid, uuid.UUID):
object_uuid = uuid.UUID(object_uuid)
if self.object_type or self.object_uuid:
# The object is already assigned to this pid.
if object_type == self.object_type and \
object_uuid == self.object_uuid:
return True
if not overwrite:
raise PIDObjectAlreadyAssigned(object_type,
object_uuid)
self.unassign()
try:
with db.session.begin_nested():
self.object_type = object_type
self.object_uuid = object_uuid
db.session.add(self)
except SQLAlchemyError:
logger.exception("Failed to assign {0}:{1}".format(
object_type, object_uuid), extra=dict(pid=self))
raise
logger.info("Assigned object {0}:{1}".format(
object_type, object_uuid), extra=dict(pid=self))
return True
|
Assign this persistent identifier to a given object.
Note, the persistent identifier must first have been reserved. Also,
if an existing object is already assigned to the pid, it will raise an
exception unless overwrite=True.
:param object_type: The object type is a string that identifies its type.
:param object_uuid: The object UUID.
:param overwrite: Force PID overwrite in case it was previously assigned.
:raises invenio_pidstore.errors.PIDInvalidAction: If the PID was
previously deleted.
:raises invenio_pidstore.errors.PIDObjectAlreadyAssigned: If the PID
was previously assigned with a different type/uuid.
:returns: `True` if the PID is successfully assigned.
|
def _placement_points_generator(self, skyline, width):
"""Returns a generator for the x coordinates of all the placement
points on the skyline for a given rectangle.
WARNING: In some cases there could be duplicated points, but it is faster
to compute them twice than to remove them.
Arguments:
skyline (list): Skyline HSegment list
width (int, float): Rectangle width
Returns:
generator
"""
skyline_r = skyline[-1].right
skyline_l = skyline[0].left
# Placements using skyline segment left point
ppointsl = (s.left for s in skyline if s.left+width <= skyline_r)
# Placements using skyline segment right point
ppointsr = (s.right-width for s in skyline if s.right-width >= skyline_l)
# Merge positions
return heapq.merge(ppointsl, ppointsr)
|
Returns a generator for the x coordinates of all the placement
points on the skyline for a given rectangle.
WARNING: In some cases there could be duplicated points, but it is faster
to compute them twice than to remove them.
Arguments:
skyline (list): Skyline HSegment list
width (int, float): Rectangle width
Returns:
generator
|
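A toy run of the same generator logic, using a namedtuple as a hypothetical stand-in for HSegment (only the left/right attributes are exercised):

```python
import heapq
from collections import namedtuple

HSeg = namedtuple("HSeg", ["left", "right"])  # stand-in for the real HSegment

skyline = [HSeg(0, 4), HSeg(4, 7), HSeg(7, 10)]
width = 3
skyline_r, skyline_l = skyline[-1].right, skyline[0].left

ppointsl = (s.left for s in skyline if s.left + width <= skyline_r)
ppointsr = (s.right - width for s in skyline if s.right - width >= skyline_l)
print(list(heapq.merge(ppointsl, ppointsr)))  # [0, 1, 4, 4, 7, 7] -- note the duplicates
```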
def main():
""" Main method of the script """
parser = __build_option_parser()
args = parser.parse_args()
analyze_ws = AnalyzeWS(args)
try:
analyze_ws.set_file(args.file_[0])
except IOError:
print 'IOError raised while reading file. Exiting!'
sys.exit(3)
# Start the chosen mode
if args.to_file or args.to_browser:
analyze_ws.to_file_mode()
if args.to_browser:
analyze_ws.to_browser_mode()
else:
analyze_ws.interactive_mode()
|
Main method of the script
|
def add_segy_view_widget(self, ind, widget, name=None):
"""
:param widget: The SegyViewWidget that will be added to the SegyTabWidget
:type widget: SegyViewWidget
"""
if self._context is None:
self._segywidgets.append(widget)
self.initialize()
return 0 # return 0 for first widget index
self._tab_widget.updatesEnabled = False
widget.show_toolbar(toolbar=True, layout_combo=False, colormap=True, save=True, settings=True)
self._modify_qtree(widget.settings_window.qtree, [0, 1, 2, 4])
if name is None:
name = os.path.basename(widget.slice_data_source.source_filename)
id = self._tab_widget.insertTab(ind, widget, name)
widget.context.data_changed.connect(self._local_data_changed)
self._tab_widget.updatesEnabled = True
return id
|
:param widget: The SegyViewWidget that will be added to the SegyTabWidget
:type widget: SegyViewWidget
|
def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field])
|
Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
|
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
"""Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
inference/conversion. In particular, an empty list or tuple will become an
empty `Tensor` with dtype `float32`. Callers should convert values to
`Tensor`s before calling this function if different behavior is desired
(e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
Args:
x: A 0-D or 1-D `Tensor`.
tensor_name: Python `str` name for `Tensor`s created by this function.
op_name: Python `str` name for `Op`s created by this function.
validate_args: Python `bool`, default `False`. When `True`, arguments may be
checked for validity at execution time, possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Returns:
vector: a 1-D `Tensor`.
"""
with tf.name_scope(op_name or "expand_to_vector"):
x = tf.convert_to_tensor(value=x, name="x")
ndims = tensorshape_util.rank(x.shape)
if ndims is None:
# Maybe expand ndims from 0 to 1.
if validate_args:
x = with_dependencies([
assert_util.assert_rank_at_most(
x, 1, message="Input is neither scalar nor vector.")
], x)
ndims = tf.rank(x)
expanded_shape = pick_vector(
tf.equal(ndims, 0), np.array([1], dtype=np.int32), tf.shape(input=x))
return tf.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand ndims from 0 to 1.
x_const = tf.get_static_value(x)
if x_const is not None:
return tf.convert_to_tensor(
value=dtype_util.as_numpy_dtype(x.dtype)([x_const]),
name=tensor_name)
else:
return tf.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
# ndims == 1
return x
|
Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
inference/conversion. In particular, an empty list or tuple will become an
empty `Tensor` with dtype `float32`. Callers should convert values to
`Tensor`s before calling this function if different behavior is desired
(e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
Args:
x: A 0-D or 1-D `Tensor`.
tensor_name: Python `str` name for `Tensor`s created by this function.
op_name: Python `str` name for `Op`s created by this function.
validate_args: Python `bool`, default `False`. When `True`, arguments may be
checked for validity at execution time, possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Returns:
vector: a 1-D `Tensor`.
|
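Behaviourally the contract is close to `np.atleast_1d` restricted to rank-0 or rank-1 inputs. A rough NumPy analogy (not the TensorFlow implementation):

```python
import numpy as np

def expand_to_vector_np(x):
    """Accept a rank-0 or rank-1 array-like and always return a rank-1 array."""
    x = np.asarray(x)
    if x.ndim == 0:
        return x.reshape([1])
    if x.ndim != 1:
        raise ValueError("Input is neither scalar nor vector.")
    return x

print(expand_to_vector_np(5))    # array([5])
print(expand_to_vector_np([5]))  # array([5])
```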
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
|
Return a string representing the default user agent.
|
def _remove_none_values(dictionary):
""" Remove dictionary keys whose value is None """
return list(map(dictionary.pop,
[i for i in dictionary if dictionary[i] is None]))
|
Remove dictionary keys whose value is None
|
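Note that this mutates the dictionary in place and returns the popped values, e.g.:

```python
settings = {"region": "us-east-1", "profile": None, "timeout": 30}
popped = _remove_none_values(settings)
# settings -> {'region': 'us-east-1', 'timeout': 30}
# popped   -> [None]  (the values that were removed)
```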
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp,
self._mangle_from_,
None, # Use policy setting, which we've adjusted
policy=self.policy)
|
Clone this generator with the exact same options.
|
def check_type(self, value, attr, data):
"""Customize check_type for handling containers."""
# Check the type in the standard way first, in order to fail quickly
# in case of invalid values.
root_value = super(InstructionParameter, self).check_type(
value, attr, data)
if is_collection(value):
_ = [super(InstructionParameter, self).check_type(item, attr, data)
for item in value]
return root_value
|
Customize check_type for handling containers.
|
def extract_archive(archive, verbosity=0, outdir=None, program=None, interactive=True):
"""Extract given archive."""
util.check_existing_filename(archive)
if verbosity >= 0:
util.log_info("Extracting %s ..." % archive)
return _extract_archive(archive, verbosity=verbosity, interactive=interactive, outdir=outdir, program=program)
|
Extract given archive.
|
def add_sibling(self, pos=None, **kwargs):
"""Adds a new node as a sibling to the current node object."""
pos = self._prepare_pos_var_for_add_sibling(pos)
if len(kwargs) == 1 and 'instance' in kwargs:
# adding the passed (unsaved) instance to the tree
newobj = kwargs['instance']
if newobj.pk:
raise NodeAlreadySaved("Attempted to add a tree node that is "\
"already in the database")
else:
# creating a new object
newobj = get_result_class(self.__class__)(**kwargs)
newobj.depth = self.depth
sql = None
target = self
if target.is_root():
newobj.lft = 1
newobj.rgt = 2
if pos == 'sorted-sibling':
siblings = list(target.get_sorted_pos_queryset(
target.get_siblings(), newobj))
if siblings:
pos = 'left'
target = siblings[0]
else:
pos = 'last-sibling'
last_root = target.__class__.get_last_root_node()
if (
(pos == 'last-sibling') or
(pos == 'right' and target == last_root)
):
newobj.tree_id = last_root.tree_id + 1
else:
newpos = {'first-sibling': 1,
'left': target.tree_id,
'right': target.tree_id + 1}[pos]
sql, params = target.__class__._move_tree_right(newpos)
newobj.tree_id = newpos
else:
newobj.tree_id = target.tree_id
if pos == 'sorted-sibling':
siblings = list(target.get_sorted_pos_queryset(
target.get_siblings(), newobj))
if siblings:
pos = 'left'
target = siblings[0]
else:
pos = 'last-sibling'
if pos in ('left', 'right', 'first-sibling'):
siblings = list(target.get_siblings())
if pos == 'right':
if target == siblings[-1]:
pos = 'last-sibling'
else:
pos = 'left'
found = False
for node in siblings:
if found:
target = node
break
elif node == target:
found = True
if pos == 'left':
if target == siblings[0]:
pos = 'first-sibling'
if pos == 'first-sibling':
target = siblings[0]
move_right = self.__class__._move_right
if pos == 'last-sibling':
newpos = target.get_parent().rgt
sql, params = move_right(target.tree_id, newpos, False, 2)
elif pos == 'first-sibling':
newpos = target.lft
sql, params = move_right(target.tree_id, newpos - 1, False, 2)
elif pos == 'left':
newpos = target.lft
sql, params = move_right(target.tree_id, newpos, True, 2)
newobj.lft = newpos
newobj.rgt = newpos + 1
# saving the instance before returning it
if sql:
cursor = self._get_database_cursor('write')
cursor.execute(sql, params)
newobj.save()
return newobj
|
Adds a new node as a sibling to the current node object.
|
def _get_mean(self, sites, C, ln_y_ref, exp1, exp2, v1):
"""
Add site effects to an intensity.
Implements eq. 5
"""
# we do not support estimating of basin depth and instead
# rely on it being available (since we require it).
z1pt0 = sites.z1pt0
# we consider random variables being zero since we want
# to find the exact mean value.
eta = epsilon = 0
ln_y = (
# first line of eq. 13b
ln_y_ref + C['phi1'] *
np.log(np.clip(sites.vs30, -np.inf, v1) / 1130)
# second line
+ C['phi2'] * (exp1 - exp2)
* np.log((np.exp(ln_y_ref) + C['phi4']) / C['phi4'])
# third line
+ C['phi5']
* (1.0 - 1.0 / np.cosh(
C['phi6'] * (z1pt0 - C['phi7']).clip(0, np.inf)))
+ C['phi8'] / np.cosh(0.15 * (z1pt0 - 15).clip(0, np.inf))
# fourth line
+ eta + epsilon
)
return ln_y
|
Add site effects to an intensity.
Implements eq. 5
|
def create_payload(self):
"""Remove ``smart_class_parameter_id`` or ``smart_variable_id``"""
payload = super(OverrideValue, self).create_payload()
if hasattr(self, 'smart_class_parameter'):
del payload['smart_class_parameter_id']
if hasattr(self, 'smart_variable'):
del payload['smart_variable_id']
return payload
|
Remove ``smart_class_parameter_id`` or ``smart_variable_id``
|
def get_config_values(config_path, section, default='default'):
"""
Parse ini config file and return a dict of values.
The provided section overrides any values in default section.
"""
values = {}
if not os.path.isfile(config_path):
raise IpaUtilsException(
'Config file not found: %s' % config_path
)
config = configparser.ConfigParser()
try:
config.read(config_path)
except Exception:
raise IpaUtilsException(
'Config file format invalid.'
)
try:
values.update(config.items(default))
except Exception:
pass
try:
values.update(config.items(section))
except Exception:
pass
return values
|
Parse ini config file and return a dict of values.
The provided section overrides any values in default section.
|
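For example, given an ini file like the one sketched below (path and section names are made up), values from the requested section override the defaults:

```python
# contents of config.ini:
#   [default]
#   region = us-east-1
#   timeout = 30
#
#   [ec2]
#   timeout = 90

values = get_config_values("config.ini", "ec2")
# {'region': 'us-east-1', 'timeout': '90'}  (configparser values are strings)
```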
def from_center(self, x=None, y=None, z=None, r=None,
theta=None, h=None, reference=None):
"""
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin
"""
coords_to_endpoint = None
if all([isinstance(i, numbers.Number) for i in (x, y, z)]):
coords_to_endpoint = self.from_cartesian(x, y, z)
if all([isinstance(i, numbers.Number) for i in (r, theta, h)]):
coords_to_endpoint = self.from_polar(r, theta, h)
coords_to_reference = Vector(0, 0, 0)
if reference:
coords_to_reference = self.coordinates(reference)
return coords_to_reference + coords_to_endpoint
|
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin
|
def motif4struct_wei(W):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 199xN np.ndarray
motif intensity matrix
Q : 199xN np.ndarray
motif coherence matrix
F : 199xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m4 = mot['m4']
m4n = mot['m4n']
id4 = mot['id4'].squeeze()
n4 = mot['n4'].squeeze()
n = len(W)
I = np.zeros((199, n)) # intensity
Q = np.zeros((199, n)) # coherence
F = np.zeros((199, n)) # frequency
A = binarize(W, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 3):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
vz = np.max((v1, v2)) # vz: largest rank node
# v3: all neighbors of v2 (>u)
V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
V3[V2] = 0 # not already in V1 and V2
# and all neighbors of v1 (>v2)
V3 = np.logical_or(
np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
V3[V1] = 0 # not already in V1
# and all neighbors of u (>vz)
V3 = np.logical_or(
np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
for v3 in np.where(V3)[0]:
a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1],
A[v3, v1], A[u, v2], A[v1, v2], A[
v3, v2], A[u, v3], A[v1, v3],
A[v2, v3]))
s = np.uint64(
np.sum(np.power(10, np.arange(11, -1, -1)) * a))
# print np.shape(s),np.shape(m4n)
ix = np.squeeze(s == m4n)
w = np.array((W[v1, u], W[v2, u], W[v3, u], W[u, v1], W[v2, v1],
W[v3, v1], W[u, v2], W[v1, v2], W[
v3, v2], W[u, v3], W[v1, v3],
W[v2, v3]))
M = w * m4[ix, :]
id = id4[ix] - 1
l = n4[ix]
x = np.sum(M, axis=1) / l # arithmetic mean
M[M == 0] = 1 # enable geometric mean
i = np.prod(M, axis=1)**(1 / l) # intensity
q = i / x # coherence
# then add to cumulative count
I[id, u] += i
I[id, v1] += i
I[id, v2] += i
I[id, v3] += i
Q[id, u] += q
Q[id, v1] += q
Q[id, v2] += q
Q[id, v3] += q
F[id, u] += 1
F[id, v1] += 1
F[id, v2] += 1
F[id, v3] += 1
return I, Q, F
|
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weights between 0 and 1)
Returns
-------
I : 199xN np.ndarray
motif intensity matrix
Q : 199xN np.ndarray
motif coherence matrix
F : 199xN np.ndarray
motif frequency matrix
Notes
-----
Average intensity and coherence are given by I./F and Q./F.
|
def _select_options(self, options, keys, invert=False):
"""Select the provided keys out of an options object.
Selects the provided keys (or everything except the provided keys) out
of an options object.
"""
options = self._merge_options(options)
result = {}
for key in options:
if (invert and key not in keys) or (not invert and key in keys):
result[key] = options[key]
return result
|
Select the provided keys out of an options object.
Selects the provided keys (or everything except the provided keys) out
of an options object.
|
def assert_array(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
r""" Asserts whether the given array or sparse matrix has the given properties
Parameters
----------
A : ndarray, scipy.sparse matrix or array-like
the array under investigation
shape : shape, optional, default=None
asserts if the array has the requested shape. Be careful with vectors
because this will distinguish between row vectors (1,n), column vectors
(n,1) and arrays (n,). If you want to be less specific, consider using
size
uniform : None | True | False
if not None, asserts whether the array dimensions are uniform (e.g.
square for a ndim=2 array) (True), or not uniform (False).
size : int, optional, default=None
asserts if the array has the requested number of elements
ndim : int, optional, default=None
asserts if the array has the requested dimension
dtype : type, optional, default=None
asserts if the array data has the requested data type. This check is
strong, e.g. int and int64 are not equal. If you want a weaker check,
consider the kind option
kind : string, optional, default=None
Checks if the array data is of the specified kind. Options include 'i'
for integer types, 'f' for float types Check numpy.dtype.kind for
possible options. An additional option is 'numeric' for either integer
or float.
Raises
------
AssertionError
If an assertion fails
"""
try:
if shape is not None:
if not np.array_equal(np.shape(A), shape):
raise AssertionError('Expected shape '+str(shape)+' but given array has shape '+str(np.shape(A)))
if uniform is not None:
shapearr = np.array(np.shape(A))
is_uniform = np.count_nonzero(shapearr-shapearr[0]) == 0
if uniform and not is_uniform:
raise AssertionError('Given array is not uniform \n'+str(shapearr))
elif not uniform and is_uniform:
raise AssertionError('Given array is uniform but nonuniform was expected: \n'+str(shapearr))
if size is not None:
if not np.size(A) == size:
raise AssertionError('Expected size '+str(size)+' but given array has size '+str(np.size(A)))
if ndim is not None:
if not ndim == np.ndim(A):
raise AssertionError('Expected ndim '+str(ndim)+' but given array has ndim '+str(np.ndim(A)))
if dtype is not None:
# now we must create an array if we don't have one yet
if not isinstance(A, (np.ndarray)) and not scisp.issparse(A):
A = np.array(A)
if not np.dtype(dtype) == A.dtype:
raise AssertionError('Expected data type '+str(dtype)+' but given array has data type '+str(A.dtype))
if kind is not None:
# now we must create an array if we don't have one yet
if not isinstance(A, (np.ndarray)) and not scisp.issparse(A):
A = np.array(A)
if kind == 'numeric':
if not (A.dtype.kind == 'i' or A.dtype.kind == 'f'):
raise AssertionError('Expected numerical data, but given array has data kind '+str(A.dtype.kind))
elif not A.dtype.kind == kind:
raise AssertionError('Expected data kind '+str(kind)
+' but given array has data kind '+str(A.dtype.kind))
except Exception as ex:
if isinstance(ex, AssertionError):
raise ex
else: # other exception raised in the test code above
print('Found exception: ',ex)
raise AssertionError('Given argument is not an array of the expected shape or type:\n'+
'arg = '+str(A)+'\ntype = '+str(type(A)))
|
r""" Asserts whether the given array or sparse matrix has the given properties
Parameters
----------
A : ndarray, scipy.sparse matrix or array-like
the array under investigation
shape : shape, optional, default=None
asserts if the array has the requested shape. Be careful with vectors
because this will distinguish between row vectors (1,n), column vectors
(n,1) and arrays (n,). If you want to be less specific, consider using
size
uniform : None | True | False
if not None, asserts whether the array dimensions are uniform (e.g.
square for a ndim=2 array) (True), or not uniform (False).
size : int, optional, default=None
asserts if the array has the requested number of elements
ndim : int, optional, default=None
asserts if the array has the requested dimension
dtype : type, optional, default=None
asserts if the array data has the requested data type. This check is
strong, e.g. int and int64 are not equal. If you want a weaker check,
consider the kind option
kind : string, optional, default=None
Checks if the array data is of the specified kind. Options include 'i'
for integer types, 'f' for float types Check numpy.dtype.kind for
possible options. An additional option is 'numeric' for either integer
or float.
Raises
------
AssertionError
If an assertion fails
|
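Illustrative calls against a small array (the second call is expected to raise):

```python
import numpy as np

A = np.arange(6).reshape(2, 3)
assert_array(A, shape=(2, 3), ndim=2, size=6, kind='i')  # passes silently

try:
    assert_array(A, uniform=True)  # a 2x3 array is not uniform/square
except AssertionError as err:
    print(err)
```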
def cluster(list_of_texts, num_clusters=3):
"""
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
"""
pipeline = Pipeline([
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clust", KMeans(n_clusters=num_clusters))
])
try:
clusters = pipeline.fit_predict(list_of_texts)
except ValueError:
clusters = list(range(len(list_of_texts)))
return clusters
|
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
|
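A hedged usage sketch; the pipeline above assumes scikit-learn's Pipeline, CountVectorizer, TfidfTransformer, and KMeans are imported in the enclosing module:

```python
texts = [
    "the cat sat on the mat",
    "dogs are loyal pets",
    "a cat chased the mouse",
    "my dog likes long walks",
]
labels = cluster(texts, num_clusters=2)
# e.g. [0, 1, 0, 1] -- cluster ids are arbitrary, but similar texts share an id
```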
def matches_query(self, key, query):
"""
Add a query constraint requiring that the value of the specified field on
matched objects equal a result returned by another query.
:param key: the name of the field to constrain
:param query: the query whose results the field value must match
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._add_condition(key, '$inQuery', dumped)
return self
|
Add a query constraint requiring that the value of the specified field on
matched objects equal a result returned by another query.
:param key: the name of the field to constrain
:param query: the query whose results the field value must match
:type query: Query
:rtype: Query
|
def __extract_modules(self, loader, name, is_pkg):
""" if module found load module and save all attributes in the module found """
mod = loader.find_module(name).load_module(name)
""" find the attribute method on each module """
if hasattr(mod, '__method__'):
""" register to the blueprint if method attribute found """
module_router = ModuleRouter(mod,
ignore_names=self.__serialize_module_paths()
).register_route(app=self.application, name=name)
self.__routers.extend(module_router.routers)
self.__modules.append(mod)
else:
""" prompt not found notification """
# print('{} has no module attribute method'.format(mod))
pass
|
if module found load module and save all attributes in the module found
|
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
wheel_file = '{0!s}-{1!s}-{2!s}'.format(package_name, package_version, self.manylinux_wheel_file_suffix)
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
if not os.path.exists(wheel_path) or not zipfile.is_zipfile(wheel_path):
# The file is not cached, download it.
wheel_url = self.get_manylinux_wheel_url(package_name, package_version)
if not wheel_url:
return None
print(" - {}=={}: Downloading".format(package_name, package_version))
with open(wheel_path, 'wb') as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
else:
print(" - {}=={}: Using locally cached manylinux wheel".format(package_name, package_version))
return wheel_path
|
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
|
def get_pose_error(target_pose, current_pose):
"""
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogeneous matrix for the target pose
current_pose: a 4x4 homogeneous matrix for the current pose
Returns:
A 6-dim numpy array for the pose error.
"""
error = np.zeros(6)
# compute translational error
target_pos = target_pose[:3, 3]
current_pos = current_pose[:3, 3]
pos_err = target_pos - current_pos
# compute rotational error
r1 = current_pose[:3, 0]
r2 = current_pose[:3, 1]
r3 = current_pose[:3, 2]
r1d = target_pose[:3, 0]
r2d = target_pose[:3, 1]
r3d = target_pose[:3, 2]
rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))
error[:3] = pos_err
error[3:] = rot_err
return error
|
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogeneous matrix for the target pose
current_pose: a 4x4 homogeneous matrix for the current pose
Returns:
A 6-dim numpy array for the pose error.
|
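A quick numeric check with two made-up poses that differ only by translation:

```python
import numpy as np

target_pose = np.eye(4)
target_pose[:3, 3] = [0.1, 0.0, -0.2]  # desired position
current_pose = np.eye(4)               # at the origin, no rotation

print(get_pose_error(target_pose, current_pose))
# [ 0.1  0.  -0.2  0.   0.   0. ]  -- purely translational error
```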
def scan(self, restrict):
"""
Should scan another token and add it to the list, self.tokens,
and add the restriction to self.restrictions
"""
# Keep looking for a token, ignoring any in self.ignore
while True:
# Search the patterns for a match, with earlier
# tokens in the list having preference
best_pat = None
best_pat_len = 0
for p, regexp in self.patterns:
# First check to see if we're restricting to this token
if restrict and p not in restrict and p not in self.ignore:
continue
m = regexp.match(self.input, self.pos)
if m:
# We got a match
best_pat = p
best_pat_len = len(m.group(0))
break
# If we didn't find anything, raise an error
if best_pat is None:
msg = "Bad Token"
if restrict:
msg = "Trying to find one of " + ", ".join(restrict)
raise SyntaxError(self.pos, msg)
# If we found something that isn't to be ignored, return it
if best_pat in self.ignore:
# This token should be ignored ..
self.pos += best_pat_len
else:
end_pos = self.pos + best_pat_len
# Create a token with this data
token = (
self.pos,
end_pos,
best_pat,
self.input[self.pos:end_pos]
)
self.pos = end_pos
# Only add this token if it's not in the list
# (to prevent looping)
if not self.tokens or token != self.tokens[-1]:
self.tokens.append(token)
self.restrictions.append(restrict)
return 1
break
return 0
|
Should scan another token and add it to the list, self.tokens,
and add the restriction to self.restrictions
|
def delete(cls, bucket_id):
"""Delete a bucket.
Does not actually delete the Bucket, just marks it as deleted.
"""
bucket = cls.get(bucket_id)
if not bucket or bucket.deleted:
return False
bucket.deleted = True
return True
|
Delete a bucket.
Does not actually delete the Bucket, just marks it as deleted.
|
def default_package(self):
"""
::
GET /:login/packages
:Returns: the default package for this datacenter
:rtype: :py:class:`dict` or ``None``
Requests all the packages in this datacenter, filters for the default,
and returns the corresponding dict, if a default has been defined.
"""
packages = [pk for pk in self.packages()
if pk.get('default') == 'true']
if packages:
return packages[0]
else:
return None
|
::
GET /:login/packages
:Returns: the default package for this datacenter
:rtype: :py:class:`dict` or ``None``
Requests all the packages in this datacenter, filters for the default,
and returns the corresponding dict, if a default has been defined.
|
def count_above_mean(x):
"""
Returns the number of values in x that are higher than the mean of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
m = np.mean(x)
return np.where(x > m)[0].size
|
Returns the number of values in x that are higher than the mean of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
|
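For example:

```python
import numpy as np

x = np.array([1, 1, 2, 4])
print(count_above_mean(x))  # 1 -- the mean is 2.0 and only the value 4 lies strictly above it
```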
def _set_config_path(self):
"""
Reads config path from environment variable CLOEEPY_CONFIG_PATH
and sets as instance attr
"""
self._path = os.getenv("CLOEEPY_CONFIG_PATH")
if self._path is None:
msg = "CLOEEPY_CONFIG_PATH is not set. Exiting..."
sys.exit(msg)
|
Reads config path from environment variable CLOEEPY_CONFIG_PATH
and sets as instance attr
|
def bbin(obj: Union[str, Element]) -> str:
""" Boldify built in types
@param obj: object name or id
@return:
"""
return obj.name if isinstance(obj, Element) else f'**{obj}**' if obj in builtin_names else obj
|
Boldify built in types
@param obj: object name or id
@return:
|
def add_menu(self, menu):
"""
Adds a sub-menu to the editor context menu.
Menu are put at the bottom of the context menu.
.. note:: to add a menu in the middle of the context menu, you can
always add its menuAction().
:param menu: menu to add
"""
self._menus.append(menu)
self._menus = sorted(list(set(self._menus)), key=lambda x: x.title())
for action in menu.actions():
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
self.addActions(menu.actions())
|
Adds a sub-menu to the editor context menu.
Menus are put at the bottom of the context menu.
.. note:: to add a menu in the middle of the context menu, you can
always add its menuAction().
:param menu: menu to add
|
def prepare(args):
"""
%prog prepare genomesize *.fastq
Prepare MERACULOUS configuration file. Genome size should be entered in Mb.
"""
p = OptionParser(prepare.__doc__ + FastqNamings)
p.add_option("-K", default=51, type="int", help="K-mer size")
p.set_cpus(cpus=32)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
genomesize = float(args[0]) / 1000
fnames = args[1:]
for x in fnames:
assert op.exists(x), "File `{0}` not found.".format(x)
s = comment_banner("Meraculous params file") + "\n"
s += comment_banner("Basic parameters") + "\n"
s += "# Describe the libraries ( one line per library )\n"
s += "# " + " ".join(header.split()) + "\n"
libs = get_libs(fnames)
lib_seqs = []
rank = 0
for lib, fs in libs:
size = lib.size
if size == 0:
continue
rank += 1
library_name = lib.library_name
name = library_name.replace("-", "")
wildcard = "{0}*.1.*,{0}*.2.*".format(library_name)
rl = max(readlen([x]) for x in fs)
lib_seq = lib.get_lib_seq(wildcard, name, rl, rank)
lib_seqs.append(lib_seq)
s += "\n" + "\n".join(load_csv(None, lib_seqs, sep=" ")) + "\n"
params = [("genome_size", genomesize),
("is_diploid", 0),
("mer_size", opts.K),
("num_prefix_blocks", 1),
("no_read_validation", 0),
("local_num_procs", opts.cpus)]
s += "\n" + "\n".join(load_csv(None, params, sep=" ")) + "\n"
cfgfile = "meraculous.config"
write_file(cfgfile, s, tee=True)
s = "~/export/meraculous/bin/run_meraculous.sh -c {0}"\
.format(cfgfile)
runsh = "run.sh"
write_file(runsh, s)
|
%prog prepare genomesize *.fastq
Prepare MERACULOUS configuration file. Genome size should be entered in Mb.
|
def remove(self, module=True, force=False, configuration=True, dry_run=False):
"""Remove this submodule from the repository. This will remove our entry
from the .gitmodules file and the entry in the .git/config file.
:param module: If True, the module checkout we point to will be deleted
as well. If the module is currently on a commit which is not part
of any branch in the remote, if the currently checked out branch
is ahead of its tracking branch, or if there are modifications in the
working tree or untracked files, the removal will fail.
In case the removal of the repository fails for these reasons, the
submodule status will not have been altered.
If this submodule has child-modules on its own, these will be deleted
prior to touching the own module.
:param force: Enforces the deletion of the module even though it contains
modifications. This basically enforces a brute-force file system based
deletion.
:param configuration: if True, the submodule is deleted from the configuration,
otherwise it isn't. Although this should be enabled most of the times,
this flag enables you to safely delete the repository of your submodule.
:param dry_run: if True, we will not actually do anything, but throw the errors
we would usually throw
:return: self
:note: doesn't work in bare repositories
:note: doesn't work atomically, as failure to remove any part of the submodule will leave
an inconsistent state
:raise InvalidGitRepositoryError: thrown if the repository cannot be deleted
:raise OSError: if directories or files could not be removed"""
if not (module or configuration):
raise ValueError("Need to specify to delete at least the module, or the configuration")
# END handle parameters
# Recursively remove children of this submodule
nc = 0
for csm in self.children():
nc += 1
csm.remove(module, force, configuration, dry_run)
del(csm)
# end
if configuration and not dry_run and nc > 0:
# Assure we don't leave the parent repository in a dirty state, and commit our changes
# It's important for recursive, unforced, deletions to work as expected
self.module().index.commit("Removed at least one of child-modules of '%s'" % self.name)
# end handle recursion
# DELETE REPOSITORY WORKING TREE
################################
if module and self.module_exists():
mod = self.module()
git_dir = mod.git_dir
if force:
# take the fast lane and just delete everything in our module path
# TODO: If we run into permission problems, we have a highly inconsistent
# state. Delete the .git folders last, start with the submodules first
mp = self.abspath
method = None
if osp.islink(mp):
method = os.remove
elif osp.isdir(mp):
method = rmtree
elif osp.exists(mp):
raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory")
# END handle brutal deletion
if not dry_run:
assert method
method(mp)
# END apply deletion method
else:
# verify we may delete our module
if mod.is_dirty(index=True, working_tree=True, untracked_files=True):
raise InvalidGitRepositoryError(
"Cannot delete module at %s with any modifications, unless force is specified"
% mod.working_tree_dir)
# END check for dirt
# figure out whether we have new commits compared to the remotes
# NOTE: If the user pulled all the time, the remote heads might
# not have been updated, so commits coming from the remote look
# as if they come from us. But we stay strictly read-only and
# don't fetch beforehand.
for remote in mod.remotes:
num_branches_with_new_commits = 0
rrefs = remote.refs
for rref in rrefs:
num_branches_with_new_commits += len(mod.git.cherry(rref)) != 0
# END for each remote ref
# not a single remote branch contained all our commits
if len(rrefs) and num_branches_with_new_commits == len(rrefs):
raise InvalidGitRepositoryError(
"Cannot delete module at %s as there are new commits" % mod.working_tree_dir)
# END handle new commits
# have to manually delete references as python's scoping is
# not existing, they could keep handles open ( on windows this is a problem )
if len(rrefs):
del(rref)
# END handle remotes
del(rrefs)
del(remote)
# END for each remote
# finally delete our own submodule
if not dry_run:
self._clear_cache()
wtd = mod.working_tree_dir
del(mod) # release file-handles (windows)
import gc
gc.collect()
try:
rmtree(wtd)
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n %s", ex)
else:
raise
# END delete tree if possible
# END handle force
if not dry_run and osp.isdir(git_dir):
self._clear_cache()
try:
rmtree(git_dir)
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n %s", ex)
else:
raise
# end handle separate bare repository
# END handle module deletion
# void our data not to delay invalid access
if not dry_run:
self._clear_cache()
# DELETE CONFIGURATION
######################
if configuration and not dry_run:
# first the index-entry
parent_index = self.repo.index
try:
del(parent_index.entries[parent_index.entry_key(self.path, 0)])
except KeyError:
pass
# END delete entry
parent_index.write()
# now git config - need the config intact, otherwise we can't query
# information anymore
with self.repo.config_writer() as writer:
writer.remove_section(sm_section(self.name))
with self.config_writer() as writer:
writer.remove_section()
# END delete configuration
return self
|
Remove this submodule from the repository. This will remove our entry
from the .gitmodules file and the entry in the .git/config file.
:param module: If True, the module checkout we point to will be deleted
as well. If the module is currently on a commit which is not part
of any branch in the remote, if the currently checked out branch
is ahead of its tracking branch, or if there are modifications in the
working tree or untracked files, the removal will fail.
In case the removal of the repository fails for these reasons, the
submodule status will not have been altered.
If this submodule has child-modules on its own, these will be deleted
prior to touching the own module.
:param force: Enforces the deletion of the module even though it contains
modifications. This basically enforces a brute-force file system based
deletion.
:param configuration: if True, the submodule is deleted from the configuration,
otherwise it isn't. Although this should be enabled most of the times,
this flag enables you to safely delete the repository of your submodule.
:param dry_run: if True, we will not actually do anything, but throw the errors
we would usually throw
:return: self
:note: doesn't work in bare repositories
:note: doesn't work atomically, as failure to remove any part of the submodule will leave
an inconsistent state
:raise InvalidGitRepositoryError: thrown if the repository cannot be deleted
:raise OSError: if directories or files could not be removed
|
def _isdst(dt):
"""Check if date is in dst.
"""
if type(dt) == datetime.date:
dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
dtc = dt.replace(year=datetime.datetime.now().year)
if time.localtime(dtc.timestamp()).tm_isdst == 1:
return True
return False
|
Check if date is in dst.
|
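Usage sketch; the result depends on the machine's local timezone, so the comments are only indicative:

```python
import datetime

print(_isdst(datetime.date(2023, 7, 1)))   # e.g. True in most US/European timezones
print(_isdst(datetime.date(2023, 1, 15)))  # e.g. False
```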
def _connect(self):
"""Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
"""
def tryConnect():
self.connector = d = maybeDeferred(connect)
d.addCallback(cbConnect)
d.addErrback(ebConnect)
def connect():
endpoint = self._endpointFactory(self._reactor, self.host, self.port)
log.debug('%r: connecting with %s', self, endpoint)
return endpoint.connect(self)
def cbConnect(proto):
log.debug('%r: connected to %r', self, proto.transport.getPeer())
self._failures = 0
self.connector = None
self.proto = proto
if self._dDown:
proto.transport.loseConnection()
else:
self._sendQueued()
def ebConnect(fail):
if self._dDown:
log.debug('%r: breaking connect loop due to %r after close()', self, fail)
return fail
self._failures += 1
delay = self._retryPolicy(self._failures)
log.debug('%r: failure %d to connect -> %s; retry in %.2f seconds.',
self, self._failures, fail.value, delay)
self.connector = d = deferLater(self._reactor, delay, lambda: None)
d.addCallback(cbDelayed)
def cbDelayed(result):
tryConnect()
self._failures = 0
tryConnect()
|
Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
|
def send(self, pkt):
"""Send a packet"""
# Use the routing table to find the output interface
iff = pkt.route()[0]
if iff is None:
iff = conf.iface
# Assign the network interface to the BPF handle
if self.assigned_interface != iff:
try:
fcntl.ioctl(self.outs, BIOCSETIF, struct.pack("16s16x", iff.encode())) # noqa: E501
except IOError:
raise Scapy_Exception("BIOCSETIF failed on %s" % iff)
self.assigned_interface = iff
# Build the frame
frame = raw(self.guessed_cls() / pkt)
pkt.sent_time = time.time()
# Send the frame
L2bpfSocket.send(self, frame)
|
Send a packet
|
def _claim(cls, cdata: Any) -> "Tileset":
"""Return a new Tileset that owns the provided TCOD_Tileset* object."""
self = object.__new__(cls) # type: Tileset
if cdata == ffi.NULL:
raise RuntimeError("Tileset initialized with nullptr.")
self._tileset_p = ffi.gc(cdata, lib.TCOD_tileset_delete)
return self
|
Return a new Tileset that owns the provided TCOD_Tileset* object.
|
def _parse_triggered_hits(self, file_obj):
"""Parse and store triggered hits."""
for _ in range(self.n_triggered_hits):
dom_id, pmt_id = unpack('<ib', file_obj.read(5))
tdc_time = unpack('>I', file_obj.read(4))[0]
tot = unpack('<b', file_obj.read(1))[0]
trigger_mask = unpack('<Q', file_obj.read(8))
self.triggered_hits.append(
(dom_id, pmt_id, tdc_time, tot, trigger_mask)
)
|
Parse and store triggered hits.
|
def __fetch(self, url, payload):
"""Fetch requests from groupsio API"""
r = requests.get(url, params=payload, auth=self.auth, verify=self.verify)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
raise e
return r
|
Fetch requests from groupsio API
|
def getMaxStmIdForStm(stm):
"""
Get maximum _instId from all assigments in statement
"""
maxId = 0
if isinstance(stm, Assignment):
return stm._instId
elif isinstance(stm, WaitStm):
return maxId
else:
for _stm in stm._iter_stms():
maxId = max(maxId, getMaxStmIdForStm(_stm))
return maxId
|
Get maximum _instId from all assignments in statement
|
def connect_paragraph(self, paragraph, paragraphs):
""" Create parent/child links to other paragraphs.
The paragraphs parameter is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
"""
if paragraph.depth > 0:
for i in reversed(range(len(paragraphs))):
if paragraphs[i].depth == paragraph.depth-1:
paragraph.parent = paragraphs[i]
paragraphs[i].children.append(paragraph)
break
return paragraph
|
Create parent/child links to other paragraphs.
The paragraphs parameter is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
|
def _fetch_cached_output(self, items, result):
"""
First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database.
"""
if not appsettings.FLUENT_CONTENTS_CACHE_OUTPUT or not self.use_cached_output:
result.add_remaining_list(items)
return
for contentitem in items:
result.add_ordering(contentitem)
output = None
try:
plugin = contentitem.plugin
except PluginNotFound as ex:
result.store_exception(contentitem, ex) # Will deal with that later.
logger.debug("- item #%s has no matching plugin: %s", contentitem.pk, str(ex))
continue
# Respect the cache output setting of the plugin
if self.can_use_cached_output(contentitem):
result.add_plugin_timeout(plugin)
output = plugin.get_cached_output(result.placeholder_name, contentitem)
# Support transition to new output format.
if output is not None and not isinstance(output, ContentItemOutput):
output = None
logger.debug("Flushed cached output of {0}#{1} to store new ContentItemOutput format (key: {2})".format(
plugin.type_name,
contentitem.pk,
get_placeholder_name(contentitem.placeholder)
))
# For debugging, ignore cached values when the template is updated.
if output and settings.DEBUG:
cachekey = get_rendering_cache_key(result.placeholder_name, contentitem)
if is_template_updated(self.request, contentitem, cachekey):
output = None
if output:
result.store_output(contentitem, output)
else:
result.add_remaining(contentitem)
|
First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database.
|
async def get_config(self):
"""Return the configuration settings for this model.
:returns: A ``dict`` mapping keys to `ConfigValue` instances,
which have `source` and `value` attributes.
"""
config_facade = client.ModelConfigFacade.from_connection(
self.connection()
)
result = await config_facade.ModelGet()
config = result.config
for key, value in config.items():
config[key] = ConfigValue.from_json(value)
return config
|
Return the configuration settings for this model.
:returns: A ``dict`` mapping keys to `ConfigValue` instances,
which have `source` and `value` attributes.
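A hedged usage sketch assuming python-libjuju's Model and an already-accessible controller/model (connection details elided; the config keys printed depend on the model):

import asyncio
from juju.model import Model

async def show_model_config():
    model = Model()
    await model.connect()                        # connect to the currently active model
    try:
        config = await model.get_config()
        for key, value in sorted(config.items()):
            print(key, value.source, value.value)   # each entry is a ConfigValue
    finally:
        await model.disconnect()

# asyncio.get_event_loop().run_until_complete(show_model_config())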
|
def _get_reciprocal(self):
"""Return the :class:`Arrow` that connects my origin and destination
in the opposite direction, if it exists.
"""
orign = self.portal['origin']
destn = self.portal['destination']
if (
destn in self.board.arrow and
orign in self.board.arrow[destn]
):
return self.board.arrow[destn][orign]
else:
return None
|
Return the :class:`Arrow` that connects my origin and destination
in the opposite direction, if it exists.
|
def smt_dataset(directory='data/',
train=False,
dev=False,
test=False,
train_filename='train.txt',
dev_filename='dev.txt',
test_filename='test.txt',
extracted_name='trees',
check_files=['trees/train.txt'],
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip',
fine_grained=False,
subtrees=False):
"""
Load the Stanford Sentiment Treebank dataset.
Semantic word spaces have been very useful but cannot express the meaning of longer phrases in
a principled way. Further progress towards understanding compositionality in tasks such as
sentiment detection requires richer supervised training and evaluation resources and more
powerful models of composition. To remedy this, we introduce a Sentiment Treebank. It includes
fine grained sentiment labels for 215,154 phrases in the parse trees of 11,855 sentences and
presents new challenges for sentiment compositionality.
**Reference**:
https://nlp.stanford.edu/sentiment/index.html
**Citation:**
Richard Socher, Alex Perelygin, Jean Y. Wu, Jason Chuang, Christopher D. Manning,
Andrew Y. Ng and Christopher Potts. Recursive Deep Models for Semantic Compositionality Over a
Sentiment Treebank
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the development split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
dev_filename (str, optional): The filename of the development split.
test_filename (str, optional): The filename of the test split.
extracted_name (str, optional): Name of the extracted dataset directory.
check_files (str, optional): Check if these files exist, then this download was successful.
url (str, optional): URL of the dataset `tar.gz` file.
subtrees (bool, optional): Whether to include sentiment-tagged subphrases in addition to
complete examples.
fine_grained (bool, optional): Whether to use 5-class instead of 3-class labeling.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset`: Tuple with the training tokens, dev
tokens and test tokens in order if their respective boolean argument is true.
Example:
>>> from torchnlp.datasets import smt_dataset # doctest: +SKIP
>>> train = smt_dataset(train=True) # doctest: +SKIP
>>> train[5] # doctest: +SKIP
{
'text': "Whether or not you 're enlightened by any of Derrida 's lectures on ...",
'label': 'positive'
}
"""
download_file_maybe_extract(url=url, directory=directory, check_files=check_files)
ret = []
splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
splits = [f for (requested, f) in splits if requested]
for filename in splits:
full_path = os.path.join(directory, extracted_name, filename)
examples = []
with io.open(full_path, encoding='utf-8') as f:
for line in f:
line = line.strip()
if subtrees:
examples.extend(parse_tree(line, subtrees=subtrees, fine_grained=fine_grained))
else:
examples.append(parse_tree(line, subtrees=subtrees, fine_grained=fine_grained))
ret.append(Dataset(examples))
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
|
Load the Stanford Sentiment Treebank dataset.
Semantic word spaces have been very useful but cannot express the meaning of longer phrases in
a principled way. Further progress towards understanding compositionality in tasks such as
sentiment detection requires richer supervised training and evaluation resources and more
powerful models of composition. To remedy this, we introduce a Sentiment Treebank. It includes
fine grained sentiment labels for 215,154 phrases in the parse trees of 11,855 sentences and
presents new challenges for sentiment compositionality.
**Reference**:
https://nlp.stanford.edu/sentiment/index.html
**Citation:**
Richard Socher, Alex Perelygin, Jean Y. Wu, Jason Chuang, Christopher D. Manning,
Andrew Y. Ng and Christopher Potts. Recursive Deep Models for Semantic Compositionality Over a
Sentiment Treebank
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the development split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
dev_filename (str, optional): The filename of the development split.
test_filename (str, optional): The filename of the test split.
extracted_name (str, optional): Name of the extracted dataset directory.
check_files (str, optional): Check if these files exist, then this download was successful.
url (str, optional): URL of the dataset `tar.gz` file.
subtrees (bool, optional): Whether to include sentiment-tagged subphrases in addition to
complete examples.
fine_grained (bool, optional): Whether to use 5-class instead of 3-class labeling.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset`: Tuple with the training tokens, dev
tokens and test tokens in order if their respective boolean argument is true.
Example:
>>> from torchnlp.datasets import smt_dataset # doctest: +SKIP
>>> train = smt_dataset(train=True) # doctest: +SKIP
>>> train[5] # doctest: +SKIP
{
'text': "Whether or not you 're enlightened by any of Derrida 's lectures on ...",
'label': 'positive'
}
|
def fake_keypress(self, key, repeat=1):
"""
Fake a keypress
Usage: C{keyboard.fake_keypress(key, repeat=1)}
Uses XTest to 'fake' a keypress. This is useful to send keypresses to some
applications which won't respond to keyboard.send_key()
        @param key: the key to be sent (e.g. "s" or "<enter>")
@param repeat: number of times to repeat the key event
"""
for _ in range(repeat):
self.mediator.fake_keypress(key)
|
Fake a keypress
Usage: C{keyboard.fake_keypress(key, repeat=1)}
Uses XTest to 'fake' a keypress. This is useful to send keypresses to some
applications which won't respond to keyboard.send_key()
@param key: the key to be sent (e.g. "s" or "<enter>")
@param repeat: number of times to repeat the key event
|
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
formats python2 statements
"""
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack': True, 'context': True, 'dups': True }
parsed = parse_python2(python_stmts, show_tokens=show_tokens,
parser_debug=parser_debug)
assert parsed == 'file_input', 'Should have parsed grammar start'
formatter = Python2Formatter()
if showast:
print(parsed)
# What we've been waiting for: Generate source from AST!
python2_formatted_str = formatter.traverse(parsed)
return python2_formatted_str
|
formats python2 statements
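A hedged usage sketch: round-trip a couple of Python 2 statements through the formatter (exact spacing of the regenerated source depends on the grammar and Python2Formatter in use):

src = "x = 1\nprint x\n"
print(format_python2_stmts(src, show_tokens=False, showast=False, showgrammar=False))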
|
def from_resolver(cls, spec_resolver):
"""Creates a customized Draft4ExtendedValidator.
:param spec_resolver: resolver for the spec
:type resolver: :class:`jsonschema.RefResolver`
"""
spec_validators = cls._get_spec_validators(spec_resolver)
return validators.extend(Draft4Validator, spec_validators)
|
Creates a customized Draft4ExtendedValidator.
:param spec_resolver: resolver for the spec
:type resolver: :class:`jsonschema.RefResolver`
|
def matrix2map(data_matrix, map_shape):
r"""Matrix to Map
This method transforms a 2D matrix to a 2D map
Parameters
----------
data_matrix : np.ndarray
Input data matrix, 2D array
map_shape : tuple
2D shape of the output map
Returns
-------
np.ndarray 2D map
Raises
------
ValueError
For invalid layout
Examples
--------
>>> from modopt.base.transform import matrix2map
>>> a = np.array([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14],
[3, 7, 11, 15]])
    >>> matrix2map(a, (4, 4))
array([[ 0, 1, 4, 5],
[ 2, 3, 6, 7],
[ 8, 9, 12, 13],
[10, 11, 14, 15]])
"""
map_shape = np.array(map_shape)
# Get the shape and layout of the images
image_shape = np.sqrt(data_matrix.shape[0]).astype(int)
layout = np.array(map_shape // np.repeat(image_shape, 2), dtype='int')
# Map objects from matrix
data_map = np.zeros(map_shape)
temp = data_matrix.reshape(image_shape, image_shape, data_matrix.shape[1])
for i in range(data_matrix.shape[1]):
lower = (image_shape * (i // layout[1]),
image_shape * (i % layout[1]))
upper = (image_shape * (i // layout[1] + 1),
image_shape * (i % layout[1] + 1))
data_map[lower[0]:upper[0], lower[1]:upper[1]] = temp[:, :, i]
return data_map.astype(int)
|
r"""Matrix to Map
This method transforms a 2D matrix to a 2D map
Parameters
----------
data_matrix : np.ndarray
Input data matrix, 2D array
map_shape : tuple
2D shape of the output map
Returns
-------
np.ndarray 2D map
Raises
------
ValueError
For invalid layout
Examples
--------
>>> from modopt.base.transform import matrix2map
>>> a = np.array([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14],
[3, 7, 11, 15]])
>>> matrix2map(a, (4, 4))
array([[ 0, 1, 4, 5],
[ 2, 3, 6, 7],
[ 8, 9, 12, 13],
[10, 11, 14, 15]])
|
def derationalize_denom(expr):
"""Try to de-rationalize the denominator of the given expression.
The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from
``sqrt(2)/2``.
Specifically, this matches `expr` against the following pattern::
Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...)
and returns a tuple ``(numerator, denom_sq, post_factor)``, where
``numerator`` and ``denom_sq`` are ``n`` and ``d`` in the above pattern (of
type `int`), respectively, and ``post_factor`` is the product of the
remaining factors (``...`` in `expr`). The result will fulfill the
following identity::
(numerator / sqrt(denom_sq)) * post_factor == expr
If `expr` does not follow the appropriate pattern, a :exc:`ValueError` is
raised.
"""
r_pos = -1
p_pos = -1
numerator = S.Zero
denom_sq = S.One
post_factors = []
if isinstance(expr, Mul):
for pos, factor in enumerate(expr.args):
if isinstance(factor, Rational) and r_pos < 0:
r_pos = pos
numerator, denom_sq = factor.p, factor.q
elif isinstance(factor, Pow) and r_pos >= 0:
if factor == sqrt(denom_sq):
p_pos = pos
else:
post_factors.append(factor)
else:
post_factors.append(factor)
if r_pos >= 0 and p_pos >= 0:
return numerator, denom_sq, Mul(*post_factors)
else:
raise ValueError("Cannot derationalize")
else:
raise ValueError("expr is not a Mul instance")
|
Try to de-rationalize the denominator of the given expression.
The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from
``sqrt(2)/2``.
Specifically, this matches `expr` against the following pattern::
Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...)
and returns a tuple ``(numerator, denom_sq, post_factor)``, where
``numerator`` and ``denom_sq`` are ``n`` and ``d`` in the above pattern (of
type `int`), respectively, and ``post_factor`` is the product of the
remaining factors (``...`` in `expr`). The result will fulfill the
following identity::
(numerator / sqrt(denom_sq)) * post_factor == expr
If `expr` does not follow the appropriate pattern, a :exc:`ValueError` is
raised.
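A small SymPy check of the documented identity, assuming SymPy represents x*sqrt(2)/2 as Mul(Rational(1, 2), sqrt(2), x) so that the pattern above matches:

from sympy import Symbol, sqrt, simplify

x = Symbol('x')
expr = x * sqrt(2) / 2                                   # the sqrt(2)/2 form we want to undo
numerator, denom_sq, post_factor = derationalize_denom(expr)
print(numerator, denom_sq, post_factor)                  # 1 2 x
print(simplify(numerator / sqrt(denom_sq) * post_factor - expr))   # 0, so the identity holds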
|
def kill(self, signal=None):
"""
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.kill(self.id, signal=signal)
|
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
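A hedged usage sketch with the Docker SDK for Python (needs a reachable Docker daemon; the image and signal are illustrative):

import docker

client = docker.from_env()
container = client.containers.run('alpine', 'sleep 300', detach=True)
container.kill(signal='SIGTERM')    # omit `signal` to send the default SIGKILL
container.remove(force=True)        # clean up the stopped container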
|
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms)
|
Updates the query line items to match the latest rule options.
|
def main(argv=None):
"""Parse passed in cooked single HTML."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('collated_html', type=argparse.FileType('r'),
help='Path to the collated html'
' file (use - for stdin)')
parser.add_argument('-d', '--dump-tree', action='store_true',
help='Print out parsed model tree.')
parser.add_argument('-o', '--output', type=argparse.FileType('w+'),
help='Write out epub of parsed tree.')
parser.add_argument('-i', '--input', type=argparse.FileType('r'),
help='Read and copy resources/ for output epub.')
args = parser.parse_args(argv)
if args.input and args.output == sys.stdout:
raise ValueError('Cannot output to stdout if reading resources')
from cnxepub.collation import reconstitute
binder = reconstitute(args.collated_html)
if args.dump_tree:
print(pformat(cnxepub.model_to_tree(binder)),
file=sys.stdout)
if args.output:
cnxepub.adapters.make_epub(binder, args.output)
if args.input:
args.output.seek(0)
zout = ZipFile(args.output, 'a', ZIP_DEFLATED)
zin = ZipFile(args.input, 'r')
for res in zin.namelist():
if res.startswith('resources'):
zres = zin.open(res)
zi = zin.getinfo(res)
zout.writestr(zi, zres.read(), ZIP_DEFLATED)
zout.close()
# TODO Check for documents that have no identifier.
# These should likely be composite-documents
# or the the metadata got wiped out.
# docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter)
# if x.ident_hash is None]
return 0
|
Parse passed in cooked single HTML.
|
def temperature_effectiveness_TEMA_H(R1, NTU1, Ntp, optimal=True):
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `optimal` option. The supported cases are as follows:
* One tube pass (tube fluid split into two streams individually mixed,
shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet
tube side next to inlet shell-side)
1-1 TEMA H, tube fluid split into two streams individually mixed, shell
fluid mixed:
.. math::
P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2)
A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\}
B = \frac{1-D}{1-R_1 D/2}
D = \exp[-NTU_1(1-R_1/2)/2]
E = (A + B - ABR_1/2)/2
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section:
.. math::
P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right]
B = (1+H)(1+E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
H = [1 - \exp(-2\beta)]/(4/R_1 -1)
E = [1 - \exp(-\beta)]/(4/R_1 - 1)
D = [1 - \exp(-\alpha)]/(4/R_1 + 1)
\alpha = NTU_1(4 + R_1)/8
\beta = NTU_1(4-R_1)/8
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section
but with the inlet tube stream coming in next to the shell fluid inlet
in an inefficient way (this is only shown in [2]_, and the stream 1/2
convention in it is different but converted here; P1 is still returned):
.. math::
P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right]
B = (1 + H)(1 + E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
D = \frac{1 - \exp(-\alpha)}{1 - 4R_2}
E = \frac{\exp(-\beta) - 1}{4R_2 +1}
H = \frac{\exp(-2\beta) - 1}{4R_2 +1}
\alpha = \frac{NTU_2}{8}(4R_2 -1)
\beta = \frac{NTU_2}{8}(4R_2 +1)
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, or 2, [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes greater than 1 or 2, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
0.5730728284905833
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
if Ntp == 1:
A = 1./(1 + R1/2.)*(1. - exp(-NTU1*(1. + R1/2.)/2.))
D = exp(-NTU1*(1. - R1/2.)/2.)
if R1 != 2:
B = (1. - D)/(1. - R1*D/2.)
else:
B = NTU1/(2. + NTU1)
E = (A + B - A*B*R1/2.)/2.
P1 = E*(1. + (1. - B*R1/2.)*(1. - A*R1/2. + A*B*R1)) - A*B*(1. - B*R1/2.)
elif Ntp == 2 and optimal:
alpha = NTU1*(4. + R1)/8.
beta = NTU1*(4. - R1)/8.
D = (1. - exp(-alpha))/(4./R1 + 1)
if R1 != 4:
E = (1. - exp(-beta))/(4./R1 - 1.)
H = (1. - exp(-2.*beta))/(4./R1 - 1.)
else:
E = NTU1/2.
H = NTU1
G = (1-D)**2*(D**2 + E**2) + D**2*(1+E)**2
B = (1. + H)*(1. + E)**2
P1 = 1./R1*(1. - (1. - D)**4/(B - 4.*G/R1))
elif Ntp == 2 and not optimal:
R1_orig = R1
#NTU2 = NTU1*R1_orig but we want to treat it as NTU1 in this case
NTU1 = NTU1*R1_orig # switch 1
# R2 = 1/R1 but we want to treat it as R1 in this case
R1 = 1./R1_orig # switch 2
beta = NTU1*(4.*R1 + 1)/8.
alpha = NTU1/8.*(4.*R1 - 1.)
H = (exp(-2.*beta) - 1.)/(4.*R1 + 1.)
E = (exp(-beta) - 1.)/(4.*R1 + 1.)
B = (1. + H)*(1. + E)**2
if R1 != 0.25:
D = (1. - exp(-alpha))/(1. - 4.*R1)
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
else:
D = -NTU1/8.
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
P1 = P1/R1_orig # switch 3, confirmed
else:
raise Exception('Supported numbers of tube passes are 1 and 2.')
return P1
|
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `optimal` option. The supported cases are as follows:
* One tube pass (tube fluid split into two streams individually mixed,
shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet
tube side next to inlet shell-side)
1-1 TEMA H, tube fluid split into two streams individually mixed, shell
fluid mixed:
.. math::
P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2)
A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\}
B = \frac{1-D}{1-R_1 D/2}
D = \exp[-NTU_1(1-R_1/2)/2]
E = (A + B - ABR_1/2)/2
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section:
.. math::
P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right]
B = (1+H)(1+E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
H = [1 - \exp(-2\beta)]/(4/R_1 -1)
E = [1 - \exp(-\beta)]/(4/R_1 - 1)
D = [1 - \exp(-\alpha)]/(4/R_1 + 1)
\alpha = NTU_1(4 + R_1)/8
\beta = NTU_1(4-R_1)/8
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section
but with the inlet tube stream coming in next to the shell fluid inlet
in an inefficient way (this is only shown in [2]_, and the stream 1/2
convention in it is different but converted here; P1 is still returned):
.. math::
P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right]
B = (1 + H)(1 + E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
D = \frac{1 - \exp(-\alpha)}{1 - 4R_2}
E = \frac{\exp(-\beta) - 1}{4R_2 +1}
H = \frac{\exp(-2\beta) - 1}{4R_2 +1}
\alpha = \frac{NTU_2}{8}(4R_2 -1)
\beta = \frac{NTU_2}{8}(4R_2 +1)
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, or 2, [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes greater than 1 or 2, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
0.5730728284905833
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
|
def YiqToRgb(y, i, q):
'''Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
        The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.YiqToRgb(0.592263, 0.458874, -0.0499818)
'(1, 0.5, 5.442e-07)'
'''
r = y + (i * 0.9562) + (q * 0.6210)
g = y - (i * 0.2717) - (q * 0.6485)
b = y - (i * 1.1053) + (q * 1.7020)
return (r, g, b)
|
Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.YiqToRgb(0.592263, 0.458874, -0.0499818)
'(1, 0.5, 5.442e-07)'
|
def _iterdump(self, file_name, headers=None):
"""
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame
"""
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.headers_normal['step_index_txt']
point_txt = self.headers_normal['data_point_txt']
cycle_txt = self.headers_normal['cycle_index_txt']
self.logger.debug("iterating through file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self.__get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.headers_normal['test_id_txt']]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no]) # OBS
test_name = global_data_df[self.headers_global['test_name_txt']][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns.extend(headers)
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.headers_normal['test_id_txt'], test_ID)
sql_5 = "order by %s" % self.headers_normal['data_point_txt']
import time
info_list = []
info_header = ["cycle", "row_count", "start_point", "end_point"]
info_header.extend(headers)
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
start_point = normal_df[point_txt].min()
end_point = normal_df[point_txt].max()
last = normal_df.iloc[-1, :]
step_list = [cycle_number, row_count, start_point, end_point]
step_list.extend([last[x] for x in headers])
info_list.append(step_list)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict
|
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame
|
def parse(self, scope):
""" Parse Node
args:
scope (Scope): Scope object
raises:
SyntaxError
returns:
str
"""
assert (len(self.tokens) == 3)
expr = self.process(self.tokens, scope)
A, O, B = [
e[0] if isinstance(e, tuple) else e for e in expr
if str(e).strip()
]
try:
a, ua = utility.analyze_number(A, 'Illegal element in expression')
b, ub = utility.analyze_number(B, 'Illegal element in expression')
except SyntaxError:
return ' '.join([str(A), str(O), str(B)])
if (a is False or b is False):
return ' '.join([str(A), str(O), str(B)])
if ua == 'color' or ub == 'color':
return color.Color().process((A, O, B))
if a == 0 and O == '/':
# NOTE(saschpe): The ugliest but valid CSS since sliced bread: 'font: 0/1 a;'
return ''.join([str(A), str(O), str(B), ' '])
out = self.operate(a, b, O)
if isinstance(out, bool):
return out
return self.with_units(out, ua, ub)
|
Parse Node
args:
scope (Scope): Scope object
raises:
SyntaxError
returns:
str
|
def cli(obj, purge):
"""List alert suppressions."""
client = obj['client']
if obj['output'] == 'json':
r = client.http.get('/blackouts')
click.echo(json.dumps(r['blackouts'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {
'id': 'ID', 'priority': 'P', 'environment': 'ENVIRONMENT', 'service': 'SERVICE', 'resource': 'RESOURCE',
'event': 'EVENT', 'group': 'GROUP', 'tags': 'TAGS', 'customer': 'CUSTOMER', 'startTime': 'START', 'endTime': 'END',
'duration': 'DURATION', 'user': 'USER', 'createTime': 'CREATED', 'text': 'COMMENT',
'status': 'STATUS', 'remaining': 'REMAINING'
}
blackouts = client.get_blackouts()
click.echo(tabulate([b.tabular(timezone) for b in blackouts], headers=headers, tablefmt=obj['output']))
expired = [b for b in blackouts if b.status == 'expired']
if purge:
with click.progressbar(expired, label='Purging {} blackouts'.format(len(expired))) as bar:
for b in bar:
client.delete_blackout(b.id)
|
List alert suppressions.
|
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):
"""
Returns schema view which renders Swagger/OpenAPI.
"""
class SwaggerSchemaView(APIView):
_ignore_model_permissions = True
exclude_from_schema = True
permission_classes = [AllowAny]
renderer_classes = [
CoreJSONRenderer,
renderers.OpenAPIRenderer,
renderers.SwaggerUIRenderer
]
def get(self, request):
generator = SchemaGenerator(
title=title,
url=url,
patterns=patterns,
urlconf=urlconf
)
schema = generator.get_schema(request=request)
if not schema:
raise exceptions.ValidationError(
'The schema generator did not return a schema Document'
)
return Response(schema)
return SwaggerSchemaView.as_view()
|
Returns schema view which renders Swagger/OpenAPI.
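A hedged usage sketch in the django-rest-swagger style: expose the generated view from urls.py (the URL pattern and title are illustrative):

from django.conf.urls import url

schema_view = get_swagger_view(title='Pastebin API')

urlpatterns = [
    url(r'^docs/$', schema_view),
]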
|
def get_base_route(cls):
"""Returns the route base to use for the current class."""
base_route = cls.__name__.lower()
if cls.base_route is not None:
base_route = cls.base_route
base_rule = parse_rule(base_route)
cls.base_args = [r[2] for r in base_rule]
return base_route.strip("/")
|
Returns the route base to use for the current class.
|
def _trace_dispatch(frame, event, arg):
# type: (Any, str, Optional[Any]) -> None
"""
This is the main hook passed to setprofile().
    It implements the Python profiler interface.
Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace
"""
# Bail if we're not tracing.
if not running:
return
# Get counter for this code object. Bail if we don't care about this function.
# An explicit None is stored in the table when we no longer care.
code = frame.f_code
key = id(code)
n = sampling_counters.get(key, 0)
if n is None:
return
if event == 'call':
# Bump counter and bail depending on sampling sequence.
sampling_counters[key] = n + 1
# Each function gets traced at most MAX_SAMPLES_PER_FUNC times per run.
# NOTE: There's a race condition if two threads call the same function.
# I don't think we should care, so what if it gets probed an extra time.
if n not in sampling_sequence:
if n > LAST_SAMPLE:
sampling_counters[key] = None # We're no longer interested in this function.
call_pending.discard(key) # Avoid getting events out of sync
return
# Mark that we are looking for a return from this code object.
call_pending.add(key)
elif event == 'return':
if key not in call_pending:
# No pending call event -- ignore this event. We only collect
# return events when we know the corresponding call event.
return
call_pending.discard(key) # Avoid race conditions
else:
# Ignore other events, such as c_call and c_return.
return
# Track calls under current directory only.
filename = _filter_filename(code.co_filename)
if filename:
func_name = get_function_name_from_frame(frame)
if not func_name or func_name[0] == '<':
# Could be a lambda or a comprehension; we're not interested.
sampling_counters[key] = None
else:
function_key = FunctionKey(filename, code.co_firstlineno, func_name)
if event == 'call':
# TODO(guido): Make this faster
arg_info = inspect.getargvalues(frame) # type: ArgInfo
resolved_types = prep_args(arg_info)
_task_queue.put(KeyAndTypes(function_key, resolved_types))
elif event == 'return':
# This event is also triggered if a function yields or raises an exception.
# We can tell the difference by looking at the bytecode.
# (We don't get here for C functions so the bytecode always exists.)
last_opcode = code.co_code[frame.f_lasti]
if last_opcode == RETURN_VALUE_OPCODE:
if code.co_flags & CO_GENERATOR:
# Return from a generator.
t = resolve_type(FakeIterator([]))
else:
t = resolve_type(arg)
elif last_opcode == YIELD_VALUE_OPCODE:
# Yield from a generator.
# TODO: Unify generators -- currently each YIELD is turned into
# a separate call, so a function yielding ints and strs will be
# typed as Union[Iterator[int], Iterator[str]] -- this should be
# Iterator[Union[int, str]].
t = resolve_type(FakeIterator([arg]))
else:
# This branch is also taken when returning from a generator.
# TODO: returning non-trivial values from generators, per PEP 380;
# and async def / await stuff.
t = NoReturnType
_task_queue.put(KeyAndReturn(function_key, t))
else:
sampling_counters[key] = None
|
This is the main hook passed to setprofile().
It implements the Python profiler interface.
Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace
|
def wait(self, time):
"""Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
"""
self._wait = Event()
return not self._wait.wait(time)
|
Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
|
def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_data(url)
for value in response['values']:
yield value
url = response.get('next', None)
|
Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection.
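A hedged usage sketch: lazily walk every value of a paged Bitbucket 2.0 endpoint (the client object and repository path are hypothetical placeholders):

client = BitbucketClient(username='alice', password='app-password')   # hypothetical client class
for repo in client.get_collection('/repositories/my-team'):
    print(repo['slug'])        # pages are fetched on demand as the iterator advances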
|
def postprocess_segments(self):
"""Convert the format of the segment class members."""
# make segs a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for iseg, seg in enumerate(self.segs):
mask = np.zeros(self._adata.shape[0], dtype=bool)
mask[seg] = True
self.segs[iseg] = mask
# convert to arrays
self.segs = np.array(self.segs)
self.segs_tips = np.array(self.segs_tips)
|
Convert the format of the segment class members.
|
def solve(self, scenario, solver):
"""
Decompose each cluster into separate units and try to optimize them
separately
:param scenario:
:param solver: Solver that may be used to optimize partial networks
"""
clusters = set(self.clustering.busmap.values)
n = len(clusters)
self.stats = {'clusters': pd.DataFrame(
index=sorted(clusters),
columns=["decompose", "spread", "transfer"])}
profile = cProfile.Profile()
for i, cluster in enumerate(sorted(clusters)):
print('---')
print('Decompose cluster %s (%d/%d)' % (cluster, i+1, n))
profile.enable()
t = time.time()
partial_network, externals = self.construct_partial_network(
cluster,
scenario)
profile.disable()
self.stats['clusters'].loc[cluster, 'decompose'] = time.time() - t
print('Decomposed in ',
self.stats['clusters'].loc[cluster, 'decompose'])
t = time.time()
profile.enable()
self.solve_partial_network(cluster, partial_network, scenario,
solver)
profile.disable()
self.stats['clusters'].loc[cluster, 'spread'] = time.time() - t
print('Result distributed in ',
self.stats['clusters'].loc[cluster, 'spread'])
profile.enable()
t = time.time()
self.transfer_results(partial_network, externals)
profile.disable()
self.stats['clusters'].loc[cluster, 'transfer'] = time.time() - t
print('Results transferred in ',
self.stats['clusters'].loc[cluster, 'transfer'])
profile.enable()
t = time.time()
print('---')
fs = (mc("sum"), mc("sum"))
for bt, ts in (
('generators', {'p': fs, 'q': fs}),
('storage_units', {'p': fs, 'state_of_charge': fs, 'q': fs})):
print("Attribute sums, {}, clustered - disaggregated:" .format(bt))
cnb = getattr(self.clustered_network, bt)
onb = getattr(self.original_network, bt)
print("{:>{}}: {}".format('p_nom_opt', 4 + len('state_of_charge'),
reduce(lambda x, f: f(x), fs[:-1], cnb['p_nom_opt'])
-
reduce(lambda x, f: f(x), fs[:-1], onb['p_nom_opt'])))
print("Series sums, {}, clustered - disaggregated:" .format(bt))
cnb = getattr(self.clustered_network, bt + '_t')
onb = getattr(self.original_network, bt + '_t')
for s in ts:
print("{:>{}}: {}".format(s, 4 + len('state_of_charge'),
reduce(lambda x, f: f(x), ts[s], cnb[s])
-
reduce(lambda x, f: f(x), ts[s], onb[s])))
profile.disable()
self.stats['check'] = time.time() - t
print('Checks computed in ', self.stats['check'])
|
Decompose each cluster into separate units and try to optimize them
separately
:param scenario:
:param solver: Solver that may be used to optimize partial networks
|
def stop(self):
"""Stop the server.
Do nothing if server is already not running.
"""
if self.is_run:
self._service.shutdown()
self._service.server_close()
|
Stop the server.
Do nothing if server is already not running.
|
def regressOut(Y, X, return_b=False):
"""
regresses out X from Y
"""
Xd = la.pinv(X)
b = Xd.dot(Y)
Y_out = Y-X.dot(b)
if return_b:
return Y_out, b
else:
return Y_out
|
regresses out X from Y
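A minimal NumPy check, assuming `la` above is numpy.linalg (only pinv is used); after regressing out X, the residuals should be numerically orthogonal to X:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))                                   # covariates to remove
Y = X @ np.array([[1.0], [-2.0]]) + 0.1 * rng.normal(size=(200, 1))
Y_resid, b = regressOut(Y, X, return_b=True)
print(b.shape)                                                  # (2, 1) fitted coefficients
print(np.abs(X.T @ Y_resid).max() < 1e-8)                       # True: X explains none of the residual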
|
def Transformer(source_vocab_size,
target_vocab_size,
mode='train',
num_layers=6,
feature_depth=512,
feedforward_depth=2048,
num_heads=8,
dropout=0.1,
shared_embedding=True,
max_len=200,
return_evals=False):
"""Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
shared_embedding: bool: specify whether source/target embeddings are tied.
max_len: int: maximum symbol length for positional encoding
return_evals: bool: whether to generate decode-time evaluation functions
Returns:
A namedtuple containing model 'init' and 'apply' functions for training and
the 'evals' functions that itself returns a namedtuple containing evaluation
functions for the trained encoder, decoder, and generator substax.
"""
# Input embedding and positional encoding
inject_position = layers.Serial(
layers.Dropout(dropout, mode=mode),
layers.PositionalEncoding(feature_depth, max_len=max_len)
)
if shared_embedding:
assert source_vocab_size == target_vocab_size
# Weight-shared Embedding
embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size))
source_embedding_layer = layers.Serial(embedding, inject_position)
target_embedding_layer = source_embedding_layer
else:
source_embedding = layers.Embedding(feature_depth, source_vocab_size)
target_embedding = layers.Embedding(feature_depth, target_vocab_size)
source_embedding_layer = layers.Serial(source_embedding, inject_position)
target_embedding_layer = layers.Serial(target_embedding, inject_position)
# Multi-headed Attention and Feed-forward layers
multi_attention = layers.MultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout, mode=mode)
# Encoder
@layers.Lambda
def Encoder(source, source_mask):
"""Transformer encoder stack.
Args:
source: layer variable: raw source sequences
source_mask: layer variable: self-attention mask
Returns:
Layer variable that outputs encoded source.
"""
encoder_layer = layers.Serial(
# input attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
source_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode),
)
return layers.Serial(
source,
source_embedding_layer,
layers.repeat(encoder_layer, num_layers),
layers.LayerNorm(),
)
# Decoder
@layers.Lambda
def Decoder(memory, target, target_mask, memory_mask):
"""Transformer decoder stack.
Args:
memory: layer variable: encoded source sequences
target: layer variable: raw target sequences
target_mask: layer variable: self-attention mask
memory_mask: layer variable: memory attention mask
Returns:
Layer variable that outputs encoded source.
"""
decoder_layer = layers.Serial(
# target attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
target_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# target attends to encoded source
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
memory, # key
memory, # value
memory_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode)
)
return layers.Serial(
target,
target_embedding_layer,
layers.repeat(decoder_layer, num_layers),
layers.LayerNorm(),
)
# The Transformer
@layers.Lambda
def transformer(source, target, source_mask, target_mask, memory_mask): # pylint: disable=invalid-name
encoded_source = Encoder(source, source_mask)
return Decoder(encoded_source, target, target_mask, memory_mask)
# Finally, bind the generator transform to use later for inference.
@layers.Lambda
def Generator(encoded_target):
return layers.Serial(
encoded_target,
layers.Dense(target_vocab_size),
layers.LogSoftmax
)
# Model-Building and Evaluation Functions
# Get entire model's the layer pair
top_init, top_apply = Generator(transformer)
# By default act as a normal constructor and emit an (init, apply) pair.
if not return_evals:
return (top_init, top_apply)
else:
raise ValueError('inference in this model is still a work in progress')
|
Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
shared_embedding: bool: specify whether source/target embeddings are tied.
max_len: int: maximum symbol length for positional encoding
return_evals: bool: whether to generate decode-time evaluation functions
Returns:
A namedtuple containing model 'init' and 'apply' functions for training and
the 'evals' functions that itself returns a namedtuple containing evaluation
functions for the trained encoder, decoder, and generator substax.
|
def _default_hparams():
"""A set of basic model hyperparameters."""
return hparam.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For
# example, if we are using a word-piece based model and we want to
# compute per-word perplexity, then we set loss_multiplier to the number
# of wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# During inference for autoregressive problems, if the batch_size is 1,
      # the inference will stop when the model predicts a text_encoder.EOS_ID
# token.
stop_at_eos=False,
# Modalities used to map from features to a space compatible with
# chosen model architecture. It comprises key-value pairs of a feature
# name (str) and its modality type.
modality={},
vocab_size={},
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
|
A set of basic model hyperparameters.
|
def generate_dict_schema(size, valid):
""" Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns
"""
schema = {}
generator_items = []
# Generate schema
for i in range(0, size):
while True:
key_schema, key_generator = generate_random_schema(valid)
if key_schema not in schema:
break
value_schema, value_generator = generate_random_schema(valid)
schema[key_schema] = value_schema
generator_items.append((key_generator, value_generator))
# Samples
generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())
# Finish
return schema, generator
|
Generate a schema dict of size `size`.
In addition, it returns a samples generator.
:param size: Schema size
:type size: int
:param valid: Generate valid samples?
:type valid: bool
:returns: tuple of (schema dict, samples generator)
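A hedged usage sketch (assumes the surrounding benchmark module provides generate_random_schema): build a three-key schema and draw one candidate sample from the returned generator:

schema, samples = generate_dict_schema(3, valid=True)
print(len(schema))       # 3 key/value sub-schemas
print(next(samples))     # one candidate dict drawn from the per-key generators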
|
def before(point):
""" True if point datetime specification is before now.
NOTE: If point is specified it is supposed to be in local time.
Not UTC/GMT !! This is because that is what gmtime() expects.
"""
if not point:
return True
if isinstance(point, six.string_types):
point = str_to_time(point)
elif isinstance(point, int):
point = time.gmtime(point)
return time.gmtime() <= point
|
True if point datetime specification is before now.
NOTE: If point is specified it is supposed to be in local time.
Not UTC/GMT !! This is because that is what gmtime() expects.
|
def reminder_validator(input_str):
"""
Allows a string that matches utils.REMINDER_REGEX.
Raises ValidationError otherwise.
"""
match = re.match(REMINDER_REGEX, input_str)
if match or input_str == '.':
return input_str
else:
raise ValidationError('Expected format: <number><w|d|h|m> '
'<popup|email|sms>. (Ctrl-C to exit)\n')
|
Allows a string that matches utils.REMINDER_REGEX.
Raises ValidationError otherwise.
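A hedged illustration of accepted versus rejected input, assuming REMINDER_REGEX matches the '<number><w|d|h|m> <popup|email|sms>' form named in the error message:

print(reminder_validator('10m popup'))     # returned unchanged
print(reminder_validator('.'))             # the bare '.' is explicitly allowed
try:
    reminder_validator('whenever')
except ValidationError:
    print('rejected')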
|
def crypto_config_from_table_info(materials_provider, attribute_actions, table_info):
"""Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
ec_kwargs = table_info.encryption_context_values
if table_info.primary_index is not None:
ec_kwargs.update(
{"partition_key_name": table_info.primary_index.partition, "sort_key_name": table_info.primary_index.sort}
)
return CryptoConfig(
materials_provider=materials_provider,
encryption_context=EncryptionContext(**ec_kwargs),
attribute_actions=attribute_actions,
)
|
Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
|
def trim_snapshots(self, hourly_backups = 8, daily_backups = 7,
weekly_backups = 4):
"""
Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
move back in time.
If ebs volumes have a 'Name' tag with a value, their snapshots
will be assigned the same tag when they are created. The values
of the 'Name' tags for snapshots are used by this function to
group snapshots taken from the same volume (or from a series
of like-named volumes over time) for trimming.
For every group of like-named snapshots, this function retains
the newest and oldest snapshots, as well as, by default, the
first snapshots taken in each of the last eight hours, the first
snapshots taken in each of the last seven days, the first snapshots
taken in the last 4 weeks (counting Midnight Sunday morning as
the start of the week), and the first snapshot from the first
Sunday of each month forever.
:type hourly_backups: int
:param hourly_backups: How many recent hourly backups should be saved.
:type daily_backups: int
:param daily_backups: How many recent daily backups should be saved.
:type weekly_backups: int
:param weekly_backups: How many recent weekly backups should be saved.
"""
# This function first builds up an ordered list of target times
# that snapshots should be saved for (last 8 hours, last 7 days, etc.).
# Then a map of snapshots is constructed, with the keys being
# the snapshot / volume names and the values being arrays of
# chronologically sorted snapshots.
# Finally, for each array in the map, we go through the snapshot
# array and the target time array in an interleaved fashion,
# deleting snapshots whose start_times don't immediately follow a
# target time (we delete a snapshot if there's another snapshot
# that was made closer to the preceding target time).
now = datetime.utcnow()
last_hour = datetime(now.year, now.month, now.day, now.hour)
last_midnight = datetime(now.year, now.month, now.day)
last_sunday = datetime(now.year, now.month, now.day) - timedelta(days = (now.weekday() + 1) % 7)
start_of_month = datetime(now.year, now.month, 1)
target_backup_times = []
# there are no snapshots older than 1/1/2007
oldest_snapshot_date = datetime(2007, 1, 1)
for hour in range(0, hourly_backups):
target_backup_times.append(last_hour - timedelta(hours = hour))
for day in range(0, daily_backups):
target_backup_times.append(last_midnight - timedelta(days = day))
for week in range(0, weekly_backups):
target_backup_times.append(last_sunday - timedelta(weeks = week))
one_day = timedelta(days = 1)
while start_of_month > oldest_snapshot_date:
# append the start of the month to the list of
# snapshot dates to save:
target_backup_times.append(start_of_month)
# there's no timedelta setting for one month, so instead:
# decrement the day by one, so we go to the final day of
# the previous month...
start_of_month -= one_day
# ... and then go to the first day of that previous month:
start_of_month = datetime(start_of_month.year,
start_of_month.month, 1)
temp = []
for t in target_backup_times:
            if t not in temp:
temp.append(t)
target_backup_times = temp
        # make the oldest dates first, and make sure the month start
        # and last four weeks' start are in the proper order
target_backup_times.sort()
# get all the snapshots, sort them by date and time, and
# organize them into one array for each volume:
all_snapshots = self.get_all_snapshots(owner = 'self')
        all_snapshots.sort(key=lambda snap: snap.start_time)
snaps_for_each_volume = {}
for snap in all_snapshots:
# the snapshot name and the volume name are the same.
# The snapshot name is set from the volume
# name at the time the snapshot is taken
volume_name = snap.tags.get('Name')
if volume_name:
# only examine snapshots that have a volume name
snaps_for_volume = snaps_for_each_volume.get(volume_name)
if not snaps_for_volume:
snaps_for_volume = []
snaps_for_each_volume[volume_name] = snaps_for_volume
snaps_for_volume.append(snap)
        # Do a running comparison of snapshot dates to desired time
        # periods, keeping the oldest snapshot in each
        # time period and deleting the rest:
for volume_name in snaps_for_each_volume:
snaps = snaps_for_each_volume[volume_name]
snaps = snaps[:-1] # never delete the newest snapshot
time_period_number = 0
snap_found_for_this_time_period = False
for snap in snaps:
check_this_snap = True
                while check_this_snap and time_period_number < len(target_backup_times):
snap_date = datetime.strptime(snap.start_time,
'%Y-%m-%dT%H:%M:%S.000Z')
if snap_date < target_backup_times[time_period_number]:
                        # the snap date is before the cutoff date.
                        # Figure out if it's the first snap in this
                        # date range and act accordingly (since both
                        # the date ranges and the snapshots are sorted
                        # chronologically, we know this snapshot
                        # isn't in an earlier date range):
if snap_found_for_this_time_period == True:
if not snap.tags.get('preserve_snapshot'):
# as long as the snapshot wasn't marked
# with the 'preserve_snapshot' tag, delete it:
try:
self.delete_snapshot(snap.id)
boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
except EC2ResponseError:
boto.log.error('Attempt to trim snapshot %s (%s) failed. Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time))
# go on and look at the next snapshot,
#leaving the time period alone
else:
# this was the first snapshot found for this
#time period. Leave it alone and look at the
# next snapshot:
snap_found_for_this_time_period = True
check_this_snap = False
else:
# the snap is after the cutoff date. Check it
# against the next cutoff date
time_period_number += 1
snap_found_for_this_time_period = False
|
Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
move back in time.
If ebs volumes have a 'Name' tag with a value, their snapshots
will be assigned the same tag when they are created. The values
of the 'Name' tags for snapshots are used by this function to
group snapshots taken from the same volume (or from a series
of like-named volumes over time) for trimming.
For every group of like-named snapshots, this function retains
the newest and oldest snapshots, as well as, by default, the
first snapshots taken in each of the last eight hours, the first
snapshots taken in each of the last seven days, the first snapshots
taken in the last 4 weeks (counting Midnight Sunday morning as
the start of the week), and the first snapshot from the first
Sunday of each month forever.
:type hourly_backups: int
:param hourly_backups: How many recent hourly backups should be saved.
:type daily_backups: int
:param daily_backups: How many recent daily backups should be saved.
:type weekly_backups: int
:param weekly_backups: How many recent weekly backups should be saved.
|
def extract_pool_attr(cls, req):
""" Extract pool attributes from arbitary dict.
"""
attr = {}
if 'id' in req:
attr['id'] = int(req['id'])
if 'name' in req:
attr['name'] = req['name']
if 'description' in req:
attr['description'] = req['description']
if 'default_type' in req:
attr['default_type'] = req['default_type']
if 'ipv4_default_prefix_length' in req:
attr['ipv4_default_prefix_length'] = int(req['ipv4_default_prefix_length'])
if 'ipv6_default_prefix_length' in req:
attr['ipv6_default_prefix_length'] = int(req['ipv6_default_prefix_length'])
return attr
|
Extract pool attributes from arbitrary dict.
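A small sketch of the filtering and casting behaviour; extract_pool_attr is a classmethod, and the owning handler class name below is hypothetical:

req = {
    'id': '42',
    'name': 'customer-pool',
    'ipv4_default_prefix_length': '29',
    'unknown-key': 'silently ignored',
}
attr = PoolHandler.extract_pool_attr(req)   # hypothetical owning class
print(attr)   # {'id': 42, 'name': 'customer-pool', 'ipv4_default_prefix_length': 29}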
|
def delete_report(report):
"""Delete report(s), supports globbing.
"""
for path in glob.glob(os.path.join(_get_reports_path(), report)):
shutil.rmtree(path)
|
Delete report(s), supports globbing.
|
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
tplPngSize : float, positive
Description of input 2.
varNumVol : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
queOut : float, positive
Description of input 2.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)
# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]
# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])
# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
varTmpX = aryMdlParamsChnk[idxMdl, 1]
varTmpY = aryMdlParamsChnk[idxMdl, 2]
varTmpSd = aryMdlParamsChnk[idxMdl, 3]
# Create pRF model (2D):
aryGauss = crtGauss2D(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# pRF time course model (i.e. not yet scaled for size of the pRF).
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp
# Put column with the indicies of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut)
|
Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
tplPngSize : float, positive
Description of input 2.
varNumVol : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
queOut : float, positive
Description of input 2.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1]
|
def list_metrics():
"""List metrics available."""
for name, operator in ALL_OPERATORS.items():
print(f"{name} operator:")
if len(operator.cls.metrics) > 0:
print(
tabulate.tabulate(
headers=("Name", "Description", "Type"),
tabular_data=operator.cls.metrics,
tablefmt=DEFAULT_GRID_STYLE,
)
)
|
List metrics available.
|
def search_variants(
self, variant_set_id, start=None, end=None, reference_name=None,
call_set_ids=None):
"""
Returns an iterator over the Variants fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: The ID of the
:class:`ga4gh.protocol.VariantSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:param list call_set_ids: Only return variant calls which belong to
call sets with these IDs. If an empty array, returns variants
without any call objects. If null, returns all variant calls.
:return: An iterator over the :class:`ga4gh.protocol.Variant` objects
defined by the query parameters.
:rtype: iter
"""
request = protocol.SearchVariantsRequest()
request.reference_name = pb.string(reference_name)
request.start = pb.int(start)
request.end = pb.int(end)
request.variant_set_id = variant_set_id
request.call_set_ids.extend(pb.string(call_set_ids))
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variants", protocol.SearchVariantsResponse)
|
Returns an iterator over the Variants fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: The ID of the
:class:`ga4gh.protocol.VariantSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:param list call_set_ids: Only return variant calls which belong to
call sets with these IDs. If an empty array, returns variants
without any call objects. If null, returns all variant calls.
:return: An iterator over the :class:`ga4gh.protocol.Variant` objects
defined by the query parameters.
:rtype: iter
|
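A hedged usage sketch of the method above, assuming a client object that exposes it; the variant-set and call-set IDs are placeholders, not real identifiers.
# Iterate over variants in a 10 kb window of chr1 (IDs are placeholders).
variants = client.search_variants(
    variant_set_id="example-variant-set-id",
    reference_name="chr1",
    start=100000,
    end=110000,
    call_set_ids=["example-call-set-id"],
)
for variant in variants:
    print(variant.reference_name, variant.start, variant.end)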
def get_slide_vars(self, slide_src, source=None):
""" Computes a single slide template vars from its html source code.
Also extracts slide informations for the table of contents.
"""
presenter_notes = None
find = re.search(r'<h\d[^>]*>presenter notes</h\d>', slide_src,
re.DOTALL | re.UNICODE | re.IGNORECASE)
if find:
if self.presenter_notes:
presenter_notes = slide_src[find.end():].strip()
slide_src = slide_src[:find.start()]
find = re.search(r'(<h(\d+?).*?>(.+?)</h\d>)\s?(.+)?', slide_src,
re.DOTALL | re.UNICODE)
if not find:
header = level = title = None
content = slide_src.strip()
else:
header = find.group(1)
level = int(find.group(2))
title = find.group(3)
content = find.group(4).strip() if find.group(4) else find.group(4)
slide_classes = []
if header:
header, _ = self.process_macros(header, source)
if content:
content, slide_classes = self.process_macros(content, source)
source_dict = {}
if source:
source_dict = {'rel_path': source,
'abs_path': os.path.abspath(source)}
if header or content:
return {'header': header, 'title': title, 'level': level,
'content': content, 'classes': slide_classes,
'source': source_dict, 'presenter_notes': presenter_notes,
'math_output': self.math_output}
|
Computes a single slide's template vars from its HTML source code.
        Also extracts slide information for the table of contents.
|
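A minimal sketch of the expected behaviour, assuming generator is an instance of the slide-generator class with presenter notes enabled and macros that pass content through unchanged; the HTML fragment is illustrative.
slide_html = (
    "<h1>Introduction</h1>"
    "<p>Some content.</p>"
    "<h2>Presenter Notes</h2>"
    "Remember to mention the roadmap."
)
slide_vars = generator.get_slide_vars(slide_html)
print(slide_vars["title"], slide_vars["level"])  # Introduction 1
print(slide_vars["presenter_notes"])             # Remember to mention the roadmap.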
def _correctArtefacts(self, image, threshold):
'''
Apply a thresholded median replacing high gradients
and values beyond the boundaries
'''
image = np.nan_to_num(image)
medianThreshold(image, threshold, copy=False)
return image
|
Apply a thresholded median replacing high gradients
and values beyond the boundaries
|
def autodiscover():
"""
Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator.
"""
global LOADED
if LOADED:
return
LOADED = True
for app in get_app_name_list():
try:
module = import_module(app)
except ImportError:
pass
else:
try:
import_module("%s.page_processors" % app)
except:
if module_has_submodule(module, "page_processors"):
raise
|
Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator.
|
def unique(func, num_args=0, max_attempts=100, cache=None):
"""
    wraps a function so that it produces unique results
    :param func: function to wrap
    :param num_args: number of leading arguments used to build the cache key
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
"""
if cache is None:
cache = _cache_unique
@wraps(func)
def wrapper(*args):
key = "%s_%s" % (str(func.__name__), str(args[:num_args]))
attempt = 0
while attempt < max_attempts:
attempt += 1
drawn = cache.get(key, [])
result = func(*args)
if result not in drawn:
drawn.append(result)
cache[key] = drawn
return result
raise MaxAttemptException()
return wrapper
|
wraps a function so that it produces unique results
    :param func: function to wrap
    :param num_args: number of leading arguments used to build the cache key
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
|
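A follow-up sketch of the failure mode, using the names defined above: once every value in the pool has been drawn for a given cache key, the wrapper keeps retrying and finally raises MaxAttemptException.
import random

choices = [1, 2]
pick = unique(random.choice, 1)
first, second = pick(choices), pick(choices)  # two distinct values
try:
    pick(choices)  # no unused value is left for this key
except MaxAttemptException:
    print("pool exhausted after 100 attempts")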
def drain_events(self, timeout=None):
"""Wait for an event on a channel."""
chanmap = self.channels
chanid, method_sig, args, content = self._wait_multiple(
chanmap, None, timeout=timeout,
)
channel = chanmap[chanid]
if (content and
channel.auto_encode_decode and
hasattr(content, 'content_encoding')):
try:
content.body = content.body.decode(content.content_encoding)
except Exception:
pass
amqp_method = (self._method_override.get(method_sig) or
channel._METHOD_MAP.get(method_sig, None))
if amqp_method is None:
raise AMQPNotImplementedError(
'Unknown AMQP method {0!r}'.format(method_sig))
if content is None:
return amqp_method(channel, args)
else:
return amqp_method(channel, args, content)
|
Wait for an event on a channel.
|
def _read_fasta_files(f, args):
""" read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
    :returns: * :code:`seq_l`: a dict mapping each unique sequence to a
    seq_obj with the information of that sequence in each sample
    * :code:`sample_l`: a list with the names of the samples
    (column two of the config file)
"""
seq_l = {}
sample_l = []
idx = 1
for line1 in f:
line1 = line1.strip()
cols = line1.split("\t")
with open(cols[0], 'r') as fasta:
sample_l.append(cols[1])
for line in fasta:
if line.startswith(">"):
idx += 1
counts = int(re.search("x([0-9]+)", line.strip()).group(1))
else:
seq = line.strip()
seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq
if counts > int(args.minc) and len(seq) > int(args.minl):
if seq not in seq_l:
seq_l[seq] = sequence_unique(idx, seq)
seq_l[seq].add_exp(cols[1], counts)
return seq_l, sample_l
|
read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
    :returns: * :code:`seq_l`: a dict mapping each unique sequence to a
    seq_obj with the information of that sequence in each sample
    * :code:`sample_l`: a list with the names of the samples
    (column two of the config file)
|
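A hedged sketch of how this reader is driven, with an illustrative config file and thresholds; the function expects collapsed FASTA headers that encode read counts as "x<N>" (e.g. ">seq1_x25").
# config.txt, tab-separated: <path to collapsed fasta>    <sample name>
#     sample_a.fa    sample_a
#     sample_b.fa    sample_b
import argparse

args = argparse.Namespace(maxl=40, minl=17, minc=1)  # illustrative thresholds
with open("config.txt") as config:
    seqs, samples = _read_fasta_files(config, args)
print(len(seqs), samples)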
def parse_boolargs(self, args):
"""Returns an array populated by given values, with the indices of
        those values dependent on given boolean tests on self.
The given `args` should be a list of tuples, with the first element the
return value and the second argument a string that evaluates to either
True or False for each element in self.
Each boolean argument is evaluated on elements for which every prior
boolean argument was False. For example, if array `foo` has a field
`bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'), (3, 'bar < 30')]`,
then the returned array will have `1`s at the indices for
which `foo.bar < 10`, `2`s where `foo.bar < 20 and not foo.bar < 10`,
and `3`s where `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`.
The last argument in the list may have "else", an empty string, None,
or simply list a return value. In any of these cases, any element not
yet populated will be assigned the last return value.
Parameters
----------
args : {(list of) tuples, value}
One or more return values and boolean argument determining where
they should go.
Returns
-------
return_values : array
An array with length equal to self, with values populated with the
return values.
leftover_indices : array
An array of indices that evaluated to False for all arguments.
            These indices will not have been populated with any value,
defaulting to whatever numpy uses for a zero for the return
values' dtype. If there are no leftovers, an empty array is
returned.
Examples
--------
Given the following array:
>>> arr = FieldArray(5, dtype=[('mtotal', float)])
>>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.])
Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the
elements 1 and 4 are leftover):
>>> arr.parse_boolargs(('TaylorF2', 'mtotal<4'))
(array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''],
dtype='|S8'),
array([1, 4]))
Return `"TaylorF2"` for all elements with `mtotal < 4`,
`"SEOBNR_ROM_DoubleSpin"` otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')])
(array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
The following will also return the same:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
Return `"TaylorF2"` for all elements with `mtotal < 3`, `"IMRPhenomD"`
for all elements with `3 <= mtotal < 4`, `"SEOBNRv2_ROM_DoubleSpin"`
otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
(array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
Just return `"TaylorF2"` for all elements:
>>> arr.parse_boolargs('TaylorF2')
(array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'],
dtype='|S8'),
array([], dtype=int64))
"""
if not isinstance(args, list):
args = [args]
# format the arguments
return_vals = []
bool_args = []
for arg in args:
if not isinstance(arg, tuple):
return_val = arg
bool_arg = None
elif len(arg) == 1:
return_val = arg[0]
bool_arg = None
elif len(arg) == 2:
return_val, bool_arg = arg
else:
raise ValueError("argument not formatted correctly")
return_vals.append(return_val)
bool_args.append(bool_arg)
# get the output dtype
outdtype = numpy.array(return_vals).dtype
out = numpy.zeros(self.size, dtype=outdtype)
mask = numpy.zeros(self.size, dtype=bool)
leftovers = numpy.ones(self.size, dtype=bool)
for ii,(boolarg,val) in enumerate(zip(bool_args, return_vals)):
if boolarg is None or boolarg == '' or boolarg.lower() == 'else':
if ii+1 != len(bool_args):
raise ValueError("only the last item may not provide "
"any boolean arguments")
mask = leftovers
else:
mask = leftovers & self[boolarg]
out[mask] = val
leftovers &= ~mask
return out, numpy.where(leftovers)[0]
|
Returns an array populated by given values, with the indices of
        those values dependent on given boolean tests on self.
The given `args` should be a list of tuples, with the first element the
return value and the second argument a string that evaluates to either
True or False for each element in self.
Each boolean argument is evaluated on elements for which every prior
boolean argument was False. For example, if array `foo` has a field
`bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'), (3, 'bar < 30')]`,
then the returned array will have `1`s at the indices for
which `foo.bar < 10`, `2`s where `foo.bar < 20 and not foo.bar < 10`,
and `3`s where `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`.
The last argument in the list may have "else", an empty string, None,
or simply list a return value. In any of these cases, any element not
yet populated will be assigned the last return value.
Parameters
----------
args : {(list of) tuples, value}
One or more return values and boolean argument determining where
they should go.
Returns
-------
return_values : array
An array with length equal to self, with values populated with the
return values.
leftover_indices : array
An array of indices that evaluated to False for all arguments.
            These indices will not have been populated with any value,
defaulting to whatever numpy uses for a zero for the return
values' dtype. If there are no leftovers, an empty array is
returned.
Examples
--------
Given the following array:
>>> arr = FieldArray(5, dtype=[('mtotal', float)])
>>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.])
Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the
elements 1 and 4 are leftover):
>>> arr.parse_boolargs(('TaylorF2', 'mtotal<4'))
(array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''],
dtype='|S8'),
array([1, 4]))
Return `"TaylorF2"` for all elements with `mtotal < 4`,
`"SEOBNR_ROM_DoubleSpin"` otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')])
(array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
The following will also return the same:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
Return `"TaylorF2"` for all elements with `mtotal < 3`, `"IMRPhenomD"`
for all elements with `3 <= mtotal < 4`, `"SEOBNRv2_ROM_DoubleSpin"`
otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
(array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
Just return `"TaylorF2"` for all elements:
>>> arr.parse_boolargs('TaylorF2')
(array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'],
dtype='|S8'),
array([], dtype=int64))
|
def set_nsxcontroller_ip(self, **kwargs):
"""
Set nsx-controller IP
        Args:
            name (str): Name of the NSX controller connection.
            ip_addr (str): IPv4 address of the NSX controller.
        Returns:
            Return value of `callback`.
        Raises:
            ValueError: if `ip_addr` is not a valid IPv4 address.
"""
name = kwargs.pop('name')
ip_addr = str((kwargs.pop('ip_addr', None)))
nsxipaddress = ip_interface(unicode(ip_addr))
if nsxipaddress.version != 4:
raise ValueError('NSX Controller ip must be IPV4')
ip_args = dict(name=name, address=ip_addr)
method_name = 'nsx_controller_connection_addr_address'
method_class = self._brocade_tunnels
nsxcontroller_attr = getattr(method_class, method_name)
config = nsxcontroller_attr(**ip_args)
output = self._callback(config)
return output
|
Set nsx-controller IP
        Args:
            name (str): Name of the NSX controller connection.
            ip_addr (str): IPv4 address of the NSX controller.
        Returns:
            Return value of `callback`.
        Raises:
            ValueError: if `ip_addr` is not a valid IPv4 address.
|
def getAddPerson(self):
"""
Return an L{AddPersonFragment} which is a child of this fragment and
which will add a person to C{self.organizer}.
"""
fragment = AddPersonFragment(self.organizer)
fragment.setFragmentParent(self)
return fragment
|
Return an L{AddPersonFragment} which is a child of this fragment and
which will add a person to C{self.organizer}.
|
def assets(lon=None, lat=None, begin=None, end=None):
'''
HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/assets
QUERY PARAMETERS
Parameter Type Default Description
lat float n/a Latitude
lon float n/a Longitude
begin YYYY-MM-DD n/a beginning of date range
end YYYY-MM-DD today end of date range
api_key string DEMO_KEY api.nasa.gov key for expanded usage
EXAMPLE QUERY
https://api.nasa.gov/planetary/earth/assets?lon=100.75&lat=1.5&begin=2014-02-01&api_key=DEMO_KEY
'''
base_url = "https://api.nasa.gov/planetary/earth/assets?"
if not lon or not lat:
raise ValueError(
"assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
else:
try:
validate_float(lon, lat)
# Floats are entered/displayed as decimal numbers, but your computer
# (in fact, your standard C library) stores them as binary.
# You get some side effects from this transition:
# >>> print len(repr(0.1))
# 19
# >>> print repr(0.1)
# 0.10000000000000001
            # Thus converting via Decimal to str is more reliable
lon = decimal.Decimal(lon)
lat = decimal.Decimal(lat)
base_url += "lon=" + str(lon) + "&" + "lat=" + str(lat) + "&"
except:
raise ValueError(
"assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
if not begin:
raise ValueError(
"Begin date is missing, which is mandatory. Format : YYYY-MM-DD")
else:
try:
vali_date(begin)
base_url += "begin=" + begin + "&"
except:
raise ValueError("Incorrect date format, should be YYYY-MM-DD")
if end:
try:
vali_date(end)
base_url += "end=" + end + "&"
except:
raise ValueError("Incorrect date format, should be YYYY-MM-DD")
req_url = base_url + "api_key=" + nasa_api_key()
return dispatch_http_get(req_url)
|
HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/assets
QUERY PARAMETERS
Parameter Type Default Description
lat float n/a Latitude
lon float n/a Longitude
begin YYYY-MM-DD n/a beginning of date range
end YYYY-MM-DD today end of date range
api_key string DEMO_KEY api.nasa.gov key for expanded usage
EXAMPLE QUERY
https://api.nasa.gov/planetary/earth/assets?lon=100.75&lat=1.5&begin=2014-02-01&api_key=DEMO_KEY
|
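A hedged usage sketch mirroring the example query in the docstring above; it assumes a NASA API key is available to nasa_api_key().
# Landsat asset metadata for a point in Southeast Asia over one month.
result = assets(lon=100.75, lat=1.5, begin="2014-02-01", end="2014-03-01")
print(result)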
def visit_BinaryOperation(self, node):
"""Visitor for `BinaryOperation` AST node."""
self.visit(node.left)
self.visit(node.right)
|
Visitor for `BinaryOperation` AST node.
|
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
"""
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive
|
Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
|
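A small worked example, assuming string is imported: because the index is decremented by shift, applying shift 3 undoes a classical Caesar encryption by 3.
import string

shift = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
ciphertext = "Khoor, Zruog!"  # "Hello, World!" Caesar-shifted by 3
plaintext = ''.join(shift(3, symbol) for symbol in ciphertext)
print(plaintext)  # Hello, World!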
def new_job(frontier, job_conf):
'''Returns new Job.'''
validate_conf(job_conf)
job = Job(frontier.rr, {
"conf": job_conf, "status": "ACTIVE",
"started": doublethink.utcnow()})
if "id" in job_conf:
job.id = job_conf["id"]
if "max_claimed_sites" in job_conf:
job.max_claimed_sites = job_conf["max_claimed_sites"]
job.save()
sites = []
pages = []
for seed_conf in job_conf["seeds"]:
merged_conf = merge(seed_conf, job_conf)
merged_conf.pop("seeds")
merged_conf["job_id"] = job.id
merged_conf["seed"] = merged_conf.pop("url")
site = brozzler.Site(frontier.rr, merged_conf)
site.id = str(uuid.uuid4())
sites.append(site)
pages.append(new_seed_page(frontier, site))
# insert in batches to avoid this error
# rethinkdb.errors.ReqlDriverError: Query size (167883036) greater than maximum (134217727) in:
for batch in (pages[i:i+500] for i in range(0, len(pages), 500)):
logging.info('inserting batch of %s pages', len(batch))
result = frontier.rr.table('pages').insert(batch).run()
for batch in (sites[i:i+100] for i in range(0, len(sites), 100)):
logging.info('inserting batch of %s sites', len(batch))
result = frontier.rr.table('sites').insert(batch).run()
logging.info('job %s fully started', job.id)
return job
|
Returns new Job.
|
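The batching in new_job keeps each RethinkDB insert below the driver's query-size limit. A generic sketch of the same slicing pattern, with illustrative data and batch size:
def batched(items, size):
    # Yield consecutive slices of at most `size` elements.
    for start in range(0, len(items), size):
        yield items[start:start + size]

pages = list(range(1200))  # stand-in for the page documents
for batch in batched(pages, 500):
    print("would insert a batch of", len(batch), "pages")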
def run_unlock(device_type, args):
"""Unlock hardware device (for future interaction)."""
util.setup_logging(verbosity=args.verbose)
with device_type() as d:
log.info('unlocked %s device', d)
|
Unlock hardware device (for future interaction).
|
def get_metric_parsers(metric_packages=tuple(), include_defaults=True):
"""Gets all of the metric parsers.
Args:
metric_packages - Defaults to no extra packages. An iterable of
metric containing packages. A metric inherits DiffParserBase
and does not have __metric__ = False
A metric package must be imported using import a.b.c
include_defaults - Whether to include the generic metric parsers
"""
metric_parsers = set()
if include_defaults:
import git_code_debt.metrics
metric_parsers.update(discover(git_code_debt.metrics, is_metric_cls))
for metric_package in metric_packages:
metric_parsers.update(discover(metric_package, is_metric_cls))
return metric_parsers
|
Gets all of the metric parsers.
Args:
metric_packages - Defaults to no extra packages. An iterable of
metric containing packages. A metric inherits DiffParserBase
and does not have __metric__ = False
A metric package must be imported using import a.b.c
include_defaults - Whether to include the generic metric parsers
|
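A hedged usage sketch that collects only the built-in parsers shipped with git_code_debt (no extra metric packages):
parsers = get_metric_parsers(include_defaults=True)
for parser_cls in sorted(parsers, key=lambda cls: cls.__name__):
    print(parser_cls.__name__)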