code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def set_mode_cb(self, mode, tf):
    """Handle selection of one of the Move/Draw/Edit radio buttons.

    :param mode: name of the draw mode to activate (e.g. 'edit')
    :param tf: True when the button became selected, False otherwise
    :return: True (callback handled)
    """
    if not tf:
        return True
    self.canvas.set_draw_mode(mode)
    if mode == 'edit':
        self.edit_select_marks()
    return True
def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:
    """Determine whether a Channel supports a number of Suggested Actions.

    Args:
        channel_id (str): The Channel to check for Suggested Actions support.
        button_cnt (int, optional): Defaults to 100. The number of Suggested
            Actions to check for the Channel.

    Returns:
        bool: True if the Channel supports button_cnt total Suggested Actions,
            False otherwise (including Channels absent from the limits table).
    """
    # Documented per-channel button limits; a channel missing from this
    # table is treated as not supporting suggested actions.
    max_actions = {
        # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies
        Channels.facebook: 10,
        Channels.skype: 10,
        # https://developers.line.biz/en/reference/messaging-api/#items-object
        Channels.line: 13,
        # https://dev.kik.com/#/docs/messaging#text-response-object
        Channels.kik: 20,
        Channels.telegram: 100,
        Channels.slack: 100,
        Channels.emulator: 100,
        Channels.direct_line: 100,
        Channels.webchat: 100,
    }
    if channel_id not in max_actions:
        return False
    return button_cnt <= max_actions[channel_id]
def is_annual(self):
    """Check if an analysis period is annual.

    Returns:
        bool: True when the period spans (month, day, hour) from
        (1, 1, 0) through (12, 31, 23); False otherwise.
    """
    # The if/else returning literal True/False was redundant — the
    # tuple comparison already yields the bool.
    return (self.st_month, self.st_day, self.st_hour, self.end_month,
            self.end_day, self.end_hour) == (1, 1, 0, 12, 31, 23)
def validate(data, schema=None):
    """Validate the given dictionary against the given schema.

    Args:
        data (dict): record to validate.
        schema (Union[dict, str]): schema to validate against. If it is a
            string, it is interpreted as the name of the schema to load (e.g.
            ``authors`` or ``jobs``). If it is ``None``, the schema is taken
            from ``data['$schema']``. If it is a dictionary, it is used
            directly.

    Raises:
        SchemaNotFound: if the given schema was not found.
        SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
            found in ``data``.
        jsonschema.SchemaError: if the schema is invalid.
        jsonschema.ValidationError: if the data is invalid.
    """
    # Resolve the schema argument (name / None / dict) to a schema dict.
    schema = _load_schema_for_record(data, schema)
    # The local resolver lets ``$ref`` entries be resolved against the
    # bundled schemas rather than being fetched remotely.
    return jsonschema_validate(
        instance=data,
        schema=schema,
        resolver=LocalRefResolver.from_schema(schema),
        format_checker=inspire_format_checker,
    )
def insert_from_xmldoc(connection, source_xmldoc, preserve_ids = False, verbose = False):
    """
    Insert the tables from an in-ram XML document into the database at
    the given connection.  If preserve_ids is False (default), then row
    IDs are modified during the insert process to prevent collisions
    with IDs already in the database.  If preserve_ids is True then IDs
    are not modified; this will result in database consistency
    violations if any of the IDs of newly-inserted rows collide with
    row IDs already in the database, and is generally only sensible
    when inserting a document into an empty database.  If verbose is
    True then progress reports will be printed to stderr.
    """
    #
    # enable/disable ID remapping.  NOTE: the append method is swapped
    # on the DBTable class itself, so this function is not safe for
    # concurrent use of DBTable — presumably single-threaded use is
    # assumed here (confirm against callers).
    #
    orig_DBTable_append = dbtables.DBTable.append
    if not preserve_ids:
        try:
            dbtables.idmap_create(connection)
        except sqlite3.OperationalError:
            # assume table already exists
            pass
        dbtables.idmap_sync(connection)
        dbtables.DBTable.append = dbtables.DBTable._remapping_append
    else:
        dbtables.DBTable.append = dbtables.DBTable._append
    try:
        #
        # create a place-holder XML representation of the target
        # document so we can pass the correct tree to update_ids().
        # note that only tables present in the source document need
        # ID remapping, so xmldoc only contains representations of
        # the tables in the target document that are also in the
        # source document
        #
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        #
        # iterate over tables in the source XML tree, inserting
        # each into the target database
        #
        for tbl in source_xmldoc.getElementsByTagName(ligolw.Table.tagName):
            #
            # instantiate the correct table class, connected to the
            # target database, and save in XML tree.  fall back to the
            # generic DBTable class for unrecognized table names.
            #
            name = tbl.Name
            try:
                cls = dbtables.TableByName[name]
            except KeyError:
                cls = dbtables.DBTable
            dbtbl = xmldoc.childNodes[-1].appendChild(cls(tbl.attributes, connection = connection))
            #
            # copy table element child nodes from source XML tree
            #
            for elem in tbl.childNodes:
                if elem.tagName == ligolw.Stream.tagName:
                    dbtbl._end_of_columns()
                dbtbl.appendChild(type(elem)(elem.attributes))
            #
            # copy table rows from source XML tree
            #
            for row in tbl:
                dbtbl.append(row)
            dbtbl._end_of_rows()
        #
        # update references to row IDs and clean up ID remapping
        #
        if not preserve_ids:
            update_ids(connection, xmldoc, verbose = verbose)
    finally:
        #
        # restore the original append method even if the insert failed
        #
        dbtables.DBTable.append = orig_DBTable_append
    #
    # done.  unlink the document to delete database cursor objects it
    # retains
    #
    connection.commit()
    xmldoc.unlink()
def get_success_url(self):
    """
    Return the redirect URL used after a valid form submission.

    Falls back to the namespaced ``index`` view when no explicit
    ``success_url`` is configured.

    :rtype: str.
    """
    if self.success_url:
        return force_text(self.success_url)
    return reverse('{0}:index'.format(self.url_namespace))
def execute(self):
    """Run selected module generator."""
    # Dispatch table preserves the original if/elif ordering: the first
    # truthy CLI flag wins and exactly one generator runs.
    dispatch = (
        ('cfn', generate_sample_cfn_module),
        ('sls', generate_sample_sls_module),
        ('sls-tsc', generate_sample_sls_tsc_module),
        ('stacker', generate_sample_stacker_module),
        ('tf', generate_sample_tf_module),
        ('cdk-tsc', generate_sample_cdk_tsc_module),
        ('cdk-py', generate_sample_cdk_py_module),
        ('cdk-csharp', generate_sample_cdk_cs_module),
    )
    for flag, generator in dispatch:
        if self._cli_arguments[flag]:
            generator(self.env_root)
            return
async def self_check(cls):
    """
    Check that the configuration is correct

    - Presence of "token" in the settings
    - Presence of "BERNARD_BASE_URL" in the global configuration
    - Presence of "WEBVIEW_SECRET_KEY" in the global configuration
    """
    # noinspection PyTypeChecker
    async for check in super(Telegram, cls).self_check():
        yield check

    s = cls.settings()

    try:
        assert isinstance(s['token'], str)
    except (KeyError, TypeError, AssertionError):
        # BUGFIX: the implicit string concatenations below were missing
        # separating spaces ("byregistering", "TheTelegram").
        yield HealthCheckFail(
            '00005',
            'Missing "token" for Telegram platform. You can obtain one by '
            'registering your bot in Telegram.',
        )

    if not hasattr(settings, 'BERNARD_BASE_URL'):
        yield HealthCheckFail(
            '00005',
            '"BERNARD_BASE_URL" cannot be found in the configuration. The '
            'Telegram platform needs it because it uses it to '
            'automatically register its hook.'
        )

    if not hasattr(settings, 'WEBVIEW_SECRET_KEY'):
        yield HealthCheckFail(
            '00005',
            '"WEBVIEW_SECRET_KEY" cannot be found in the configuration. '
            'It is required in order to be able to create secure postback '
            'URLs.'
        )
def repeater(call, args=None, kwargs=None, retries=4):
    """
    Invoke ``call`` repeatedly until it succeeds, backing off between
    attempts: docker API is just awesome

    :param call: function
    :param args: tuple, args for function
    :param kwargs: dict, kwargs for function
    :param retries: int, how many times we try?
    :return: response of the call
    """
    call_args = args or ()
    call_kwargs = kwargs or {}
    delay = 1.0
    for attempt in range(retries):
        try:
            return call(*call_args, **call_kwargs)
        except APIError as ex:
            logger.error("query #%d: docker returned an error: %r", attempt, ex)
        except Exception as ex:
            # this may be pretty bad
            log_last_traceback()
            logger.error("query #%d: generic error: %r", attempt, ex)
        # exponential back-off before the next attempt
        delay *= 2
        time.sleep(delay)
def run_the_target(G, target, settings):
    """
    Look up ``target``'s 'formula' in the graph and hand its commands to
    run_commands()

    Args:
        The graph we are going to build
        The target to run
        The settings dictionary
    """
    sprint = settings["sprint"]
    sprint("Running target {}".format(target))
    node = get_the_node_dict(G, target)
    run_commands(node["formula"], settings)
def get_parameter_value(self, parameter, from_cache=True, timeout=10):
    """
    Retrieve the current value of the specified parameter.

    :param str parameter: Either a fully-qualified XTCE name or an alias in the
                          format ``NAMESPACE/NAME``.
    :param bool from_cache: If ``False`` this call will block until a
                            fresh value is received on the processor.
                            If ``True`` the server returns the latest
                            value instead (which may be ``None``).
    :param float timeout: The amount of seconds to wait for a fresh value.
                          (ignored if ``from_cache=True``).
    :rtype: .ParameterValue
    """
    # The REST API expects the timeout in milliseconds.
    params = {
        'fromCache': from_cache,
        'timeout': int(timeout * 1000),
    }
    # Aliases ("NAMESPACE/NAME") need escaping before being placed in the URL.
    parameter = adapt_name_for_rest(parameter)
    url = '/processors/{}/{}/parameters{}'.format(
        self._instance, self._processor, parameter)
    response = self._client.get_proto(url, params=params)
    proto = pvalue_pb2.ParameterValue()
    proto.ParseFromString(response.content)
    # Server returns ParameterValue with only 'id' set if no
    # value existed. Convert this to ``None``.
    if proto.HasField('rawValue') or proto.HasField('engValue'):
        return ParameterValue(proto)
    return None
def update(self, *iterables):
    """Update the set, adding elements from all *iterables*.

    Returns self to allow chaining.
    """
    _set = self._set
    values = set(chain(*iterables))
    # Heuristic: when the batch is large relative to the current set
    # (more than a quarter of its size), rebuilding the sorted list
    # wholesale is cheaper than inserting one value at a time.
    if (4 * len(values)) > len(_set):
        _list = self._list
        _set.update(values)
        _list.clear()
        _list.update(_set)
    else:
        # Small batch: per-value add() keeps the sorted list incremental.
        _add = self.add
        for value in values:
            _add(value)
    return self
def _applyInter(finter0, finter1, conflict="ignore"):
    """
    Return the restriction of the first interval by the second.

    Args:
        - finter0, finter1 (tuple of Feature): intervals to combine.
        - conflict (str): what to do when the intervals are disjoint:
            - ``"error"``: raise exception.
            - ``"ignore"``: return None.
            - ``"me"``: return finter0.
            - ``"other"``: return finter1.

    Return (tuple of Feature): the resulting interval, or None.
    """
    OPTIONS = ["error", "ignore", "me", "other"]
    assert conflict in OPTIONS, "Invalid value in `conflict`."
    # Compute the comparison of the interval extremes
    min_int = -2**63
    # Remember, None <= number and None <= None are True, but number <= None is False.
    # A None extreme (unbounded) is mapped to the min_int sentinel.
    inter0 = tuple([f.getValue() if f else min_int for f in finter0])
    inter1 = tuple([f.getValue() if f else min_int for f in finter1])
    le00 = inter0[0] <= inter1[0]  # finter0[0] <= finter1[0]
    le01 = inter1[1] == min_int or inter0[0] <= inter1[1]  # finter0[0] <= finter1[1]
    le11 = inter1[1] == min_int or (inter0[1] != min_int and inter0[1] <= inter1[1])  # finter0[1] <= finter1[1]
    ge00 = not le00 or inter0[0] == inter1[0]  # finter0[0] >= finter1[0]
    ge10 = inter0[1] == min_int or inter0[1] >= inter1[0]  # finter0[1] >= finter1[0]
    # Debug aid (kept for reference):
    # print "\n".join("%s: %s" % (s, v) for v, s in [
    #     (le00, "finter0[0] <= finter1[0]"),
    #     (le01, "finter0[0] <= finter1[1]"),
    #     (le11, "finter0[1] <= finter1[1]"),
    #     (ge00, "finter0[0] >= finter1[0]"),
    #     (ge10, "finter0[1] >= finter1[0]") ])
    # First interval is ( ), second interval is [ ]
    if le00 and ge10 and le11:  # ( [ ) ] chain first-second
        return finter1[0], finter0[1]
    elif le00 and ge10 and not le11:  # ( [ ] ) second is inside first
        return finter1
    elif ge00 and le01 and le11:  # [ ( ) ] first is inside second
        return finter0
    elif ge00 and le01 and not le11:  # [ ( ] ) chain second-first
        return finter0[0], finter1[1]
    # Disjoint intervals: resolve according to `conflict`.
    elif conflict == "me":
        return finter0
    elif conflict == "other":
        return finter1
    elif conflict == "error":
        raise Exception("Disjoint intervals!")
    return None
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'aggregations') and self.aggregations is not None:
_dict['aggregations'] = [x._to_dict() for x in self.aggregations]
return _dict | Return a json dictionary representing this model. |
def del_password(name):
    '''
    Deletes the account password

    :param str name: The user name of the account

    :return: True if successful, otherwise False
    :rtype: bool

    :raises: CommandExecutionError on user not found or any other unknown error

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.del_password username
    '''
    # This removes the password
    cmd = "dscl . -passwd /Users/{0} ''".format(name)
    try:
        salt.utils.mac_utils.execute_return_success(cmd)
    except CommandExecutionError as exc:
        # dscl reports a missing user via eDSUnknownNodeName in stderr.
        if 'eDSUnknownNodeName' in exc.strerror:
            raise CommandExecutionError('User not found: {0}'.format(name))
        raise CommandExecutionError('Unknown error: {0}'.format(exc.strerror))
    # This is so it looks right in shadow.info
    cmd = "dscl . -create /Users/{0} Password '*'".format(name)
    salt.utils.mac_utils.execute_return_success(cmd)
    # Success is confirmed by shadow.info showing the '*' placeholder.
    return info(name)['passwd'] == '*'
def searchIndex(self, printData=True):
    """Search the index with all the repo's specified parameters.

    :param printData: temporary value for ``self.output.printData``
        while the search runs; the previous value is restored afterwards.
    :return: the search results (also stored on ``self.data``).
    """
    backupValue = copy.deepcopy(self.output.printData)
    self.output.printData = printData
    try:
        self.data = self.index.search(self.searchString, self.category,
                                      self.math, self.game,
                                      self.searchFiles, self.extension)
    finally:
        # BUGFIX: restore the output flag even if the search raises;
        # previously an exception left printData permanently overridden.
        self.output.printData = backupValue
    return self.data
def format(self, dt, fmt, locale=None):
    """
    Formats a DateTime instance with a given format and locale.

    :param dt: The instance to format
    :type dt: pendulum.DateTime

    :param fmt: The format to use
    :type fmt: str

    :param locale: The locale to use; defaults to the current pendulum locale
    :type locale: str or Locale or None

    :rtype: str
    """
    if not locale:
        locale = pendulum.get_locale()

    locale = Locale.load(locale)

    # Substitute every token matched by _FORMAT_RE.  Groups 1 and 2
    # appear to be emitted verbatim (escaped/literal text) while group 3
    # is translated via _format_token — confirm against the _FORMAT_RE
    # definition.
    result = self._FORMAT_RE.sub(
        lambda m: m.group(1)
        if m.group(1)
        else m.group(2)
        if m.group(2)
        else self._format_token(dt, m.group(3), locale),
        fmt,
    )

    return decode(result)
def from_pubkey(cls, pubkey, compressed=False, version=56, prefix=None):
    """Derive an address from a public key using ``RIPEMD160(SHA256(x))``.

    BUGFIX: this text was previously a bare string expression in the
    middle of the function body, so it was never attached as the
    function's docstring.

    :param pubkey: public key in any representation accepted by PublicKey
    :param bool compressed: derive from the compressed form if True,
        otherwise from the uncompressed form
    :param int version: unused in the body; kept for interface compatibility
    :param prefix: unused in the body; kept for interface compatibility
    """
    # Ensure this is a public key
    pubkey = PublicKey(pubkey)
    if compressed:
        pubkey = pubkey.compressed()
    else:
        pubkey = pubkey.uncompressed()
    # Hash the hex-encoded key bytes: RIPEMD160(SHA256(pubkey))
    addressbin = ripemd160(hexlify(hashlib.sha256(unhexlify(pubkey)).digest()))
    return cls(hexlify(addressbin).decode("ascii"))
def generate_data_type(self, data_type):
    """Output a data type definition (a struct or union)."""
    if isinstance(data_type, Struct):
        # Output a struct definition.
        self.emit('')
        self.emit('struct %s' % data_type.name)
        with self.indent():
            if data_type.doc is not None:
                self.emit(self.format_string(data_type.doc))
            for field in data_type.fields:
                type_repr = self.format_data_type(field.data_type)
                if not field.has_default:
                    self.emit('%s %s' % (field.name, type_repr))
                else:
                    # Include the default value in the declaration.
                    self.emit('%s %s = %s' %
                              (field.name, type_repr, self.format_value(field.default)))
                if field.doc is not None:
                    # NOTE(review): field docs go through format_value here
                    # while type docs use format_string above — confirm the
                    # asymmetry is intended.
                    with self.indent():
                        self.emit(self.format_value(field.doc))
    elif isinstance(data_type, Union):
        # Output a union definition.
        self.emit('')
        self.emit('union %s' % data_type.name)
        with self.indent():
            if data_type.doc is not None:
                self.emit(self.format_string(data_type.doc))
            for field in data_type.fields:
                name = field.name
                # Add a star for a catch-all field.
                # (There are two ways to recognize these.)
                if field.catch_all or field is data_type.catch_all_field:
                    name += '*'
                if isinstance(field.data_type, Void):
                    # Void variants are emitted with no type.
                    self.emit('%s' % (name))
                else:
                    type_repr = self.format_data_type(field.data_type)
                    self.emit('%s %s' % (name, type_repr))
                if field.doc is not None:
                    with self.indent():
                        self.emit(self.format_value(field.doc))
    else:
        # Don't know what this is.
        self.emit('')
        self.emit('# ??? %s' % repr(data_type))
def print_bytes(data):
    """
    Visualize a byte stream: print "offset | hex bytes" to the console,
    eight bytes per line.

    :param data: bytes-like object to dump
    """
    bs = bytearray(data)
    width = 8
    for offset in range(0, len(bs), width):
        chunk = bs[offset:offset + width]
        line = str(offset) + " | " + " ".join(["%02X" % b for b in chunk])
        # The original appended a blank line after a partial final row;
        # keep that behavior.
        if len(chunk) < width:
            line += "\n"
        print(line)
    # BUGFIX: inputs shorter than 8 bytes previously printed offset "8"
    # with an empty hex column (the remainder was sliced past the end);
    # iterating by offset handles every length uniformly.
def __store_clustering_results(self, amount_clusters, leaf_blocks):
    """!
    @brief Stores clustering results in a convenient way.

    @param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
    @param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).

    """
    self.__clusters = [[] for _ in range(amount_clusters)]
    for block in leaf_blocks:
        index = block.get_cluster()
        # Blocks with no assigned cluster index contribute to noise.
        if index is not None:
            self.__clusters[index] += block.get_points()
        else:
            self.__noise += block.get_points()
    # Deduplicate points that appear in more than one block.
    self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]
    self.__noise = list(set(self.__noise))
def _validate( # pylint: disable=too-many-arguments
cls,
sign,
integer_part,
non_repeating_part,
repeating_part,
base
):
"""
Check if radix is valid.
:param int sign: -1, 0, or 1 as appropriate
:param integer_part: the part on the left side of the radix
:type integer_part: list of int
:param non_repeating_part: non repeating part on left side
:type non_repeating_part: list of int
:param repeating_part: repeating part
:type repeating_part: list of int
:param int base: base of the radix, must be at least 2
:returns: BasesValueError if invalid values
:rtype: BasesValueError or NoneType
Complexity: O(len(integer_part + non_repeating_part + repeating_part))
"""
if any(x < 0 or x >= base for x in integer_part):
return BasesValueError(
integer_part,
"integer_part",
"values must be between 0 and %s" % base
)
if any(x < 0 or x >= base for x in non_repeating_part):
return BasesValueError(
non_repeating_part,
"non_repeating_part",
"values must be between 0 and %s" % base
)
if any(x < 0 or x >= base for x in repeating_part):
return BasesValueError(
repeating_part,
"repeating_part",
"values must be between 0 and %s" % base
)
if base < 2:
return BasesValueError(base, "base", "must be at least 2")
if sign not in (-1, 0, 1) or sign is True or sign is False:
return BasesValueError(
sign,
"sign",
"must be an int between -1 and 1"
)
return None | Check if radix is valid.
:param int sign: -1, 0, or 1 as appropriate
:param integer_part: the part on the left side of the radix
:type integer_part: list of int
:param non_repeating_part: non repeating part on left side
:type non_repeating_part: list of int
:param repeating_part: repeating part
:type repeating_part: list of int
:param int base: base of the radix, must be at least 2
:returns: BasesValueError if invalid values
:rtype: BasesValueError or NoneType
Complexity: O(len(integer_part + non_repeating_part + repeating_part)) |
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt | Maxheap version of a heappop. |
def texts(self: object, fileids: str, plaintext: bool = True):
    """
    Yield the text content of each requested .tess file.

    :param fileids: file identifiers passed through to ``self.docs``
    :param plaintext: when True, strip the bracketed citation info
        (e.g. "<Ach. Tat. 1.1.0>") and trailing line breaks
    """
    for doc in self.docs(fileids):
        if plaintext:  # idiomatic truth test (was ``plaintext == True``)
            doc = re.sub(r'<.+?>\s', '', doc)  # Remove citation info
            doc = doc.rstrip()  # Clean up final line breaks
        yield doc
def collect_iptable(self, tablename):
    """Collect the rules of one iptables table without side effects.

    When running the iptables command, it unfortunately auto-loads
    the modules before trying to get output. Some people explicitly
    don't want this, so check if the modules are loaded before running
    the command. If they aren't loaded, there can't possibly be any
    relevant rules in that table.

    :param tablename: name of the iptables table (e.g. "nat")
    """
    modname = "iptable_"+tablename
    # Only invoke iptables if the table's kernel module is already loaded.
    if self.check_ext_prog("grep -q %s /proc/modules" % modname):
        cmd = "iptables -t "+tablename+" -nvL"
        self.add_cmd_output(cmd)
def average(sequence, key):
    """Return the arithmetic mean of ``key(item)`` over *sequence*."""
    total = sum(key(item) for item in sequence)
    return total / float(len(sequence))
def put_file(self, in_path, out_path):
    ''' transfer a file from local to remote '''
    vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
    if not os.path.exists(in_path):
        raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
    # Python 2 idiom: file() reads the whole payload; base64 keeps the
    # binary content safe through JSON serialization.
    data = file(in_path).read()
    data = base64.b64encode(data)
    data = dict(mode='put', data=data, out_path=out_path)
    # TODO: support chunked file transfer
    data = utils.jsonify(data)
    data = utils.encrypt(self.key, data)
    self.socket.send(data)
    response = self.socket.recv()
    response = utils.decrypt(self.key, response)
    response = utils.parse_json(response)
    # NOTE(review): the parsed response is discarded and nothing is
    # returned — confirm whether callers expect the response.
def lsa_twitter(cased_tokens):
    """Latent Semantic Analysis on a random sampling of twitter search
    results for the words listed in ``cased_tokens``.

    :param cased_tokens: case-sensitive seed tokens to keep in the vocab;
        ``None`` selects the default PyCon/NLP token list below.
    :return: trained LsiModel
    """
    # Only 5 of these tokens are saved for a no_below=2 filter:
    # PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing
    if cased_tokens is None:
        cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' +
                        'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip'
                        ).split()
        cased_tokens += [s + 's' for s in cased_tokens]
        cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \
                        'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split()
    # Keep every casing variant (and hashtag form) of the seed tokens.
    allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens]
    allcase_tokens += [s.title() for s in cased_tokens]
    allcase_tokens += [s.upper() for s in cased_tokens]
    KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens]
    # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs
    vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl')
    if os.path.isfile(vocab_path):
        print('Loading vocab: {} ...'.format(vocab_path))
        vocab = Dictionary.load(vocab_path)
        print(' len(vocab) loaded: {}'.format(len(vocab.dfs)))
    else:
        tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz')
        print('Loading tweets: {} ...'.format(tweets_path))
        tweets = read_csv(tweets_path)
        tweets = pd.np.array(tweets.text.str.split())
        with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f:
            for tokens in tweets:
                f.write((' '.join(tokens) + '\n').encode('utf-8'))
        # tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8'))
        # tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8'))
        # tweets.to_csv('tweets.csv.gz', compression='gzip')
        print('Computing vocab from {} tweets...'.format(len(tweets)))
        vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS))
    vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS))
    print(' len(vocab) after filtering: {}'.format(len(vocab.dfs)))
    # no time at all, just a bookeeping step, doesn't actually compute anything
    tfidf = TfidfModel(id2word=vocab, dictionary=vocab)
    tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs))))
    # NOTE(review): when the vocab pickle was loaded above, `tweets` is
    # never bound and the next line raises NameError — confirm whether
    # that path is expected to be unreachable.
    tweets = [vocab.doc2bow(tw) for tw in tweets]
    json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w'))
    gc.collect()
    # LSA is a more useful name than LSI
    lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2)
    return lsa
def validate(self, data):
    """Validate data. Raise NotValid error for invalid data."""
    validated = self._validated(data)
    # Collect the messages of every additional validator that rejects
    # the value, then fail once with all of them.
    failures = [
        "%s invalidated by '%s'" % (validated, _get_repr(check))
        for check in self.additional_validators
        if not check(validated)
    ]
    if failures:
        raise NotValid(*failures)
    if self.default is UNSPECIFIED:
        return validated
    # A configured default replaces null-equivalent values.
    if self.null_values is not UNSPECIFIED\
            and validated in self.null_values:
        return self.default
    return self.default if validated is not None else self.default if validated is None else validated
def select_many(
        self,
        collection_selector=identity,
        result_selector=identity):
    '''Map each source element to an intermediate sequence, flatten all of
    the intermediate sequences into one, and optionally map each flattened
    element through a result selector.

    Note: This method uses deferred execution.

    Args:
        collection_selector: A unary function mapping each element of the
            source iterable into an iterable intermediate sequence. The
            default, the identity function, assumes each element of the
            source sequence is itself iterable.

        result_selector: An optional unary function mapping each element
            of the flattened intermediate sequence to the corresponding
            element of the result sequence. Defaults to the identity
            function.

    Returns:
        A Queryable over the flattened intermediate sequence with each
        element projected through result_selector.

    Raises:
        ValueError: If this Queryable has been closed.
        TypeError: If either collection_selector or result_selector are not
            callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call select_many() on a closed "
                         "Queryable.")

    if not is_callable(collection_selector):
        raise TypeError("select_many() parameter projector={0} is not "
                        "callable".format(repr(collection_selector)))

    if not is_callable(result_selector):
        raise TypeError("select_many() parameter selector={selector} is "
                        " not callable".format(selector=repr(result_selector)))

    flattened = itertools.chain.from_iterable(self.select(collection_selector))
    return self._create(flattened).select(result_selector)
def masses(self):
    """
    :returns: peak masses
    :rtype: list of floats
    """
    # Allocate a C double array and let the native library fill it.
    buffer = ffi.new("double[]", self.size)
    ims.spectrum_masses(self.ptr, buffer)
    return [mass for mass in buffer]
def remove_entry_listener(self, registration_id):
    """
    Removes the specified entry listener. Returns silently if there is no such listener added before.

    :param registration_id: (str), id of registered listener.
    :return: (bool), ``true`` if registration is removed, ``false`` otherwise.
    """
    # The codec needs the map name plus the registration id; wrap that in
    # a small encoder callback for the generic stop-listening machinery.
    def encode(reg_id):
        return replicated_map_remove_entry_listener_codec.encode_request(self.name, reg_id)
    return self._stop_listening(registration_id, encode)
:param registration_id: (str), id of registered listener.
:return: (bool), ``true`` if registration is removed, ``false`` otherwise. |
def _find_utmp():
'''
Figure out which utmp file to use when determining runlevel.
Sometimes /var/run/utmp doesn't exist, /run/utmp is the new hotness.
'''
result = {}
# These are the likely locations for the file on Ubuntu
for utmp in '/var/run/utmp', '/run/utmp':
try:
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
if result:
return result[sorted(result).pop()]
else:
return False | Figure out which utmp file to use when determining runlevel.
Sometimes /var/run/utmp doesn't exist, /run/utmp is the new hotness. |
def registerDisplay(func):
    """
    Registers a function to the display hook queue to be called on hook.
    Look at the sys.displayhook documentation for more information.

    :param func | <callable>
    """
    setup()
    hook_ref = weakref.ref(func)
    # weakref.ref compares equal by referent, so this also detects a
    # previously registered callable wrapped in a different ref object.
    if hook_ref in _displayhooks:
        return
    _displayhooks.append(hook_ref)
Look at the sys.displayhook documentation for more information.
:param func | <callable> |
def upgrade():
    """Upgrade database.

    Creates the ``records_metadata`` table, its history counterpart
    ``records_metadata_version`` (one row per record version, keyed by
    ``(id, transaction_id)``), and the indexes used to look version rows
    up by transaction.
    """
    # Main table: one row per record; JSON payload uses the native
    # PostgreSQL JSON type when available (``none_as_null`` keeps SQL
    # NULL distinct from a JSON ``null`` value).
    op.create_table(
        'records_metadata',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column(
            'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
            sa.dialects.postgresql.JSON(
                none_as_null=True), 'postgresql',
        ), nullable=True),
        # Optimistic-concurrency counter, bumped on each update.
        sa.Column('version_id', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # Version table mirrors the main table's columns (non-autoincrement,
    # mostly nullable) plus the transaction bookkeeping columns.
    op.create_table(
        'records_metadata_version',
        sa.Column('created', sa.DateTime(),
                  autoincrement=False, nullable=True),
        sa.Column('updated', sa.DateTime(),
                  autoincrement=False, nullable=True),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(),
                  autoincrement=False, nullable=False),
        sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
            sa.dialects.postgresql.JSON(
                none_as_null=True), 'postgresql',
        ), autoincrement=False, nullable=True),
        sa.Column('version_id', sa.Integer(),
                  autoincrement=False, nullable=True),
        # Transaction that created this version row.
        sa.Column('transaction_id', sa.BigInteger(),
                  autoincrement=False, nullable=False),
        # Transaction that superseded it (NULL for the current version).
        sa.Column('end_transaction_id',
                  sa.BigInteger(), nullable=True),
        # Insert/update/delete marker for the versioning layer.
        sa.Column('operation_type',
                  sa.SmallInteger(), nullable=False),
        sa.PrimaryKeyConstraint('id', 'transaction_id')
    )
    # Indexes to make "versions touched by transaction X" queries cheap.
    op.create_index(
        op.f('ix_records_metadata_version_end_transaction_id'),
        'records_metadata_version', ['end_transaction_id'], unique=False
    )
    op.create_index(
        op.f('ix_records_metadata_version_operation_type'),
        'records_metadata_version', ['operation_type'], unique=False
    )
    op.create_index(
        op.f('ix_records_metadata_version_transaction_id'),
        'records_metadata_version', ['transaction_id'], unique=False
    )
def pdf_link(self, inv_link_f, y, Y_metadata=None):
    """
    Bernoulli likelihood evaluated at the inverse link of f.

    .. math::
        p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-f_{i})^{1-y_{i}}

    :param inv_link_f: latent variables inverse link of f.
    :type inv_link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata not used in bernoulli
    :returns: likelihood evaluated for this point
    :rtype: float

    .. Note:
        Each y_i must be in {0, 1}
    """
    # For binary y the general form inv_link_f**y * (1-inv_link_f)**(1-y)
    # reduces to a simple element-wise selection.
    success_prob = inv_link_f
    failure_prob = 1. - inv_link_f
    return np.where(y == 1, success_prob, failure_prob)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-f_{i})^{1-y_{i}}
:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in bernoulli
:returns: likelihood evaluated for this point
:rtype: float
.. Note:
Each y_i must be in {0, 1} |
def scheme_host_port_prefix(self, scheme='http', host='host',
                            port=None, prefix=None):
    """Return URI composed of scheme, server, port, and prefix."""
    parts = [scheme, '://', host]
    # Omit the port when it is the default for the scheme.
    is_default_port = ((scheme == 'http' and port == 80) or
                       (scheme == 'https' and port == 443))
    if port and not is_default_port:
        parts.append(':' + str(port))
    if prefix:
        parts.append('/' + prefix)
    return ''.join(parts)
def yyparse(self, lexfile):
    """
    Build a DFA from a flex lexer description file.

    Args:
        lexfile (str): Flex file to be parsed
    Returns:
        DFA: A dfa automaton
    """
    # Compile flex's output into a uniquely-named throwaway lex.yy.c in
    # the temp directory; it is parsed below and removed at the end.
    temp = tempfile.gettempdir()
    self.outfile = temp+'/'+''.join(
        random.choice(
            string.ascii_uppercase + string.digits) for _ in range(5)) + '_lex.yy.c'
    self._create_automaton_from_regex(lexfile)
    # delta is a callable (state, char) -> next state over 1-based states.
    states_num, delta = self._create_delta()
    states = self._create_states(states_num)
    accepted_states = self._read_accept_states()
    if self.alphabet != []:
        alphabet = self.alphabet
    else:
        alphabet = createalphabet()
    mma = DFA(alphabet)
    for state in states:
        # State 0 is skipped; flex states are 1-based, so every state id
        # is shifted down by one when inserted into the DFA.
        if state != 0:
            for char in alphabet:
                nextstate = delta(state, char)
                mma.add_arc(state - 1, nextstate - 1, char)
            if state in accepted_states:
                mma[state - 1].final = True
    # Clean up the generated C file.
    if os.path.exists(self.outfile):
        os.remove(self.outfile)
    return mma
def get_new_pid(institute):
    """ Return a new Project ID

    The PID is ``p`` + first four characters of the institute name
    (spaces removed) + a zero-padded counter, advanced until an unused
    PID is found.

    Keyword arguments:
    institute_id -- Institute id
    """
    number = '0001'
    prefix = 'p%s' % institute.name.replace(' ', '')[:4]
    found = True
    while found:
        try:
            Project.objects.get(pid=prefix + number)
            # PID already taken: advance the counter and re-pad to four
            # digits (zfill replaces the old if/elif padding ladder).
            number = str(int(number) + 1).zfill(4)
        except Project.DoesNotExist:
            found = False
    return prefix + number
Keyword arguments:
institute_id -- Institute id |
def response_add(self, request, obj, post_url_continue=POST_URL_CONTINUE):
    """If we're adding, save must be "save and continue editing"

    Two exceptions to that workflow:
    * The user has pressed the 'Save and add another' button
    * We are adding a user in a popup
    """
    posted = request.POST
    wants_another = '_addanother' in posted
    in_popup = '_popup' in posted
    if not (wants_another or in_popup):
        # Force the "save and continue editing" behaviour.
        posted['_continue'] = 1
    return super(ExportAdmin, self).response_add(
        request, obj, post_url_continue)
Two exceptions to that workflow:
* The user has pressed the 'Save and add another' button
* We are adding a user in a popup |
def concatenate_variables(scope, variables, container):
    '''
    Allocate the operators needed to form one float tensor by concatenating
    all input variables. Any integer inputs are converted to floats before
    concatenation.

    :param scope: graph scope used to generate unique operator/variable names
    :param variables: input variables to concatenate (assumed shape [1, C_i])
    :param container: model container the new nodes are added to
    :return: name of the concatenated variable (or the single input's name)
    '''
    # Check if it's possible to concatenate those inputs.
    type_set = set(type(variable.type) for variable in variables)
    number_type_set = {FloatType, FloatTensorType, Int64Type, Int64TensorType}
    # Mixing strings with numbers cannot be represented by FeatureVectorizer.
    if StringType in type_set and any(number_type in type_set for number_type in number_type_set):
        raise RuntimeError('We are not able to concatenate numerical tensor(s) and string tensor(s)')
    input_names = []  # input variables' names we want to concatenate
    input_dims = []  # dimensions of the variables that is going to be concatenated
    # Collect input variable names and do cast if needed
    for variable in variables:
        if isinstance(variable.type, (Int64TensorType, Int64Type)):
            # Insert a cast node and use its output instead.
            input_names.append(convert_integer_to_float(scope, variable, container))
        else:
            input_names.append(variable.full_name)
        # We assume input variables' shape are [1, C_1], ..., [1, C_n] if there are n inputs.
        input_dims.append(variable.type.shape[1])
    if len(input_names) == 1:
        # No need to concatenate tensors if there is only one input
        return input_names[0]
    else:
        # To combine all inputs, we need a FeatureVectorizer
        op_type = 'FeatureVectorizer'
        attrs = {'name': scope.get_unique_operator_name(op_type), 'inputdimensions': input_dims}
        # Create a variable name to capture feature vectorizer's output
        concatenated_name = scope.get_unique_variable_name('concatenated')
        # Set up our FeatureVectorizer
        container.add_node(op_type, input_names, concatenated_name, op_domain='ai.onnx.ml', **attrs)
        return concatenated_name
integer inputs would be converted to floats before concatenation. |
def formatter(self):
    """
    Creates and returns a Formatter capable of nicely formatting Lambda function logs

    Returns
    -------
    LogsFormatter
    """
    highlighter = KeywordHighlighter(self._filter_pattern)
    # Order matters: JSON must be pretty-printed *before* keyword
    # highlighting, otherwise the injected ANSI color codes would make the
    # payload invalid JSON and pretty-printing would fail.
    chain = [
        LambdaLogMsgFormatters.colorize_errors,
        JSONMsgFormatter.format_json,
        highlighter.highlight_keywords,
    ]
    return LogsFormatter(self.colored, chain)
Returns
-------
LogsFormatter |
def _set_logging(
logger_name="colin",
level=logging.INFO,
handler_class=logging.StreamHandler,
handler_kwargs=None,
format='%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s',
date_format='%H:%M:%S'):
"""
Set personal logger for this library.
:param logger_name: str, name of the logger
:param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler
:param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)
:param handler_kwargs: dict, keyword arguments to handler's constructor
:param format: str, formatting style
:param date_format: str, date style in the logs
"""
if level != logging.NOTSET:
logger = logging.getLogger(logger_name)
logger.setLevel(level)
# do not readd handlers if they are already present
if not [x for x in logger.handlers if isinstance(x, handler_class)]:
handler_kwargs = handler_kwargs or {}
handler = handler_class(**handler_kwargs)
handler.setLevel(level)
formatter = logging.Formatter(format, date_format)
handler.setFormatter(formatter)
logger.addHandler(handler) | Set personal logger for this library.
:param logger_name: str, name of the logger
:param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler
:param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)
:param handler_kwargs: dict, keyword arguments to handler's constructor
:param format: str, formatting style
:param date_format: str, date style in the logs |
def _add_instruction(self, instruction, value):
    """
    Append a new Dockerfile instruction line to ``self.lines``.

    :param instruction: instruction name to be added (e.g. 'LABEL', 'ENV')
    :param value: instruction value; for LABEL/ENV a 2-item pair is
        rendered as a shell-quoted ``key=value``
    """
    # LABEL/ENV key-value pairs get shell quoting around both halves.
    if (instruction == 'LABEL' or instruction == 'ENV') and len(value) == 2:
        new_line = instruction + ' ' + '='.join(map(quote, value)) + '\n'
    else:
        new_line = '{0} {1}\n'.format(instruction, value)
    if new_line:
        lines = self.lines
        # Ensure the previous last line is newline-terminated first.
        if not lines[len(lines) - 1].endswith('\n'):
            new_line = '\n' + new_line
        # NOTE(review): if ``self.lines`` is a list, ``lines += new_line``
        # extends it character-by-character (a str is iterable). Verify the
        # ``lines`` property/setter expects this, or whether
        # ``lines += [new_line]`` was intended.
        lines += new_line
        self.lines = lines
:param value: instruction value |
def json(self) -> dict:
    """Returns json compatible state of the Button instance.

    Returns:
        control_json: Json representation of Button state.
    """
    # Pack name and callback under the 'content' key of the control json
    # and hand back the (mutated) control_json dict itself.
    self.control_json['content'] = {
        'name': self.name,
        'callback': self.callback,
    }
    return self.control_json
Returns:
control_json: Json representation of Button state. |
def filter_data(self, field, filter_value, filter_operator, field_converter=None):
    """Filter the data given the provided.

    Args:
        field (string): The field to filter on.
        filter_value (string | list): The value to match.
        filter_operator (string): The operator for comparison.
        field_converter (method): A method used to convert the field before comparison.

    Returns:
        (set): List of matching data objects
    """
    index = self._indexes.get(field)
    if index is None:
        # No index exists for this field: nothing can match.
        matched = []
    else:
        matched = self._index_filter(
            index, filter_value, filter_operator, field_converter)
    return set(matched)
Args:
field (string): The field to filter on.
filter_value (string | list): The value to match.
filter_operator (string): The operator for comparison.
field_converter (method): A method used to convert the field before comparison.
Returns:
(set): List of matching data objects |
def _reload(self, force=False):
    """Reloads the configuration from the file and environment variables.

    Useful if using `os.environ` instead of this class' `set_env` method,
    or if the underlying configuration file is changed externally.
    """
    # Throw away all cached state before re-reading every source.
    self._config_map = {}
    self._registered_env_keys = set()
    self.__reload_sources(force)
    self.__load_environment_keys()
    self.verify()
    self._clear_memoization()
`os.environ` instead of this class' `set_env` method, or if the underlying configuration
file is changed externally. |
def parse_port_from_tensorboard_output(tensorboard_output: str) -> int:
    """
    Parse tensorboard port from its outputted message.

    :param tensorboard_output: Output message of Tensorboard
        in format TensorBoard 1.8.0 at http://martin-VirtualBox:36869
    :return: Returns the port TensorBoard is listening on.
    :raise UnexpectedOutputError
    """
    # Grab the digits following "host:" in the announced URL.
    match = re.search("at http://[^:]+:([0-9]+)", tensorboard_output)
    if match is None:
        raise UnexpectedOutputError(tensorboard_output, "Address and port where Tensorboard has started,"
                                                        " e.g. TensorBoard 1.8.0 at http://martin-VirtualBox:36869")
    return int(match.group(1))
:param tensorboard_output: Output message of Tensorboard
in format TensorBoard 1.8.0 at http://martin-VirtualBox:36869
:return: Returns the port TensorBoard is listening on.
:raise UnexpectedOutputError |
def validate(self):
    """
    Cleans and validates the field values
    """
    for field_name, field in self._fields.items():
        current = getattr(self, field_name)
        # Fall back to the field's default only when the value was never
        # explicitly provided.
        if current is None and not self._values[field_name].explicit and field.has_default:
            current = field.get_default()
        setattr(self, field_name, field.validate(current))
def get_alexa_rankings(self, domains):
    """Retrieves the most recent VT info for a set of domains.

    Args:
        domains: list of string domains.
    Returns:
        A dict with the domain as key and the VT report as value.
    """
    api_name = 'alexa_rankings'
    # Cached domains are resolved up front; only misses are fetched.
    all_responses, domains = self._bulk_cache_lookup(api_name, domains)
    raw_responses = self._request_reports(domains)
    for domain, raw in zip(domains, raw_responses):
        parsed = self._extract_response_xml(domain, raw)
        if self._cache:
            # The raw (unparsed) response is what gets cached.
            self._cache.cache_value(api_name, domain, raw)
        all_responses[domain] = parsed
    return all_responses
return all_responses | Retrieves the most recent VT info for a set of domains.
Args:
domains: list of string domains.
Returns:
A dict with the domain as key and the VT report as value. |
def ll(self,*args,**kwargs):
    """
    NAME:

       ll

    PURPOSE:

       return Galactic longitude

    INPUT:

       t - (optional) time at which to get ll

       obs=[X,Y,Z] - (optional) position of observer (in kpc)
                     (default=Object-wide default)
                     OR Orbit object that corresponds to the orbit
                     of the observer
                     Y is ignored and always assumed to be zero

       ro= distance in kpc corresponding to R=1. (default=Object-wide default)

    OUTPUT:

       l(t)

    HISTORY:

       2011-02-23 - Written - Bovy (NYU)

    """
    # ro must be set to convert to physical (l, b, d) coordinates.
    _check_roSet(self,kwargs,'ll')
    # Column 0 of the (l, b, d) array is the Galactic longitude.
    return self._lbd(*args,**kwargs)[:,0]
ll
PURPOSE:
return Galactic longitude
INPUT:
t - (optional) time at which to get ll
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
l(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU) |
def erase_screen(self):
    """
    Erase output screen.

    Clears the terminal, moves the cursor back to the top-left corner,
    and flushes the pending escape sequences to the output.
    """
    self.vt100_output.erase_screen()
    self.vt100_output.cursor_goto(0, 0)
    # Flush so the escape sequences take effect immediately.
    self.vt100_output.flush()
def _join(lst, key, sep=";"):
"""Auxiliary function to join same elements of a list of dictionaries if
the elements are not None.
"""
return sep.join([d[key] for d in lst if d[key]]) | Auxiliary function to join same elements of a list of dictionaries if
the elements are not None. |
def _read_audio_data(self, file_path):
    """
    Read audio data from file.

    :param file_path: path of the audio file to read
    :rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception
    """
    try:
        self.log(u"Reading audio data...")
        # if we know the TTS outputs to PCM16 mono WAVE
        # with the correct sample rate,
        # we can read samples directly from it,
        # without an intermediate conversion through ffmpeg
        audio_file = AudioFile(
            file_path=file_path,
            file_format=self.OUTPUT_AUDIO_FORMAT,
            rconf=self.rconf,
            logger=self.logger
        )
        audio_file.read_samples_from_file()
        self.log([u"Duration of '%s': %f", file_path, audio_file.audio_length])
        self.log(u"Reading audio data... done")
        return (True, (
            audio_file.audio_length,
            audio_file.audio_sample_rate,
            audio_file.audio_format,
            audio_file.audio_samples
        ))
    except (AudioFileUnsupportedFormatError, OSError) as exc:
        # Log and fall through to the failure tuple instead of raising.
        self.log_exc(u"An unexpected error occurred while reading audio data", exc, True, None)
        return (False, None)
:rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception |
def _makeTags(tagStr, xml,
              suppress_LT=Suppress("<"),
              suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    ``xml=True`` builds strict XML tags (case-sensitive, double-quoted
    attribute values); ``xml=False`` builds lenient HTML tags
    (caseless names, optional/unquoted attribute values).
    Returns an ``(openTag, closeTag)`` pair of parser expressions.
    """
    # A plain string becomes a Keyword; a pre-built expression supplies
    # its own results name.
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted.
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = (suppress_LT
                   + tagStr("tag")
                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue )))
                   + Optional("/", default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
                   + suppress_GT)
    else:
        # HTML: values may be quoted or bare, and may be omitted entirely;
        # attribute names are lower-cased.
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printables, excludeChars=">")
        openTag = (suppress_LT
                   + tagStr("tag")
                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
                                           + Optional(Suppress("=") + tagAttrValue))))
                   + Optional("/",default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
                   + suppress_GT)
    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
    openTag.setName("<%s>" % resname)
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.addParseAction(lambda t: t.__setitem__("start"+"".join(resname.replace(":"," ").title().split()), t.copy()))
    closeTag = closeTag("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    # Convenience expression matching everything up to the closing tag.
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
def visit_Num(self, node: ast.Num) -> Union[int, float]:
    """Recompute the value as the number at the node."""
    value = node.n
    # Memoize so later passes can look the recomputed value up by node.
    self.recomputed_values[node] = value
    return value
def get_log_lookup_session(self, proxy):
    """Gets the ``OsidSession`` associated with the log lookup service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_log_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_log_lookup()`` is ``true``.*
    """
    if self.supports_log_lookup():
        # pylint: disable=no-member
        return sessions.LogLookupSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogLookupSession) - a ``LogLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_lookup()`` is ``true``.* |
async def fetch_emoji(self, emoji_id):
    """|coro|

    Retrieves a custom :class:`Emoji` from the guild.

    .. note::

        This method is an API call.
        For general usage, consider iterating over :attr:`emojis` instead.

    Parameters
    -------------
    emoji_id: :class:`int`
        The emoji's ID.

    Raises
    ---------
    NotFound
        The emoji requested could not be found.
    HTTPException
        An error occurred fetching the emoji.

    Returns
    --------
    :class:`Emoji`
        The retrieved emoji.
    """
    state = self._state
    # Fetch the raw emoji payload over HTTP and wrap it in the model type.
    payload = await state.http.get_custom_emoji(self.id, emoji_id)
    return Emoji(guild=self, state=state, data=payload)
Retrieves a custom :class:`Emoji` from the guild.
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`emojis` instead.
Parameters
-------------
emoji_id: :class:`int`
The emoji's ID.
Raises
---------
NotFound
The emoji requested could not be found.
HTTPException
An error occurred fetching the emoji.
Returns
--------
:class:`Emoji`
The retrieved emoji. |
def add_tweets(self, url, last_modified, tweets):
    """Adds new tweets to the cache.

    Returns True on success, False when the cache cannot be indexed.
    """
    entry = {"last_modified": last_modified, "tweets": tweets}
    try:
        self.cache[url] = entry
        self.mark_updated()
        return True
    except TypeError:
        # self.cache is not a mapping (e.g. None) -- report failure.
        return False
def enableEditing(self, editable=True):
    """
    Sets the DataFrameModel and columnDtypeModel's
    editable properties.

    :param editable: bool
        defaults to True,
        False disables most editing methods.
    :return:
        None
    """
    self.editable = editable
    # Keep the dtype model's editability in sync with this model's flag.
    self._columnDtypeModel.setEditable(self.editable)
editable properties.
:param editable: bool
defaults to True,
False disables most editing methods.
:return:
None |
def minimum_image(self):
    """Align the system according to the minimum image convention.

    :return: self (allows call chaining)
    :raises ValueError: if no periodic box vectors are defined
    """
    if self.box_vectors is None:
        raise ValueError('No periodic vectors defined')
    else:
        # NOTE: this calls the module-level ``minimum_image`` function,
        # which shares a name with this method.
        self.r_array = minimum_image(self.r_array, self.box_vectors.diagonal())
    return self
def add_property(self, property_):
    """
    Add a property to this thing.

    property_ -- property to add
    """
    # Propagate this thing's href prefix to the property, then register
    # the property under its own name.
    property_.set_href_prefix(self.href_prefix)
    self.properties[property_.name] = property_
property_ -- property to add |
def M200(self, Rs, rho0, c):
    """
    M(R_200) calculation for NFW profile

    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :param c: concentration
    :type c: float [4,40]
    :return: M(R_200) density
    """
    # Analytic NFW enclosed mass: 4*pi*rho0*Rs^3 * [ln(1+c) - c/(1+c)]
    mu = np.log(1. + c) - c / (1. + c)
    return 4 * np.pi * rho0 * Rs ** 3 * mu
return 4*np.pi*rho0*Rs**3*(np.log(1.+c)-c/(1.+c)) | M(R_200) calculation for NFW profile
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param c: concentration
:type c: float [4,40]
:return: M(R_200) density |
def python_like_mod_finder(import_line, alt_path=None,
                           stop_token=None):
    """
    Locate a module path based on an import line in an python-like file.

    import_line is the line of source code containing the import
    alt_path specifies an alternate base path for the module
    stop_token specifies the desired name to stop on

    This is used to a find the path to python-like modules
    (e.g. cython and enaml) for a goto definition.

    Returns the resolved file path, or None (implicitly) when nothing
    on disk matches.
    """
    # Only the last attribute of a dotted stop token matters.
    if stop_token and '.' in stop_token:
        stop_token = stop_token.split('.')[-1]
    tokens = re.split(r'\W', import_line)
    if tokens[0] in ['from', 'import']:
        # find the base location
        try:
            _, path, _ = imp.find_module(tokens[1])
        except ImportError:
            if alt_path:
                path = osp.join(alt_path, tokens[1])
            else:
                path = None
        if path:
            path = osp.realpath(path)
            # Walk the remaining dotted parts down the directory tree,
            # stopping at keywords or the requested stop token.
            if not tokens[1] == stop_token:
                for part in tokens[2:]:
                    if part in ['import', 'cimport', 'as']:
                        break
                    path = osp.join(path, part)
                    if part == stop_token:
                        break
            # from package import module
            if stop_token and not stop_token in path:
                for ext in python_like_exts():
                    fname = '%s%s' % (stop_token, ext)
                    if osp.exists(osp.join(path, fname)):
                        return osp.join(path, fname)
            # from module import name
            for ext in python_like_exts():
                fname = '%s%s' % (path, ext)
                if osp.exists(fname):
                    return fname
            # if it is a file, return it
            if osp.exists(path) and not osp.isdir(path):
                return path
            # default to the package file
            path = osp.join(path, '__init__.py')
            if osp.exists(path):
                return path
def add_perf_task(task, auth, url):
    """
    function takes the a python dict containing all necessary fields for a performance tasks,
    transforms the dict into JSON and issues a RESTFUL call to create the performance task. device.

    :param task: dictionary containing all required fields for performance tasks
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: 204
    :rtype: str

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.perf import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_task = {'indexDesc': '1.3.6.1.4.1.9.9.13.1.3.1.3',
                    'indexType': '[index1[0]:ciscoEnvMonTemperatureStatusValue:1:0]',
                    'itemFunction': '1.3.6.1.4.1.9.9.13.1.3.1.3',
                    'itemName': 'Cisco_Temperature',
                    'selectDefaultUnit': '400',
                    'unit': 'Celsius'}
    >>> new_perf_task = add_perf_task(new_task, auth.creds, auth.url)
    """
    add_perf_task_url = "/imcrs/perf/task"
    f_url = url + add_perf_task_url
    payload = json.dumps(task)
    try:
        # The POST is what can raise RequestException, so it must live
        # inside the try block; the original only guarded the (non-raising)
        # attribute access on an already-created response object.
        response = requests.post(f_url, data=payload, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' add_perf_task: An Error has occured'
transforms the dict into JSON and issues a RESTFUL call to create the performance task. device.
:param task: dictionary containing all required fields for performance tasks
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: 204
:rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.perf import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_task = {'indexDesc': '1.3.6.1.4.1.9.9.13.1.3.1.3',
'indexType': '[index1[0]:ciscoEnvMonTemperatureStatusValue:1:0]',
'itemFunction': '1.3.6.1.4.1.9.9.13.1.3.1.3',
'itemName': 'Cisco_Temperature',
'selectDefaultUnit': '400',
'unit': 'Celsius'}
>>> new_perf_task = add_perf_task(new_task, auth.creds, auth.url) |
def get_subtree(self, name):  # noqa: D302
    r"""
    Get all node names in a sub-tree.

    :param name: Sub-tree root node name
    :type  name: :ref:`NodeName`

    :rtype: list of :ref:`NodeName`

    :raises:
     * RuntimeError (Argument \`name\` is not valid)

     * RuntimeError (Node *[name]* not in tree)

    Using the same example tree created in
    :py:meth:`ptrie.Trie.add_nodes`::

        >>> from __future__ import print_function
        >>> import docs.support.ptrie_example, pprint
        >>> tobj = docs.support.ptrie_example.create_tree()
        >>> print(tobj)
        root
        ├branch1 (*)
        │├leaf1
        ││└subleaf1 (*)
        │└leaf2 (*)
        │ └subleaf2
        └branch2
        >>> pprint.pprint(tobj.get_subtree('root.branch1'))
        ['root.branch1',
         'root.branch1.leaf1',
         'root.branch1.leaf1.subleaf1',
         'root.branch1.leaf2',
         'root.branch1.leaf2.subleaf2']
    """
    # Validate the name format first, then membership (order matters for
    # which error the caller sees), then delegate the actual traversal.
    if self._validate_node_name(name):
        raise RuntimeError("Argument `name` is not valid")
    self._node_in_tree(name)
    return self._get_subtree(name)
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2'] |
def _unique(list_of_dicts):
'''
Returns an unique list of dictionaries given a list that may contain duplicates.
'''
unique_list = []
for ele in list_of_dicts:
if ele not in unique_list:
unique_list.append(ele)
return unique_list | Returns an unique list of dictionaries given a list that may contain duplicates. |
def generate_np(self, x_val, **kwargs):
    """
    Generate adversarial examples and return them as a NumPy array.

    Sub-classes *should not* implement this method unless they must
    perform special handling of arguments.

    :param x_val: A NumPy array with the original inputs.
    :param **kwargs: optional parameters used by child classes.
    :return: A NumPy array holding the adversarial examples.
    :raises ValueError: if no TF session was supplied at construction.
    """
    if self.sess is None:
        raise ValueError("Cannot use `generate_np` when no `sess` was"
                         " provided")
    # Split kwargs into graph-structure-fixed vs feedable arguments;
    # hash_key identifies the (cached) graph for this combination.
    packed = self.construct_variables(kwargs)
    fixed, feedable, _, hash_key = packed

    if hash_key not in self.graphs:
        # First time with this argument combination: build and cache the graph.
        self.construct_graph(fixed, feedable, x_val, hash_key)
    else:
        # remove the None arguments, they are just left blank
        for k in list(feedable.keys()):
            if feedable[k] is None:
                del feedable[k]

    x, new_kwargs, x_adv = self.graphs[hash_key]

    # Feed the inputs plus every feedable kwarg into the cached graph.
    feed_dict = {x: x_val}

    for name in feedable:
        feed_dict[new_kwargs[name]] = feedable[name]

    return self.sess.run(x_adv, feed_dict)
Sub-classes *should not* implement this method unless they must
perform special handling of arguments.
:param x_val: A NumPy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples. |
def init_threads(tf_session):
    """ Starts threads running """
    coord = tf.train.Coordinator()
    threads = []
    # Launch the threads of every registered queue runner, all sharing
    # one coordinator so they can be stopped together.
    for runner in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads += runner.create_threads(tf_session, coord=coord, daemon=True, start=True)
    return threads, coord
def connect(self):
    """
    Connect to a host on a given (SSL) port using PyOpenSSL.

    Opens a TCP connection and upgrades it to TLS, either through
    PyOpenSSL (with revocation checking) or the stdlib ``ssl`` module,
    depending on the USE_STDLIB_SSL flag.
    """
    sock = socket.create_connection((self.host, self.port), self.timeout)
    if not USE_STDLIB_SSL:
        ssl_ctx = configure_pyopenssl_context(self.credentials)

        # attempt to upgrade the socket to TLS
        cxn = OpenSSL.SSL.Connection(ssl_ctx, sock)
        cxn.set_connect_state()
        # PyOpenSSL handshakes are non-blocking: retry until the socket
        # is readable whenever WantReadError is raised.
        while True:
            try:
                cxn.do_handshake()
            except OpenSSL.SSL.WantReadError:
                select.select([sock], [], [])
                continue
            except OpenSSL.SSL.Error as e:
                raise SecurityError('bad handshake - ' + str(e))
            break

        self.sock = RiakWrappedSocket(cxn, sock)
        # Reject the peer certificate if it appears on the CRL.
        self.credentials._check_revoked_cert(self.sock)
    else:
        ssl_ctx = configure_ssl_context(self.credentials)
        if self.timeout is not None:
            sock.settimeout(self.timeout)
        self.sock = ssl.SSLSocket(sock=sock,
                                  keyfile=self.credentials.pkey_file,
                                  certfile=self.credentials.cert_file,
                                  cert_reqs=ssl.CERT_REQUIRED,
                                  ca_certs=self.credentials.cacert_file,
                                  ciphers=self.credentials.ciphers,
                                  server_hostname=self.host)
        self.sock.context = ssl_ctx
def decode(self, encoded):
    """ Decodes a tensor into a sequence.
    Args:
        encoded (torch.Tensor): Encoded sequence.
    Returns:
        str: Sequence decoded from ``encoded``.
    """
    # Delegate base-class preprocessing of ``encoded`` first.
    encoded = super().decode(encoded)
    # Map each index back to its token via itos, then let the tokenizer
    # reassemble the token list into a string.
    return self.tokenizer.decode([self.itos[index] for index in encoded]) | Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``. |
def execute(self, action):
    """Execute the indicated action within the environment and
    return the resulting immediate reward dictated by the reward
    program.
    Usage:
        immediate_reward = scenario.execute(selected_action)
    Arguments:
        action: The action to be executed within the current situation.
    Return:
        A float, the reward received for the action that was executed,
        or None if no reward is offered.
    """
    self.logger.debug('Executing action: %s', action)
    # Delegate to the wrapped scenario; this wrapper only adds logging
    # and running statistics.
    reward = self.wrapped.execute(action)
    # NOTE(review): this truthiness test skips the bookkeeping when the
    # reward is exactly 0 (a valid reward per the docstring) — likely
    # should be `if reward is not None:`. Left as-is to preserve behavior.
    if reward:
        self.total_reward += reward
        self.steps += 1
    self.logger.debug('Reward received on this step: %.5f',
                      reward or 0)
    self.logger.debug('Average reward per step: %.5f',
                      self.total_reward / self.steps)
    return reward | Execute the indicated action within the environment and
return the resulting immediate reward dictated by the reward
program.
Usage:
immediate_reward = scenario.execute(selected_action)
Arguments:
action: The action to be executed within the current situation.
Return:
A float, the reward received for the action that was executed,
or None if no reward is offered. |
def _save_upload_state_to_file(self):
    """if create and create_file has execute, save upload state
    to file for next resume upload if current upload process is
    interrupted.
    """
    # Only persist state if the containing directory is fully accessible
    # (read/write/traverse); otherwise silently skip saving.
    if os.access(self.file_dir, os.W_OK | os.R_OK | os.X_OK):
        # State file lives next to the uploaded file, with a '.upload' suffix.
        save_file = self.file + '.upload'
        data = {
            'upload_token': self.upload_token,
            'upload_server_ip': self.upload_server_ip
        }
        with open(save_file, 'w') as f:
            json.dump(data, f) | if create and create_file has execute, save upload state
to file for next resume upload if current upload process is
interrupted. |
def append_dict_key_value(
        in_dict,
        keys,
        value,
        delimiter=DEFAULT_TARGET_DELIM,
        ordered_dict=False):
    '''
    Ensures that in_dict contains the series of recursive keys defined in keys.
    Also appends `value` to the list that is at the end of `in_dict` traversed
    with `keys`.
    :param dict in_dict: The dictionary to work with
    :param str keys: The delimited string with one or more keys.
    :param any value: The value to append to the nested dict-key.
    :param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
    :param bool ordered_dict: Create OrderedDicts if keys are missing.
                              Default: create regular dicts.
    :return dict: Though it updates in_dict in-place.
    '''
    # Walk/create all but the last key; dict_pointer is the dict that will
    # hold the list, last_key is the final path component.
    dict_pointer, last_key = _dict_rpartition(in_dict,
                                              keys,
                                              delimiter=delimiter,
                                              ordered_dict=ordered_dict)
    # Initialize the leaf to an empty list when missing or explicitly None.
    if last_key not in dict_pointer or dict_pointer[last_key] is None:
        dict_pointer[last_key] = []
    try:
        dict_pointer[last_key].append(value)
    except AttributeError:
        # The existing leaf is not a list (has no .append) — surface a
        # clear error naming the offending type.
        raise SaltInvocationError('The last key contains a {}, which cannot append.'
                                  ''.format(type(dict_pointer[last_key])))
    return in_dict | Ensures that in_dict contains the series of recursive keys defined in keys.
Also appends `value` to the list that is at the end of `in_dict` traversed
with `keys`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to append to the nested dict-key.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place. |
def norm(self, x):
    """Return the weighted norm of ``x``.
    Parameters
    ----------
    x : `NumpyTensor`
        Tensor whose norm is calculated.
    Returns
    -------
    norm : float
        The norm of the tensor.
    """
    # For p == 2 the weight enters under the square root: sqrt(c) * ||x||_2.
    if self.exponent == 2.0:
        return float(np.sqrt(self.const) * _norm_default(x))
    # For p == inf the constant scales the sup-norm directly.
    elif self.exponent == float('inf'):
        return float(self.const * _pnorm_default(x, self.exponent))
    # General p: weight enters as c^(1/p) * ||x||_p.
    else:
        return float((self.const ** (1 / self.exponent) *
                      _pnorm_default(x, self.exponent))) | Return the weighted norm of ``x``.
Parameters
----------
x1 : `NumpyTensor`
Tensor whose norm is calculated.
Returns
-------
norm : float
The norm of the tensor. |
def remove_phenotype(self, ind_obj, phenotypes=None):
    """Remove multiple phenotypes from an individual.

    If ``phenotypes`` is None, every phenotype linked to the individual is
    deleted; otherwise only terms whose id is in ``phenotypes`` are removed.
    The HPO list of every case the individual belongs to is refreshed.
    """
    if phenotypes is None:
        # Bulk-delete all phenotype terms for this individual.
        logger.info("delete all phenotypes related to %s", ind_obj.ind_id)
        self.query(PhenotypeTerm).filter_by(ind_id=ind_obj.id).delete()
    else:
        for term in ind_obj.phenotypes:
            if term.phenotype_id in phenotypes:
                logger.info("delete phenotype: %s from %s",
                            term.phenotype_id, ind_obj.ind_id)
                self.session.delete(term)
    logger.debug('persist removals')
    self.save()
    # Keep derived per-case HPO lists consistent with the removals.
    for case_obj in ind_obj.cases:
        self.update_hpolist(case_obj) | Remove multiple phenotypes from an individual. |
def _import_plugin(module_name, plugin_path, modnames, modlist):
    """Import the plugin `module_name` from `plugin_path`, add it to `modlist`
    and adds its name to `modnames`.
    """
    # Already imported — nothing to do.
    if module_name in modnames:
        return
    try:
        # First add a mock module with the LOCALEPATH attribute so that the
        # helper method can find the locale on import
        mock = _ModuleMock()
        mock.LOCALEPATH = osp.join(plugin_path, module_name, 'locale')
        sys.modules[module_name] = mock
        if osp.isdir(osp.join(plugin_path, module_name)):
            module = _import_module_from_path(module_name, plugin_path)
        else:
            module = None
        # Then restore the actual loaded module instead of the mock
        # NOTE(review): when the import fails or the module lacks a
        # PLUGIN_CLASS attribute, the mock stays in sys.modules — possibly
        # intentional (avoids retrying a broken plugin); verify.
        if module and getattr(module, 'PLUGIN_CLASS', False):
            sys.modules[module_name] = module
            modlist.append(module)
            modnames.append(module_name)
    except Exception:
        # Broad catch by design: a faulty third-party plugin must not crash
        # the host application; report and continue.
        sys.stderr.write("ERROR: 3rd party plugin import failed for "
                         "`{0}`\n".format(module_name))
        traceback.print_exc(file=sys.stderr) | Import the plugin `module_name` from `plugin_path`, add it to `modlist`
and adds its name to `modnames`. |
def generate_seeds(num, root_seed, secret):
    """ Deterministically generate list of seeds from a root seed.
    :param num: Numbers of seeds to generate as int
    :param root_seed: Seed to start off with.
    :param secret: Secret key material mixed into each hash round.
    :return: seed values as a list of length num
    """
    # Generate a starting seed from the root
    # NOTE(review): the condition admits num == 0 although the message says
    # "not greater than 0" — confirm whether 0 should be rejected too.
    if num < 0:
        raise HeartbeatError('%s is not greater than 0' % num)
    if secret is None:
        raise HeartbeatError('secret can not be of type NoneType')
    seeds = []
    try:
        tmp_seed = hashlib.sha256(root_seed).digest()
    except TypeError:
        # root_seed was not bytes-like; fall back to hashing its string form.
        tmp_seed = hashlib.sha256(str(root_seed).encode()).digest()
    # Deterministically generate the rest of the seeds
    # Each seed is SHA-256(previous_seed || secret), forming a hash chain.
    for x in range(num):
        seeds.append(tmp_seed)
        h = hashlib.sha256(tmp_seed)
        h.update(secret)
        tmp_seed = h.digest()
    return seeds | Deterministically generate list of seeds from a root seed.
:param num: Numbers of seeds to generate as int
:param root_seed: Seed to start off with.
:return: seed values as a list of length num |
def assert_operations(self, *args):
    """Asserts that the requested operations are allowed in this context.

    Raises ``http.exceptions.Forbidden`` if any requested operation is
    not in ``self.allowed_operations``.
    """
    if not set(args).issubset(self.allowed_operations):
        raise http.exceptions.Forbidden() | Assets if the requested operations are allowed in this context. |
def start(self):
    """Start IOLoop in daemonized thread."""
    # Guard against double start.
    assert self._thread is None, 'thread already started'
    # configure thread
    self._thread = Thread(target=self._start_io_loop)
    self._thread.daemon = True
    # begin thread and block until ready
    self._thread.start()
    # _ready is expected to be set by _start_io_loop once the loop is running.
    self._ready.wait() | Start IOLoop in daemonized thread. |
def _create_header(info, format, encoding, errors):
    """Return a header block. info is a dictionary with file
    information, format must be one of the *_FORMAT constants.
    """
    # Fields in on-disk (ustar) order; stn() packs strings, itn() packs
    # numbers into the fixed-width octal/binary fields.
    parts = [
        stn(info.get("name", ""), 100, encoding, errors),
        itn(info.get("mode", 0) & 0o7777, 8, format),
        itn(info.get("uid", 0), 8, format),
        itn(info.get("gid", 0), 8, format),
        itn(info.get("size", 0), 12, format),
        itn(info.get("mtime", 0), 12, format),
        b"        ", # checksum field
        info.get("type", REGTYPE),
        stn(info.get("linkname", ""), 100, encoding, errors),
        info.get("magic", POSIX_MAGIC),
        stn(info.get("uname", ""), 32, encoding, errors),
        stn(info.get("gname", ""), 32, encoding, errors),
        itn(info.get("devmajor", 0), 8, format),
        itn(info.get("devminor", 0), 8, format),
        stn(info.get("prefix", ""), 155, encoding, errors)
    ]
    buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
    # Checksum is computed over the header with the checksum field blanked
    # (spaces), then written back as 6 octal digits + NUL.
    chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
    # Assuming BLOCKSIZE == 512: -364 is offset 148 (start of the checksum
    # field), -357 is offset 155 (just past the 7 bytes written).
    buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
    return buf | Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants. |
def _render(self, request, template=None, status=200, context={}, headers={}, prefix_template_path=True):
    """
    Render a HTTP response.
    :param request: A django.http.HttpRequest instance.
    :param template: A string describing the path to a template.
    :param status: An integer describing the HTTP status code to respond with.
    :param context: A dictionary describing variables to populate the template with.
    :param headers: A dictionary describing HTTP headers.
    :param prefix_template_path: A boolean describing whether to prefix the template with the view's template path.
    Please note that ``template`` must not specify an extension, as one will be appended
    according to the request format. For example, a value of ``blog/posts/index``
    would populate ``blog/posts/index.html`` for requests that query the resource's
    HTML representation.
    If no template that matches the request format exists at the given location, or if ``template`` is ``None``,
    Respite will attempt to serialize the template context automatically. You can change the way your models
    are serialized by defining ``serialize`` methods that return a dictionary::
    class NuclearMissile(models.Model):
        serial_number = models.IntegerField()
        is_armed = models.BooleanField()
        launch_code = models.IntegerField()
        def serialize(self):
            return {
                'serial_number': self.serial_number,
                'is_armed': self.is_armed
            }
    If the request format is not supported by the view (as determined by the ``supported_formats``
    property or a specific view's ``override_supported_formats`` decorator), this function will
    yield HTTP 406 Not Acceptable.
    """
    # NOTE(review): mutable default arguments ``context={}`` and
    # ``headers={}`` are shared between calls; mutation by any caller would
    # leak state. Safer would be None sentinels — left as-is to preserve
    # behavior.
    format = self._get_format(request)
    # Render 406 Not Acceptable if the requested format isn't supported.
    if not format:
        return HttpResponse(status=406)
    if template:
        if prefix_template_path:
            template_path = '%s.%s' % (self.template_path + template, format.extension)
        else:
            template_path = '%s.%s' % (template, format.extension)
        try:
            response = render(
                request = request,
                template_name = template_path,
                dictionary = context,
                status = status,
                content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET)
            )
        except TemplateDoesNotExist:
            # No template for this format — fall back to automatic
            # serialization of the context.
            try:
                response = HttpResponse(
                    content = serializers.find(format)(context).serialize(request),
                    content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET),
                    status = status
                )
            except serializers.UnknownSerializer:
                raise self.Error(
                    'No template exists at %(template_path)s, and no serializer found for %(format)s' % {
                        'template_path': template_path,
                        'format': format
                    }
                )
    else:
        # No template given at all: serialize the context directly.
        response = HttpResponse(
            content = serializers.find(format)(context).serialize(request),
            content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET),
            status = status
        )
    # Apply caller-supplied HTTP headers last so they win.
    for header, value in headers.items():
        response[header] = value
    return response | Render a HTTP response.
:param request: A django.http.HttpRequest instance.
:param template: A string describing the path to a template.
:param status: An integer describing the HTTP status code to respond with.
:param context: A dictionary describing variables to populate the template with.
:param headers: A dictionary describing HTTP headers.
:param prefix_template_path: A boolean describing whether to prefix the template with the view's template path.
Please note that ``template`` must not specify an extension, as one will be appended
according to the request format. For example, a value of ``blog/posts/index``
would populate ``blog/posts/index.html`` for requests that query the resource's
HTML representation.
If no template that matches the request format exists at the given location, or if ``template`` is ``None``,
Respite will attempt to serialize the template context automatically. You can change the way your models
are serialized by defining ``serialize`` methods that return a dictionary::
class NuclearMissile(models.Model):
serial_number = models.IntegerField()
is_armed = models.BooleanField()
launch_code = models.IntegerField()
def serialize(self):
return {
'serial_number': self.serial_number,
'is_armed': self.is_armed
}
If the request format is not supported by the view (as determined by the ``supported_formats``
property or a specific view's ``override_supported_formats`` decorator), this function will
yield HTTP 406 Not Acceptable. |
def rename(self, new_name, **kwargs):
    """Rename this collection.
    If operating in auth mode, client must be authorized as an
    admin to perform this operation. Raises :class:`TypeError` if
    `new_name` is not an instance of :class:`basestring`
    (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
    if `new_name` is not a valid collection name.
    :Parameters:
      - `new_name`: new name for this collection
      - `**kwargs` (optional): additional arguments to the rename command
        may be passed as keyword arguments to this helper method
        (i.e. ``dropTarget=True``)
    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.
    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    # Validate the new name before touching the server.
    if not isinstance(new_name, string_type):
        raise TypeError("new_name must be an "
                        "instance of %s" % (string_type.__name__,))
    if not new_name or ".." in new_name:
        raise InvalidName("collection names cannot be empty")
    if new_name[0] == "." or new_name[-1] == ".":
        # NOTE(review): "collecion" is a typo in this user-facing message;
        # correcting it changes runtime output, so it is only flagged here.
        raise InvalidName("collecion names must not start or end with '.'")
    if "$" in new_name and not new_name.startswith("oplog.$main"):
        raise InvalidName("collection names must not contain '$'")
    # renameCollection wants the fully-qualified "db.collection" target.
    new_name = "%s.%s" % (self.__database.name, new_name)
    cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
    with self._socket_for_writes() as sock_info:
        # Wire version 5 == MongoDB 3.4: server accepts writeConcern here.
        if sock_info.max_wire_version >= 5 and self.write_concern:
            cmd['writeConcern'] = self.write_concern.document
        cmd.update(kwargs)
        # renameCollection must run against the admin database.
        sock_info.command('admin', cmd, parse_write_concern_error=True) | Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. |
def get_idx_by_name(self, name):
    # type: (str) -> Optional[int]
    """ get_idx_by_name returns the index of a matching registered header
    This implementation will prefer returning a static entry index whenever
    possible. If multiple matching header name are found in the static
    table, there is assurance that the first entry (lowest index number)
    will be returned.
    If no matching header is found, this method returns None.
    """
    # HPACK header names are case-insensitive; normalize to lowercase.
    name = name.lower()
    # Static table first (lowest indices win by construction).
    for key, val in six.iteritems(type(self)._static_entries):
        if val.name() == name:
            return key
    # Dynamic table indices start right after the last static index.
    for idx, val in enumerate(self._dynamic_table):
        if val.name() == name:
            return type(self)._static_entries_last_idx + idx + 1
    return None | get_idx_by_name returns the index of a matching registered header
This implementation will prefer returning a static entry index whenever
possible. If multiple matching header name are found in the static
table, there is assurance that the first entry (lowest index number)
will be returned.
If no matching header is found, this method returns None. |
def get_hub():
    """Return the instance of the hub."""
    # EAFP: the common case is that the hub already exists on _local.
    try:
        hub = _local.hub
    except AttributeError:
        # The Hub can only be instantiated from the root fiber. No other fibers
        # can run until the Hub is there, so the root will always be the first
        # one to call get_hub().
        assert fibers.current().parent is None
        hub = _local.hub = Hub()
    return hub | Return the instance of the hub. |
def check_title_match(expected_title, pa11y_results, logger):
    """
    Check if Scrapy reports any issue with the HTML <title> element.
    If so, compare that <title> element to the title that we got in the
    A11yItem. If they don't match, something is screwy, and pa11y isn't
    parsing the page that we expect.
    """
    if not pa11y_results:
        # no output from pa11y, nothing to check.
        return
    # Only look at errors whose HTML context is the <title> element.
    title_errs = [err for err in pa11y_results
                  if err["context"].startswith("<title")]
    for err in title_errs:
        title_elmt = html.fragment_fromstring(err["context"])
        # pa11ycrawler will elide the title, so grab whatever true
        # content we can from the output
        elided_title = title_elmt.text.strip()
        if elided_title.endswith("..."):
            # NOTE(review): [0:-4] removes 4 chars though "..." is 3 —
            # apparently to also drop a preceding space; the extra .strip()
            # covers that anyway, and if no space existed a real character
            # is lost. Harmless for the substring test below, but verify.
            pa11y_title = elided_title[0:-4].strip()
        else:
            pa11y_title = elided_title
        # check that they match -- the elided version should be a substring
        # of the full version
        if pa11y_title not in expected_title:
            # whoa, something's screwy!
            msg = (
                u'Parser mismatch! '
                u'Scrapy saw full title "{scrapy_title}", '
                u'Pa11y saw elided title "{elided_title}".'
            ).format(
                scrapy_title=expected_title, elided_title=elided_title,
            )
            logger.error(msg) | Check if Scrapy reports any issue with the HTML <title> element.
If so, compare that <title> element to the title that we got in the
A11yItem. If they don't match, something is screwy, and pa11y isn't
parsing the page that we expect. |
def get_pids_in_revision_chain(client, did):
    """Args: client: d1_client.cnclient.CoordinatingNodeClient or
    d1_client.mnclient.MemberNodeClient.
    did : str
      SID or a PID of any object in a revision chain.
    Returns:
      list of str:
        All PIDs in the chain. The returned list is in the same order as the chain. The
        initial PID is typically obtained by resolving a SID. If the given PID is not in
        a chain, a list containing the single object is returned.
    """
    # Shorthand accessors for required/optional sysmeta values.
    def _req(p):
        return d1_common.xml.get_req_val(p)
    def _opt(p, a):
        return d1_common.xml.get_opt_val(p, a)
    sysmeta_pyxb = client.getSystemMetadata(did)
    # Walk to tail
    # Follow 'obsoletes' links backwards until the oldest revision.
    while _opt(sysmeta_pyxb, 'obsoletes'):
        sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletes'))
    chain_pid_list = [_req(sysmeta_pyxb.identifier)]
    # Walk from tail to head, recording traversed PIDs
    while _opt(sysmeta_pyxb, 'obsoletedBy'):
        sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletedBy'))
        chain_pid_list.append(_req(sysmeta_pyxb.identifier))
    return chain_pid_list | Args: client: d1_client.cnclient.CoordinatingNodeClient or
d1_client.mnclient.MemberNodeClient.
did : str
SID or a PID of any object in a revision chain.
Returns:
list of str:
All PIDs in the chain. The returned list is in the same order as the chain. The
initial PID is typically obtained by resolving a SID. If the given PID is not in
a chain, a list containing the single object is returned. |
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts relevant install history entries.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      top_level (dict[str, object]): plist top-level key.
    """
    for entry in top_level:
        datetime_value = entry.get('date', None)
        package_identifiers = entry.get('packageIdentifiers', [])
        # Entries without a date or without packages carry no useful event.
        if not datetime_value or not package_identifiers:
            continue
        # Placeholder defaults keep the description well-formed when plist
        # keys are missing.
        display_name = entry.get('displayName', '<UNKNOWN>')
        display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
        process_name = entry.get('processName', '<PROCESS_NAME>')
        package_identifiers = ', '.join(package_identifiers)
        event_data = plist_event.PlistTimeEventData()
        event_data.desc = (
            'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
            '{3:s}.').format(
                display_name, display_version, process_name, package_identifiers)
        event_data.key = ''
        event_data.root = '/item'
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts relevant install history entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (dict[str, object]): plist top-level key. |
def get_wake_on_modem():
    '''
    Displays whether 'wake on modem' is on or off if supported
    :return: True if 'wake on modem' is enabled, otherwise False
    :rtype: bool
    CLI Example:
    .. code-block:: bash
        salt '*' power.get_wake_on_modem
    '''
    ret = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getwakeonmodem')
    # validate_enabled normalizes the parsed value to 'on'/'off'; comparing
    # against 'on' yields the boolean result.
    return salt.utils.mac_utils.validate_enabled(
        salt.utils.mac_utils.parse_return(ret)) == 'on' | Displays whether 'wake on modem' is on or off if supported
:return: True if "wake on modem" is enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.get_wake_on_modem |
def show_csrs():
    '''
    Show certificate requests for this API key
    CLI Example:
    .. code-block:: bash
        salt-run digicert.show_csrs
    '''
    data = salt.utils.http.query(
        '{0}/certificaterequests'.format(_base_url()),
        status=True,
        decode=True,
        decode_type='json',
        header_dict={
            'tppl-api-key': _api_key(),
        },
    )
    status = data['status']
    # Any 4xx/5xx HTTP status is treated as an API error.
    if six.text_type(status).startswith('4') or six.text_type(status).startswith('5'):
        raise CommandExecutionError(
            'There was an API error: {0}'.format(data['error'])
        )
    return data.get('dict', {}) | Show certificate requests for this API key
CLI Example:
.. code-block:: bash
salt-run digicert.show_csrs |
def check_instance(function):
    """ Wrapper that tests the type of _session.
    Purpose: This decorator function is used by all functions within
           | the Jaide class that interact with a device to ensure the
           | proper session type is in use. If it is not, it will
           | attempt to migrate _session to that type before moving
           | to the originally requested function.
           | > **NOTE:** This function is a decorator, and should not be
           | > used directly. All other methods in this class that touch
           | > the Junos device are wrapped by this function to ensure the
           | > proper connection type is used.
    @param function: the function that is being wrapped around
    @type function: function
    @returns: the originally requested function
    @rtype: function
    """
    def wrapper(self, *args, **kwargs):
        # Maps each wrapped method to the _session type it requires.
        func_trans = {
            "commit": manager.Manager,
            "compare_config": manager.Manager,
            "commit_check": manager.Manager,
            "device_info": manager.Manager,
            "diff_config": manager.Manager,
            "health_check": manager.Manager,
            "interface_errors": manager.Manager,
            "op_cmd": paramiko.client.SSHClient,
            "shell_cmd": paramiko.client.SSHClient,
            "scp_pull": paramiko.client.SSHClient,
            "scp_push": paramiko.client.SSHClient
        }
        # when doing an operational command, logging in as root
        # brings you to shell, so we need to enter the device as a shell
        # connection, and move to cli to perform the command
        # this is a one-off because the isinstance() check will be bypassed
        if self.username == "root" and function.__name__ == "op_cmd":
            if not self._session:
                self.conn_type = "paramiko"
                self.connect()
            if not self._shell:
                self.conn_type = "root"
                self.connect()
            self.shell_to_cli()  # check if we're in the cli
        # Have to call shell command separately, since we are using _shell
        # for comparison, not _session.
        elif function.__name__ == 'shell_cmd':
            if not self._shell:
                self.conn_type = "shell"
                self.connect()
            self.cli_to_shell()  # check if we're in shell.
        if isinstance(self._session, func_trans[function.__name__]):
            # If they're doing SCP, we have to check for both _session and
            # _scp
            if function.__name__ in ['scp_pull', 'scp_push']:
                if not isinstance(self._scp, SCPClient):
                    self.conn_type = "scp"
                    self.connect()
        else:
            # Wrong session type — tear down and reconnect with the type
            # required by the requested method.
            self.disconnect()
            if function.__name__ == "op_cmd":
                self.conn_type = "paramiko"
            elif function.__name__ in ["scp_pull", "scp_push"]:
                self.conn_type = "scp"
            else:
                self.conn_type = "ncclient"
            self.connect()
        return function(self, *args, **kwargs)
    return wrapper | Wrapper that tests the type of _session.
Purpose: This decorator function is used by all functions within
| the Jaide class that interact with a device to ensure the
| proper session type is in use. If it is not, it will
| attempt to migrate _session to that type before moving
| to the originally requested function.
| > **NOTE:** This function is a decorator, and should not be
| > used directly. All other methods in this class that touch
| > the Junos device are wrapped by this function to ensure the
| > proper connection type is used.
@param function: the function that is being wrapped around
@type function: function
@returns: the originally requested function
@rtype: function |
def _append_callback_id(ids, obj, callback_id):
    '''
    Helper function adding a callback ID to the IDs dict.
    The callback ids dict maps an object to event callback ids.
    :param ids: dict of callback IDs to update
    :param obj: one of the keys of REGISTER_FUNCTIONS
    :param callback_id: the result of _register_callback
    '''
    # Lazily create the per-object list (equivalent to dict.setdefault).
    if obj not in ids:
        ids[obj] = []
    ids[obj].append(callback_id) | Helper function adding a callback ID to the IDs dict.
The callback ids dict maps an object to event callback ids.
:param ids: dict of callback IDs to update
:param obj: one of the keys of REGISTER_FUNCTIONS
:param callback_id: the result of _register_callback |
def requires_submit(func):
    """
    Decorator to ensure that a submit has been performed before
    calling the method.
    Args:
        func (callable): test function to be decorated.
    Returns:
        callable: the decorated function.
    """
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        # _future is only populated by .submit(); None means not submitted.
        if self._future is None:
            raise JobError("Job not submitted yet!. You have to .submit() first!")
        return func(self, *args, **kwargs)
    return _wrapper | Decorator to ensure that a submit has been performed before
calling the method.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function. |
def make_polynomial(degree=3, n_samples=100, bias=0.0, noise=0.0,
                    return_coefs=False, random_state=None):
    """
    Generate a noisy polynomial for a regression problem
    Examples
    --------
    >>> X, y, coefs = make_polynomial(degree=3, n_samples=200, noise=.5,
    ...                               return_coefs=True, random_state=1)
    """
    generator = check_random_state(random_state)
    # TODO: Add arguments to support other priors
    # Coefficients are drawn from a standard normal; one per power 0..degree.
    coefs = generator.randn(degree + 1)
    pows = np.arange(degree + 1)
    # Evaluate sum_i coefs[i] * x**i for a scalar x; vectorize over samples.
    poly = np.vectorize(lambda x: np.sum(coefs * x ** pows))
    X, y = make_regression(poly, n_samples=n_samples, bias=bias, noise=noise,
                           random_state=random_state)
    if return_coefs:
        return X, y, coefs
    return X, y | Generate a noisy polynomial for a regression problem
Examples
--------
>>> X, y, coefs = make_polynomial(degree=3, n_samples=200, noise=.5,
... return_coefs=True, random_state=1) |
def add_method_model(self,
                     func,  # type: Callable
                     name=None,  # type: Optional[str]
                     description=None,  # type: Optional[str]
                     owner=None,  # type: object
                     ):
    # type: (...) -> MethodModel
    """Register a function to be added to the block"""
    # Default the field name to the function's own name.
    if name is None:
        name = func.__name__
    # Build the model from the callable's signature and docstring/description.
    method = MethodModel.from_callable(func, description)
    self._add_field(owner, name, method, func)
    return method | Register a function to be added to the block |
def _filter_vcf(out_file):
    """Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference.
    """
    # The original output is preserved as "<name>-ori.vcf"; the fixed file
    # is rewritten to out_file.
    in_file = out_file.replace(".vcf", "-ori.vcf")
    FILTER_line = ('##FILTER=<ID=SBIAS,Description="Due to bias">\n'
                   '##FILTER=<ID=5BP,Description="Due to 5BP">\n'
                   '##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n')
    SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="somatic event">\n'
    if not utils.file_exists(in_file):
        shutil.move(out_file, in_file)
    with file_transaction(out_file) as tx_out_file:
        with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
            for line in in_handle:
                # NOTE(review): normal_name/tumor_name are only assigned when
                # the corresponding header lines appear before #CHROM; a VCF
                # missing them would raise NameError — verify upstream
                # guarantees these headers.
                if line.startswith("##normalSample="):
                    normal_name = line.strip().split("=")[1]
                if line.startswith("##patient_id="):
                    tumor_name = line.strip().split("=")[1]
                if line.startswith("#CHROM"):
                    line = line.replace("Normal", normal_name)
                    line = line.replace("Tumour", tumor_name)
                # Rename the FS INFO field to RNT to avoid clashing with the
                # standard FS (FisherStrand) annotation.
                if line.startswith("##INFO=<ID=FS"):
                    line = line.replace("ID=FS", "ID=RNT")
                if line.find("FS=") > -1:
                    line = line.replace("FS=", "RNT=")
                # Collapse numbered 5BP filters (5BP1, 5BP12, ...) into 5BP.
                if "5BP" in line:
                    line = sub("5BP[0-9]+", "5BP", line)
                # Anything that is not PASS+SOMATIC is downgraded to REJECT.
                if line.find("PASS") == -1:
                    line = _set_reject(line)
                if line.find("PASS") > - 1 and line.find("SOMATIC") == -1:
                    line = _set_reject(line)
                if not _has_ambiguous_ref_allele(line):
                    out_handle.write(line)
                # Inject the new FILTER/INFO header definitions exactly once,
                # right after the first matching header line.
                if line.startswith("##FILTER") and FILTER_line:
                    out_handle.write("%s" % FILTER_line)
                    FILTER_line = ""
                if line.startswith("##INFO") and SOMATIC_line:
                    out_handle.write("%s" % SOMATIC_line)
                    SOMATIC_line = ""
    return out_file | Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference. |
def check_parameter_similarity(files_dict):
    """
    Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input.
    """
    # Python-2-only code (dict.itervalues). The keys of the first file act
    # as the reference; in py2 .keys() returns lists, so this comparison is
    # order-sensitive as well as content-sensitive.
    try:
        parameter_names = files_dict.itervalues().next().keys()  # get the parameter names of the first file, to check if these are the same in the other files
    except AttributeError:  # if there is no parameter at all
        if any(i is not None for i in files_dict.itervalues()):  # check if there is also no parameter for the other files
            return False
        else:
            return True
    if any(parameter_names != i.keys() for i in files_dict.itervalues()):
        return False
    return True | Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input. |
def get_sitetree(self, alias):
    """Gets site tree items from the given site tree.
    Caches result to dictionary.
    Returns (tree alias, tree items) tuple.
    :param str|unicode alias:
    :rtype: tuple
    """
    cache_ = self.cache
    get_cache_entry = cache_.get_entry
    set_cache_entry = cache_.set_entry
    caching_required = False
    if not self.current_app_is_admin():
        # We do not need i18n for a tree rendered in Admin dropdown.
        alias = self.resolve_tree_i18n_alias(alias)
    sitetree = get_cache_entry('sitetrees', alias)
    if not sitetree:
        if DYNAMIC_ONLY:
            sitetree = []
        else:
            # Fetch items with their relations and permissions in as few
            # queries as possible, ordered parent-first.
            sitetree = (
                MODEL_TREE_ITEM_CLASS.objects.
                select_related('parent', 'tree').
                prefetch_related('access_permissions__content_type').
                filter(tree__alias__exact=alias).
                order_by('parent__sort_order', 'sort_order'))
        sitetree = self.attach_dynamic_tree_items(alias, sitetree)
        set_cache_entry('sitetrees', alias, sitetree)
        caching_required = True
    parents = get_cache_entry('parents', alias)
    if not parents:
        # Build a parent -> [children] index for fast traversal.
        parents = defaultdict(list)
        for item in sitetree:
            parent = getattr(item, 'parent')
            parents[parent].append(item)
        set_cache_entry('parents', alias, parents)
    # Prepare items by ids cache if needed.
    if caching_required:
        # We need this extra pass to avoid future problems on items depth calculation.
        cache_update = cache_.update_entry_value
        for item in sitetree:
            cache_update('items_by_ids', alias, {item.id: item})
    url = self.url
    calculate_item_depth = self.calculate_item_depth
    for item in sitetree:
        if caching_required:
            item.has_children = False
        if not hasattr(item, 'depth'):
            item.depth = calculate_item_depth(alias, item.id)
        item.depth_range = range(item.depth)
        # Resolve item permissions.
        if item.access_restricted:
            # Dynamic items carry permissions directly; DB items via m2m.
            permissions_src = (
                item.permissions if getattr(item, 'is_dynamic', False)
                else item.access_permissions.all())
            item.perms = set(
                ['%s.%s' % (perm.content_type.app_label, perm.codename) for perm in permissions_src])
        # Contextual properties.
        item.url_resolved = url(item)
        # Titles containing template tags are resolved lazily per render.
        item.title_resolved = LazyTitle(item.title) if VARIABLE_TAG_START in item.title else item.title
        item.is_current = False
        item.in_current_branch = False
    # Get current item for the given sitetree.
    self.get_tree_current_item(alias)
    # Save sitetree data into cache if needed.
    if caching_required:
        cache_.save()
    return alias, sitetree | Gets site tree items from the given site tree.
Caches result to dictionary.
Returns (tree alias, tree items) tuple.
:param str|unicode alias:
:rtype: tuple |
def isExpandKeyEvent(self, keyEvent):
    """Return a truthy value when *keyEvent* should expand the
    rectangular selection.

    Expansion requires Shift+Alt held together with one of the
    cursor-movement keys (arrows, Page Up/Down, Home, End).
    """
    movement_keys = (Qt.Key_Left, Qt.Key_Right, Qt.Key_Down, Qt.Key_Up,
                     Qt.Key_PageUp, Qt.Key_PageDown,
                     Qt.Key_Home, Qt.Key_End)
    modifiers = keyEvent.modifiers()
    return (modifiers & Qt.ShiftModifier and
            modifiers & Qt.AltModifier and
            keyEvent.key() in movement_keys)
def imshow(x, y, z, ax, **kwargs):
    """
    Image plot of 2d DataArray using matplotlib.pyplot

    Wraps :func:`matplotlib:matplotlib.pyplot.imshow`

    While other plot methods require the DataArray to be strictly
    two-dimensional, ``imshow`` also accepts a 3D array where some
    dimension can be interpreted as RGB or RGBA color channels and
    allows this dimension to be specified via the kwarg ``rgb=``.

    Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA
    data, by applying a single scaling factor and offset to all bands.
    Passing ``robust=True`` infers ``vmin`` and ``vmax``
    :ref:`in the usual way <robust-plotting>`.

    .. note::
        This function needs uniformly spaced coordinates to
        properly label the axes. Call DataArray.plot() to check.

    The pixels are centered on the coordinate values, i.e. a pixel whose
    coordinate value is 3.2 will be centered on 3.2.
    """
    if x.ndim != 1 or y.ndim != 1:
        raise ValueError('imshow requires 1D coordinates, try using '
                         'pcolormesh or contour(f)')

    def _half_step(coord):
        # Half the (assumed uniform) spacing between adjacent coordinate
        # values; used to place pixel edges so pixels are centered on the
        # coordinates.
        try:
            return (coord[1] - coord[0]) / 2.0
        except IndexError:
            # Single-element coordinate: arbitrary default, similar to
            # matplotlib behaviour.
            return .1

    dx = _half_step(x)
    dy = _half_step(y)
    left, right = x[0] - dx, x[-1] + dx
    bottom, top = y[-1] + dy, y[0] - dy

    params = {'origin': 'upper',
              'interpolation': 'nearest'}
    if not hasattr(ax, 'projection'):
        # not for cartopy geoaxes
        params['aspect'] = 'auto'
    # Caller-supplied kwargs win over the defaults above.
    params.update(kwargs)

    # The extent's vertical order depends on where the image origin sits.
    if params['origin'] == 'upper':
        params['extent'] = [left, right, bottom, top]
    else:
        params['extent'] = [left, right, top, bottom]

    if z.ndim == 3:
        # matplotlib imshow uses black for missing data, but Xarray makes
        # missing data transparent. We therefore add an alpha channel if
        # there isn't one, and set it to transparent where data is masked.
        if z.shape[-1] == 3:
            alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)
            if np.issubdtype(z.dtype, np.integer):
                alpha *= 255
            z = np.ma.concatenate((z, alpha), axis=2)
        else:
            z = z.copy()
            z[np.any(z.mask, axis=-1), -1] = 0

    return ax.imshow(z, **params)
Wraps :func:`matplotlib:matplotlib.pyplot.imshow`
While other plot methods require the DataArray to be strictly
two-dimensional, ``imshow`` also accepts a 3D array where some
dimension can be interpreted as RGB or RGBA color channels and
allows this dimension to be specified via the kwarg ``rgb=``.
Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA
data, by applying a single scaling factor and offset to all bands.
Passing ``robust=True`` infers ``vmin`` and ``vmax``
:ref:`in the usual way <robust-plotting>`.
.. note::
This function needs uniformly spaced coordinates to
properly label the axes. Call DataArray.plot() to check.
The pixels are centered on the coordinate values. I.e., if the coordinate
value is 3.2 then the pixels for those coordinates will be centered on 3.2. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.