code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def gen_centers(self):
    """Set the centers of the Gaussian basis functions so they are
    spaced evenly throughout run time.

    Sets ``self.c`` to an array of ``self.bfs`` center values computed as
    ``-log(x)`` for evenly spaced points ``x``, since the canonical system
    state decays as ``x = exp(-c)``.
    """
    # Desired spacings along x: evenly spaced between `first` and `last`.
    # With very few basis functions the generic [0.2, 0.8] range places
    # the centers poorly, so the range is adjusted for bfs == 2 or 3.
    if self.bfs == 2:
        first, last = 0.5, 0.8
    elif self.bfs == 3:
        first, last = 0.4, 0.8
    else:
        first, last = 0.2, 0.8
    des_c = np.linspace(first, last, self.bfs)
    # x = exp(-c); solving for c gives c = -log(x). Vectorized form
    # replaces the old element-by-element loop (same values, same dtype).
    self.c = -np.log(des_c)
def unregister_transformer(self, transformer):
    """Remove a previously registered transformer instance, if present."""
    try:
        self._transformers.remove(transformer)
    except ValueError:
        # Not registered; nothing to do (matches the old membership check).
        pass
def get_instance(self, payload):
    """
    Build an instance of WorkerInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
    """
    workspace_sid = self._solution['workspace_sid']
    return WorkerInstance(self._version, payload, workspace_sid=workspace_sid)
def assess_content(member, file_filter):
    '''Determine if the filter wants the file to be read for content.
    In the case of yes, we would then want to add the content to the
    hash and not the file object.
    '''
    # Archive member names carry a leading "."; drop only that first dot.
    member_path = member.name.replace('.', '', 1)
    if not member_path:
        return False
    # An explicit skip entry always wins.
    if member_path in file_filter.get('skip_files', ()):
        return False
    # Content is hashed only when the path is explicitly listed.
    return member_path in file_filter.get('assess_content', ())
def _validate_isvalid_quantity(self, isvalid_quantity, field, value):
    """Checks for valid given value and appropriate units.

    Args:
        isvalid_quantity (`bool`): flag from schema indicating quantity to be checked.
        field (`str`): property associated with quantity in question.
        value (`list`): list whose first element is a string representing a value with units

    The rule's arguments are validated against this schema:
        {'isvalid_quantity': {'type': 'bool'}, 'field': {'type': 'str'},
        'value': {'type': 'list'}}
    """
    # Parse the "value units" string (e.g. "300 kelvin") into a pint quantity.
    quantity = Q_(value[0])
    # Zero expressed in the units expected for this property; values must exceed it.
    low_lim = 0.0 * units(property_units[field])
    try:
        if quantity <= low_lim:
            self._error(
                field, 'value must be greater than 0.0 {}'.format(property_units[field]),
            )
    except pint.DimensionalityError:
        # The comparison raises when the parsed units are not compatible
        # with the expected units for this property.
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + property_units[field]
                    )
def rsub(self, other, axis="columns", level=None, fill_value=None):
    """Reversed subtraction: subtract this DataFrame from a
    DataFrame/Series/scalar (i.e. computes ``other - self``, per the
    "rsub" operation name forwarded to the binary-op dispatcher).

    Args:
        other: The object to subtract this DataFrame from.
        axis: The axis to apply the subtraction over.
        level: Multilevel index level to subtract over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the subtraction applied.
    """
    return self._binary_op(
        "rsub", other, axis=axis, level=level, fill_value=fill_value
    )
def set_wizard_step_description(self):
    """Set the text for description.

    Picks a template string based on raster-vs-vector layer type and
    continuous-vs-classified layer mode, then interpolates the current
    subcategory, layer purpose, and (vector only) the selected field.
    """
    subcategory = self.parent.step_kw_subcategory.selected_subcategory()
    field = self.parent.step_kw_field.selected_fields()
    is_raster = is_raster_layer(self.parent.layer)
    if is_raster:
        if self.layer_mode == layer_mode_continuous:
            text_label = multiple_continuous_hazard_classifications_raster
        else:
            text_label = multiple_classified_hazard_classifications_raster
        # Raster templates take two substitutions: subcategory and purpose.
        # noinspection PyAugmentAssignment
        text_label = text_label % (
            subcategory['name'], self.layer_purpose['name'])
    else:
        if self.layer_mode == layer_mode_continuous:
            text_label = multiple_continuous_hazard_classifications_vector
        else:
            text_label = multiple_classified_hazard_classifications_vector
        # Vector templates additionally take the selected field.
        # noinspection PyAugmentAssignment
        text_label = text_label % (
            subcategory['name'], self.layer_purpose['name'], field)
    self.multi_classifications_label.setText(text_label)
def create_identity(self, name, attrs=None):
    """ Create an Identity

    :param: name identity name
    :param: attrs list of dict of attributes (zimsoap format); defaults
        to no attributes
    :returns: a zobjects.Identity object
    """
    # BUG FIX: the default was a mutable list (attrs=[]), shared across
    # calls; None is used as the sentinel instead. Callers passing attrs
    # explicitly are unaffected.
    params = {
        'name': name,
        'a': attrs if attrs is not None else []
    }
    resp = self.request('CreateIdentity', {'identity': params})
    return zobjects.Identity.from_dict(resp['identity'])
def _parse_status(self, status):
    """
    Parse the status string found in the table and apply the
    corresponding values.

    Parameters
    ----------
    status: :class:`str`
        The string containing the status.
    """
    if "rented" in status:
        self.status = HouseStatus.RENTED
        return
    # Not rented: try to extract auction information (bid and time left).
    match = list_auction_regex.search(status)
    if match:
        self.highest_bid = int(match.group('bid'))
        amount = int(match.group("time_left"))
        if match.group("time_unit") == "day":
            self.time_left = datetime.timedelta(days=amount)
        else:
            self.time_left = datetime.timedelta(hours=amount)
    # Auctioned regardless of whether the regex matched (as before).
    self.status = HouseStatus.AUCTIONED
def verify_weave_options(opt, parser):
    """Parses the CLI options, verifies that they are consistent and
    reasonable, and acts on them if they are

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes
    parser : object
        OptionParser instance.
    """
    # PYTHONCOMPILED is initially set in pycbc.__init__
    cache_dir = os.environ['PYTHONCOMPILED']

    # Check whether to use a fixed directory for weave
    if opt.fixed_weave_cache:
        if os.environ.get("FIXED_WEAVE_CACHE", None):
            cache_dir = os.environ["FIXED_WEAVE_CACHE"]
        elif getattr(sys, 'frozen', False):
            # Frozen (PyInstaller-style) executable: use its unpack dir.
            cache_dir = sys._MEIPASS
        else:
            cache_dir = os.path.join(os.getcwd(), "pycbc_inspiral")
        os.environ['PYTHONCOMPILED'] = cache_dir
        logging.debug("fixed_weave_cache: Setting weave cache to %s", cache_dir)
        sys.path = [cache_dir] + sys.path
        try:
            os.makedirs(cache_dir)
        except OSError:
            # Directory may already exist; that is fine.
            pass
        if not os.environ.get("LAL_DATA_PATH", None):
            os.environ['LAL_DATA_PATH'] = cache_dir

    # Check whether to use a private directory for weave
    if opt.per_process_weave_cache:
        cache_dir = os.path.join(cache_dir, str(os.getpid()))
        os.environ['PYTHONCOMPILED'] = cache_dir
        logging.info("Setting weave cache to %s", cache_dir)

    if not os.path.exists(cache_dir):
        try:
            os.makedirs(cache_dir)
        except OSError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only filesystem failures
            # should trigger the error exit.
            logging.error("Unable to create weave cache %s", cache_dir)
            sys.exit(1)

    if opt.clear_weave_cache_at_start:
        _clear_weave_cache()
        os.makedirs(cache_dir)

    if opt.clear_weave_cache_at_end:
        atexit.register(_clear_weave_cache)
        signal.signal(signal.SIGTERM, _clear_weave_cache)
def read(self):
    """Read a line of data from the input source at a time.

    Returns the raw line suffixed with a NUL terminator. Raises
    DataSourceError when the file is exhausted and looping is disabled.
    """
    line = self.trace_file.readline()
    if line == '':
        # End of file: either loop back to the beginning or close the
        # source and signal exhaustion to the caller.
        if self.loop:
            self._reopen_file()
        else:
            self.trace_file.close()
            self.trace_file = None
            raise DataSourceError()
    # NOTE(review): after _reopen_file() the empty `line` still flows into
    # deserialize below — confirm that is intended.
    message = JsonFormatter.deserialize(line)
    timestamp = message.get('timestamp', None)
    if self.realtime and timestamp is not None:
        # Pace playback to match the original capture's timing.
        self._store_timestamp(timestamp)
        self._wait(self.starting_time, self.first_timestamp, timestamp)
    return line + "\x00"
def write_contents(self, filename, contents, directory=None):
    """ write_contents: Write contents to filename in zip
        Args:
            contents: (str) contents of file
            filename: (str) name of file in zip
            directory: (str) directory in zipfile to write file to (optional)
        Returns: path to file in zip
    """
    # Build the in-zip path; a trailing slash on `directory` is tolerated.
    if directory:
        filepath = "{}/{}".format(directory.rstrip("/"), filename)
    else:
        filepath = filename
    self._write_to_zipfile(filepath, contents)
    return filepath
def gcpool(name, start, room, lenout=_default_len_out):
    """
    Return the character value of a kernel variable from the kernel pool.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gcpool_c.html

    :param name: Name of the variable whose value is to be returned.
    :type name: str
    :param start: Which component to start retrieving for name.
    :type start: int
    :param room: The largest number of values to return.
    :type room: int
    :param lenout: The length of the output string.
    :type lenout: int
    :return: Values associated with name, and the SPICE "found" flag.
    :rtype: tuple of (list of str, bool)
    """
    # Convert Python arguments to the ctypes forms the C API expects.
    name = stypes.stringToCharP(name)
    start = ctypes.c_int(start)
    room = ctypes.c_int(room)
    lenout = ctypes.c_int(lenout)
    # n receives the number of values actually returned by CSPICE.
    n = ctypes.c_int()
    cvals = stypes.emptyCharArray(lenout, room)
    found = ctypes.c_int()
    libspice.gcpool_c(name, start, room, lenout, ctypes.byref(n),
                      ctypes.byref(cvals), ctypes.byref(found))
    # Only the first n entries of the output buffer are meaningful.
    return [stypes.toPythonString(x.value) for x in
            cvals[0:n.value]], bool(found.value)
def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1):
    """Return the ratio of the number of drum notes that lie on the drum
    pattern (i.e., at certain time steps) to the total number of drum
    notes.

    Args:
        pianoroll: 2D array of shape (time, pitch).
        beat_resolution: time steps per beat; one of 4, 6, 8, 9, 12, 16,
            18, 24.
        tolerance: weight given to near-pattern time steps.
    """
    if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):
        # BUG FIX: the message previously listed "42" instead of 24,
        # contradicting the values actually accepted above.
        raise ValueError("Unsupported beat resolution. Only 4, 6, 8, 9, 12, "
                         "16, 18, 24 are supported.")
    _validate_pianoroll(pianoroll)

    def _drum_pattern_mask(res, tol):
        """Return a drum pattern mask with the given tolerance."""
        # Map resolution -> (base pattern per beat-group, repetitions).
        patterns = {
            24: ([1., tol, 0., 0., 0., tol], 4),
            18: ([1., tol, 0., 0., 0., tol], 3),
            16: ([1., tol, 0., tol], 4),
            12: ([1., tol, tol], 4),
            9: ([1., tol, tol], 3),
            8: ([1., tol], 4),
            6: ([1., tol, tol], 2),
            4: ([1., tol], 2),
        }
        base, reps = patterns[res]
        return np.tile(base, reps)

    drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)
    # Weight per-time-step note counts by the pattern mask, then compare
    # with the total number of notes.
    n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))
    return n_in_pattern / np.count_nonzero(pianoroll)
def find_data(folder):
    """Yield every file underneath ``folder`` as a path prefixed with '..'."""
    for dirpath, _dirnames, names in os.walk(folder):
        for name in names:
            yield os.path.join('..', dirpath, name)
def instantiate(self, **extra_args):
    """ Instantiate the model from its configured building blocks """
    # Instantiation order is preserved: input block, then the two backbones.
    return StochasticPolicyModelSeparate(
        self.input_block.instantiate(),
        self.policy_backbone.instantiate(**extra_args),
        self.value_backbone.instantiate(**extra_args),
        extra_args['action_space'],
    )
def invite_by_email(self, email, sender=None, request=None, **kwargs):
    """Creates an inactive user with the information we know and then sends
    an invitation email for that user to complete registration.

    If your project uses email in a different way then you should make sure
    to extend this method, as it only checks the `email` attribute for Users.
    """
    try:
        user = self.user_model.objects.get(email=email)
    except self.user_model.DoesNotExist:
        # TODO break out user creation process
        # BUG FIX: inspect.getargspec was removed in Python 3.11; use
        # inspect.signature to detect whether the manager's create_user
        # accepts a `username` argument.
        create_user = self.user_model.objects.create_user
        if "username" in inspect.signature(create_user).parameters:
            user = self.user_model.objects.create(
                username=self.get_username(),
                email=email,
                password=self.user_model.objects.make_random_password(),
            )
        else:
            user = self.user_model.objects.create(
                email=email, password=self.user_model.objects.make_random_password()
            )
        user.is_active = False
        user.save()
    self.send_invitation(user, sender, **kwargs)
    return user
def in_a_while(days=0, seconds=0, microseconds=0, milliseconds=0,
               minutes=0, hours=0, weeks=0, time_format=TIME_FORMAT):
    """
    Return the time the given interval from now, as a formatted string.

    :param days: Days from now
    :param seconds: Seconds from now
    :param microseconds: Microseconds from now
    :param milliseconds: Milliseconds from now
    :param minutes: Minutes from now
    :param hours: Hours from now
    :param weeks: Weeks from now
    :param time_format: strftime format; falls back to TIME_FORMAT if falsy
    :return: Formatted string
    """
    if not time_format:
        time_format = TIME_FORMAT
    return time_in_a_while(days, seconds, microseconds, milliseconds,
                           minutes, hours, weeks).strftime(time_format)
def update_sdb(self, sdb_id, owner=None, description=None, user_group_permissions=None,
               iam_principal_permissions=None):
    """
    Update a safe deposit box.

    Keyword arguments:
    owner (string) -- AD group that owns the safe deposit box
    description (string) -- Description of the safe deposit box
    user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id
    iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn
                                        and role_id
    """
    # Grab current data so fields not being updated keep their values
    old_data = self.get_sdb_by_id(sdb_id)

    # Assemble information to update
    temp_data = {}
    keys = ('owner', 'description', 'iam_principal_permissions', 'user_group_permissions')
    for k in keys:
        if k in old_data:
            temp_data[k] = old_data[k]

    if owner is not None:
        temp_data["owner"] = owner
    if description is not None:
        temp_data["description"] = description
    if user_group_permissions is not None and len(user_group_permissions) > 0:
        temp_data["user_group_permissions"] = user_group_permissions
    if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
        temp_data["iam_principal_permissions"] = iam_principal_permissions

    # json.dumps produces the same string as the previous
    # json.encoder.JSONEncoder().encode(...) + str(...) combination.
    data = json.dumps(temp_data)
    sdb_resp = put_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id, data=data,
                              headers=self.HEADERS)

    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
def _to_EC_KEY(self):
    """
    Create a new OpenSSL EC_KEY structure initialized to use this curve.

    The structure is automatically garbage collected when the Python object
    is garbage collected.
    """
    key = self._lib.EC_KEY_new_by_curve_name(self._nid)
    # Register EC_KEY_free as the destructor so the underlying C structure
    # is released when the cffi handle is collected.
    return _ffi.gc(key, _lib.EC_KEY_free)
def _display_matches_gnu_readline(self, substitution: str, matches: List[str],
                                  longest_match_length: int) -> None:  # pragma: no cover
    """Prints a match list using GNU readline's rl_display_match_list()

    This exists to print self.display_matches if it has data. Otherwise matches prints.

    :param substitution: the substitution written to the command line
    :param matches: the tab completion matches to display
    :param longest_match_length: longest printed length of the matches
    """
    # Only applicable when the GNU readline backend is loaded.
    if rl_type == RlType.GNU:
        # Check if we should show display_matches
        if self.display_matches:
            matches_to_display = self.display_matches
            # Recalculate longest_match_length for display_matches
            longest_match_length = 0
            for cur_match in matches_to_display:
                # Width must be measured ANSI-escape-safely.
                cur_length = utils.ansi_safe_wcswidth(cur_match)
                if cur_length > longest_match_length:
                    longest_match_length = cur_length
        else:
            matches_to_display = matches
        # Add padding for visual appeal
        matches_to_display, padding_length = self._pad_matches_to_display(matches_to_display)
        longest_match_length += padding_length
        # We will use readline's display function (rl_display_match_list()), so we
        # need to encode our string as bytes to place in a C array.
        encoded_substitution = bytes(substitution, encoding='utf-8')
        encoded_matches = [bytes(cur_match, encoding='utf-8') for cur_match in matches_to_display]
        # rl_display_match_list() expects matches to be in argv format where
        # substitution is the first element, followed by the matches, and then a NULL.
        # noinspection PyCallingNonCallable,PyTypeChecker
        strings_array = (ctypes.c_char_p * (1 + len(encoded_matches) + 1))()
        # Copy in the encoded strings and add a NULL to the end
        strings_array[0] = encoded_substitution
        strings_array[1:-1] = encoded_matches
        strings_array[-1] = None
        # Print the header if one exists
        if self.completion_header:
            sys.stdout.write('\n' + self.completion_header)
        # Call readline's display function
        # rl_display_match_list(strings_array, number of completion matches, longest match length)
        readline_lib.rl_display_match_list(strings_array, len(encoded_matches), longest_match_length)
        # Redraw prompt and input line
        rl_force_redisplay()
def get(self, request, customer_uuid):
    """
    Handle GET request - render linked learners list and "Link learner" form.

    Arguments:
        request (django.http.request.HttpRequest): Request instance
        customer_uuid (str): Enterprise Customer UUID

    Returns:
        django.http.response.HttpResponse: HttpResponse
    """
    # Base template context (learner list, enterprise customer, etc.).
    context = self._build_context(request, customer_uuid)
    # Blank form bound to the customer resolved into the context above.
    manage_learners_form = ManageLearnersForm(
        user=request.user,
        enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER]
    )
    context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
    return render(request, self.template, context)
def get_upstream_fork_point(self):
    """Get the most recent ancestor of HEAD that occurs on an upstream
    branch.

    First looks at the current branch's tracking branch, if applicable. If
    that doesn't work, looks at every other branch to find the most recent
    ancestor of HEAD that occurs on a tracking branch.

    Returns:
        git.Commit object or None
    """
    possible_relatives = []
    try:
        if not self.repo:
            return None
        try:
            active_branch = self.repo.active_branch
        except (TypeError, ValueError):
            logger.debug("git is in a detached head state")
            return None  # detached head
        else:
            tracking_branch = active_branch.tracking_branch()
            if tracking_branch:
                possible_relatives.append(tracking_branch.commit)

        if not possible_relatives:
            # No tracking branch on the active branch; fall back to the
            # tracking branches of every other branch.
            for branch in self.repo.branches:
                tracking_branch = branch.tracking_branch()
                if tracking_branch is not None:
                    possible_relatives.append(tracking_branch.commit)

        head = self.repo.head
        most_recent_ancestor = None
        for possible_relative in possible_relatives:
            # at most one:
            for ancestor in self.repo.merge_base(head, possible_relative):
                if most_recent_ancestor is None:
                    most_recent_ancestor = ancestor
                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                    most_recent_ancestor = ancestor

        return most_recent_ancestor
    except exc.GitCommandError as e:
        logger.debug("git remote upstream fork point could not be found")
        # BUG FIX: exceptions have no `.message` attribute on Python 3;
        # str(e) logs the same information without raising AttributeError.
        logger.debug(str(e))
        return None
def create_presenter(self, request, target_route):
    """ Create presenter from the given requests and target routes

    :param request: client request
    :param target_route: route to use
    :return: WWebPresenter
    """
    name = target_route.presenter_name()
    # Unknown presenter names are a hard error.
    if self.presenter_collection().has(name) is False:
        raise RuntimeError('No such presenter: %s' % name)
    presenter_class = self.presenter_collection().presenter(name)
    factory = self.presenter_factory()
    return factory.instantiate(presenter_class, request, target_route, self)
def update_lambda_configuration(self,
                                lambda_arn,
                                function_name,
                                handler,
                                description='Zappa Deployment',
                                timeout=30,
                                memory_size=512,
                                publish=True,
                                vpc_config=None,
                                runtime='python2.7',
                                aws_environment_variables=None,
                                aws_kms_key_arn=None
                                ):
    """
    Given an existing function ARN, update the configuration variables.

    Returns the function ARN reported by the update call.

    NOTE(review): `lambda_arn` and `publish` are accepted but not used in
    this body — `function_name` drives the update; confirm callers rely on
    the unused parameters elsewhere.
    """
    print("Updating Lambda function configuration..")

    if not vpc_config:
        vpc_config = {}
    if not self.credentials_arn:
        self.get_credentials_arn()
    if not aws_kms_key_arn:
        aws_kms_key_arn = ''
    if not aws_environment_variables:
        aws_environment_variables = {}

    # Check if there are any remote aws lambda env vars so they don't get trashed.
    # https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
    lambda_aws_config = self.lambda_client.get_function_configuration(FunctionName=function_name)
    if "Environment" in lambda_aws_config:
        lambda_aws_environment_variables = lambda_aws_config["Environment"].get("Variables", {})
        # Append keys that are remote but not in settings file
        for key, value in lambda_aws_environment_variables.items():
            if key not in aws_environment_variables:
                aws_environment_variables[key] = value

    response = self.lambda_client.update_function_configuration(
        FunctionName=function_name,
        Runtime=runtime,
        Role=self.credentials_arn,
        Handler=handler,
        Description=description,
        Timeout=timeout,
        MemorySize=memory_size,
        VpcConfig=vpc_config,
        Environment={'Variables': aws_environment_variables},
        KMSKeyArn=aws_kms_key_arn,
        TracingConfig={
            'Mode': 'Active' if self.xray_tracing else 'PassThrough'
        }
    )

    resource_arn = response['FunctionArn']

    # Propagate configured tags onto the function resource.
    if self.tags:
        self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)

    return resource_arn
def cache_values(self, results):
    """Load function outputs into the DebugSession cache.

    ``results`` may be None (side-effect-only functions), a single
    np.ndarray, a list of arrays matching ``self.outputs``, or a numpy
    scalar (only valid when exactly one output is expected).
    """
    if results is None:
        # self.fn was probably only used to compute side effects.
        return
    elif isinstance(results, np.ndarray):
        # fn returns a single np.ndarray; re-format it into a list.
        results = [results]
    # check validity of fn output
    elif isinstance(results, list):
        # BUG FIX: was `len(results) is not len(self.outputs)` — identity
        # comparison of ints is not a reliable equality test.
        if len(results) != len(self.outputs):
            raise ValueError('Number of output tensors does not match number of outputs produced by function')
    elif isinstance(results, np.number):
        if len(self.outputs) != 1:
            raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs)))
        results = [results]
    # assign each element in ndarrays to corresponding output tensor
    for i, ndarray in enumerate(results):
        self.session._cache_value(self.outputs[i], ndarray)
def upload_media(self, filename, progress=None):
    """Upload a file to be hosted on the target BMC

    This will upload the specified data to
    the BMC so that it will make it available to the system as an emulated
    USB device.

    :param filename: The filename to use, the basename of the parameter
                     will be given to the bmc.
    :param progress: Optional callback for progress updates
    """
    # Ensure the OEM-specific handler is initialized before delegating.
    self.oem_init()
    return self._oem.upload_media(filename, progress)
def lfsr_next_one_seed(seed_iter, min_value_shift):
    """High-quality seeding for LFSR generators.

    The LFSR generator components discard a certain number of their lower bits
    when generating each output. The significant bits of their state must not
    all be zero. We must ensure that when seeding the generator.

    In case generators are seeded from an incrementing input (such as a system
    timer), and between increments only the lower bits may change, we would
    also like the lower bits of the input to change the initial state, and not
    just be discarded. So we do basic manipulation of the seed input value to
    ensure that all bits of the seed input affect the initial state.
    """
    try:
        # BUG FIX: was `seed_iter.next()`, the Python 2 iterator protocol;
        # the builtin next() works on both Python 2 and 3.
        seed = next(seed_iter)
    except StopIteration:
        return 0xFFFFFFFF
    else:
        if seed is None:
            return 0xFFFFFFFF
        else:
            seed = int(seed) & 0xFFFFFFFF
            # Mix low bits into high bits so all seed bits matter.
            working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF

            min_value = 1 << min_value_shift
            if working_seed < min_value:
                # Too small: shift the low bits up instead.
                working_seed = (seed << 24) & 0xFFFFFFFF
                if working_seed < min_value:
                    # Still too small: invert so the state is non-zero.
                    working_seed ^= 0xFFFFFFFF
            return working_seed
def _condition_number(self):
"""Condition number of x; ratio of largest to smallest eigenvalue."""
ev = np.linalg.eig(np.matmul(self.xwins.swapaxes(1, 2), self.xwins))[0]
return np.sqrt(ev.max(axis=1) / ev.min(axis=1)) | Condition number of x; ratio of largest to smallest eigenvalue. |
def index_worker_output(self, worker_name, md5, index_name, subfield):
    """ Index worker output with the Indexer.

    Args:
        worker_name: 'strings', 'pe_features', whatever
        md5: the md5 of the sample
        index_name: the name of the index
        subfield: index just this subfield (None for all)

    Returns:
        Nothing
    """
    # Pull the worker output for this sample, optionally narrowing it
    # down to a single subfield.
    output = self.work_request(worker_name, md5)[worker_name]
    data = output[subfield] if subfield else output
    # Okay now index the data
    self.indexer.index_data(data, index_name=index_name, doc_type='unknown')
def _get_orb_lobster(orb):
    """
    Args:
        orb: string representation of orbital, prefixed with a principal
            quantum number character (e.g. "2p_x")

    Returns:
        Orbital, or None (after printing a message) for unknown labels
    """
    orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
                "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz",
                "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"]

    try:
        # orb[1:] strips the leading quantum-number character.
        orbital = Orbital(orb_labs.index(orb[1:]))
        return orbital
    except ValueError:
        # BUG FIX: list.index raises ValueError for an unknown label, not
        # AttributeError, so the previous handler could never fire.
        print("Orb not in list")
def config2(self):
    """Read the second set of configuration variables and return as a dictionary.

    **NOTE: This method is supported by firmware v18+.**

    :rtype: dictionary

    :Example:

    >>> a.config2()
    {
        'AMFanOnIdle': 0,
        'AMIdleIntervalCount': 0,
        'AMMaxDataArraysInFile': 61798,
        'AMSamplingInterval': 1,
        'AMOnlySavePMData': 0,
        'AMLaserOnIdle': 0
    }
    """
    config = []
    data = {}

    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x3D])
    sleep(10e-3)

    # Read the 9 config bytes by clocking out empty bytes, one at a time
    for i in range(9):
        resp = self.cnxn.xfer([0x00])[0]
        config.append(resp)

    # Two-byte fields are little-endian pairs; single bytes are flags.
    data["AMSamplingInterval"] = self._16bit_unsigned(config[0], config[1])
    data["AMIdleIntervalCount"] = self._16bit_unsigned(config[2], config[3])
    data['AMFanOnIdle'] = config[4]
    data['AMLaserOnIdle'] = config[5]
    data['AMMaxDataArraysInFile'] = self._16bit_unsigned(config[6], config[7])
    data['AMOnlySavePMData'] = config[8]

    sleep(0.1)

    return data
def _execute_xmpp(connected_callback):
    """Connects to the XMPP server and executes custom code

    :param connected_callback: function to execute after connecting
    :return: return value of the callback
    """
    from indico_chat.plugin import ChatPlugin
    check_config()
    jid = ChatPlugin.settings.get('bot_jid')
    password = ChatPlugin.settings.get('bot_password')
    # Allow a bare username in the settings; qualify it with the server.
    if '@' not in jid:
        jid = '{}@{}'.format(jid, ChatPlugin.settings.get('server'))

    result = [None, None]  # result, exception
    app = current_app._get_current_object()  # callback runs in another thread

    def _session_start(event):
        # Runs on the XMPP thread once the session is established; the
        # callback result/exception is smuggled back via `result`.
        try:
            with app.app_context():
                result[0] = connected_callback(xmpp)
        except Exception as e:
            result[1] = e
            if isinstance(e, IqError):
                current_plugin.logger.exception('XMPP callback failed: %s', e.condition)
            else:
                current_plugin.logger.exception('XMPP callback failed')
        finally:
            xmpp.disconnect(wait=0)

    xmpp = ClientXMPP(jid, password)
    xmpp.register_plugin('xep_0045')
    xmpp.register_plugin('xep_0004')
    xmpp.register_plugin('xep_0030')
    xmpp.add_event_handler('session_start', _session_start)

    try:
        xmpp.connect()
    except Exception:
        current_plugin.logger.exception('XMPP connection failed')
        xmpp.disconnect()
        raise

    try:
        # Blocks until the session handler disconnects.
        xmpp.process(threaded=False)
    finally:
        xmpp.disconnect(wait=0)

    # Re-raise any exception captured on the XMPP thread.
    if result[1] is not None:
        raise result[1]
    return result[0]
def format_back(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(
number,
backcolor=True,
light=light,
extended=extended
) | Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation. |
def offset(self, offset):
    """
    Move all the intervals in the list by the given ``offset``.
    :param offset: the shift to be applied
    :type offset: :class:`~aeneas.exacttiming.TimeValue`
    :raises TypeError: if ``offset`` is not an instance of ``TimeValue``
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u" Offset %.3f", offset])
    for fragment in self.fragments:
        # Shift each fragment's interval, clamping so the shifted interval
        # never becomes negative and stays inside this list's [begin, end].
        fragment.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done") | Move all the intervals in the list by the given ``offset``.
:param offset: the shift to be applied
:type offset: :class:`~aeneas.exacttiming.TimeValue`
:raises TypeError: if ``offset`` is not an instance of ``TimeValue``
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'container':
#: Only update what's needed
self.proxy.update_points(change)
else:
super(MapPolyline, self)._update_proxy(change) | An observer which sends the state change to the proxy. |
def save(self, filename, compressed=True):
""" Save a tensor to disk. """
# check for data
if not self.has_data:
return False
# read ext and save accordingly
_, file_ext = os.path.splitext(filename)
if compressed:
if file_ext != COMPRESSED_TENSOR_EXT:
raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))
np.savez_compressed(filename,
self.data[:self.cur_index,...])
else:
if file_ext != TENSOR_EXT:
raise ValueError('Can only save tensor with .npy extension')
np.save(filename, self.data[:self.cur_index,...])
return True | Save a tensor to disk. |
def _process(self, word: str) -> List[str]:
    """
    Process a word into a list of strings representing the syllables of the word. This
    method describes rules for consonant grouping behaviors and then iteratively applies those
    rules to the list of letters that comprise the word, until all the letters are grouped into
    appropriate syllable groups.
    :param word: the word to split into syllables
    :return: list of syllable strings
    """
    # if a blank arrives from splitting, just return an empty list
    if len(word.strip()) == 0:
        return []
    word = self.convert_consonantal_i(word)
    # Pad with sentinel spaces so boundary matches behave like interior ones.
    my_word = " " + word + " "
    letters = list(my_word)
    positions = []
    # Fuse diphthongs and 'qu'-type clusters into single "letter" cells so
    # they are never split across syllable boundaries.
    for dipth in self.diphthongs:
        if dipth in my_word:
            dipth_matcher = re.compile("{}".format(dipth))
            matches = dipth_matcher.finditer(my_word)
            for match in matches:
                (start, end) = match.span()
                positions.append(start)
    matches = self.kw_matcher.finditer(my_word)
    for match in matches:
        (start, end) = match.span()
        positions.append(start)
    letters = string_utils.merge_next(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions.clear()
    if not self._contains_vowels("".join(letters)):
        return ["".join(letters).strip()]  # occurs when only 'qu' appears by ellision
    # Each loop below runs to a fixed point (no more matching positions):
    # 1) attach word-initial consonants to the following group,
    positions = self._starting_consonants_only(letters)
    while len(positions) > 0:
        letters = string_utils.move_consonant_right(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._starting_consonants_only(letters)
    # 2) attach word-final consonants to the preceding group,
    positions = self._ending_consonants_only(letters)
    while len(positions) > 0:
        letters = string_utils.move_consonant_left(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._ending_consonants_only(letters)
    # 3) fold lone consonants into a neighbouring group,
    positions = self._find_solo_consonant(letters)
    while len(positions) > 0:
        letters = self._move_consonant(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._find_solo_consonant(letters)
    # 4) distribute consonant clusters between adjacent groups.
    positions = self._find_consonant_cluster(letters)
    while len(positions) > 0:
        letters = self._move_consonant(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._find_consonant_cluster(letters)
    return letters | Process a word into a list of strings representing the syllables of the word. This
method describes rules for consonant grouping behaviors and then iteratively applies those
rules the list of letters that comprise the word, until all the letters are grouped into
appropriate syllable groups.
:param word:
:return:
def detect_keep_boundary(start, end, namespaces):
"""a helper to inspect a link and see if we should keep the link boundary
"""
result_start, result_end = False, False
parent_start = start.getparent()
parent_end = end.getparent()
if parent_start.tag == "{%s}p" % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_start = len(parent_start.getchildren()) > 1
if parent_end.tag == "{%s}p" % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_end = len(parent_end.getchildren()) > 1
return result_start, result_end | a helper to inspect a link and see if we should keep the link boundary |
def get_from(input_file, property_names):
'''
Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples.
'''
# get feature collections
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = [tuple([feat['properties'].get(x)
for x in property_names]) for feat in features]
return values | Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples. |
def itemconfigure(self, iid, rectangle_options, text_options):
"""
Configure options of items drawn on the Canvas
Low-level access to the individual elements of markers and other
items drawn on the timeline Canvas. All modifications are
overwritten when the TimeLine is redrawn.
"""
rectangle_id, text_id = self._markers[iid]["rectangle_id"], self._markers[iid]["text_id"]
if len(rectangle_options) != 0:
self._timeline.itemconfigure(rectangle_id, **rectangle_options)
if len(text_options) != 0:
self._timeline.itemconfigure(text_id, **text_options) | Configure options of items drawn on the Canvas
Low-level access to the individual elements of markers and other
items drawn on the timeline Canvas. All modifications are
overwritten when the TimeLine is redrawn. |
def import_pyfiles(path):
"""
Import all *.py files in specified directory.
"""
n = 0
for pyfile in glob.glob(os.path.join(path, '*.py')):
m = import_file(pyfile)
IMPORTED_BUILD_SOURCES.append(m)
n += 1
return n | Import all *.py files in specified directory. |
def dependencies(self):
    """
    Read the contents of the rpm itself
    :return: an empty list (dependency extraction is not implemented)
    """
    # NOTE(review): the payload is read but never parsed -- `content` is
    # discarded and an empty list is always returned. Presumably a stub
    # for future dependency extraction; confirm before relying on it.
    cpio = self.rpm.gzip_file.read()
    content = cpio.read()
    return [] | Read the contents of the rpm itself
:return:
def set_axis_options(self, row, column, text):
"""Set additionnal options as plain text."""
subplot = self.get_subplot_at(row, column)
subplot.set_axis_options(text) | Set additionnal options as plain text. |
def get_stack_info(self, stack):
    """ Get the template and parameters of the stack currently in AWS
    :param stack: stack description dict as returned by CloudFormation;
        must contain 'StackId', may contain 'Parameters'
    :raises exceptions.StackDoesNotExist: when CloudFormation reports the
        stack is gone
    Returns [ template, parameters ]
    """
    stack_name = stack['StackId']
    try:
        template = self.cloudformation.get_template(
            StackName=stack_name)['TemplateBody']
    except botocore.exceptions.ClientError as e:
        # Only translate "stack does not exist" errors into the project's
        # own exception; re-raise anything else untouched.
        if "does not exist" not in str(e):
            raise
        raise exceptions.StackDoesNotExist(stack_name)
    # params_as_dict presumably flattens the ParameterKey/ParameterValue
    # pairs into a plain dict -- see its definition.
    parameters = self.params_as_dict(stack.get('Parameters', []))
    return [json.dumps(template), parameters] | Get the template and parameters of the stack currently in AWS
Returns [ template, parameters ]
def top(self, sort_by):
"""Get the best results according to your custom sort method."""
sort = sorted(self.results, key=sort_by)
return sort | Get the best results according to your custom sort method. |
def get_slot_nio_bindings(self, slot_number):
    """
    Returns slot NIO bindings.
    :param slot_number: slot number
    :returns: list of NIO bindings
    """
    # Old-style asyncio coroutine: delegate the query to the hypervisor and
    # return its reply (one line per NIO binding on the slot).
    nio_bindings = yield from self._hypervisor.send('vm slot_nio_bindings "{name}" {slot_number}'.format(name=self._name,
                                                                                                         slot_number=slot_number))
    return nio_bindings | Returns slot NIO bindings.
:param slot_number: slot number
:returns: list of NIO bindings
def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams | works very well on 4x4. |
def from_p12_keyfile(cls, service_account_email, filename,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
with open(filename, 'rb') as file_obj:
private_key_pkcs12 = file_obj.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri) | Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library. |
def schedule(self, duration, at=None, delay=None, callback=None):
    """
    schedules the measurement (to execute asynchronously).
    :param duration: how long to run for.
    :param at: the time to start at.
    :param delay: the time to wait til starting (use at or delay).
    :param callback: a callback.
    :return: nothing.
    """
    # Normalise the 'at'/'delay' pair into a single delay in seconds.
    delay = self.calculateDelay(at, delay)
    self.callback = callback
    logger.info('Initiating measurement ' + self.name + ' for ' + str(duration) + 's in ' + str(delay) + 's')
    # Record the SCHEDULED transition before the timer fires so the status
    # history stays ordered.
    self.statuses.append({'name': ScheduledMeasurementStatus.SCHEDULED.name, 'time': datetime.utcnow()})
    # Fire-and-forget: execute(duration) runs on a separate timer thread.
    threading.Timer(delay, self.execute, [duration]).start() | schedules the measurement (to execute asynchronously).
:param duration: how long to run for.
:param at: the time to start at.
:param delay: the time to wait til starting (use at or delay).
:param callback: a callback.
:return: nothing.
def get_rotated(self, angle):
""" Return a vector rotated by angle from the given vector. Angle measured in radians counter-clockwise. """
result = self.copy()
result.rotate(angle)
return result | Return a vector rotated by angle from the given vector. Angle measured in radians counter-clockwise. |
def _do_ffts(detector, stream, Nc):
    """
    Perform ffts on data, detector and denominator boxcar
    :type detector: eqcorrscan.core.subspace.Detector
    :param detector: Detector object for doing detecting
    :type stream: list of obspy.core.stream.Stream
    :param stream: List of streams processed according to detector
    :type Nc: int
    :param Nc: Number of channels in data. 1 for non-multiplexed
    :return: list of time-reversed detector(s) in freq domain
    :rtype: list
    :return: list of squared data stream(s) in freq domain
    :rtype: list
    :return: list of data stream(s) in freq domain
    :return: detector-length boxcar in freq domain
    :rtype: numpy.ndarray
    :return: length of detector
    :rtype: int
    :return: length of data
    :rtype: int
    """
    # Minimum FFT length for a full linear correlation of data and detector;
    # pad up to a fast (composite) length so the transforms run quickly.
    min_fftlen = int(stream[0][0].data.shape[0] +
                     detector.data[0].shape[0] - Nc)
    fftlen = scipy.fftpack.next_fast_len(min_fftlen)
    mplen = stream[0][0].data.shape[0]
    ulen = detector.data[0].shape[0]
    num_st_fd = [np.fft.rfft(tr.data, n=fftlen)
                 for tr in stream[0]]
    # Squared data feed the normalisation denominator.
    denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen)
                   for tr in stream[0]]
    # Frequency domain of boxcar
    w = np.fft.rfft(np.ones(detector.data[0].shape[0]),
                    n=fftlen)
    # This should go into the detector object as in Detex
    # Columns are time-reversed so that frequency-domain multiplication
    # implements correlation rather than convolution.
    detector_fd = []
    for dat_mat in detector.data:
        detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen)
                                     for col in dat_mat.T]))
    return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen | Perform ffts on data, detector and denominator boxcar
:type detector: eqcorrscan.core.subspace.Detector
:param detector: Detector object for doing detecting
:type stream: list of obspy.core.stream.Stream
:param stream: List of streams processed according to detector
:type Nc: int
:param Nc: Number of channels in data. 1 for non-multiplexed
:return: list of time-reversed detector(s) in freq domain
:rtype: list
:return: list of squared data stream(s) in freq domain
:rtype: list
:return: list of data stream(s) in freq domain
:return: detector-length boxcar in freq domain
:rtype: numpy.ndarray
:return: length of detector
:rtype: int
:return: length of data
:rtype: int
def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
    """Load a feather of amino acid counts for a protein.
    Args:
        protein_feather (str): path to feather file
        length_filter_pid (float): optional fraction (e.g. 0.8); keep only
            strains whose total count is above this fraction of K12's
        copynum_scale (bool): if counts should be multiplied by protein copy number
        copynum_df (DataFrame): DataFrame of copy numbers, indexed by protein id
            with a 'copynum' column
    Returns:
        DataFrame: of counts with some aggregated together
    """
    protein_df = pd.read_feather(protein_feather).set_index('index')
    # Combine counts for residue groups
    from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
    # For each aggregate (key suffix), sum the per-residue counts of the listed
    # residues within each listed subsequence prefix.
    aggregators = {
        'aa_count_bulk'    : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],
                              'subseqs' : ['metal_2_5D', 'metal_3D']},
        'aa_count_carb'    : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_chrg'    : {'residues': _aa_property_dict_one['Charged'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',
                                           'surface_3D']},
        'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],
                              'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
        'aa_count_tmstab'  : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],
                              'subseqs' : ['tm_2D', 'tm_3D']},
        'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],
                              'subseqs' : ['tm_2D', 'tm_3D']},
        'aa_count_dis'     : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],
                              'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
                                           'dna_2_5D']},
        'aa_count_ord'     : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],
                              'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
                                           'dna_2_5D']}}
    # Do combination counts for all types of subsequences
    for suffix, info in aggregators.items():
        agg_residues = info['residues']
        for prefix in info['subseqs']:
            to_add_idxes = []
            for agg_res in agg_residues:
                # Only aggregate rows that actually exist for this protein.
                to_add_idx = prefix + '_aa_count_' + agg_res
                if to_add_idx in protein_df.index:
                    to_add_idxes.append(to_add_idx)
            subseq_agged_col = protein_df.loc[to_add_idxes, :].sum()  # Add each residue series
            protein_df.loc[prefix + '_' + suffix] = subseq_agged_col  # Append to df
    ## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length
    ## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length
    if length_filter_pid:
        keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
        protein_df = protein_df[keep_cols]
    # Multiply by proteomics copy number?
    if copynum_scale:
        if not isinstance(copynum_df, pd.DataFrame):
            raise ValueError('Please supply copy numbers')
        # Protein id is recovered from the feather file name ("<id>_protein...").
        protein_id = op.basename(protein_feather).split('_protein')[0]
        if protein_id in copynum_df.index:
            copynum = copynum_df.at[protein_id, 'copynum']
            if copynum > 0:  # TODO: currently keeping one copy of proteins with 0, is that ok?
                protein_df = protein_df * copynum
    return protein_df | Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
def make_plot(
    self, count, plot=None, show=False, plottype='probability',
    bar=dict(alpha=0.15, color='b', linewidth=1.0, edgecolor='b'),
    errorbar=dict(fmt='b.'),
    gaussian=dict(ls='--', c='r')
):
    """ Convert histogram counts in array ``count`` into a plot.
    Args:
        count (array): Array of histogram counts (see
            :meth:`PDFHistogram.count`).
        plot (plotter): :mod:`matplotlib` plotting window. If ``None``
            uses the default window. Default is ``None``.
        show (boolean): Displayes plot if ``True``; otherwise returns
            the plot. Default is ``False``.
        plottype (str): The probabilities in each bin are plotted if
            ``plottype='probability'`` (default). The average probability
            density is plot if ``plottype='density'``. The
            cumulative probability is plotted if ``plottype=cumulative``.
        bar (dictionary): Additional plotting arguments for the bar graph
            showing the histogram. This part of the plot is omitted
            if ``bar=None``.
        errorbar (dictionary): Additional plotting arguments for the
            errorbar graph, showing error bars on the histogram. This
            part of the plot is omitted if ``errorbar=None``.
        gaussian (dictionary): Additional plotting arguments for the
            plot of the Gaussian probability for the |GVar| (``g``)
            specified in the initialization. This part of the plot
            is omitted if ``gaussian=None`` or if no ``g`` was
            specified.
    """
    if numpy.ndim(count) != 1:
        raise ValueError('count must have dimension 1')
    if plot is None:
        import matplotlib.pyplot as plot
    # Counts may include two outer overflow bins; strip and normalise them.
    if len(count) == len(self.midpoints) + 2:
        norm = numpy.sum(count)
        data = numpy.asarray(count[1:-1]) / norm
    elif len(count) != len(self.midpoints):
        raise ValueError(
            'wrong data length: %s != %s'
            % (len(count), len(self.midpoints))
        )
    else:
        data = numpy.asarray(count)
    if plottype == 'cumulative':
        # Cumulative distribution is evaluated on bin edges, so prepend 0.
        data = numpy.cumsum(data)
        data = numpy.array([0.] + data.tolist())
        data_sdev = sdev(data)
        if not numpy.all(data_sdev == 0.0):
            data_mean = mean(data)
            plot.errorbar(self.bins, data_mean, data_sdev, **errorbar)
            if bar is not None:
                plot.fill_between(self.bins, 0, data_mean, **bar)
            # mean, +- 1 sigma lines
            plot.plot([self.bins[0], self.bins[-1]], [0.5, 0.5], 'k:')
            plot.plot([self.bins[0], self.bins[-1]], [0.158655254, 0.158655254], 'k:')
            plot.plot([self.bins[0], self.bins[-1]], [0.841344746, 0.841344746], 'k:')
    else:
        if plottype == 'density':
            # Density = probability per unit bin width.
            data = data / self.widths
        if errorbar is not None:
            data_sdev = sdev(data)
            if not numpy.all(data_sdev == 0.0):
                data_mean = mean(data)
                plot.errorbar(self.midpoints, data_mean, data_sdev, **errorbar)
        if bar is not None:
            plot.bar(self.bins[:-1], mean(data), width=self.widths, align='edge', **bar)
    if gaussian is not None and self.g is not None:
        # spline goes through the errorbar points for gaussian stats
        if plottype == 'cumulative':
            x = numpy.array(self.bins.tolist() + self.midpoints.tolist())
            x.sort()
            dx = (x - self.g.mean) / self.g.sdev
            # Gaussian CDF via the error function.
            y = (erf(dx / 2**0.5) + 1) / 2.
            yspline = cspline.CSpline(x, y)
            plot.ylabel('cumulative probability')
            plot.ylim(0, 1.0)
        elif plottype in ['density', 'probability']:
            # Per-bin Gaussian probability = CDF difference across bin edges.
            x = self.bins
            dx = (x - self.g.mean) / self.g.sdev
            y = (erf(dx / 2**0.5) + 1) / 2.
            x = self.midpoints
            y = (y[1:] - y[:-1])
            if plottype == 'density':
                y /= self.widths
                plot.ylabel('probability density')
            else:
                plot.ylabel('probability')
            yspline = cspline.CSpline(x, y)
        else:
            raise ValueError('unknown plottype: ' + str(plottype))
        # Sample the spline finely enough (>= ~100 points) for a smooth curve.
        if len(x) < 100:
            ny = int(100. / len(x) + 0.5) * len(x)
        else:
            ny = len(x)
        xplot = numpy.linspace(x[0], x[-1], ny)
        plot.plot(xplot, yspline(xplot), **gaussian)
    if show:
        plot.show()
    else:
        return plot | Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displayes plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plot if ``plottype='density'``. The
cumulative probability is plotted if ``plottype=cumulative``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified.
def type_id(self):
"""
Shortcut to retrieving the ContentType id of the model.
"""
try:
return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id
except DatabaseError as e:
raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e))) | Shortcut to retrieving the ContentType id of the model. |
def send_email(Source=None, Destination=None, Message=None, ReplyToAddresses=None,
               ReturnPath=None, SourceArn=None, ReturnPathArn=None, Tags=None,
               ConfigurationSetName=None):
    """
    Compose an email message from input data and immediately queue it for
    sending through Amazon SES (``SendEmail`` API). See the AWS API
    documentation for the full parameter reference.

    :type Source: string
    :param Source: [REQUIRED] The sending address; must be verified with
        Amazon SES (individually or via a verified domain) and be 7-bit
        ASCII (use RFC 2047 encoded-words otherwise). With sending
        authorization, also supply ``SourceArn``.
    :type Destination: dict
    :param Destination: [REQUIRED] ``ToAddresses``, ``CcAddresses`` and
        ``BccAddresses`` lists of recipient strings.
    :type Message: dict
    :param Message: [REQUIRED] ``Subject`` plus a ``Body`` with ``Text``
        and/or ``Html`` parts, each a ``Data``/``Charset`` dict.
    :type ReplyToAddresses: list
    :param ReplyToAddresses: Addresses that receive replies to the message.
    :type ReturnPath: string
    :param ReturnPath: Verified address that receives bounces and
        complaints when feedback forwarding is enabled; never overwritten.
    :type SourceArn: string
    :param SourceArn: Sending-authorization ARN of the identity backing
        ``Source``.
    :type ReturnPathArn: string
    :param ReturnPathArn: Sending-authorization ARN of the identity backing
        ``ReturnPath``.
    :type Tags: list
    :param Tags: ``{'Name': ..., 'Value': ...}`` message tags (ASCII
        letters, digits, ``_`` or ``-``; under 256 characters each) used
        with configuration sets to publish sending events.
    :type ConfigurationSetName: string
    :param ConfigurationSetName: Configuration set to apply to this send.
    :rtype: dict
    :return: ``{'MessageId': 'string'}``
    """
    pass
There are several important points to know about SendEmail :
See also: AWS API Documentation
Examples
The following example sends a formatted email:
Expected Output:
:example: response = client.send_email(
Source='string',
Destination={
'ToAddresses': [
'string',
],
'CcAddresses': [
'string',
],
'BccAddresses': [
'string',
]
},
Message={
'Subject': {
'Data': 'string',
'Charset': 'string'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'string'
},
'Html': {
'Data': 'string',
'Charset': 'string'
}
}
},
ReplyToAddresses=[
'string',
],
ReturnPath='string',
SourceArn='string',
ReturnPathArn='string',
Tags=[
{
'Name': 'string',
'Value': 'string'
},
],
ConfigurationSetName='string'
)
:type Source: string
:param Source: [REQUIRED]
The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .
If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .
In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= . For more information, see RFC 2047 .
:type Destination: dict
:param Destination: [REQUIRED]
The destination for this email, composed of To:, CC:, and BCC: fields.
ToAddresses (list) --The To: field(s) of the message.
(string) --
CcAddresses (list) --The CC: field(s) of the message.
(string) --
BccAddresses (list) --The BCC: field(s) of the message.
(string) --
:type Message: dict
:param Message: [REQUIRED]
The message to be sent.
Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Body (dict) -- [REQUIRED]The message body.
Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
:type ReplyToAddresses: list
:param ReplyToAddresses: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.
(string) --
:type ReturnPath: string
:param ReturnPath: The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.
:type SourceArn: string
:param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from user@example.com , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be user@example.com .
For more information about sending authorization, see the Amazon SES Developer Guide .
:type ReturnPathArn: string
:param ReturnPathArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use feedback@example.com , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be feedback@example.com .
For more information about sending authorization, see the Amazon SES Developer Guide .
:type Tags: list
:param Tags: A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
(dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.
Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .
Name (string) -- [REQUIRED]The name of the tag. The name must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
Value (string) -- [REQUIRED]The value of the tag. The value must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
:type ConfigurationSetName: string
:param ConfigurationSetName: The name of the configuration set to use when you send an email using SendEmail .
:rtype: dict
:return: {
'MessageId': 'string'
}
:returns:
Source (string) -- [REQUIRED]
The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .
If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .
In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= . For more information, see RFC 2047 .
Destination (dict) -- [REQUIRED]
The destination for this email, composed of To:, CC:, and BCC: fields.
ToAddresses (list) --The To: field(s) of the message.
(string) --
CcAddresses (list) --The CC: field(s) of the message.
(string) --
BccAddresses (list) --The BCC: field(s) of the message.
(string) --
Message (dict) -- [REQUIRED]
The message to be sent.
Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Body (dict) -- [REQUIRED]The message body.
Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
ReplyToAddresses (list) -- The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.
(string) --
ReturnPath (string) -- The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.
SourceArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from user@example.com , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be user@example.com .
For more information about sending authorization, see the Amazon SES Developer Guide .
ReturnPathArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use feedback@example.com , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be feedback@example.com .
For more information about sending authorization, see the Amazon SES Developer Guide .
Tags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
(dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.
Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .
Name (string) -- [REQUIRED]The name of the tag. The name must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
Value (string) -- [REQUIRED]The value of the tag. The value must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
ConfigurationSetName (string) -- The name of the configuration set to use when you send an email using SendEmail . |
def collect_static_files(src_map, dst):
    """
    Collect all static files and move them into a temporary location.

    This is very similar to the ``collectstatic`` command.

    Args:
        src_map: mapping of relative destination path -> absolute source path.
        dst: directory under which every file is copied.
    """
    # .items() works on both Python 2 and 3; dict.iteritems() was removed
    # in Python 3 and made this function crash there.
    for rel_src, abs_src in src_map.items():
        abs_dst = os.path.join(dst, rel_src)
        copy_file(abs_src, abs_dst)
This is very similar to the ``collectstatic`` command. |
def dbus_readBytesTwoFDs(self, fd1, fd2, byte_count):
    """
    Read ``byte_count`` bytes from each of two file descriptors.

    Args:
        fd1, fd2: OS-level file descriptor numbers; each is wrapped and
            closed (consumed) by this call.
        byte_count: number of bytes to read from each descriptor.

    Returns:
        bytearray: the bytes read from ``fd1`` followed by those from ``fd2``.
    """
    result = bytearray()
    for fd in (fd1, fd2):
        # 'with' guarantees the wrapper (and the fd) is closed even if
        # read() raises; the original leaked it on error.
        with os.fdopen(fd, 'rb') as f:
            result.extend(f.read(byte_count))
    return result
def knock_out(self):
    """Knock out the gene: flag it non-functional and close the bounds of
    every reaction that this inactivation disables.

    The change is reverted upon exit when executed within the model as
    context.
    """
    self.functional = False
    disabled = (rxn for rxn in self.reactions if not rxn.functional)
    for rxn in disabled:
        rxn.bounds = (0, 0)
associated reactions bounds to zero.
The change is reverted upon exit if executed within the model as
context. |
def replace_url_query_values(url, replace_vals):
    """Replace querystring values in a url string.

    URLs without a ``?`` are returned unchanged. Unlike a naive
    ``url.split('?')`` rebuild, this preserves every other URL component,
    including the ``#fragment``.

    >>> url = 'http://helloworld.com/some/path?test=5'
    >>> replace_vals = {'test': 10}
    >>> replace_url_query_values(url=url, replace_vals=replace_vals)
    'http://helloworld.com/some/path?test=10'
    """
    if '?' not in url:
        return url
    parsed_url = urlparse(url)
    query = dict(parse_qsl(parsed_url.query))
    query.update(replace_vals)
    # Rebuild from the parsed components so scheme, netloc, params and
    # fragment all survive (the old split('?') version dropped the fragment).
    return parsed_url._replace(query=urlencode(query)).geturl()
>>> url = 'http://helloworld.com/some/path?test=5'
>>> replace_vals = {'test': 10}
>>> replace_url_query_values(url=url, replace_vals=replace_vals)
'http://helloworld.com/some/path?test=10' |
def get_answers(self):
    """
    Return the answers for this document, preferring ``inner_hits``
    already present on the metadata and falling back to an
    elasticsearch query via ``search_answers``.
    """
    cached = 'inner_hits' in self.meta and 'answer' in self.meta.inner_hits
    if not cached:
        return list(self.search_answers())
    return self.meta.inner_hits.answer.hits
elasticsearch. |
def dump_to_store(dataset, store, writer=None, encoder=None,
                  encoding=None, unlimited_dims=None):
    """Store dataset contents to a backends.*DataStore object.

    ``writer`` defaults to a fresh ArrayWriter and ``encoding`` to an
    empty mapping. Per-variable encodings are attached before the
    optional ``encoder`` post-processing step and the final
    ``store.store`` call.
    """
    writer = ArrayWriter() if writer is None else writer
    encoding = {} if encoding is None else encoding
    variables, attrs = conventions.encode_dataset_coordinates(dataset)
    # encode_dataset_coordinates already shallow-copied the variables, so
    # attaching encodings here does not mutate the caller's dataset.
    check_encoding = set(encoding)
    for name, var_encoding in encoding.items():
        variables[name].encoding = var_encoding
    if encoder:
        variables, attrs = encoder(variables, attrs)
    store.store(variables, attrs, check_encoding, writer,
                unlimited_dims=unlimited_dims)
def fastqtransform(transform, fastq1, fastq2, fastq3, fastq4, keep_fastq_tags,
                   separate_cb, demuxed_cb, cores, fastq1out, fastq2out,
                   min_length):
    ''' Transform input reads to the tagcounts compatible read layout using
    regular expressions as defined in a transform file. Writes the new
    format to fastq1out/fastq2out when given, otherwise to stdout.

    Args:
        transform: path to a JSON file with regexes under keys
            "read1".."read4"; their named groups (CB/CB1-3, MB, SB —
            presumably cell barcode, molecular barcode, sample) drive the
            output read-name layout. TODO confirm group semantics upstream.
        fastq1..fastq4: input FASTQ paths; fastq2-4 may be None.
        keep_fastq_tags: keep the original FASTQ comment after the name.
        separate_cb: keep multi-part cell barcodes dash-separated instead
            of concatenated.
        demuxed_cb: literal cell barcode stamped on every read (input
            already demultiplexed).
        cores: number of worker processes for the regex transform.
        fastq1out, fastq2out: output paths; both present => paired output.
        min_length: reads shorter than this are dropped (in paired mode a
            short mate drops the whole pair).
    '''
    transform = json.load(open(transform))
    options = _infer_transform_options(transform)
    read_template = '{name}'
    logger.info("Transforming %s." % fastq1)
    # Build the read-name template from whichever barcode/UMI/sample groups
    # the transform file declares.
    if options.dual_index:
        logger.info("Detected dual cellular indexes.")
        if separate_cb:
            read_template += ':CELL_{CB1}-{CB2}'
        else:
            read_template += ':CELL_{CB}'
    elif options.triple_index:
        logger.info("Detected triple cellular indexes.")
        if separate_cb:
            read_template += ':CELL_{CB1}-{CB2}-{CB3}'
        else:
            read_template += ':CELL_{CB}'
    elif options.CB or demuxed_cb:
        logger.info("Detected cellular barcodes.")
        read_template += ':CELL_{CB}'
    if options.MB:
        logger.info("Detected UMI.")
        read_template += ':UMI_{MB}'
    if options.SB:
        logger.info("Detected sample.")
        read_template += ':SAMPLE_{SB}'
    read_template += "{readnum}"
    if keep_fastq_tags:
        read_template += ' {fastqtag}'
    read_template += '\n{seq}\n+\n{qual}\n'
    # Paired-end output only when both output paths were provided.
    paired = fastq1out and fastq2out
    read1_regex = re.compile(transform['read1'])
    read2_regex = re.compile(transform['read2']) if fastq2 else None
    read3_regex = re.compile(transform['read3']) if fastq3 else None
    read4_regex = re.compile(transform['read4']) if fastq4 else None
    fastq_file1 = read_fastq(fastq1)
    fastq_file2 = read_fastq(fastq2)
    fastq_file3 = read_fastq(fastq3)
    fastq_file4 = read_fastq(fastq4)
    # Rebind 'transform' as the per-chunk worker function for Pool.map.
    transform = partial(transformer, read1_regex=read1_regex,
                        read2_regex=read2_regex, read3_regex=read3_regex,
                        read4_regex=read4_regex, paired=paired)
    fastq1out_fh = write_fastq(fastq1out)
    fastq2out_fh = write_fastq(fastq2out)
    p = multiprocessing.Pool(cores)
    # Python 2 compatibility: itertools.izip does not exist on Python 3,
    # where the builtin zip is already lazy.
    try:
        zzip = itertools.izip
    except AttributeError:
        zzip = zip
    chunks = tz.partition_all(10000, zzip(fastq_file1, fastq_file2, fastq_file3,
                                          fastq_file4))
    bigchunks = tz.partition_all(cores, chunks)
    for bigchunk in bigchunks:
        for chunk in p.map(transform, list(bigchunk)):
            if paired:
                for read1_dict, read2_dict in tz.partition(2, chunk):
                    if options.dual_index:
                        if not separate_cb:
                            read1_dict['CB'] = read1_dict['CB1'] + read1_dict['CB2']
                            read2_dict['CB'] = read2_dict['CB1'] + read2_dict['CB2']
                    if demuxed_cb:
                        # Pre-demultiplexed input: stamp the fixed barcode.
                        read1_dict['CB'] = demuxed_cb
                        read2_dict['CB'] = demuxed_cb
                    # Deal with spaces in read names
                    if keep_fastq_tags:
                        name, tag = read1_dict['name'].split(' ')
                        read1_dict['name'] = name
                        read1_dict['fastqtag'] = tag
                        name, tag = read2_dict['name'].split(' ')
                        read2_dict['name'] = name
                        read2_dict['fastqtag'] = tag
                    else:
                        read1_dict['name'] = read1_dict['name'].partition(' ')[0]
                        read2_dict['name'] = read2_dict['name'].partition(' ')[0]
                    read1_dict = _extract_readnum(read1_dict)
                    read2_dict = _extract_readnum(read2_dict)
                    # A short mate drops the whole pair to keep files in sync.
                    tooshort = (len(read1_dict['seq']) < min_length or
                                len(read2_dict['seq']) < min_length)
                    if not tooshort:
                        fastq1out_fh.write(read_template.format(**read1_dict))
                        fastq2out_fh.write(read_template.format(**read2_dict))
            else:
                for read1_dict in chunk:
                    if options.dual_index:
                        if not separate_cb:
                            read1_dict['CB'] = read1_dict['CB1'] + read1_dict['CB2']
                    if demuxed_cb:
                        read1_dict['CB'] = demuxed_cb
                    # Deal with spaces in read names
                    if keep_fastq_tags:
                        name, tag = read1_dict['name'].split(' ')
                        read1_dict['name'] = name
                        read1_dict['fastqtag'] = tag
                    else:
                        read1_dict['name'] = read1_dict['name'].partition(' ')[0]
                    read1_dict = _extract_readnum(read1_dict)
                    if len(read1_dict['seq']) >= min_length:
                        if fastq1out_fh:
                            fastq1out_fh.write(read_template.format(**read1_dict))
                        else:
                            sys.stdout.write(read_template.format(**read1_dict))
regular expressions as defined in a transform file. Outputs new format to
stdout. |
def load_creditscoring2(cost_mat_parameters=None):
    """Load and return the credit scoring PAKDD 2009 competition dataset (classification).

    The credit scoring is a easily transformable example-dependent cost-sensitive
    classification dataset.

    Parameters
    ----------
    cost_mat_parameters : Dictionary-like object, optional (default=None)
        If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k', 'lgd'

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'cost_mat', the cost matrix of each example,
        'target_names', the meaning of the labels, 'feature_names', the
        meaning of the features, and 'DESCR', the full description of the dataset.

    References
    ----------
    .. [1] A. Correa Bahnsen, D. Aouada, B. Ottersten,
           "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
           in Proceedings of the International Conference on Machine Learning
           and Applications, 2014.

    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50

    >>> from costcla.datasets import load_creditscoring2
    >>> data = load_creditscoring2()
    >>> data.target[[10, 17, 50]]
    array([1, 0, 0])
    >>> data.cost_mat[[10, 17, 50]]
    array([[ 209.   ,  547.965,    0.   ,    0.   ],
           [  24.   ,  274.725,    0.   ,    0.   ],
           [  89.   ,  371.25 ,    0.   ,    0.   ]])
    """
    module_path = dirname(__file__)
    raw_data = pd.read_csv(join(module_path, 'data', 'creditscoring2.csv.gz'),
                           delimiter='\t', compression='gzip')
    # Close the description file deterministically (the old bare open() leaked it).
    with open(join(module_path, 'descr', 'creditscoring2.rst')) as descr_file:
        descr = descr_file.read()
    # Exclude TARGET_LABEL_BAD=1 == 'N'
    raw_data = raw_data.loc[raw_data['TARGET_LABEL_BAD=1'] != 'N']
    # Keep only 100 < PERSONAL_NET_INCOME < 10000.
    # np.float / np.int were removed in NumPy 1.24; the builtin types are
    # equivalent here and work on every NumPy version.
    raw_data = raw_data.loc[(raw_data['PERSONAL_NET_INCOME'].values.astype(float) > 100)]
    raw_data = raw_data.loc[(raw_data['PERSONAL_NET_INCOME'].values.astype(float) < 10000)]
    target = raw_data['TARGET_LABEL_BAD=1'].values.astype(int)
    # Continuous features
    cols_con = ['ID_SHOP', 'AGE', 'AREA_CODE_RESIDENCIAL_PHONE', 'PAYMENT_DAY', 'SHOP_RANK',
                'MONTHS_IN_RESIDENCE', 'MONTHS_IN_THE_JOB', 'PROFESSION_CODE', 'MATE_INCOME',
                'QUANT_ADDITIONAL_CARDS_IN_THE_APPLICATION', 'PERSONAL_NET_INCOME']
    data = raw_data[cols_con].astype(float)
    # Categorical features are expanded to one-hot columns.
    cols_dummies = ['SEX', 'MARITAL_STATUS', 'FLAG_RESIDENCIAL_PHONE', 'RESIDENCE_TYPE',
                    'FLAG_MOTHERS_NAME', 'FLAG_FATHERS_NAME', 'FLAG_RESIDENCE_TOWN_eq_WORKING_TOWN',
                    'FLAG_RESIDENCE_STATE_eq_WORKING_STATE', 'FLAG_RESIDENCIAL_ADDRESS_eq_POSTAL_ADDRESS']
    for col_ in cols_dummies:
        temp_ = pd.get_dummies(raw_data[col_], prefix=col_)
        data = data.join(temp_)
    # Calculate cost_mat (see [1])
    if cost_mat_parameters is None:
        cost_mat_parameters = {'int_r': 0.63 / 12,
                               'int_cf': 0.165 / 12,
                               'cl_max': 25000 * 0.33,
                               'n_term': 24,
                               'k': 3,
                               'lgd': .75}
    n_samples = data.shape[0]
    pi_1 = target.mean()
    monthly_income = data['PERSONAL_NET_INCOME'].values * 0.33
    cost_mat = _creditscoring_costmat(monthly_income, np.zeros(n_samples), pi_1, cost_mat_parameters)
    return Bunch(data=data.values, target=target, cost_mat=cost_mat,
                 target_names=['no', 'yes'], DESCR=descr,
                 feature_names=data.columns.values, name='CreditScoring_PAKDD2009')
The credit scoring is a easily transformable example-dependent cost-sensitive classification dataset.
Parameters
----------
cost_mat_parameters : Dictionary-like object, optional (default=None)
If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k','lgd'
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'cost_mat', the cost matrix of each example,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the full description of the dataset.
References
----------
.. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
"Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
in Proceedings of the International Conference on Machine Learning and Applications,
, 2014.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50
>>> from costcla.datasets import load_creditscoring2
>>> data = load_creditscoring2()
>>> data.target[[10, 17, 50]]
array([1, 0, 0])
>>> data.cost_mat[[10, 17, 50]]
array([[ 209. , 547.965, 0. , 0. ],
[ 24. , 274.725, 0. , 0. ],
[ 89. , 371.25 , 0. , 0. ]]) |
def get_template_loader_for_path(self, path, use_cache=True):
    '''
    Returns a template loader object for the given directory path.
    For example, get_template_loader('/var/mytemplates/') will return
    a loader for that specific directory.

    Normally, you should not have to call this method. Django automatically
    adds request.dmp.render() and request.dmp.render_to_string() on each
    request.

    This method is useful when you want a custom template loader for a specific
    directory that may be outside your project directory or that is otherwise
    not contained in a normal Django app. If the directory is inside an app,
    call get_template_loader() instead.

    Unless use_cache=False, this method caches template loaders in the DMP
    cache for later use.
    '''
    # Serve a previously built loader straight from the cache when allowed.
    if use_cache and path in self.template_loaders:
        return self.template_loaders[path]
    # Build a fresh loader for this directory.
    loader = MakoTemplateLoader(path, None)
    # Remember it for subsequent calls, unless caching was disabled.
    if use_cache:
        self.template_loaders[path] = loader
    return loader
For example, get_template_loader('/var/mytemplates/') will return
a loader for that specific directory.
Normally, you should not have to call this method. Django automatically
adds request.dmp.render() and request.dmp.render_to_string() on each
request.
This method is useful when you want a custom template loader for a specific
directory that may be outside your project directory or that is otherwise
not contained in a normal Django app. If the directory is inside an app,
call get_template_loader() instead.
Unless use_cache=False, this method caches template loaders in the DMP
cache for later use. |
def demonize(self):
    """
    Daemonize the current process via the classic double fork.

    Checks the pid file first: if it names a live process the call aborts;
    a stale pid file (dead owner) is removed and daemonization proceeds.
    The first parent waits (up to 60s) for the daemonized child to write
    its pid before exiting.

    :return: True in the daemonized child, False if another instance is
             already running or the pid check failed.  The intermediate
             parent processes exit via sys.exit() and never return.
    """
    # check if a process is already running
    if access(self.pid_file_name, F_OK):
        # read the pid file
        pid = self.read_pid()
        try:
            kill(pid, 0)  # check if process is running (signal 0 = probe only)
            self.stderr.write("process is already running\n")
            return False
        except OSError as e:
            if e.errno == errno.ESRCH:
                # process is dead: the pid file is stale, remove it and continue
                self.delete_pid(force_del=True)
            else:
                self.stderr.write("demonize failed, something went wrong: %d (%s)\n" % (e.errno, e.strerror))
                return False
    try:
        pid = fork()
        if pid > 0:
            # Exit from the first parent, but only after the daemon child
            # has published its pid (poll up to ~60 seconds).
            timeout = time() + 60
            while self.read_pid() is None:
                self.stderr.write("waiting for pid..\n")
                sleep(0.5)
                if time() > timeout:
                    break
            self.stderr.write("pid is %d\n" % self.read_pid())
            sys.exit(0)
    except OSError as e:
        self.stderr.write("demonize failed in 1. Fork: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Decouple from parent environment: become session leader, reset umask
    # os.chdir("/")
    setsid()
    umask(0)
    # Do the Second fork so the daemon can never reacquire a controlling terminal
    try:
        pid = fork()
        if pid > 0:
            # Exit from the second parent
            sys.exit(0)
    except OSError as e:
        self.stderr.write("demonize failed in 2. Fork: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Redirect standard file descriptors (currently disabled)
    # sys.stdout.flush()
    # sys.stderr.flush()
    # si = file(self.stdin, 'r')
    # so = file(self.stdout, 'a+')
    # se = file(self.stderr, 'a+',
    # os.dup2(si.fileno(), sys.stdin.fileno())
    # os.dup2(so.fileno(), sys.stdout.fileno())
    # os.dup2(se.fileno(), sys.stderr.fileno())
    # Write the PID file
    # atexit.register(self.delete_pid)
    self.write_pid()
    return True
def generateAPIRootBody(self):
    '''
    Generates the root library api file's body text. The method calls
    :func:`~exhale.graph.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
    internal linkage between reStructuredText documents. Afterward, it calls
    :func:`~exhale.graph.ExhaleRoot.generateViewHierarchies` followed by
    :func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI` to generate both
    hierarchies as well as the full API listing. As a result, three files will now
    be ready:

    1. ``self.class_hierarchy_file``
    2. ``self.file_hierarchy_file``
    3. ``self.unabridged_api_file``

    These three files are then *included* into the root library file. The
    consequence of using an ``include`` directive is that Sphinx will complain about
    these three files never being included in any ``toctree`` directive. These
    warnings are expected, and preferred to using a ``toctree`` because otherwise
    the user would have to click on the class view link from the ``toctree`` in
    order to see it. This behavior has been acceptable for me so far, but if it
    is causing you problems please raise an issue on GitHub and I may be able to
    conditionally use a ``toctree`` if you really need it.
    '''
    try:
        # Must run first: it fixes the final file names that the include
        # directives written below will refer to.
        self.gerrymanderNodeFilenames()
        self.generateViewHierarchies()
        self.generateUnabridgedAPI()
        # Append (mode "a") the includes to the already-created root file.
        with codecs.open(self.full_root_file_path, "a", "utf-8") as generated_index:
            # Include the class and file hierarchies
            generated_index.write(".. include:: {0}\n\n".format(
                os.path.basename(self.class_hierarchy_file)
            ))
            generated_index.write(".. include:: {0}\n\n".format(
                os.path.basename(self.file_hierarchy_file)
            ))
            # Add the afterHierarchyDescription if provided
            if configs.afterHierarchyDescription:
                generated_index.write(
                    "\n{0}\n\n".format(configs.afterHierarchyDescription)
                )
            # Include the unabridged API
            generated_index.write(".. include:: {0}\n\n".format(
                os.path.basename(self.unabridged_api_file)
            ))
            # Add the afterBodySummary if provided
            if configs.afterBodySummary:
                generated_index.write(
                    "\n{0}\n\n".format(configs.afterBodySummary)
                )
            # The following should only be applied to the page library root page
            # Applying it to other pages will result in an error
            if self.use_tree_view and configs.treeViewIsBootstrap:
                # NOTE: doubled braces ({{ }}) below are literal braces in the
                # emitted JavaScript; single-brace names are str.format fields.
                generated_index.write(textwrap.dedent('''
                    .. raw:: html

                       <script type="text/javascript">
                           /* NOTE: if you are reading this, Exhale generated this directly. */
                           $(document).ready(function() {{
                               /* Inspired by very informative answer to get color of links:
                                  https://stackoverflow.com/a/2707837/3814202 */
                               var $fake_link = $('<a href="#"></a>').hide().appendTo("body");
                               var linkColor = $fake_link.css("color");
                               $fake_link.remove();
                               var $fake_p = $('<p class="{icon_mimic}"></p>').hide().appendTo("body");
                               var iconColor = $fake_p.css("color");
                               $fake_p.remove();
                               /* After much deliberation, using JavaScript directly to enforce that the
                                * link and glyphicon receive different colors is fruitless, because the
                                * bootstrap treeview library will overwrite the style every time.  Instead,
                                * leaning on the library code itself to append some styling to the head,
                                * I choose to mix a couple of things:
                                *
                                * 1. Set the `color` property of bootstrap treeview globally, this would
                                *    normally affect the color of both the link text and the icon.
                                * 2. Apply custom forced styling of the glyphicon itself in order to make
                                *    it a little more clear to the user (via different colors) that the
                                *    act of clicking the icon and the act of clicking the link text perform
                                *    different actions.  The icon expands, the text navigates to the page.
                                */
                               // Part 1: use linkColor as a parameter to bootstrap treeview
                               // apply the class view hierarchy
                               $("#{class_idx}").treeview({{
                                   data: {class_func_name}(),
                                   enableLinks: true,
                                   color: linkColor,
                                   showTags: {show_tags},
                                   collapseIcon: "{collapse_icon}",
                                   expandIcon: "{expand_icon}",
                                   levels: {levels},
                                   onhoverColor: "{onhover_color}"
                               }});
                               // apply the file view hierarchy
                               $("#{file_idx}").treeview({{
                                   data: {file_func_name}(),
                                   enableLinks: true,
                                   color: linkColor,
                                   showTags: {show_tags},
                                   collapseIcon: "{collapse_icon}",
                                   expandIcon: "{expand_icon}",
                                   levels: {levels},
                                   onhoverColor: "{onhover_color}"
                               }});
                               // Part 2: override the style of the glyphicons by injecting some CSS
                               $('<style type="text/css" id="exhaleTreeviewOverride">' +
                                 '    .treeview span[class~=icon] {{ ' +
                                 '        color: ' + iconColor + ' ! important;' +
                                 '    }}' +
                                 '</style>').appendTo('head');
                           }});
                       </script>
                '''.format(
                    icon_mimic=configs.treeViewBootstrapIconMimicColor,
                    class_idx=configs._class_hierarchy_id,
                    class_func_name=configs._bstrap_class_hierarchy_fn_data_name,
                    file_idx=configs._file_hierarchy_id,
                    file_func_name=configs._bstrap_file_hierarchy_fn_data_name,
                    show_tags="true" if configs.treeViewBootstrapUseBadgeTags else "false",
                    collapse_icon=configs.treeViewBootstrapCollapseIcon,
                    expand_icon=configs.treeViewBootstrapExpandIcon,
                    levels=configs.treeViewBootstrapLevels,
                    onhover_color=configs.treeViewBootstrapOnhoverColor
                )))
    except:
        # Any failure above (I/O, config, formatting) is surfaced as one
        # fatal, user-readable error naming the root file being written.
        utils.fancyError(
            "Unable to create the root api body: [{0}]".format(self.full_root_file_path)
        )
:func:`~exhale.graph.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`~exhale.graph.ExhaleRoot.generateViewHierarchies` followed by
:func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI` to generate both
hierarchies as well as the full API listing. As a result, three files will now
be ready:
1. ``self.class_hierarchy_file``
2. ``self.file_hierarchy_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it. |
async def handler(event):
    """#docs or #ref query: Like #search but shows the query."""
    raw_query = event.pattern_match.group(1)
    encoded_query = urllib.parse.quote(raw_query)
    # Delete the command message and post the docs reply concurrently.
    reply = event.respond(DOCS.format(raw_query, encoded_query),
                          reply_to=event.reply_to_msg_id)
    await asyncio.wait([event.delete(), reply])
def _ipopo_setup_callback(cls, context):
    # type: (type, FactoryContext) -> None
    """
    Sets up the class _callback dictionary

    Scans every routine of the class for the iPOPO callback marker
    attribute and registers each declared callback in the factory
    context, warning when the same callback is redefined in the same
    class (parent-class overrides are silently accepted).

    :param cls: The class to handle
    :param context: The factory class context
    """
    assert inspect.isclass(cls)
    assert isinstance(context, FactoryContext)
    if context.callbacks is not None:
        # Work on a copy so the context is only mutated once, at the end.
        callbacks = context.callbacks.copy()
    else:
        # NOTE(review): if this branch is ever taken,
        # context.callbacks.clear() below would raise AttributeError --
        # presumably FactoryContext always provides a dict here; confirm.
        callbacks = {}
    functions = inspect.getmembers(cls, inspect.isroutine)
    for _, func in functions:
        if not hasattr(func, constants.IPOPO_METHOD_CALLBACKS):
            # No attribute, get the next member
            continue
        method_callbacks = getattr(func, constants.IPOPO_METHOD_CALLBACKS)
        if not isinstance(method_callbacks, list):
            # Invalid content
            _logger.warning(
                "Invalid callback information %s in %s",
                constants.IPOPO_METHOD_CALLBACKS,
                get_method_description(func),
            )
            continue
        # Keeping it allows inheritance : by removing it, only the first
        # child will see the attribute -> Don't remove it
        # Store the call backs
        for _callback in method_callbacks:
            # Warn only when the previous definition came from this very
            # class (a parent-class definition is a normal override).
            if _callback in callbacks and not is_from_parent(
                cls, callbacks[_callback].__name__, callbacks[_callback]
            ):
                _logger.warning(
                    "Redefining the callback %s in class '%s'.\n"
                    "\tPrevious callback : %s\n"
                    "\tNew callback : %s",
                    _callback,
                    cls.__name__,
                    get_method_description(callbacks[_callback]),
                    get_method_description(func),
                )
            # Last definition wins.
            callbacks[_callback] = func
    # Update the factory context
    context.callbacks.clear()
    context.callbacks.update(callbacks)
:param cls: The class to handle
:param context: The factory class context |
def cookie_name_check(cookie_name):
    """ Check cookie name for validity. Return True if name is valid

    :param cookie_name: name to check
    :return: bool
    """
    # An empty name is never valid.
    if not cookie_name:
        return False
    # Valid names contain no character matched by the non-compliance regex.
    non_compliant = WHTTPCookie.cookie_name_non_compliance_re.match(
        cookie_name.encode('us-ascii')
    )
    return non_compliant is None
:param cookie_name: name to check
:return: bool |
def check_solution(self, tx_context, flags=None, traceback_f=None):
    """
    Verify every (puzzle, solution) pair of the transaction, raising
    ``self.ScriptError`` on the first failing script.

    tx_context: information about the transaction that the VM may need
    flags: gives the VM hints about which additional constraints to check
    traceback_f: optional tracing callback installed on each VM run
    """
    for t in self.puzzle_and_solution_iterator(tx_context, flags=flags, traceback_f=traceback_f):
        # NOTE: this unpacking rebinds ``flags``, shadowing the parameter
        # with the per-pair flags produced by the iterator.
        puzzle_script, solution_stack, flags, sighash_f = t
        # Run the puzzle script against a copy of the solution stack.
        vm = self.VM(puzzle_script, tx_context, sighash_f, flags=flags, initial_stack=solution_stack[:])
        vm.is_solution_script = False
        vm.traceback_f = traceback_f
        stack = vm.eval_script()
        # Success requires a truthy value on top of the resulting stack.
        if len(stack) == 0 or not vm.bool_from_script_bytes(stack[-1]):
            raise self.ScriptError("eval false", errno.EVAL_FALSE)
        # CLEANSTACK additionally demands exactly one element be left.
        if flags & VERIFY_CLEANSTACK and len(stack) != 1:
            raise self.ScriptError("stack not clean after evaluation", errno.CLEANSTACK)
flags: gives the VM hints about which additional constraints to check |
def add_positional_embedding_nd(x, max_length, name=None):
    """Adds n-dimensional positional embedding.

    The embeddings add to all positional dimensions of the tensor.

    Args:
        x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
            dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
        max_length: int representing static maximum size of any dimension.
        name: str representing name of the embedding tf.Variable.

    Returns:
        Tensor of same shape as x.
    """
    with tf.name_scope("add_positional_embedding_nd"):
        x_shape = common_layers.shape_list(x)
        # All dims except the leading batch and trailing depth are positional.
        num_dims = len(x_shape) - 2
        depth = x_shape[-1]
        # Template shape/slice specs; each iteration customises dim i+1.
        base_shape = [1] * (num_dims + 1) + [depth]
        base_start = [0] * (num_dims + 2)
        base_size = [-1] + [1] * num_dims + [depth]
        for i in range(num_dims):
            shape = base_shape[:]
            start = base_start[:]
            size = base_size[:]
            # One variable per positional dimension, sized to max_length
            # along that dimension and 1 elsewhere (broadcasts on add).
            shape[i + 1] = max_length
            # Slice the variable down to the actual length of dim i+1.
            size[i + 1] = x_shape[i + 1]
            var = tf.get_variable(
                name + "_%d" % i,
                shape,
                initializer=tf.random_normal_initializer(0, depth**-0.5))
            # Scale back up by sqrt(depth): the initializer used stddev
            # depth**-0.5, so the effective embedding has unit-ish scale.
            var = var * depth**0.5
            x += tf.slice(var, start, size)
        return x
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
Returns:
Tensor of same shape as x. |
def modify_calendar_resource(self, calres, attrs):
    """
    :param calres: a zobjects.CalendarResource
    :param attrs: a dictionary of attributes to set ({key:value,...})
    """
    # The API expects attributes as a list of {'n': name, '_content': value}.
    attr_list = [{'n': key, '_content': value} for key, value in attrs.items()]
    resource_id = self._get_or_fetch_id(calres, self.get_calendar_resource)
    self.request('ModifyCalendarResource', {
        'id': resource_id,
        'a': attr_list
    })
:param attrs: a dictionary of attributes to set ({key:value,...}) |
def keypair_from_seed(seed, index=0):
    """
    Generates a deterministic keypair from `seed` based on `index`

    :param seed: bytes value of seed
    :type seed: bytes
    :param index: offset from seed
    :type index: int
    :return: dict of the form: {
        'private': private_key
        'public': public_key
    }
    """
    # The private key is blake2b-256 of seed || index (big-endian uint32).
    hasher = blake2b(digest_size=32)
    hasher.update(seed)
    hasher.update(struct.pack(">L", index))
    private_key = hasher.digest()
    return {
        'private': private_key,
        'public': private_to_public_key(private_key),
    }
:param seed: bytes value of seed
:type seed: bytes
:param index: offset from seed
:type index: int
:return: dict of the form: {
'private': private_key
'public': public_key
} |
def add_edge(self, a, b):
    """Used to add edges to the graph. 'a' and 'b' are vertexes and
    if 'a' or 'b' doesn't exist then the vertex is created.

    The edge is undirected: each endpoint is added to the other's
    adjacency set.

    Args:
        a (hash): is one vertex of the edge
        b (hash): is another vertex of the edge
    """
    # setdefault creates the adjacency set on first sight of a vertex,
    # replacing the manual get-then-create dance.
    self.adjacency_lists.setdefault(a, set()).add(b)
    self.adjacency_lists.setdefault(b, set()).add(a)
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge |
def query_requests(cls, admin, eager=False):
    """Get all pending group requests visible to ``admin``.

    Unions two sources of PENDING_ADMIN memberships:

    1. groups where ``admin`` is a direct administrator (all groups when
       ``admin`` is a superadmin), and
    2. groups whose administrator is itself a group that ``admin`` is an
       active member of.

    NOTE(review): the ``eager`` parameter is currently unused -- confirm
    whether eager loading was intended here.
    """
    # Get direct pending request
    if hasattr(admin, 'is_superadmin') and admin.is_superadmin:
        # Superadmins see requests for every administered group.
        q1 = GroupAdmin.query.with_entities(
            GroupAdmin.group_id)
    else:
        q1 = GroupAdmin.query_by_admin(admin).with_entities(
            GroupAdmin.group_id)
    q2 = Membership.query.filter(
        Membership.state == MembershipState.PENDING_ADMIN,
        Membership.id_group.in_(q1),
    )
    # Get request from admin groups your are member of
    q3 = Membership.query_by_user(
        user=admin, state=MembershipState.ACTIVE
    ).with_entities(Membership.id_group)
    # Groups administered by one of those member groups.
    q4 = GroupAdmin.query.filter(
        GroupAdmin.admin_type == 'Group', GroupAdmin.admin_id.in_(q3)
    ).with_entities(GroupAdmin.group_id)
    q5 = Membership.query.filter(
        Membership.state == MembershipState.PENDING_ADMIN,
        Membership.id_group.in_(q4))
    query = q2.union(q5)
    return query
def xrange(self, start, stop=None, step=1):
    """
    Get an iterator for this threads chunk of work.
    This corresponds to using the OpenMP 'dynamic' schedule.

    Mirrors the builtin range() signature: with a single argument it is
    interpreted as ``stop`` and ``start`` defaults to 0.
    """
    self._assert_active()
    if stop is None:
        # Single-argument form: xrange(stop)
        start, stop = 0, start
    with self._queuelock:
        # Highest loop id any thread in the pool has reached so far.
        pool_loop_reached = max(self._thread_loop_ids)
        # Get this loop id.
        self._thread_loop_ids[self._thread_num] += 1
        loop_id = self._thread_loop_ids[self._thread_num]
        if pool_loop_reached < loop_id:
            # No thread reached this loop yet. Set up the queue.
            # (Only the first arriving thread fills it; the lock makes
            # the reached-check and the fill atomic.)
            for idx in range(start, stop, step):
                self._dynamic_queue.put(idx)
        # Iterate.
        return _QueueIterator(self._dynamic_queue, loop_id, self)
This corresponds to using the OpenMP 'dynamic' schedule. |
def signalize_extensions():
    """DB API 2.0 extension are reported by warnings at run-time."""
    warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
    warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning)  # TODO
    warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
    # not implemented: DB-API extension cursor.scroll()
    warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
    warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
    # BUG FIX: in the previous version the SalesforceWarning category was
    # fused *inside* the message string for the next two calls, so they
    # were emitted as plain UserWarning with a garbled message.
    warnings.warn("DB-API extension cursor.next() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.__iter__() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
    warnings.warn("DB-API extension .errorhandler used", SalesforceWarning)
def _assert_struct_type(self, struct, name, types, path=None, extra_info=None):
    """Asserts that given structure is of any of given types.

    Args:
        struct: structure to check
        name: displayable name of the checked structure (e.g. "run_foo" for section run_foo)
        types: list/tuple of types that are allowed for given struct
        path: list with a source file as a first element and previous names
            (as in name argument to this method) as other elements
        extra_info: extra information to print if error is found (e.g. hint how to fix this)

    Raises:
        YamlTypeError: if given struct is not of any given type; error message contains
            source file and a "path" (e.g. args -> somearg -> flags) specifying
            where the problem is
    """
    # Collect the human-readable YAML names of all acceptable types,
    # deduplicated via a set before joining for display.
    wanted_yaml_typenames = set()
    for t in types:
        wanted_yaml_typenames.add(self._get_yaml_typename(t))
    wanted_yaml_typenames = ' or '.join(wanted_yaml_typenames)
    actual_yaml_typename = self._get_yaml_typename(type(struct))
    if not isinstance(struct, types):
        # Build the multi-line error: optional path line, the mismatch
        # description, and an optional fixing tip.
        err = []
        if path:
            err.append(self._format_error_path(path + [name]))
        err.append(' Expected {w} value for "{n}", got value of type {a}: "{v}"'.
                   format(w=wanted_yaml_typenames,
                          n=name,
                          a=actual_yaml_typename,
                          v=struct))
        if extra_info:
            err.append('Tip: ' + extra_info)
        raise exceptions.YamlTypeError('\n'.join(err))
Args:
struct: structure to check
name: displayable name of the checked structure (e.g. "run_foo" for section run_foo)
types: list/tuple of types that are allowed for given struct
path: list with a source file as a first element and previous names
(as in name argument to this method) as other elements
extra_info: extra information to print if error is found (e.g. hint how to fix this)
Raises:
YamlTypeError: if given struct is not of any given type; error message contains
source file and a "path" (e.g. args -> somearg -> flags) specifying
where the problem is |
def transform_field(instance, source_field_name, destination_field_name, transformation):
    '''
    Does an image transformation on a instance. It will get the image
    from the source field attribute of the instnace, then call
    the transformation function with that instance, and finally
    save that transformed image into the destination field attribute
    of the instance.

    .. note::
        If the source field is blank or the transformation returns
        a false value then the destination field image will be deleted, if it
        exists.

    .. warning::
        When the model instance is saved with the new transformed image, it uses
        the ``update_fields`` argument for
        :py:meth:`~django.db.models.Model.save`, to tell the model to only update
        the destination field and, if set in the destination field, the
        :py:attr:`~django.db.models.ImageField.height_field` and
        :py:attr:`~django.db.models.ImageField.width_field`. This means that
        if the saving code for the model sets any other fields, in the saving
        field process, it will not save those fields to the database. This would
        only happen if you introduce custom logic to the saving process of
        destination field, like the dimension fields do, that updates another field
        on that module. In that case, when the model is saved for the
        transformation, that other field will not be saved to the database.

    :param instance: model instance to perform transformations on
    :type instance: instance of :py:class:`django.db.models.Model`
    :param source_field_name: field name on model to find source image
    :type source_field_name: string
    :param destination_field_name: field name on model save transformed image to
    :type destination_field_name: string
    :param transformation: function, such as :py:func:`~.transforms.scale`, that takes an image files and returns a transformed image
    :type transformation: function
    '''
    source_field = getattr(instance, source_field_name)
    destination_field = getattr(instance, destination_field_name)
    # Only the destination (and possibly its dimension fields) are saved.
    update_fields = [destination_field_name]
    transformed_image = get_transformed_image(source_field, transformation)
    if transformed_image:
        # Reuse the source image's base file name for the transformed file.
        destination_name = os.path.basename(source_field.name)
        # Dimension fields configured on the destination ImageField are
        # updated as a side effect of .save(), so include them (filtering
        # out unset/None entries).
        dimension_field_names = [
            destination_field.field.height_field,
            destination_field.field.width_field]
        update_fields += filter(None, dimension_field_names)
        # save=False: defer the model save to the single call below.
        destination_field.save(
            destination_name,
            transformed_image,
            save=False
        )
    elif destination_field:
        # No transformed image (blank source or falsy transformation
        # result): remove any previously stored destination image.
        destination_field.delete()
    else:
        # Nothing transformed and nothing stored: no model save needed.
        return
    instance.save(update_fields=update_fields)
from the source field attribute of the instnace, then call
the transformation function with that instance, and finally
save that transformed image into the destination field attribute
of the instance.
.. note::
If the source field is blank or the transformation returns
a false value then the destination field image will be deleted, if it
exists.
.. warning::
When the model instance is saved with the new transformed image, it uses
the ``update_fields`` argument for
:py:meth:`~django.db.models.Model.save`, to tell the model to only update
the destination field and, if set in the destination field, the
:py:attr:`~django.db.models.ImageField.height_field` and
:py:attr:`~django.db.models.ImageField.width_field`. This means that
if the saving code for the model sets any other fields, in the saving
field process, it will not save those fields to the database. This would
only happen if you introduce custom logic to the saving process of
destination field, like the dimension fields do, that updates another field
on that module. In that case, when the model is saved for the
transformation, that other field will not be saved to the database.
:param instance: model instance to perform transformations on
:type instance: instance of :py:class:`django.db.models.Model`
:param source_field_name: field name on model to find source image
:type source_field_name: string
:param destination_field_name: field name on model save transformed image to
:type destination_field_name: string
:param transformation: function, such as :py:func:`~.transforms.scale`, that takes an image files and returns a transformed image
:type transformation: function |
def update(self, size):
    """
    Update object size to be showed. This method called while uploading

    :param size: Object size to be showed. The object size should be in bytes.
    """
    # Accept only integral byte counts; anything else is a caller error.
    if isinstance(size, int):
        self.current_size += size
        self.display_queue.put((self.current_size, self.total_length))
    else:
        raise ValueError('{} type can not be displayed. '
                         'Please change it to Int.'.format(type(size)))
:param size: Object size to be showed. The object size should be in bytes. |
def assert_valid_schema(schema: GraphQLSchema) -> None:
    """Utility function which asserts a schema is valid.

    Throws a TypeError if the schema is invalid.
    """
    validation_errors = validate_schema(schema)
    if not validation_errors:
        return
    # Join all error messages into a single TypeError payload.
    raise TypeError("\n\n".join(error.message for error in validation_errors))
Throws a TypeError if the schema is invalid. |
def visit_Expr(self, node: ast.Expr) -> Optional[ast.Expr]:
    """Eliminate no-op constant expressions which are in the tree
    as standalone statements.

    Returns ``None`` (dropping the statement) when the expression is a
    bare constant or bare name; otherwise returns the node unchanged.
    """
    # Since Python 3.8, ast.Num / ast.Str / ast.NameConstant are deprecated
    # subclasses of ast.Constant (and never produced by ast.parse), so
    # checking (Constant, Name) covers every case the old tuple did.
    if isinstance(node.value, (ast.Constant, ast.Name)):
        return None
    return node
as standalone statements. |
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
field_mode = 'REPEATED' if field[1] in (1009, 1005, 1007,
1016) else 'NULLABLE'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
s = json.dumps(schema, sort_keys=True).encode('utf-8')
tmp_schema_file_handle.write(s)
return {self.schema_filename: tmp_schema_file_handle} | Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format. |
def visitor_show(self, visitor_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/visitors#get-a-visitor"
    # Interpolate the visitor id into the endpoint and delegate to the
    # shared request helper, forwarding any extra call options.
    endpoint = "/api/v2/visitors/{visitor_id}".format(visitor_id=visitor_id)
    return self.call(endpoint, **kwargs)
def cli(self, *args, **kwargs):
    """Defines a CLI function that should be routed by this API"""
    # Forward to the module-level cli decorator, injecting this API.
    routed_kwargs = dict(kwargs, api=self.api)
    return cli(*args, **routed_kwargs)
def remove(self):
    """
    remove this object from Ariane server

    :return: None if successfully removed else self
    """
    LOGGER.debug("Cluster.remove - " + self.name)
    # Nothing to remove if this cluster was never persisted.
    if self.id is None:
        return None
    else:
        params = SessionService.complete_transactional_req({
            'name': self.name
        })
        # Request shape differs per driver: RPC-style drivers take an
        # OPERATION key; the REST driver takes an HTTP operation path.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'deleteCluster'
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': 'delete', 'parameters': params}
        response = ClusterService.requester.call(args)
        # Non-REST drivers return an async handle; resolve it here.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            response = response.get()
        if response.rc != 0:
            LOGGER.warning(
                'Cluster.remove - Problem while deleting cluster ' + self.name +
                '. Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) +
                " (" + str(response.rc) + ")"
            )
            # A 500 carrying the overload marker is escalated as a
            # dedicated exception; other failures just return self.
            if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                raise ArianeMappingOverloadError("Cluster.remove", ArianeMappingOverloadError.ERROR_MSG)
            # traceback.print_stack()
            return self
        else:
            return None
:return: null if successfully removed else self |
def add_model_string(self, model_str, position=1, file_id=None):
    """Add a kappa model given in a string to the project."""
    # Generate a fresh id when the caller did not supply one.
    effective_id = file_id if file_id is not None else self.make_unique_id('inlined_input')
    model_file = File.from_string(model_str, position, effective_id)
    return self.file_create(model_file)
def get_info(self):
    '''
    :rtype: dictionary
    :return: field information
    '''
    display_name = self.name if self.name else '<no name>'
    rendered_bytes = self._current_rendered.tobytes()
    return {
        'name': display_name,
        'path': display_name,
        'field_type': type(self).__name__,
        'value': {
            'raw': repr(self._current_value),
            'rendered': {
                'base64': b64encode(rendered_bytes).decode(),
                'length_in_bits': len(self._current_rendered),
                'length_in_bytes': len(rendered_bytes),
            }
        },
        'mutation': {
            'total_number': self._num_mutations,
            'current_index': self._current_index,
            'mutating': self._mutating(),
            'fuzzable': self._fuzzable,
        },
    }
:return: field information |
def scale(self, image, size, crop, options):
    """
    Wrapper for ``engine_scale``, checks if the scaling factor is below one or that scale_up
    option is set to True before calling ``engine_scale``.

    :param image:
    :param size:
    :param crop:
    :param options:
    :return:
    """
    original_size = self.get_image_size(image)
    factor = self._calculate_scaling_factor(original_size, size, crop is not None)
    # Skip the engine entirely when scaling would enlarge the image and
    # upscaling is disabled.
    if factor >= 1 and not options['scale_up']:
        return image
    scaled_width = int(original_size[0] * factor)
    scaled_height = int(original_size[1] * factor)
    return self.engine_scale(image, scaled_width, scaled_height)
option is set to True before calling ``engine_scale``.
:param image:
:param size:
:param crop:
:param options:
:return: |
def reindex_all(self, batch_size=1000):
    """
    Reindex all the records.

    Records are written to a temporary index which is then atomically moved
    over the live index, so searches never see a partially built index.
    Settings, replicas/slaves, query rules and synonyms are captured first
    and restored after the move.

    By default, this method use Model.objects.all() but you can implement
    a method `get_queryset` in your subclass. This can be used to optimize
    the performance (for example with select_related or prefetch_related).

    Args:
        batch_size: maximum number of records sent per save_objects() call.

    Returns:
        The number of records indexed, or None when an AlgoliaException was
        caught and DEBUG is falsy.
    """
    should_keep_synonyms = False
    should_keep_rules = False
    try:
        if not self.settings:
            self.settings = self.get_settings()
            logger.debug('Got settings for index %s: %s', self.index_name, self.settings)
        else:
            logger.debug("index %s already has settings: %s", self.index_name, self.settings)
    except AlgoliaException as e:
        if any("Index does not exist" in arg for arg in e.args):
            pass  # Expected on first indexing run: create from scratch
        else:
            raise e  # Unexpected error while getting settings
    try:
        if self.settings:
            replicas = self.settings.get('replicas', None)
            slaves = self.settings.get('slaves', None)
            should_keep_replicas = replicas is not None
            should_keep_slaves = slaves is not None
            # Strip replicas/slaves before applying settings to the temporary
            # index, so they are not attached to the tmp index during build.
            if should_keep_replicas:
                self.settings['replicas'] = []
                logger.debug("REMOVE REPLICAS FROM SETTINGS")
            if should_keep_slaves:
                self.settings['slaves'] = []
                logger.debug("REMOVE SLAVES FROM SETTINGS")

            self.__tmp_index.wait_task(self.__tmp_index.set_settings(self.settings)['taskID'])
            logger.debug('APPLY SETTINGS ON %s_tmp', self.index_name)

        # Capture query rules and synonyms from the live index so they can
        # be restored after the move.
        rules = []
        synonyms = []
        for r in self.__index.iter_rules():
            rules.append(r)
        for s in self.__index.iter_synonyms():
            synonyms.append(s)
        if len(rules):
            logger.debug('Got rules for index %s: %s', self.index_name, rules)
            should_keep_rules = True
        if len(synonyms):
            # BUGFIX: previously logged `rules` here instead of `synonyms`.
            logger.debug('Got synonyms for index %s: %s', self.index_name, synonyms)
            should_keep_synonyms = True

        self.__tmp_index.clear_index()
        logger.debug('CLEAR INDEX %s_tmp', self.index_name)

        counts = 0
        batch = []

        if hasattr(self, 'get_queryset'):
            qs = self.get_queryset()
        else:
            qs = self.model.objects.all()

        for instance in qs:
            if not self._should_index(instance):
                continue  # should not index

            batch.append(self.get_raw_record(instance))
            if len(batch) >= batch_size:
                self.__tmp_index.save_objects(batch)
                logger.info('SAVE %d OBJECTS TO %s_tmp', len(batch),
                            self.index_name)
                batch = []

            counts += 1
        if len(batch) > 0:
            self.__tmp_index.save_objects(batch)
            logger.info('SAVE %d OBJECTS TO %s_tmp', len(batch),
                        self.index_name)

        self.__client.move_index(self.__tmp_index.index_name,
                                 self.__index.index_name)
        logger.info('MOVE INDEX %s_tmp TO %s', self.index_name,
                    self.index_name)

        if self.settings:
            if should_keep_replicas:
                self.settings['replicas'] = replicas
                logger.debug("RESTORE REPLICAS")
            if should_keep_slaves:
                self.settings['slaves'] = slaves
                logger.debug("RESTORE SLAVES")
            if should_keep_replicas or should_keep_slaves:
                self.__index.set_settings(self.settings)

        if should_keep_rules:
            response = self.__index.batch_rules(rules, forward_to_replicas=True)
            self.__index.wait_task(response['taskID'])
            # BUGFIX: single lazy %-style format instead of mixing
            # str.format with logging's %-args (same emitted message).
            logger.info('Saved rules for index %s with response: %s',
                        self.index_name, response)
        if should_keep_synonyms:
            response = self.__index.batch_synonyms(synonyms, forward_to_replicas=True)
            self.__index.wait_task(response['taskID'])
            logger.info('Saved synonyms for index %s with response: %s',
                        self.index_name, response)
        return counts
    except AlgoliaException as e:
        if DEBUG:
            raise e
        else:
            logger.warning('ERROR DURING REINDEXING %s: %s', self.model, e)
By default, this method use Model.objects.all() but you can implement
a method `get_queryset` in your subclass. This can be used to optimize
the performance (for example with select_related or prefetch_related). |
def rbridge_id(self, **kwargs):
    """Configures device's rbridge ID. Setting this property will need
    a switch reboot

    Args:
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric. Required unless ``get`` is True.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `rbridge_id` is not specified.

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.system.rbridge_id(rbridge_id='225')
        ...     output = dev.system.rbridge_id(rbridge_id='225', get=True)
        ...     dev.system.rbridge_id() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    get_config = kwargs.pop('get', False)
    # For an edit the rbridge ID is mandatory (KeyError if missing);
    # for a get, an empty ID is used.
    rbridge_id = '' if get_config else kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)

    rid_factory = getattr(self._rbridge, 'rbridge_id_rbridge_id')
    config = rid_factory(rbridge_id=rbridge_id)

    if get_config:
        return callback(config, handler='get_config')
    return callback(config)
a switch reboot
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `rbridge_id` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.system.rbridge_id(rbridge_id='225')
... output = dev.system.rbridge_id(rbridge_id='225', get=True)
... dev.system.rbridge_id() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError |
def scrape_args(self, records, executable='raxmlHPC-AVX', partition_files=None,
                model=None, outfiles=None, threads=1, parsimony=False, fast_tree=False,
                n_starts=1):
    """
    Examine a list of records and generate RAxML command line arguments for tree inference.

    :param records: list of `Alignment` records
    :param executable: name of the RAxML executable on the system to use. Must be in the user's path.
    :param partition_files: List of RAxML partition files used to describe any partitioning scheme
        to be used (optional)
    :param model: Choice of model to use. Defaults to GTRGAMMA for DNA, or PROTGAMMALGX for amino acid alignments.
    :param outfiles: A list of output file locations to write results (required length = 1 per alignment)
    :param threads: Number of threads for RAxML to use. This is independent of any threading used by the
        `JobHandler`, and the user should be sure that their choice is appropriate for the number of threads
        available to their system, and for the RAxML executable being used.
    :param parsimony: Use RAxML's parsimony tree search only
    :param fast_tree: Use RAxML's experimental fast tree search (-f E)
    :param n_starts: Number of random starting trees
    :return: (List of command line arguments, List of created temporary files)
    """
    args = []
    to_delete = []
    if partition_files is None:
        partition_files = [None] * len(records)
    if outfiles is None:
        outfiles = [None] * len(records)
    for (rec, qfile, ofile) in zip(records, partition_files, outfiles):
        # BUGFIX: resolve the model per record. Previously, a model inferred
        # for the first record (e.g. GTRGAMMA for DNA) was reused for every
        # subsequent record, even amino-acid alignments.
        rec_model = model
        if rec_model is None:
            rec_model = 'GTRGAMMA' if rec.is_dna() else 'PROTGAMMALGX'
        filename, delete = rec.get_alignment_file(as_phylip=True)
        if delete:
            to_delete.append(filename)
            to_delete.append(filename + '.reduced')

        if qfile is None:
            # Attempt to find partition file on disk, using extension 'partitions.txt'
            if filename.endswith('.phy'):
                # BUGFIX: only rewrite the suffix; str.replace would also hit
                # any other 'phy' occurrence in the path.
                likely_qfile = filename[:-3] + 'partitions.txt'
            else:
                likely_qfile = filename + '.partitions.txt'
            if os.path.exists(likely_qfile):
                qfile = likely_qfile
            else:
                # No partition file found: write a trivial single-partition
                # file covering the full alignment length.
                with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmpfile:
                    qfile = tmpfile.name
                    to_delete.append(tmpfile.name)
                    mymodel = 'DNAX' if rec.is_dna() else rec_model.replace('PROT', '').replace('GAMMA', '').replace('CAT', '')
                    partition_string = '{model}, {name} = 1-{seqlen}\n'.format(
                        model=mymodel,
                        name=rec.name, seqlen=len(rec))
                    tmpfile.write(partition_string)

        args.append((executable, filename, rec_model, qfile, ofile, threads, parsimony, fast_tree, n_starts))
    return args, to_delete
:param records: list of `Alignment` records
:param executable: name of the RAxML executable on the system to use. Must be in the user's path.
:param partition_files: List of RAxML partition files used to describe any partitioning scheme
to be used (optional)
:param model: Choice of model to use. Defaults to GTRGAMMA for DNA, or PROTGAMMALGX for amino acid alignments.
:param outfiles: A list of output file locations to write results (required length = 1 per alignment)
:param threads: Number of threads for RAxML to use. This is independent of any threading used by the
`JobHandler`, and the user should be sure that their choice is appropriate for the number of threads
available to their system, and for the RAxML executable being used.
:param parsimony: Use RAxML's parsimony tree search only
:param fast_tree: Use RAxML's experimental fast tree search (-f E)
:return: (List of command line arguments, List of created temporary files) |
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Firefox cache file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    # Reject files whose name does not match the cache2 filename pattern
    # before doing any I/O.
    filename = parser_mediator.GetFilename()
    if not self._CACHE_FILENAME_RE.match(filename):
        raise errors.UnableToParseFile('Not a Firefox cache2 file.')

    # The file needs to be at least 36 bytes in size for it to contain
    # a cache2 file metadata header and a 4-byte offset that points to its
    # location in the file.
    file_size = file_object.get_size()
    if file_size < 36:
        raise errors.UnableToParseFile(
            'File size too small for Firefox cache2 file.')

    # Helper locates the metadata header within the file; the header is then
    # read and validated via the dtfabric data-type map.
    file_offset = self._GetCacheFileMetadataHeaderOffset(file_object)
    file_metadata_header_map = self._GetDataTypeMap(
        'firefox_cache2_file_metadata_header')

    try:
        file_metadata_header, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, file_metadata_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile((
            'Unable to parse Firefox cache2 file metadata header with error: '
            '{0!s}').format(exception))

    if not self._ValidateCacheFileMetadataHeader(file_metadata_header):
        raise errors.UnableToParseFile('Not a valid Firefox cache2 record.')

    # The request URL (key) follows the header; the rest of the file holds
    # the HTTP header data. The final 4 bytes are stripped before parsing
    # the HTTP headers.
    url = file_object.read(file_metadata_header.key_size)
    header_data = file_object.read()

    display_name = parser_mediator.GetDisplayName()
    request_method, response_code = self._ParseHTTPHeaders(
        header_data[:-4], file_offset, display_name)

    event_data = FirefoxCacheEventData()
    event_data.fetch_count = file_metadata_header.fetch_count
    event_data.frequency = file_metadata_header.frequency
    event_data.request_method = request_method
    event_data.request_size = file_metadata_header.key_size
    event_data.response_code = response_code
    event_data.version = self._CACHE_VERSION
    event_data.url = url.decode('ascii', errors='replace')

    # A last-visited event is always produced; written and expiration events
    # only when the corresponding timestamp is nonzero.
    date_time = dfdatetime_posix_time.PosixTime(
        timestamp=file_metadata_header.last_fetched_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    if file_metadata_header.last_modified_time:
        date_time = dfdatetime_posix_time.PosixTime(
            timestamp=file_metadata_header.last_modified_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    if file_metadata_header.expiration_time:
        date_time = dfdatetime_posix_time.PosixTime(
            timestamp=file_metadata_header.expiration_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. |
def summary_data_from_transaction_data(
    transactions,
    customer_id_col,
    datetime_col,
    monetary_value_col=None,
    datetime_format=None,
    observation_period_end=None,
    freq="D",
    freq_multiplier=1,
):
    """
    Return summary data from transactions.

    This transforms a DataFrame of transaction data of the form:
        customer_id, datetime [, monetary_value]
    to a DataFrame of the form:
        customer_id, frequency, recency, T [, monetary_value]

    Parameters
    ----------
    transactions: :obj: DataFrame
        a Pandas DataFrame that contains the customer_id col and the datetime col.
    customer_id_col: string
        the column in transactions DataFrame that denotes the customer_id
    datetime_col: string
        the column in transactions that denotes the datetime the purchase was made.
    monetary_value_col: string, optional
        the columns in the transactions that denotes the monetary value of the transaction.
        Optional, only needed for customer lifetime value estimation models.
    datetime_format: string, optional
        a string that represents the timestamp format. Useful if Pandas can't understand
        the provided format.
    observation_period_end: datetime, optional
        a string or datetime to denote the final date of the study.
        Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
    freq: string, optional
        Default 'D' for days, 'W' for weeks, 'M' for months... etc.
    freq_multiplier: int, optional
        Default 1, could be use to get exact recency and T, i.e. with freq='W'
        row for user id_sample=1 will be recency=30 and T=39 while data in
        CDNOW summary are different. Exact values could be obtained with
        freq='D' and freq_multiplier=7 which will lead to recency=30.43
        and T=38.86

    Returns
    -------
    :obj: DataFrame:
        customer_id, frequency, recency, T [, monetary_value]
    """
    # Snap the observation end (given, or the latest transaction) to the
    # start of its period.
    if observation_period_end is None:
        end_stamp = pd.to_datetime(transactions[datetime_col].max(), format=datetime_format)
    else:
        end_stamp = pd.to_datetime(observation_period_end, format=datetime_format)
    observation_period_end = end_stamp.to_period(freq).to_timestamp()

    # Label all of the repeated transactions.
    repeated_transactions = _find_first_transactions(
        transactions, customer_id_col, datetime_col, monetary_value_col, datetime_format, observation_period_end, freq
    )
    # Reset datetime_col to timestamp.
    repeated_transactions[datetime_col] = pd.Index(repeated_transactions[datetime_col]).to_timestamp()

    # Per-customer first/last purchase dates and total order count.
    customers = repeated_transactions.groupby(customer_id_col, sort=False)[datetime_col].agg(["min", "max", "count"])

    one_period = np.timedelta64(1, freq)
    customers["frequency"] = customers["count"] - 1  # the first order is ignored
    customers["T"] = (observation_period_end - customers["min"]) / one_period / freq_multiplier
    customers["recency"] = (customers["max"] - customers["min"]) / one_period / freq_multiplier

    summary_columns = ["frequency", "recency", "T"]
    if monetary_value_col:
        # Exclude each customer's first purchase from the mean by blanking it
        # to NaN before averaging.
        first_purchases = repeated_transactions[repeated_transactions["first"]].index
        repeated_transactions.loc[first_purchases, monetary_value_col] = np.nan
        customers["monetary_value"] = (
            repeated_transactions.groupby(customer_id_col)[monetary_value_col].mean().fillna(0)
        )
        summary_columns.append("monetary_value")

    return customers[summary_columns].astype(float)
This transforms a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
to a DataFrame of the form:
customer_id, frequency, recency, T [, monetary_value]
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the columns in the transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: datetime, optional
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
freq_multiplier: int, optional
Default 1, could be use to get exact recency and T, i.e. with freq='W'
row for user id_sample=1 will be recency=30 and T=39 while data in
CDNOW summary are different. Exact values could be obtained with
freq='D' and freq_multiplier=7 which will lead to recency=30.43
and T=38.86
Returns
-------
:obj: DataFrame:
customer_id, frequency, recency, T [, monetary_value] |
def get(self, sid):
    """
    Constructs a ConnectAppContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
    :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
    """
    account_sid = self._solution['account_sid']
    return ConnectAppContext(self._version, account_sid=account_sid, sid=sid)
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
:rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppContext |
def delete_keys(d: Dict[Any, Any],
                keys_to_delete: List[Any],
                keys_to_keep: List[Any]) -> None:
    """
    Removes selected keys from a dictionary, modifying it in place.

    Args:
        d:
            dictionary to modify
        keys_to_delete:
            keys removed from ``d`` when present...
        keys_to_keep:
            ... except for any key that also appears in this list.
    """
    doomed = [k for k in keys_to_delete if k in d and k not in keys_to_keep]
    for key in doomed:
        del d[key]
Args:
d:
dictonary to modify
keys_to_delete:
if any keys are present in this list, they are deleted...
keys_to_keep:
... unless they are present in this list. |
def assert_no_selector(self, *args, **kwargs):
    """
    Asserts that a given selector is not on the page or a descendant of the current node. Usage
    is identical to :meth:`assert_selector`.

    Query options such as ``count``, ``minimum``, and ``between`` are considered to be an
    integral part of the selector. This will return True, for example, if a page contains 4
    anchors but the query expects 5::

        page.assert_no_selector("a", minimum=1)  # Found, raises ExpectationNotMet
        page.assert_no_selector("a", count=4)    # Found, raises ExpectationNotMet
        page.assert_no_selector("a", count=5)    # Not Found, returns True

    Args:
        *args: Variable length argument list for :class:`SelectorQuery`.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.

    Returns:
        True

    Raises:
        ExpectationNotMet: The given selector matched.
    """
    query = SelectorQuery(*args, **kwargs)

    # NOTE(review): `synchronize` presumably retries the inner function until
    # the query's wait time elapses — confirm against its definition.
    @self.synchronize(wait=query.wait)
    def assert_no_selector():
        result = query.resolve_for(self)
        # Fail when the count options were satisfied AND something was found
        # (or the query explicitly expects zero matches).
        if result.matches_count and (
            len(result) > 0 or expects_none(query.options)):
            raise ExpectationNotMet(result.negative_failure_message)
        return True
    return assert_no_selector()
is identical to :meth:`assert_selector`.
Query options such as ``count``, ``minimum``, and ``between`` are considered to be an
integral part of the selector. This will return True, for example, if a page contains 4
anchors but the query expects 5::
page.assert_no_selector("a", minimum=1) # Found, raises ExpectationNotMet
page.assert_no_selector("a", count=4) # Found, raises ExpectationNotMet
page.assert_no_selector("a", count=5) # Not Found, returns True
Args:
*args: Variable length argument list for :class:`SelectorQuery`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
True
Raises:
ExpectationNotMet: The given selector matched. |
def data_filler_user_agent(self, number_of_rows, pipe):
    '''creates keys with user agent data
    '''
    try:
        for row in range(number_of_rows):
            record = {
                'id': rnd_id_generator(self),
                'ip': self.faker.ipv4(),
                'countrycode': self.faker.country_code(),
                'useragent': self.faker.user_agent()
            }
            pipe.hmset('user_agent:%s' % row, record)
        pipe.execute()
        logger.warning('user_agent Commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
def _force_disconnect_action(self, action):
"""Forcibly disconnect a device.
Args:
action (ConnectionAction): the action object describing what we are
forcibly disconnecting
"""
conn_key = action.data['id']
if self._get_connection_state(conn_key) == self.Disconnected:
return
data = self._get_connection(conn_key)
# If there are any operations in progress, cancel them cleanly
if data['state'] == self.Connecting:
callback = data['action'].data['callback']
callback(data['connection_id'], self.id, False, 'Unexpected disconnection')
elif data['state'] == self.Disconnecting:
callback = data['action'].data['callback']
callback(data['connection_id'], self.id, True, None)
elif data['state'] == self.InProgress:
callback = data['action'].data['callback']
if data['microstate'] == 'rpc':
callback(False, 'Unexpected disconnection', 0xFF, None)
elif data['microstate'] == 'open_interface':
callback(False, 'Unexpected disconnection')
elif data['microstate'] == 'close_interface':
callback(False, 'Unexpected disconnection')
connection_id = data['connection_id']
internal_id = data['internal_id']
del self._connections[connection_id]
del self._int_connections[internal_id] | Forcibly disconnect a device.
Args:
action (ConnectionAction): the action object describing what we are
forcibly disconnecting |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.