content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def dummy_annotation_txt_one_segment(tmpdir_factory):
    """Create a TXT annotation file containing a single segment.

    Writes an MNE-Annotations text file with one annotation
    (onset=3.14 s, duration=42 s, description="AA") into a temporary
    directory and returns its path.

    NOTE(review): the original docstring said "Create empty TXT
    annotations", but the file written below is not empty — it holds one
    annotation row.
    """
    content = ("# MNE-Annotations\n"
               "# onset, duration, description\n"
               "3.14, 42, AA")
    # tmpdir_factory is the pytest fixture; mktemp gives a fresh directory.
    fname = tmpdir_factory.mktemp('data').join('one-annotations.txt')
    fname.write(content)
    return fname
def article_title_meets_posting_requirements(website, article_title):
    """
    Validates that the article title meets all requirements to post the list to Reddit.

    The validations below check if:
    (1) The article title is in english (BuzzFeed only)
    (2) The article contains a number
    (3) The article title doesn't contain certain pre-defined keywords

    Returns True if all validations are met. Returns False otherwise.
    """
    # Language check applies only to BuzzFeed titles; a failed detection
    # counts as a failed validation.
    if website == ArticleType.BuzzFeed:
        try:
            if detect(article_title) != 'en':
                return False
        except lang_detect_exception.LangDetectException:
            return False
    # The title must contain a usable list count (a number).
    if get_article_list_count(article_title) == 0:
        return False
    # Reject titles containing any excluded keyword for this website.
    lowered_title = article_title.lower()
    for exclusion_word in get_title_exclusion_words(website):
        if exclusion_word in lowered_title:
            return False
    return True
def about():
    """Render the About page.

    Loads the OXA id counter (JSON) and the SVM training data (CSV),
    converts the training data to an HTML table, and renders the
    'About.html' template with both.

    Returns:
        The rendered Flask template response.
    """
    # BUG FIX: the original opened both files without ever closing them;
    # use context managers so the handles are released deterministically.
    with open(r'filter/OXAs_dict/counter.txt') as counter_file:
        counter = json.load(counter_file)
    ids = [*counter]  # the counter's keys are the OXA ids
    with open(r'Training_data/Training_data_IC.csv') as csv_file:
        rows = list(csv.reader(csv_file))
    df = pd.DataFrame(data=rows)
    svm_table = df.to_html(index=False, header=False)
    return render_template('About.html', svm_table=svm_table, oxa_ids=ids)
async def _snipe_deleted_message_command(ctx: SlashContext) -> None:
    """Snipe (re-post) the most recently deleted message in this channel.

    If a deleted message is cached for the channel, an embed with its
    content, author, timestamp and (optional) attachment is sent;
    otherwise an apology message is sent instead.
    """
    Logger.log(f"Snipe slash command called in channel ID {ctx.channel.id}")
    # There's a deleted message available
    if ctx.channel.id in MessageDatabases.deleted_messages:
        deleted_message: dict = MessageDatabases.deleted_messages[ctx.channel.id]
        sniped_embed: Embed = Embed(
            description=deleted_message["message"],
            timestamp=deleted_message["creation_timestamp"],
            color=CONFIG["embed_color"],
        )
        sniped_embed.set_author(name=deleted_message["author"])
        sniped_embed.set_footer(text=f"posted in #{ctx.channel.name}")
        if attachment_url := deleted_message["attachment"]:
            # Use markdown for a link if the attachment isn't an image
            # (non-image attachments aren't supported inside an embed).
            # BUG FIX: the original branches were inverted relative to this
            # comment — images were linked and non-images were passed to
            # set_image(), which only renders image URLs.
            if is_image(attachment_url):
                sniped_embed.set_image(url=attachment_url)
            else:
                sniped_embed.description += (
                    f"{NEW_LINE_CHAR}[Attachment]({attachment_url})"
                )
        await ctx.send(embed=sniped_embed)
        return
    # No deleted message is available
    await ctx.send(
        "I hate to say this but, sadly, there is no deleted "
        + "message to snipe from this channel. Sorry!"
    )
def phys_mem_regions_from_elf(elf: ElfFile, alignment: int) -> List[MemoryRegion]:
    """Determine the physical memory regions for an ELF file with a given
    alignment.

    Each returned region is widened (if necessary) so that its start and
    end are congruent with the specified alignment (usually a page size).
    """
    assert alignment > 0
    regions = []
    for segment in elf.segments:
        # Align the segment's physical span outward to the alignment grid.
        region_start = round_down(segment.phys_addr, alignment)
        region_end = round_up(segment.phys_addr + len(segment.data), alignment)
        regions.append(MemoryRegion(region_start, region_end))
    return regions
def force_float_to_int_in_any_way(x):
    """This force a float to be converted to an int.
    Any float is fine. The result is truncated.
    Like PHP, if the input float is greater than 2**63, then the result
    is 0, even if intmask(int(f)) would return some bits."""
    # magic values coming from pypy.rlib.rarithmetic.ovfcheck_float_to_int
    # on 64-bit.
    if isnan(x):
        # NaN maps to the most negative 64-bit integer (-2**63).
        return -maxint - 1
    # The bounds below are the nearest representable doubles bracketing the
    # signed 64-bit range, so the r_longlong conversion cannot overflow.
    if -9223372036854776832.0 <= x < 9223372036854775296.0:
        x = r_longlong(x)
        # intmask truncates to the host's native signed word.
        return intmask(x)
    # Out-of-range floats collapse to 0 (PHP-like behaviour, per docstring).
    return 0
def where(condition : pdarray, A : Union[Union[int,float], pdarray],
          B : Union[Union[int,float], pdarray]) -> pdarray:
    """
    Returns an array with elements chosen from A and B based upon a
    conditioning array. As is the case with numpy.where, the return array
    consists of values from the first array (A) where the conditioning array
    elements are True and from the second array (B) where the conditioning
    array elements are False.

    Parameters
    ----------
    condition : pdarray
        Used to choose values from A or B
    A : scalar or pdarray
        Value(s) used when condition is True
    B : scalar or pdarray
        Value(s) used when condition is False

    Returns
    -------
    pdarray
        Values chosen from A where the condition is True and B where
        the condition is False

    Raises
    ------
    TypeError
        Raised if the condition object is not a pdarray, if pdarray dtypes are
        not supported or do not match, or if multiple condition clauses (see
        Notes section) are applied
    ValueError
        Raised if the shapes of the condition, A, and B pdarrays are unequal

    Examples
    --------
    >>> a1 = ak.arange(1,10)
    >>> a2 = ak.ones(9, dtype=np.int64)
    >>> cond = a1 < 5
    >>> ak.where(cond,a1,a2)
    array([1, 2, 3, 4, 1, 1, 1, 1, 1])
    >>> a1 = ak.arange(1,10)
    >>> a2 = ak.ones(9, dtype=np.int64)
    >>> cond = a1 == 5
    >>> ak.where(cond,a1,a2)
    array([1, 1, 1, 1, 5, 1, 1, 1, 1])
    >>> a1 = ak.arange(1,10)
    >>> a2 = 10
    >>> cond = a1 < 5
    >>> ak.where(cond,a1,a2)
    array([1, 2, 3, 4, 10, 10, 10, 10, 10])

    Notes
    -----
    A and B must have the same dtype and only one conditional clause
    is supported e.g., n < 5, n > 1, which is supported in numpy
    is not currently supported in Arkouda
    """
    # The actual selection is done server-side; each branch below picks the
    # appropriate server command based on which of A/B are arrays vs scalars:
    # "efunc3vv" = vector/vector, "efunc3vs" = vector/scalar,
    # "efunc3sv" = scalar/vector, "efunc3ss" = scalar/scalar.
    if isinstance(A, pdarray) and isinstance(B, pdarray):
        repMsg = generic_msg("efunc3vv {} {} {} {}".\
                        format("where",
                               condition.name,
                               A.name,
                               B.name))
    # For scalars, try to convert it to the array's dtype
    elif isinstance(A, pdarray) and np.isscalar(B):
        repMsg = generic_msg("efunc3vs {} {} {} {} {}".\
                        format("where",
                               condition.name,
                               A.name,
                               A.dtype.name,
                               A.format_other(B)))
    elif isinstance(B, pdarray) and np.isscalar(A):
        repMsg = generic_msg("efunc3sv {} {} {} {} {}".\
                        format("where",
                               condition.name,
                               B.dtype.name,
                               B.format_other(A),
                               B.name))
    elif np.isscalar(A) and np.isscalar(B):
        # Scalars must share a common dtype (or be cast)
        dtA = resolve_scalar_dtype(A)
        dtB = resolve_scalar_dtype(B)
        # Make sure at least one of the dtypes is supported
        if not (dtA in DTypes or dtB in DTypes):
            raise TypeError(("Not implemented for scalar types {} " +
                            "and {}").format(dtA, dtB))
        # If the dtypes are the same, do not cast
        if dtA == dtB: # type: ignore
            dt = dtA
        # If the dtypes are different, try casting one direction then the other
        elif dtB in DTypes and np.can_cast(A, dtB):
            A = np.dtype(dtB).type(A)
            dt = dtB
        elif dtA in DTypes and np.can_cast(B, dtA):
            B = np.dtype(dtA).type(B)
            dt = dtA
        # Cannot safely cast
        else:
            raise TypeError(("Cannot cast between scalars {} and {} to " +
                            "supported dtype").format(A, B))
        repMsg = generic_msg("efunc3ss {} {} {} {} {} {}".\
                        format("where",
                               condition.name,
                               dt,
                               A,
                               dt,
                               B))
    # The server reply names the result array; wrap it in a client pdarray.
    return create_pdarray(type_cast(str,repMsg))
def pre_training_configs(m):
    """
    Before training the model, configure it.

    Builds a MomentumSGD trainer for ``m.nade`` driven by the options on
    ``m`` (learning rate, epochs, momentum, batch/epoch sizes), attaches
    training controllers (adaptive LR, max iterations, optional early stop,
    momentum schedule, NaN breaker) and instrumentation (training loss,
    optional validation loss, configuration, timestamps).

    Returns:
        The fully configured trainer object.
    """
    # BUG FIX: np.random.shuffle(range(...)) raises TypeError on Python 3
    # because range objects don't support item assignment; shuffle a list.
    # NOTE(review): `ordering` is never used below — kept (and shuffled) so
    # the global RNG state advances exactly as before; confirm whether a
    # caller relies on this side effect before removing.
    ordering = list(range(m.n_visible))
    np.random.shuffle(ordering)
    # getattr is the idiomatic spelling of __getattribute__ lookup by name.
    trainer = Optimization.MomentumSGD(m.nade, getattr(m.nade, m.loss_function))
    trainer.set_datasets([m.training_dataset, m.masks_dataset])
    trainer.set_learning_rate(m.options.lr)
    trainer.set_datapoints_as_columns(True)
    trainer.add_controller(TrainingController.AdaptiveLearningRate(
        m.options.lr, 0, epochs=m.options.epochs))
    trainer.add_controller(TrainingController.MaxIterations(m.options.epochs))
    if m.options.training_ll_stop < np.inf:
        # Assumes that we're doing minimization so negative ll
        trainer.add_controller(
            TrainingController.TrainingErrorStop(-m.options.training_ll_stop))
    trainer.add_controller(TrainingController.ConfigurationSchedule(
        "momentum", [(2, 0), (float('inf'), m.options.momentum)]))
    trainer.set_updates_per_epoch(m.options.epoch_size)
    trainer.set_minibatch_size(m.options.batch_size)
    # trainer.set_weight_decay_rate(options.wd)
    trainer.add_controller(TrainingController.NaNBreaker())
    # Instrument the training
    trainer.add_instrumentation(Instrumentation.Instrumentation(
        [m.console, m.textfile_log, m.hdf5_backend],
        Instrumentation.Function("training_loss",
                                 lambda ins: ins.get_training_loss())))
    if not m.options.no_validation:
        trainer.add_instrumentation(Instrumentation.Instrumentation(
            [m.console], m.validation_loss_measurement))
        trainer.add_instrumentation(Instrumentation.Instrumentation(
            [m.hdf5_backend],
            m.validation_loss_measurement,
            at_lowest=[Instrumentation.Parameters()]))
    trainer.add_instrumentation(Instrumentation.Instrumentation(
        [m.console, m.textfile_log, m.hdf5_backend],
        Instrumentation.Configuration()))
    # trainer.add_instrumentation(Instrumentation.Instrumentation([hdf5_backend], Instrumentation.Parameters(), every = 10))
    trainer.add_instrumentation(Instrumentation.Instrumentation(
        [m.console, m.textfile_log, m.hdf5_backend],
        Instrumentation.Timestamp()))
    return trainer
def si_pbesol_nomeshsym(request):
    """Return Phono3py instance of Si 2x2x2.

    * without mesh-symmetry
    * no fc
    """
    # Load the reference YAML and FORCES_FC3 fixtures shipped next to the
    # test module.
    yaml_filename = os.path.join(current_dir, "phono3py_si_pbesol.yaml")
    forces_fc3_filename = os.path.join(current_dir, "FORCES_FC3_si_pbesol")
    # NOTE(review): the variable is named `enable_v2` but it reads the
    # "--v1" pytest option — possibly a missing `not`, or a renamed flag;
    # confirm against the project's conftest before changing.
    enable_v2 = request.config.getoption("--v1")
    return phono3py.load(
        yaml_filename,
        forces_fc3_filename=forces_fc3_filename,
        is_mesh_symmetry=False,   # disable mesh symmetry per fixture purpose
        produce_fc=False,         # "no fc": skip force-constant production
        store_dense_gp_map=enable_v2,
        store_dense_svecs=enable_v2,
        log_level=1,
    )
def describe_workspace_snapshots(WorkspaceId=None):
    """
    Describes the snapshots for the specified WorkSpace.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.describe_workspace_snapshots(
        WorkspaceId='string'
    )

    :type WorkspaceId: string
    :param WorkspaceId: [REQUIRED]\nThe identifier of the WorkSpace.\n

    :rtype: dict

    ReturnsResponse Syntax{
        'RebuildSnapshots': [
            {
                'SnapshotTime': datetime(2015, 1, 1)
            },
        ],
        'RestoreSnapshots': [
            {
                'SnapshotTime': datetime(2015, 1, 1)
            },
        ]
    }

    Response Structure
    (dict) --
    RebuildSnapshots (list) --Information about the snapshots that can be used to rebuild a WorkSpace. These snapshots include the user volume.
    (dict) --Describes a snapshot.
    SnapshotTime (datetime) --The time when the snapshot was created.
    RestoreSnapshots (list) --Information about the snapshots that can be used to restore a WorkSpace. These snapshots include both the root volume and the user volume.
    (dict) --Describes a snapshot.
    SnapshotTime (datetime) --The time when the snapshot was created.

    Exceptions
    WorkSpaces.Client.exceptions.InvalidParameterValuesException
    WorkSpaces.Client.exceptions.ResourceNotFoundException
    WorkSpaces.Client.exceptions.AccessDeniedException

    :return: {
        'RebuildSnapshots': [
            {
                'SnapshotTime': datetime(2015, 1, 1)
            },
        ],
        'RestoreSnapshots': [
            {
                'SnapshotTime': datetime(2015, 1, 1)
            },
        ]
    }
    """
    # Documentation-only stub: the body is intentionally empty (presumably
    # auto-generated from the botocore service model — confirm). Callers
    # should use the real WorkSpaces client method, not this placeholder.
    pass
def loadClusterModule(pathCheckpoint):
    """
    Load CPC Clustering Module from Clustering checkpoint file.
    """
    # Always materialise the checkpoint on CPU so loading works without a GPU.
    checkpoint = torch.load(pathCheckpoint, map_location=torch.device('cpu'))
    # Build a module with placeholder centroids of the recorded shape,
    # then overwrite them with the saved weights.
    placeholder_centroids = torch.zeros(
        1, checkpoint["n_clusters"], checkpoint["dim"])
    clusterModule = kMeanCluster(placeholder_centroids)
    clusterModule.load_state_dict(checkpoint["state_dict"])
    return clusterModule
def bash_inline_create_file(name, contents):
    """
    Turns a file into bash command.

    Parameters
    ----------
    name : str
        File name.
    contents : bytes or str
        File contents. A str is encoded as UTF-8 (generalization; bytes
        input behaves exactly as before).

    Returns
    -------
    result : str
        The resulting command that creates this file.
    """
    # Generalization: accept text contents too, encoding them as UTF-8.
    if isinstance(contents, str):
        contents = contents.encode("utf-8")
    # base64 keeps arbitrary bytes shell-safe; quote() protects the
    # payload and the target file name from shell interpretation.
    return f"echo {quote(base64.b64encode(contents).decode())} | base64 -d > {quote(name)}"
def rtf_encode(unicode_string):
    """
    Converts HTML encoding and Unicode encoding to RTF.

    Be sure that autoescaping is off in the template. Autoescaping converts <, >, ", ', &
    The unescape function used here is helpful for catching additional escape sequences used for special
    characters, greek letters, symbols, and accents.

    :param unicode_string: the HTML/unicode text to convert; may be None/empty.
    :return: the RTF-encoded string, or None if the input was falsy.
    """
    result = None
    if unicode_string:
        # BUG FIX: removed the unused `html.parser.HTMLParser()` instance the
        # original created — html.unescape() is a module-level function and
        # needs no parser object.
        unicode_string = html.unescape(unicode_string)  # e.g. &#233; -> \xe9
        # 'rtfunicode' is a codec registered by the rtfunicode package,
        # e.g. \xe9 -> \u233?
        rtf_bytestring = unicode_string.encode('rtfunicode')
        rtf_string = rtf_bytestring.decode('utf-8')
        rtf_string = replace_tags(rtf_string)  # replaces common tags with rtf encodings
        result = rtf_string
    return result
def test_should_get_default_conf_file_path():
    """
    Test get default configuration file path
    """
    # The default path must be the default filename inside the work dir.
    expected_path = join('test', DEFAULT_CONF_FILENAME)
    assert get_conf_file_default_path(work_dir='test') == expected_path
def get_P(A):
    """
    Markov (transition) matrix.

    P = D^{-1} * A, where D = get_D(A) (presumably the degree matrix of
    A — confirm against get_D).
    """
    D = get_D(A)
    # Solve D @ P = A rather than forming the explicit inverse: this is
    # mathematically equivalent to inv(D).dot(A) but cheaper and more
    # numerically stable.
    return np.linalg.solve(D, A)
def check_file(option, opt_str, value, _parser):
    """See if a file exists on the file system, raises an OptionValueError"""
    # optparse callback: store the value only when it names a real file.
    if os.path.isfile(value):
        setattr(_parser.values, option.dest, value)
    else:
        raise OptionValueError(
            "Cannot open %s as a file. Please check if it exists." % value)
def get_movie_and_zmw_from_name(name):
    """Given a string of pacbio zmw name or read name, return movie and zmw.

    A PacBio name looks like ``movie/zmw[/...]`` (anything after the first
    space is ignored). Returns a ``(movie, zmw_int)`` tuple.

    Raises:
        ValueError: if the name has no ``movie/zmw`` structure or the zmw
            field is not an integer.
    """
    try:
        fs = name.strip().split(' ')[0].split('/')
        movie, zmw = fs[0], fs[1]
        return movie, int(zmw)
    # BUG FIX: a name without any '/' made fs[1] raise an uncaught
    # IndexError; fold it into the same ValueError as a bad zmw field.
    except (IndexError, ValueError):
        raise ValueError("Read %r is not a PacBio read." % name)
def create_sp_history_copy(sp):
    """
    Create a history copy of SP, with end_at value and new pk

    return: created service provider object
    """
    # Snapshot all many-to-many relations BEFORE re-saving: clearing pk and
    # saving creates a new row, and M2M sets must be re-attached to it.
    admins = sp.admins.all()
    admin_groups = sp.admin_groups.all()
    nameidformat = sp.nameidformat.all()
    grant_types = sp.grant_types.all()
    response_types = sp.response_types.all()
    oidc_scopes = sp.oidc_scopes.all()
    # Point the copy back at the original record and mark it ended now.
    sp.history = sp.pk
    sp.pk = None          # pk=None makes save() INSERT a new row (Django)
    sp.end_at = timezone.now()
    sp.save()
    # Re-attach the captured relations to the newly created history row.
    sp.admins.set(admins)
    sp.admin_groups.set(admin_groups)
    sp.nameidformat.set(nameidformat)
    sp.grant_types.set(grant_types)
    sp.response_types.set(response_types)
    sp.oidc_scopes.set(oidc_scopes)
    return sp
def test_database_error():
    """
    DatabaseError: defaults come from the class; keyword arguments override.
    """
    default_err = errors.DatabaseError()
    assert default_err.status == errors.DatabaseError.status
    assert default_err.title == errors.DatabaseError.title
    custom_err = errors.DatabaseError(title='Stuff Happens', status=1990)
    assert custom_err.status == 1990
    assert custom_err.title == 'Stuff Happens'
def onehot(arr, num_classes=None, safe=True):
    """
    Function to take in a 1D label array and returns the one hot encoded
    transformation.

    Assumes labels are integers in ``[0, num_classes)`` — values are used
    directly as column indices (TODO confirm against callers).

    Parameters
    ----------
    arr : array-like
        1-D label array (validated/coerced by ``exactly_1d``).
    num_classes : int, optional
        Number of classes; inferred from the unique labels when omitted.
    safe : bool
        When True, verify ``num_classes`` matches the number of unique labels.

    Raises
    ------
    ValueError
        If ``safe`` and ``num_classes`` disagrees with the unique label count.
    """
    arr = exactly_1d(arr)
    if num_classes is None:
        num_classes = np.unique(arr).shape[0]
    # BUG FIX: raise the specific ValueError instead of a bare Exception
    # (backward compatible — ValueError is still an Exception).
    if safe and num_classes != np.unique(arr).shape[0]:
        raise ValueError('Number of unique values does not match num_classes argument.')
    # Row i of eye() is the one-hot vector for label i.
    return np.squeeze(np.eye(num_classes)[arr.reshape(-1)])
def scale_relative_sea_level_rise_rate(mmyr: float, If: float = 1) -> float:
    """Scale a relative sea level rise rate to model time.

    This function scales any relative sea level rise rate (RSLR) (e.g., sea
    level rise, subsidence) to a rate appropriate for the model time. This is
    helpful, because most discussion of RSLR uses units of mm/yr, but the
    model (and model configuration) require units of m/s. Additionally, the
    model framework needs to assume an "intermittency factor" to convert from
    real-world time to model time.

    Relative sea level rise (subsidence and/or sea level rise) are scaled from
    real world dimensions of mm/yr to model input as:

    .. math::

        \\widehat{RSLR} = (RSLR / 1000) \\cdot \\dfrac{1}{I_f \\cdot 365.25 \\cdot 86400}

    This conversion makes it such that when one real-world year has elapsed
    (:math:`I_f \\cdot 365.25 \\cdot 86400` seconds in model time), the relative
    sea level has changed by the number of millimeters specified in the input
    :obj:`mmyr`.

    .. note::

        Users should use this function to determine the value to specify in
        an input YAML configuration file; no scaling is performed
        internally.

    Parameters
    ----------
    mmyr : :obj:`float`
        Millimeters per year, relative sea level rise rate.

    If : :obj:`float`, optional
        Intermittency factor, fraction of time represented by morphodynamic
        activity. Should be in interval (0, 1). Defaults to 1 if not provided,
        i.e., no scaling is performed.

    Returns
    -------
    scaled : :obj:`float`
        Scaled relative sea level rise rate, in meters per second.
    """
    # mm/yr -> m/yr, then divide by the (intermittency-scaled) seconds per
    # year; same operation order as the documented formula.
    meters_per_year = mmyr / 1000
    seconds_per_year = shared_tools._scale_factor(If, units='years')
    return meters_per_year * (1 / seconds_per_year)
def get_covalent1_radius(Z):
    """
    Converts an array of nuclear charges to the corresponding covalent radii.

    NOTE(review): the original docstring described "valence", but the
    function name, the /100 pm->angstrom conversion, and column 6 of the
    element table all indicate covalent radius — confirm against
    _load_elements().

    Args:
        Z (numpy ndarray): array with nuclear charges

    Returns:
        numpy ndarray: array of the same size as Z with the covalent radius
        (in angstrom) of the corresponding atom
    """
    global _elements
    # Lazily load and cache the element table on first use.
    if _elements is None:
        _elements=_load_elements()
    # Prepend a 0 so the table can be indexed directly by Z (Z=0 unused).
    V=np.array([0]+[line[6] for line in _elements])
    ret=V[Z]/100. #angstrom conversion
    # assert (ret>0).all()
    return ret
def get_bank_sizes(num_constraints: int,
                   beam_size: int,
                   candidate_counts: List[int]) -> List[int]:
    """
    Evenly distributes the beam across the banks, where each bank is a portion of the beam devoted
    to hypotheses having met the same number of constraints, 0..num_constraints.
    After the assignment, banks with more slots than candidates are adjusted.

    :param num_constraints: The number of constraints.
    :param beam_size: The beam size.
    :param candidate_counts: The empirical counts of number of candidates in each bank.
    :return: A distribution over banks.
    """
    num_banks = num_constraints + 1
    # Even split; any remainder goes to the highest bank.
    base_size, remainder = divmod(beam_size, num_banks)
    assigned = [base_size] * num_banks
    assigned[-1] += remainder
    # Now, moving right to left, push extra allocation to earlier buckets.
    # This encodes a bias for higher buckets, but if no candidates are found,
    # space will be made in lower buckets. This may not be the best strategy,
    # but it is important that you start pushing from the bucket that is
    # assigned the remainder, for cases where num_constraints >= beam_size.
    for bank in reversed(range(num_banks)):
        surplus = assigned[bank] - candidate_counts[bank]
        if surplus > 0:
            assigned[bank] -= surplus
            # Wraps from bank 0 back to the last bank, as in the original.
            assigned[(bank - 1) % num_banks] += surplus
    return assigned
def test_files(host, f):
    """Test that the expected files were installed with root:root 0600."""
    # Resolve the file object once instead of querying the host five times.
    installed = host.file(f)
    assert installed.exists
    assert installed.is_file
    assert installed.user == "root"
    assert installed.group == "root"
    assert installed.mode == 0o600
def ssh_connect(openstack_properties):
    """Create a connection to a server via SSH.

    Args:
        openstack_properties (dict): OpenStack facts and variables from Ansible
            which can be used to manipulate OpenStack objects.

    Yields:
        def: A factory function object. (Fixture: teardown closes all
        connections created through the factory.)
    """
    connections = []  # Track inventory of SSH connections for teardown.

    def _factory(hostname,
                 username,
                 retries=10,
                 key_filename=None,
                 auth_timeout=180):
        """Connect to a server via SSH.

        Note: this function sleeps ``attempt`` seconds after each failed
        attempt, so the delay grows linearly and the total wait time is the
        triangular number of ``retries``.

        Args:
            hostname (str): The server to connect to.
            username (str): The username to authenticate as.
                (defaults to the current local username)
            retries (int): The maximum number of validation retry attempts.
            key_filename (str): The filename, or list of filenames, of optional
                private key(s) and/or certs to try for authentication. (Default
                is to use the 'rpc_support' key.)
            auth_timeout (float): An optional timeout (in seconds) to wait for
                an authentication response.

        Returns:
            paramiko.client.SSHClient: A client already connected to the target
                server.

        Raises:
            paramiko.BadHostKeyException: If the server's host key could not be
                verified.
            paramiko.AuthenticationException: If authentication failed.
            paramiko.SSHException: If there was any other error connecting or
                establishing an SSH session.
            paramiko.ssh_exception.NoValidConnectionsError: Connection refused
                by host. (SSH service is probably not running or host is not
                fully booted)
            socket.error: If a socket error occurred while connecting.
        """
        temp_connection = SSHClient()
        temp_connection.set_missing_host_key_policy(AutoAddPolicy())
        for attempt in range(1, retries + 1):
            try:
                temp_connection.connect(
                    hostname=hostname,
                    username=username,
                    key_filename=(
                        key_filename or openstack_properties['private_key_path']
                    ),
                    auth_timeout=auth_timeout
                )
            except NoValidConnectionsError:
                # BUG FIX: the original tested `attempt != retries + 1`,
                # which is always true inside this loop, so the final
                # failure was never re-raised and an unconnected client
                # was returned.
                if attempt == retries:
                    raise  # Out of retries: propagate the failure.
                sleep(attempt)
            else:
                # BUG FIX: break on success — the original fell through
                # and reconnected on every remaining iteration.
                break
        connections.append(temp_connection)
        return temp_connection

    yield _factory
    # Teardown
    for connection in connections:
        connection.close()
    HostKeys().clear()
def test_isup_command_http_error(irc, bot, user, requests_mock):
    """Test URL that returns an HTTP error code."""
    # Stub the HEAD request so .isup sees a 503 without touching the network.
    requests_mock.head(
        'http://example.com',
        status_code=503,
        reason='Service Unavailable',
    )
    # Trigger the command via a private message from the test user.
    irc.pm(user, '.isup example.com')
    assert len(bot.backend.message_sent) == 1, (
        '.isup command should output exactly one line')
    # The bot must report the site as down with the HTTP status and reason.
    assert bot.backend.message_sent == rawlist(
        'PRIVMSG User :[isup] http://example.com looks down to me (HTTP 503 "Service Unavailable").'
    )
def _season_store_metadata(
db: sql.Connection,
classification: str | int,
start_date: date | None,
stop_date: date | None,
playlist_id: str,
):
"""Saves metadata for a single season in the database"""
# TODO Duplicate Check
min_year = start_date.year if start_date else None
if stop_date is None:
max_year = None
elif stop_date.month == 1 and stop_date.day == 1:
max_year = stop_date.year - 1
else:
max_year = stop_date.year
db.execute(
"INSERT INTO season VALUES (?, ?, ?, ?, ?, ?)",
(
min_year,
max_year,
classification,
start_date,
stop_date,
playlist_id,
),
) | 30,626 |
def prepare(args: dict, overwriting: bool):
    """Load config and key file, create output directories and setup log files.

    Args:
        args (dict): argparser dictionary
        overwriting (bool): whether existing output directories may be
            overwritten (forwarded to make_dir)

    Returns:
        tuple: ``(output_dir, mapping_table_dir)`` — the results directory
        and the mapping-table directory. (The original docstring claimed a
        single Path was returned; two are.)
    """
    output_dir = make_dir(args, "results_tmp", "activity_formatting", overwriting)
    mapping_table_dir = make_dir(args, "mapping_table", None, overwriting)
    create_log_files(output_dir)
    return output_dir, mapping_table_dir
def parse_data(data):
    """Takes a string from a repr(WSGIRequest) and transliterates it.

    This is incredibly gross "parsing" code that takes the WSGIRequest
    string from an error email and turns it into something that
    vaguely resembles the original WSGIRequest so that we can send
    it through the system again.
    """
    BEGIN = '<WSGIRequest'
    # Trim everything up to and including the opening marker, and the
    # trailing '>' if present.
    data = data.strip()
    data = data[data.find(BEGIN) + len(BEGIN):]
    if data.endswith('>'):
        data = data[:-1]

    # Pass 1: split "key: value" lines into a dict of raw strings.
    container = {}
    key = ''
    for raw_line in data.splitlines():
        # Lines that start with 'wsgi.' have values which are
        # objects. E.g. a logger. This won't fly with ast.literal_eval
        # so we just ignore all the wsgi. meta stuff.
        if not raw_line or raw_line.startswith(' \'wsgi.'):
            continue
        if raw_line.startswith(' '):
            # Indented lines continue the value of the current key.
            container[key] += raw_line
        else:
            key, raw_value = raw_line.split(':', 1)
            container[key.strip()] = raw_value.strip()

    # Pass 2: normalize each raw value into literal_eval-able syntax.
    QUERYDICT = '<QueryDict: '
    for field, raw in container.items():
        raw = raw.strip(',')
        if raw.startswith(QUERYDICT):
            # GET and POST are both QueryDicts, so we nix the
            # QueryDict part and pretend they're regular dicts.
            #
            # <QueryDict: {...}> -> {...}
            raw = raw[len(QUERYDICT):-1]
        elif raw.startswith('{'):
            # Regular dict that might be missing a } because we
            # dropped it when we were weeding out wsgi. lines.
            #
            # {... -> {...}
            raw = raw.strip()
            if not raw.endswith('}'):
                raw = raw + '}'
        else:
            # This needs to have the string ornamentation added so it
            # literal_evals into a string.
            raw = 'u"' + raw + '"'
        # Note: We use ast.literal_eval here so that we're guaranteed
        # only to be getting out strings, lists, tuples, dicts,
        # booleans or None and not executing arbitrary Python code.
        container[field] = ast.literal_eval(raw)
    return container
def create_slack_context_block(elements: List[SlackBlock]) -> dict:
    """
    Creates a "context block" as described in the slack documentation here:
    https://api.slack.com/reference/messaging/blocks#context
    """
    formatted_elements = [element.get_formatted_block() for element in elements]
    return {
        'type': 'context',
        'elements': formatted_elements,
    }
def script_synthesize_filters(config):
    """ The scripting version of `synthesize_masks`. This function
    applies the filter to the entire directory (or single file). It
    combines the filter files from the data directory.

    For every data FITS file found, all filter FITS files whose names
    contain the data file's base name are OR-combined and the result is
    written out as a ``.filter.fits`` file (into a filter sub-directory
    when `subfolder` is set).

    Parameters
    ----------
    config : ConfigObj
        The configuration object that is to be used for this
        function.

    Returns
    -------
    None
    """
    # Extract the global configuration parameters, including
    # the directory.
    data_directory = core.config.extract_configuration(
        config_object=config, keys=['data_directory'])
    subfolder = core.config.extract_configuration(
        config_object=config, keys=['subfolder'])
    filter_tag_name = core.config.extract_configuration(
        config_object=config, keys=['filter_tag_name'])
    # Get all of the data fits file in the directory to work on.
    data_fits = core.io.get_fits_filenames(data_directory=data_directory)
    # Get all of the filter files within this directory too.
    # sub-folder forces the filters into subdirectories which are
    # custom made, so the recursive is forced True.
    if (subfolder):
        core.error.ifas_info("As masks exist in the sub-folders as "
                             "indicated by the `subfolder` parameter in the "
                             "configuration file, obtaining filters will be "
                             "recursive with respect to the data directory.")
    # NOTE(review): defacto_recursive is only assigned inside the
    # `if (subfolder)` branch here; when subfolder is falsy the name is
    # undefined at the get_filter_fits_filenames call below — confirm
    # whether an `else: defacto_recursive = False` was lost.
    defacto_recursive = True
    # Getting the filter files.
    filter_files = mask.base.get_filter_fits_filenames(
        data_directory=data_directory, recursive=defacto_recursive)
    # Compile and combine all of the filter files that used a given
    # data file as its base. We assume that it can be determined
    # from just name matching.
    for datafiledex in data_fits:
        # The real data array.
        __, hdu_header, hdu_data = core.io.read_fits_file(
            file_name=datafiledex, extension=0, silent=True)
        # The data filter, assume by default all pixels are good.
        hdu_filter = np.full_like(hdu_data, False)
        # Search through all filter files.
        for filterfiledex in filter_files:
            # Extract the base file names of these files to allow
            # for comparison to ensure that the files use the same
            # source data file.
            __, data_filename, __ = core.strformat.split_pathname(
                pathname=datafiledex)
            __, filter_filename, __ = core.strformat.split_pathname(
                pathname=filterfiledex)
            if (data_filename in filter_filename):
                # They have a large common section in their name;
                # they likely share the same data file. Read the
                # fits in.
                __, filter_header, filter_data = core.io.read_fits_file(
                    file_name=filterfiledex, extension=0, silent=True)
                # Combine this filter with the other ones extracted.
                # Ignore the mask part, it works perfectly well with
                # filters.
                hdu_filter = mask.base.synthesize_masks(hdu_filter,
                                                        filter_data)
                # Move on to the next filter.
                continue
            else:
                # These files don't likely share the same data file.
                continue
        # Deriving the name of the filter file to be written. It
        # changes depending on user's data directory and the
        # sub-folder status.
        # NOTE(review): `dir` and `file` shadow builtins here; kept as-is.
        dir, file, ext = core.strformat.split_pathname(pathname=datafiledex)
        filter_dir_name = core.runtime.extract_runtime_configuration(
            config_key='FILTERING_SUBDIR')
        # The filter tag name; if it is not a valid input, then
        # use a default.
        if ((isinstance(filter_tag_name, str)) and
            (len(filter_tag_name) > 0) and
            (filter_tag_name is not None)):
            # Apply the tag that the user provided.
            filter_tag = filter_tag_name
            filter_dir_tag = ''.join(['FILTER', '_', filter_tag_name])
        else:
            # The defaults.
            filter_tag = 'SYNTHESIZED'
            filter_dir_tag = 'FILTER_SYNTHESIZE'
        # Compile the file name for this filter.
        synth_filter_filename = core.strformat.combine_pathname(
            directory=([dir, filter_dir_name, filter_dir_tag]
                       if subfolder else [dir]),
            file_name=[file, '__', filter_tag], extension=['.filter', '.fits'])
        # Also check if the folder exists, if not, then make it.
        if (not os.path.isdir(core.strformat.combine_pathname(
                directory=([dir, filter_dir_name, filter_dir_tag]
                           if subfolder else [dir])))):
            # Creating the directory.
            os.makedirs(core.strformat.combine_pathname(
                directory=([dir, filter_dir_name, filter_dir_tag]
                           if subfolder else [dir])))
        # All of the filters have been added to the sum total. Save
        # the sum total.
        # NOTE(review): overwrite=False means a re-run against existing
        # output files will fail/skip depending on write_fits_file —
        # confirm that is intended.
        core.io.write_fits_file(file_name=synth_filter_filename,
                                hdu_header=None, hdu_data=hdu_filter,
                                save_file=True, overwrite=False, silent=True)
    # All done.
    return None
def sample_bar_report(request):
    """
    Demonstrates a basic horizontal bar chart report.

    Renders one bar per Group showing its count of non-historical servers.
    Only super admins may view it.
    """
    profile = request.get_user_profile()
    if not profile.super_admin:
        raise PermissionDenied('Only super admins can view this report.')
    # Run your custom report logic to build the following lists:
    # categories = ['Tullys', 'Tyrell', 'Lannister', 'Stark', 'Baratheon']
    # values = [85, 100, 250, 75, 42]
    categories = []
    values = []
    for group in Group.objects.all():
        # "Active" = any status except HISTORICAL; groups with no active
        # servers are omitted from the chart entirely.
        active_servers = group.server_set.exclude(status='HISTORICAL')
        if active_servers:
            categories.append(group.name)
            values.append(active_servers.count())
    # This sample extension renders a generic template for bar charts,
    # which requires this view to return just a few context variables.
    #
    # You could also define your own template that extends one of the following
    # and adds customizations:
    #   'reports/bar.html' if you want a more customized pie chart
    #   'reports/simple_base.html' for more advanced customization, e.g. more
    #       than one chart or table.
    #   'base.html' to start from scratch from the basic CloudBolt template
    return render(request, 'reports/bar.html', dict(
        pagetitle='Server Counts by Group (Bar)',
        subtitle='Excludes historical servers',
        report_slug='Server Counts by Group',
        intro="""
            Sample report extension draws a bar chart.
            """,
        # Chart data
        categories=categories,
        values=values,
        series_name='Servers',
        # Optionally support exporting as CSV by including this dict
        export=dict(
            csv_headings=['Group', 'Active Servers']
        )
    ))
def _is_ref_path(path_elements):
"""
Determine whether the given object path, expressed as an element list
(see _element_list_to_object_path()), ends with a reference and is
therefore eligible for continuation through the reference. The given
object path is assumed to be "completed" down to a single STIX property
value. This means that a *_ref property will be the last component, and
*_refs will be second-to-last, because it requires a subsequent index step.
:param path_elements: An object path, as a list
:return: True if a continuable reference path; False if not
"""
result = False
if path_elements:
last_elt = path_elements[-1]
if isinstance(last_elt, str) and last_elt.endswith("_ref"):
result = True
elif len(path_elements) > 1:
# for _refs properties, the ref property itself must be
# second-to-last, and the last path element must be an index step,
# either "*" or an int. Maybe not necessary to check the index
# step; all we need is to check the second-to-last property.
second_last_elt = path_elements[-2]
if isinstance(second_last_elt, str) \
and second_last_elt.endswith("_refs"):
result = True
return result | 30,632 |
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """Apply the harmonic analysis to 1-D slices along the given axis.

    Dask-based analogue of numpy.apply_along_axis: `func1d` is mapped over
    every 1-D slice of `arr` taken along `axis`.
    """
    arr = dask.array.core.asarray(arr)
    # Validate and normalize axis.
    arr.shape[axis]  # raises IndexError for an out-of-range axis
    axis = len(arr.shape[:axis])  # converts a negative axis to positive
    # Rechunk so that analyze is applied over the full axis.
    arr = arr.rechunk(arr.chunks[:axis] + (arr.shape[axis:axis + 1], ) +
                      arr.chunks[axis + 1:])
    # Test out some data with the function.
    # NOTE(review): the probe length comes from args[0].shape[1], i.e. the
    # first positional extra argument is assumed to be a 2-D array whose
    # second dimension matches the slice length — confirm with callers.
    test_data = numpy.ones(args[0].shape[1], dtype=arr.dtype)
    test_result = numpy.array(func1d(test_data, *args, **kwargs))
    # Map analyze over the data to get the result
    # Adds other axes as needed: the mapped axis is dropped and replaced by
    # the axes of func1d's (possibly multi-dimensional) result.
    result = arr.map_blocks(
        _apply_along_axis,
        name=dask.utils.funcname(func1d) + '-along-axis',
        dtype=test_result.dtype,
        chunks=(arr.chunks[:axis] + test_result.shape + arr.chunks[axis + 1:]),
        drop_axis=axis,
        new_axis=list(range(axis, axis + test_result.ndim, 1)),
        func1d=func1d,
        func1d_axis=axis,
        func1d_args=args,
        func1d_kwargs=kwargs,
    )
    return result
def current_time_utc(conf):
    """Return the current timezone-aware datetime in UTC.

    Note: ``conf`` is accepted for interface compatibility but is not used.
    """
    return datetime.now(pytz.utc)
def CORe50(root=expanduser("~") + "/.avalanche/data/core50/",
           scenario="nicv2_391",
           run=0,
           train_transform=None,
           eval_transform=None):
    """
    Creates a CL scenario for CORe50.
    If the dataset is not present in the computer, this method will
    automatically download and store it.
    This generator can be used to obtain the NI, NC, NIC and NICv2-* scenarios.
    The scenario instance returned by this method will have two fields,
    `train_stream` and `test_stream`, which can be iterated to obtain
    training and test :class:`Experience`. Each Experience contains the
    `dataset` and the associated task label.
    The task label "0" will be assigned to each experience.
    The scenario API is quite simple and is uniform across all scenario
    generators. It is recommended to check the tutorial of the "benchmark" API,
    which contains usage examples ranging from "basic" to "advanced".
    :param root: Path indicating where to store the dataset and related
        metadata. By default they will be stored in
        "~/.avalanche/datasets/core50/data/".
    :param scenario: CORe50 main scenario. It can be chosen between 'ni', 'nc',
        'nic', 'nicv2_79', 'nicv2_196' or 'nicv2_391.'
    :param run: number of run for the scenario. Each run defines a different
        ordering. Must be a number between 0 and 9.
    :param train_transform: The transformation to apply to the training data,
        e.g. a random crop, a normalization or a concatenation of different
        transformations (see torchvision.transform documentation for a
        comprehensive list of possible transformations). Defaults to None.
    :param eval_transform: The transformation to apply to the test data,
        e.g. a random crop, a normalization or a concatenation of different
        transformations (see torchvision.transform documentation for a
        comprehensive list of possible transformations). Defaults to None.
    :returns: a properly initialized :class:`GenericCLScenario` instance.
    """
    assert (0 <= run <= 9), "Pre-defined run of CORe50 are only 10. Indicate " \
                            "a number between 0 and 9."
    # Fixed typo: the message previously read "is note recognized".
    assert (scenario in nbatch.keys()), "The selected scenario is not " \
                                        "recognized: it should be 'ni', 'nc'," \
                                        "'nic', 'nicv2_79', 'nicv2_196' or " \
                                        "'nicv2_391'."
    if root is None:
        core_data = CORE50_DATA()
    else:
        core_data = CORE50_DATA(root)
    root = core_data.data_folder
    root_img = root + "core50_128x128/"
    filelists_bp = scen2dirs[scenario] + "run" + str(run) + "/"
    # One training filelist per incremental batch of the chosen scenario.
    train_filelists_paths = []
    for i in range(nbatch[scenario]):
        train_filelists_paths.append(
            root + filelists_bp + "train_batch_" +
            str(i).zfill(2) + "_filelist.txt")
    scenario_obj = create_generic_benchmark_from_filelists(
        root_img, train_filelists_paths,
        [root + filelists_bp + "test_filelist.txt"],
        task_labels=[0 for _ in range(nbatch[scenario])],
        complete_test_set_only=True,
        train_transform=train_transform,
        eval_transform=eval_transform)
    return scenario_obj
def create_results_dir():
    """Create the results directory (and any missing parents).

    Uses ``exist_ok=True`` so the previous check-then-create pattern's race
    condition (another process creating the directory between the
    ``os.path.exists`` test and ``os.makedirs``) cannot raise
    ``FileExistsError``.
    """
    os.makedirs(get_results_dir(), exist_ok=True)
def merge(line):
    """
    Merge a single row or column of 2048 toward the front of ``line``.

    Non-zero tiles slide to the start, adjacent equal tiles combine once
    (the left tile of a pair doubles), and the result is padded with
    zeros to the original length.  This replaces the previous version,
    which reassigned ``res`` while iterating over it and relied on an
    external ``shift`` helper (assumed to move one zero to the end —
    behaviour preserved under that assumption).

    :param line: list of tile values, 0 meaning an empty cell
    :return: a new merged list of the same length
    """
    size = len(line)
    # Slide all non-zero tiles to the front.
    tiles = [value for value in line if value != 0]
    tiles += [0] * (size - len(tiles))
    # Combine adjacent equal pairs once, scanning left to right.
    for index in range(size - 1):
        if tiles[index] != 0 and tiles[index] == tiles[index + 1]:
            tiles[index] *= 2
            tiles[index + 1] = 0
    # Compact again so the gaps left by merging close up.
    merged = [value for value in tiles if value != 0]
    merged += [0] * (size - len(merged))
    return merged
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, preferring its native format.

    Falls back to PNG compression when the image's own format is not in
    the list of supported compression formats.
    """
    if image.format in list_image_compression_formats():
        chosen_format = image.format
    else:
        chosen_format = "PNG"
    stream = BytesIO()
    image.save(stream, format=chosen_format)
    return stream.getvalue()
def add_transform(transform_type, transform_tag=None, priority=0, status=TransformStatus.New, locking=TransformLocking.Idle,
                  retries=0, expired_at=None, transform_metadata=None, workprogress_id=None, session=None):
    """
    Create a new transform record via the ORM layer.

    :param transform_type: Transform type.
    :param transform_tag: Transform tag.
    :param priority: priority.
    :param status: Transform status.
    :param locking: Transform locking.
    :param retries: The number of retries.
    :param expired_at: The datetime when it expires.
    :param transform_metadata: The metadata as json.
    :raises DuplicatedObject: If a transform with the same name exists.
    :raises DatabaseException: If there is a database error.
    :returns: transform id.
    """
    return orm_transforms.add_transform(transform_type=transform_type,
                                        transform_tag=transform_tag,
                                        priority=priority,
                                        status=status,
                                        locking=locking,
                                        retries=retries,
                                        expired_at=expired_at,
                                        transform_metadata=transform_metadata,
                                        workprogress_id=workprogress_id,
                                        session=session)
def load_memmap(path):
    """Load shared, memory-mapped data from disk.

    Parameters
    ----------
    path : str
        Path of the file to load.

    Returns
    -------
    The object loaded by joblib with ``mmap_mode='r+'`` (read/write
    memory mapping), so the data can be shared between processes.
    """
    return joblib.load(path, mmap_mode='r+')
def _classical_routine_on_result(
    N: int, t: int, x: int, measurement
) -> Tuple[enum.Enum, Optional[Tuple[int, ...]]]:
    """Attempt to extract factors of N from one quantum measurement.

    :param N: number to factorise
    :param t: number of qubits
    :param x: random integer between 0 < x < N
    :param measurement: a single outcome of the first register measurement
        after the quantum part of Shor's algorithm
    :return: tuple of exit status and, when available, the factors found
    """
    try:
        cf_terms = classical.continued_fraction(measurement, 2 ** t)
        convergents = classical._convergents(cf_terms)
        candidate_orders = classical.possible_orders(convergents)
        order = classical.first_order(N, x, candidate_orders)
        factors = classical.find_factor_from_order(N, x, order)
    except util.ShorError as err:
        # A failed factor attempt still carries the partial factors found.
        if err.fail_reason == util.ExitStatus.FAILED_FACTOR:
            return (util.ExitStatus.FAILED_FACTOR, err.failed_factors)
        return err.fail_reason, None
    return (util.ExitStatus.SUCCESS, factors)
def bump(chart_path: Path):
    """Bump the given Chart to its next patch version and announce it."""
    target_version = get_next_patch(chart_path)
    print(f"🚢 🆙 Bumping {chart_path} to {target_version}")
    bump_with_version(chart_path, target_version)
def get_rst_cls_file_header(collection_name, class_name):
    """Produce the rst content that begins an attribute-level ``*.rst`` file.

    The title uses the ``:doc:`Name<index>``` syntax to link back to the
    collection's ``index.rst`` file.
    """
    title = f":doc:`{collection_name.capitalize()}<../index>` {class_name}"
    return get_rst_file_header(title)
def is_leap(year):
    """
    Return True if ``year`` is a Gregorian leap year, False otherwise.

    A year is a leap year when it is divisible by 4, except century
    years, which are leap years only when divisible by 400.  The previous
    expression (``not year%400 or not (year%4 and year%100)``) wrongly
    reported every year divisible by 100 — e.g. 1900 and 2100 — as leap.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def ForwardDynamics(thetalist, dthetalist, taulist, g, Ftip, Mlist, \
                    Glist, Slist):
    """Computes forward dynamics in the space frame for an open chain robot
    :param thetalist: A list of joint variables
    :param dthetalist: A list of joint rates
    :param taulist: An n-vector of joint forces/torques
    :param g: Gravity vector g
    :param Ftip: Spatial force applied by the end-effector expressed in frame
                 {n+1}
    :param Mlist: List of link frames i relative to i-1 at the home position
    :param Glist: Spatial inertia matrices Gi of the links
    :param Slist: Screw axes Si of the joints in a space frame, in the format
                  of a matrix with axes as the columns
    :return: The resulting joint accelerations
    This function computes ddthetalist by solving:
    Mlist(thetalist) * ddthetalist = taulist - c(thetalist, dthetalist) \
                                     - g(thetalist) - Jtr(thetalist) * Ftip
    Example Input (3 Link Robot):
        thetalist = np.array([0.1, 0.1, 0.1])
        dthetalist = np.array([0.1, 0.2, 0.3])
        taulist = np.array([0.5, 0.6, 0.7])
        g = np.array([0, 0, -9.8])
        Ftip = np.array([1, 1, 1, 1, 1, 1])
        (Mlist, Glist, Slist as in the library's 3-link UR example)
    Output:
        np.array([-0.97392907, 25.58466784, -32.91499212])
    """
    # Joint-space mass matrix M(theta).
    mass_matrix = MassMatrix(thetalist, Mlist, Glist, Slist)
    # Right-hand side: applied torques minus Coriolis/centripetal, gravity
    # and end-effector force contributions.
    rhs = np.array(taulist) \
          - VelQuadraticForces(thetalist, dthetalist, Mlist, Glist, Slist) \
          - GravityForces(thetalist, g, Mlist, Glist, Slist) \
          - EndEffectorForces(thetalist, Ftip, Mlist, Glist, Slist)
    # Solve M * ddtheta = rhs via the explicit inverse, as before.
    return np.dot(np.linalg.inv(mass_matrix), rhs)
def submission_received(submission):
    """Send notifications if this is the first submission"""
    version_history = inspect(submission).attrs.get('version').history
    # Only notify when the version attribute changed and this is version 1.
    # TODO Check for insert rather than assuming the version is immutable
    if version_history.has_changes() and submission.version == 1:
        messages.send_new_talk_available(submission)
def union(dataframe1, dataframe2) -> pd.DataFrame:
    """The set union between dataframe1 (S) and dataframe2 (T), i.e. it returns the elements
    that are in dataframe1 or dataframe2. Formally S ∪ T = {s|s ∈ S or s ∈ T}.
    If duplicates exists in either dataframe they are dropped and a UserWarning is issued.
    Does not alter the original DataFrame.
    Syntactic sugar for the pandas dataframe append method.
    Parameters
    ----------
    dataframe1 : pd.DataFrame\n
    dataframe2 : pd.DataFrame\n
    Returns
    -------
    pandas DataFrame\n
        The set union of dataframe1 and dataframe2
    Raises
    ------
    ValueError\n
        Raises ValueError if the columns in dataframe1 and dataframe2 are not identical.
    Example
    -------
    ```python
    import pandas as pd
    import neat_panda
    print(df1)
        country  continent  year  actual
    0    Sweden     Europe  2018       1
    1   Denmark  Not known  2018       3
    2  Iceleand     Europe  2019       0
    print(df2)
       country  continent  year  actual
    0   Sweden     Europe  2020       1
    1  Denmark  Not known  2020       3
    df3 = df1.union(df2)
    print(df3)
        country  continent  year  actual
    0    Sweden     Europe  2018       1
    1   Denmark  Not known  2018       3
    2  Iceleand     Europe  2019       0
    3    Sweden     Europe  2020       1
    4   Denmark  Not known  2020       3
    ```
    """
    return SetOperations(dataframe1, dataframe2).union()
def get_axis_periodic(self, Nper, is_antiperiod=False):
    """Return a new Data1D reduced to a single (anti)period of this axis.

    Parameters
    ----------
    self: Data1D
        a Data1D object
    Nper: int
        number of periods
    is_antiperiod: bool
        return values on a semi period (only for antiperiodic signals)
    Returns
    -------
    New Data1D
    """
    # Dynamic import to avoid a circular dependency between classes.
    module = __import__("SciDataTool.Classes.Data1D", fromlist=["Data1D"])
    Data1D = getattr(module, "Data1D")
    length = self.get_length()
    # The axis must divide evenly into Nper periods.
    if length % Nper != 0:
        raise AxisError(
            "ERROR: length of axis is not divisible by the number of periods"
        )
    symmetry_kind = "antiperiod" if is_antiperiod else "period"
    return Data1D(
        values=self.values[: int(length / Nper)],
        name=self.name,
        unit=self.unit,
        symmetries={symmetry_kind: Nper},
        normalizations=self.normalizations,
        is_components=self.is_components,
        symbol=self.symbol,
    )
def compute(data, function, key):
    """Calculate a measure over the data and print the result.

    For single-value measures (like average or median, for example) you
    need to specify only one key.

    :param data: iterable of values convertible to float
    :param function: name of the measure; must be a key of COMPUTE_FUNCTIONS
    :param key: retained for interface compatibility (not used here)
    """
    func = COMPUTE_FUNCTIONS[function]
    values = [float(value) for value in data]
    # Fixed: the original used the Python 2 print statement
    # (``print func(values)``), a SyntaxError under Python 3.
    print(func(values))
def knapsack_fractional(weights, values, capacity):
    """Return the maximum (truncated-to-int) profit for the fractional knapsack.

    Items are taken greedily in decreasing value/weight ratio; the last item
    may be taken fractionally.  The previous version kept looping after the
    capacity reached zero, so any remaining item triggered a
    ``ZeroDivisionError`` (``pair[0] / capacity`` with ``capacity == 0``);
    the loop now stops as soon as the knapsack is full.

    :param weights: item weights
    :param values: item values (parallel to ``weights``)
    :param capacity: knapsack capacity
    :return: nearest lower integer value of the achievable profit
    """
    max_profit = 0.0
    # Sort item pairs by value-per-weight, best first.
    for weight, value in sorted(zip(weights, values), key=lambda x: -x[1] / x[0]):
        if capacity <= 0:
            # Knapsack full: taking anything further is impossible.
            break
        if weight > capacity:
            # Item does not fit whole: take the fitting fraction and stop.
            max_profit += value * capacity / weight
            capacity = 0
        else:
            max_profit += value
            capacity -= weight
    return int(max_profit)
def p_subscript_list(p):
    # NOTE: the docstring below is the PLY grammar rule for this production.
    # The parser generator reads it, so it must not be reworded.
    """ subscript_list : subscript
                       | subscript subscript_list"""
    # p[1] is the leading subscript; p[2], when present, is the list already
    # built by the recursive production.
    if len(p) == 3:
        p[0] = [p[1]] + p[2]
    else:
        # Single subscript: a None subscript produces an empty list.
        p[0] = [] if p[1] is None else [p[1]]
def check_protonation(selection):
    """
    Check whether the structure is protonated; if it is not, log a critical message.

    :param selection: prody molecule (must support ``.select``)
    :return: None; emits a critical log entry when no hydrogens are detected.
    :raises AttributeError: when the selection itself is invalid (e.g. the
        ligand/core are not in the expected chain) — see the message below.
    """
    try:
        # selection.select returns a falsy value when no hydrogens match.
        if not selection.select("hydrogen"):
            logger.critical("We have not detected Hydrogens in your ligand. Please, add them before starting.")
    except AttributeError:
        # A None selection (wrong chain) has no .select — re-raise with guidance.
        raise AttributeError("Check ligand and core are in the L chain. Otherwise specify their chain using the flags \
                              -cc chain_core -fc chain_frag. Fragment & Core must always be in the same chain.")
def verifier_delete(verifier_id):
    """Remove the verifier record identified by ``verifier_id``.

    :param verifier_id: verifier name or UUID
    :raises ResourceNotFound: if verifier does not exist
    """
    impl = get_impl()
    impl.verifier_delete(verifier_id)
def invert(values: np.array, inversion: int) -> np.array:
    """Return the specified musical inversion of ``values``.

    Rotates the pitch values left by ``inversion`` positions (a negative
    inversion rotates right) and returns them as integers.
    """
    if np.abs(inversion) > (len(values) - 1):
        raise ValueError("Inversion out of range")
    # np.roll(values, -k) is equivalent to hstack([values[k:], values[:k]]).
    return np.roll(values, -inversion).astype(int)
def set_logger(**cli_kwargs):
    """Configure the logger.

    Always installs a stdout handler; additionally installs a file handler
    when the user supplied an output file name.  Sets message format and
    level for each sink.

    :param cli_kwargs: The key/value pairs depicting the CLI arguments
        given by the user (must contain "output_file", possibly None).
    """
    # Stdout sink is always present.
    handlers = [
        {
            "sink": sys.stdout,
            "format": "<lvl>{time:YYYY:MM:DD-HH:mm:ss:ZZ} {message}</lvl>",
            "level": "INFO",
            "enqueue": True,
        }
    ]
    output_file = cli_kwargs["output_file"]
    # Add a file sink only when an output file was requested.
    if output_file is not None:
        handlers.append(
            {
                "sink": output_file,
                "serialize": False,
                "format": "{time:YYYY:MM:DD-HH:mm:ss:ZZ} {message}",
                "level": "INFO",
                "enqueue": True,
            }
        )
    logger.configure(handlers=handlers)
def prepare_infrastructure():
    """Entry point for preparing the infrastructure in a specific env."""
    runner = ForemastRunner()
    # Base resources are always created, in this order.
    runner.write_configs()
    runner.create_app()
    runner.create_iam()
    runner.create_s3()
    runner.create_secgroups()
    eureka_enabled = runner.configs[runner.env]['app']['eureka_enabled']
    pipeline_type = runner.configs['pipeline']['type']
    # ELB/DNS are skipped for Eureka-registered apps and for Lambdas.
    if eureka_enabled:
        LOG.info("Eureka Enabled, skipping ELB and DNS setup")
    elif pipeline_type == "lambda":
        LOG.info("Lambda Enabled, skipping ELB and DNS setup")
        runner.create_awslambda()
    else:
        LOG.info("No Eureka, running ELB and DNS setup")
        runner.create_elb()
        runner.create_dns()
    runner.slack_notify()
    runner.cleanup()
def get_libpath():
    """
    Get the absolute path of the distributed platform-specific SSA library.

    :return: path to the shared library under the package's ``clibs`` folder.
    :raises RuntimeError: on an unsupported platform.
    """
    # The original imported ``re`` and ``os.path.join`` without using them;
    # both unused imports have been removed.
    import os
    from os.path import dirname, abspath, realpath
    from platform import system
    root = dirname(abspath(realpath(__file__)))
    libraries = {
        'Linux': 'Linux-SSA.so',
        'Darwin': 'OSX-SSA.so',
        # NOTE(review): a ".so" suffix on Windows looks odd — confirm the
        # shipped filename before changing it.
        'Windows': 'Win-SSA.so',
    }
    try:
        library = libraries[system()]
    except KeyError:
        raise RuntimeError("unsupported platform - \"{}\"".format(system()))
    return os.path.join(root, 'clibs', library)
def getthumb(episode, target="."):
    """
    Download the thumbnail of ``episode`` and save it under ``target`` with a
    ``sXXeYY.EpisodeName.jpg`` filename (spaces removed).
    """
    filename = f"s{episode['airedSeason']:02}e{episode['airedEpisodeNumber']:02}.{episode['episodeName']}.jpg"
    filepath = os.path.join(target, filename.replace(" ", ""))
    url = "https://artworks.thetvdb.com/banners/" + episode['filename']
    print(f"Saving '{url}' as '{filepath}'")
    # The file is opened before the request, matching the previous behaviour
    # (a failed request still leaves an empty file behind).
    with open(filepath, 'wb') as out:
        response = requests.get(url)
        out.write(response.content)
def engine_factory(database_url: str) -> Engine:
    """Construct a database engine without connection pooling.

    Local (``localhost``) databases get a one-second connect timeout and
    SQL echoing for development convenience.
    """
    parsed = make_url(database_url)
    extra: dict[str, Any] = {}
    if parsed.host == "localhost":
        extra["connect_args"] = {"connect_timeout": 1}
        extra["echo"] = True
    return create_engine(
        database_url,
        future=True,
        poolclass=NullPool,
        executemany_mode="values",
        **extra,
    )
def _shear_x(video: torch.Tensor, factor: float, **kwargs):
    """
    Shear the video along the horizontal axis.
    Args:
        video (torch.Tensor): Video tensor with shape (T, C, H, W).
        factor (float): How much to shear along the horizontal axis using
            the affine matrix.
    """
    _check_fill_arg(kwargs)
    offset = video.size(-2) * factor / 2
    # Affine matrix rows: [1, factor, offset] shears x; [0, 1, 0] leaves y.
    affine_matrix = [1, factor, offset, 0, 1, 0]
    return F_t.affine(
        video,
        affine_matrix,
        fill=kwargs["fill"],
        interpolation="bilinear",
    )
def get_smtp_credentials(filters: Optional[Sequence[pulumi.InputType['GetSmtpCredentialsFilterArgs']]] = None,
                         user_id: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSmtpCredentialsResult:
    """
    This data source provides the list of Smtp Credentials in Oracle Cloud Infrastructure Identity service.
    Lists the SMTP credentials for the specified user. The returned object contains the credential's OCID,
    the SMTP user name but not the SMTP password. The SMTP password is returned only upon creation.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_smtp_credentials = oci.identity.get_smtp_credentials(user_id=oci_identity_user["test_user"]["id"])
    ```
    :param str user_id: The OCID of the user.
    """
    invoke_args = {'filters': filters, 'userId': user_id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('oci:identity/getSmtpCredentials:getSmtpCredentials', invoke_args, opts=opts, typ=GetSmtpCredentialsResult).value
    return AwaitableGetSmtpCredentialsResult(
        filters=raw.filters,
        id=raw.id,
        smtp_credentials=raw.smtp_credentials,
        user_id=raw.user_id)
def coord(row, col):
    """Return the flat index of cell (row, col) within a 9x9 Sudoku grid."""
    return col + 9 * row
def analyse_decoupled_steady_state_data(
    data: Dict[Any, Dict[str, Any]], logger: Logger
) -> None:
    """
    Carry out analysis on a series of steady-state data sets.

    The previous version duplicated the same collation branch and the same
    ``plot_figure`` call sixteen times (2 panel types x with/without PV x
    2 glazings x 2 metrics); the combinations are now data-driven.  The
    generated figure names are identical to the hand-written ones.

    :param data:
        The data to analyse. This should be a `dict` mapping the file name to
        the data extracted from the file.
    :param logger:
        The logger to use for the analysis run.
    """
    logger.info("Beginning steady-state analysis.")
    # Collated efficiencies keyed by (panel type, pv marker, glazing), where
    # the pv marker is "" for panels with PV and "no_pv_" for those without.
    collated: Dict[Any, Dict[str, Any]] = collections.defaultdict(
        lambda: collections.defaultdict(dict)
    )
    for key, sub_dict in data.items():
        mass_flow_rate_match = re.match(MASS_FLOW_RATE_REGEX, key)
        if mass_flow_rate_match is None:
            continue
        mass_flow_rate_string: str = mass_flow_rate_match.group("mass_flow_rate")
        try:
            # "<int>_<frac>" encodes a decimal litres-per-hour rate.
            mass_flow_rate: float = int(
                mass_flow_rate_string.split("_")[0]
            ) + 0.1 * int(mass_flow_rate_string.split("_")[1])
        except ValueError:
            mass_flow_rate = int(mass_flow_rate_string.split("_")[1])
        electrical_efficiency, thermal_efficiency = _calculate_zero_point_efficiencies(
            sub_dict
        )
        panel = mass_flow_rate_match.group("panel_type")
        glazing = f"{mass_flow_rate_match.group('glazing')}glazed"
        pv_marker = "" if mass_flow_rate_match.group("no_pv") == "" else "no_pv_"
        if panel not in ("autotherm", "ilaria") or glazing not in (
            "single_glazed",
            "double_glazed",
        ):
            # Unrecognised combinations are silently skipped, matching the
            # previous branch-per-combination behaviour.
            continue
        entry = collated[(panel, pv_marker, glazing)][mass_flow_rate]
        entry["electrical_efficiency"] = electrical_efficiency
        entry["thermal_efficiency"] = thermal_efficiency
    # Plot thermal and electrical efficiencies for every combination.  The
    # figure names reproduce the previous hand-written calls exactly.
    for panel in ("autotherm", "ilaria"):
        for pv_marker in ("", "no_pv_"):
            for glazing in ("single_glazed", "double_glazed"):
                plot_data = collated[(panel, pv_marker, glazing)]
                for metric, y_label in (
                    ("thermal_efficiency", "Thermal efficiency"),
                    ("electrical_efficiency", "Electrical efficiency"),
                ):
                    plot_figure(
                        f"{panel}_{pv_marker}{glazing}_{metric}_against_mass_flow_rate",
                        plot_data,
                        [metric],
                        y_label,
                        x_axis_label="Mass-flow rate / Litres per hour",
                        use_data_keys_as_x_axis=True,
                        disable_lines=True,
                    )
def add_interface_ngeo(
    config: pyramid.config.Configurator,
    route_name: str,
    route: str,
    renderer: Optional[str] = None,
    permission: Optional[str] = None,
) -> None:
    """Register the routes and views of an ngeo interface."""
    config.add_route(route_name, route, request_method="GET")
    # Permalink theme: recover the theme for generating custom viewer.js url
    separator = "" if route[-1] == "/" else "/"
    theme_route_name = f"{route_name}theme"
    config.add_route(
        theme_route_name,
        f"{route}{separator}theme/{{themes}}",
        request_method="GET",
    )
    # Both routes render the same entry view.
    for name in (route_name, theme_route_name):
        config.add_view(
            Entry,
            attr="get_ngeo_index_vars",
            route_name=name,
            renderer=renderer,
            permission=permission,
        )
def post_authn_parse(request, client_id, endpoint_context, **kwargs):
    """
    Enforce the PKCE policy on an authentication request.

    When PKCE is essential, a ``code_challenge`` must be present.  Whenever a
    ``code_challenge`` is supplied without a ``code_challenge_method``, the
    method defaults to "plain" — but only if "plain" is among the supported
    methods.  The previous version duplicated this defaulting logic in both
    the essential and the optional branch; it is now written once.

    :param request: the authentication request (mapping-like)
    :param client_id: requesting client's identifier (unused here)
    :param endpoint_context: provides ``args["pkce"]`` policy settings
    :param kwargs: unused, kept for handler-interface compatibility
    :return: the (possibly updated) request
    :raises ValueError: when the policy is violated
    """
    pkce_args = endpoint_context.args["pkce"]
    if "code_challenge" in request:
        if "code_challenge_method" not in request:
            if "plain" not in pkce_args["code_challenge_method"]:
                raise ValueError("No support for code_challenge_method=plain")
            request["code_challenge_method"] = "plain"
    elif pkce_args["essential"] is True:
        raise ValueError("Missing required code_challenge")
    return request
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # nargs='+' collects one or more positional values into a list.
    parser.add_argument('positional', metavar='DIR', nargs='+',
                        help='A positional argument')
    parser.add_argument('-w', '--width', metavar='int', type=int,
                        default=50, help='A named integer argument')
    return parser.parse_args()
def token2hot(seq, max_length, n_features):
    """
    One-hot encode a tokenized sequence, zero-padded to ``max_length``.
    [1 2 2] -> [1 0 0 0], [0 1 0 0], [0 1 0 0]
    """
    padded = np.pad(seq, (0, max_length - len(seq)), 'constant')
    return F.one_hot(torch.tensor(padded), num_classes=n_features)
def is_repo_on_known_branch(path):
    """Check if we're on the most recent commit in a well known branch,
    'master' or a version branch (e.g. "1.2.3" or "1.2.x").

    :param path: path of the git repository to inspect
    :return: True when any remote branch or tag pointing at HEAD matches
        a known-branch name.
    """
    remote = find_upstream_remote(None, path)
    branches = execute_git(
        None,
        path,
        [
            "for-each-ref",
            "--format=%(refname:short)",
            "--points-at",
            "HEAD",
            "refs/remotes/%s/*" % remote,
            "refs/tags/*",
        ],
        capture=True,
    ).split()
    # Compile the pattern once and feed any() a generator (not a
    # materialised list) so it can short-circuit on the first match.
    known = re.compile(r"([0-9]+\.[0-9]+\.[0-9x]+|master)$")
    return any(known.search(branch) for branch in branches)
def setup(hass, config):
    """Initialize the ADTPulse integration.

    Logs in to the ADT Pulse cloud service, stores the service handle in
    ``hass.data``, registers an update service, schedules periodic refreshes
    and loads the supported platforms.

    :param hass: the Home Assistant core object
    :param config: validated configuration; the ADTPULSE_DOMAIN section must
        contain username/password and optionally host and scan interval
    :return: True on success, False when the Pulse service is unreachable
    """
    conf = config[ADTPULSE_DOMAIN]
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)
    try:
        # share reference to the service with other components/platforms running within HASS
        from pyadtpulse import PyADTPulse
        service = PyADTPulse(username, password)
        host = conf.get(CONF_HOST)
        if host:
            LOG.debug("Using ADT Pulse API host %s", host)
            service.set_service_host(host)
        hass.data[ADTPULSE_SERVICE] = service
    except (ConnectTimeout, HTTPError) as ex:
        # Login/connection failure: surface a persistent UI notification
        # and abort the integration setup.
        LOG.error("Unable to connect to ADT Pulse: %s", str(ex))
        hass.components.persistent_notification.create(
            f"Error: {ex}<br />You will need to restart Home Assistant after fixing.",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
        return False
    def refresh_adtpulse_data(event_time):
        """Call ADTPulse service to refresh latest data"""
        LOG.debug("Checking ADT Pulse cloud service for updates")
        adtpulse_service = hass.data[ADTPULSE_SERVICE]
        # Only pull data when the cloud service reports pending updates.
        if adtpulse_service.updates_exist:
            adtpulse_service.update()
            # notify all listeners (alarm and sensors) that they may have new data
            dispatcher_send(hass, SIGNAL_ADTPULSE_UPDATED)
    # subscribe for notifications that an update should be triggered
    hass.services.register(ADTPULSE_DOMAIN, 'update', refresh_adtpulse_data)
    # automatically update ADTPulse data (samples) on the scan interval
    scan_interval = timedelta(seconds = conf.get(CONF_SCAN_INTERVAL))
    track_time_interval(hass, refresh_adtpulse_data, scan_interval)
    for platform in SUPPORTED_PLATFORMS:
        discovery.load_platform(hass, platform, ADTPULSE_DOMAIN, {}, config)
    return True
def external_query(  # pylint: disable=too-many-arguments
        gcs_client: storage.Client, bq_client: bigquery.Client, gsurl: str,
        query: str, job_id: str, table: bigquery.TableReference):
    """Load from query over external table from GCS.

    This hinges on a SQL query defined in GCS at _config/*.sql and
    an external table definition
    _config/{constants.BQ_EXTERNAL_TABLE_CONFIG_FILENAME} (otherwise falls
    back to the default PARQUET external table definition).

    Returns once the submitted BigQuery job reaches DONE state or the
    polling window of constants.WAIT_FOR_JOB_SECONDS elapses; raises if
    the job (or one of its children) reports an error within that window.
    """
    external_table_config = read_gcs_file_if_exists(
        gcs_client,
        f"{gsurl}_config/{constants.BQ_EXTERNAL_TABLE_CONFIG_FILENAME}")
    if not external_table_config:
        # config not next to the data; walk up the prefix hierarchy
        external_table_config = look_for_config_in_parents(
            gcs_client, gsurl, constants.BQ_EXTERNAL_TABLE_CONFIG_FILENAME)
    if external_table_config:
        external_table_def = json.loads(external_table_config)
    else:
        print(f" {gsurl}_config/{constants.BQ_EXTERNAL_TABLE_CONFIG_FILENAME} "
              f"not found in parents of {gsurl}. "
              "Falling back to default PARQUET external table: "
              f"{json.dumps(constants.DEFAULT_EXTERNAL_TABLE_DEFINITION)}")
        external_table_def = constants.DEFAULT_EXTERNAL_TABLE_DEFINITION
    print(
        json.dumps(
            dict(message="Found external table definition.",
                 table=table.to_api_repr(),
                 external_table_def=external_table_def)))
    # This may cause an issue if >10,000 files.
    external_table_def["sourceUris"] = flatten2dlist(
        get_batches_for_gsurl(gcs_client, gsurl))
    # Check if hivePartitioningOptions
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#hivepartitioningoptions
    # is set in external.json file
    if external_table_def.get("hivePartitioningOptions"):
        external_table_def["hivePartitioningOptions"] = {
            "mode":
                external_table_def["hivePartitioningOptions"].get("mode")
                or "AUTO",
            "sourceUriPrefix":
                get_hive_partitioning_source_uri_prefix(
                    external_table_def["sourceUris"][0])
        }
    external_config = bigquery.ExternalConfig.from_api_repr(external_table_def)
    job_config = bigquery.QueryJobConfig(
        table_definitions={"temp_ext": external_config}, use_legacy_sql=False)
    if table:
        # drop partition decorator if present.
        table_id = table.table_id.split("$")[0]
        # similar syntax to str.format but doesn't require escaping braces
        # elsewhere in query (e.g. in a regex)
        rendered_query = query.replace(
            "{dest_dataset}", f"`{table.project}`.{table.dataset_id}").replace(
                "{dest_table}", table_id)
    # NOTE(review): if ``table`` is falsy, ``rendered_query`` is undefined
    # below and this raises NameError — callers appear to always pass a
    # table reference; confirm.
    job: bigquery.QueryJob = bq_client.query(rendered_query,
                                             job_config=job_config,
                                             job_id=job_id)
    logging.log_bigquery_job(
        job, table, f"Submitted asynchronous query job: {job.job_id}")
    start_poll_for_errors = time.monotonic()
    # Check if job failed quickly
    while time.monotonic(
    ) - start_poll_for_errors < constants.WAIT_FOR_JOB_SECONDS:
        job.reload(client=bq_client)
        if job.state == "DONE":
            check_for_bq_job_and_children_errors(bq_client, job, table)
            return
        time.sleep(constants.JOB_POLL_INTERVAL_SECONDS)
def get_args():
    """Get the command line arguments for ``tcp-h2-describe``.

    Returns:
        Tuple[int, int, Optional[str]]: A triple of

        * The port for the "describe" proxy
        * The port for the server that is being proxied
        * The hostname for the server that is being proxied (or :data:`None` if
          not provided)
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        # show defaults in --help output
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="tcp-h2-describe",
    )
    parser.add_argument(
        "--proxy-port",
        dest="proxy_port",
        type=int,
        default=24909,
        help='The port that will be used for running the "describe" proxy.',
    )
    parser.add_argument(
        "--server-host",
        dest="server_host",
        help="The hostname for the server that is being proxied.",
    )
    parser.add_argument(
        "--server-port",
        dest="server_port",
        type=int,
        default=80,
        help="The port for the server that is being proxied.",
    )
    args = parser.parse_args()
    return args.proxy_port, args.server_port, args.server_host
def _Install(vm):
    """Install the Maven package on the VM.

    Downloads the Maven tarball (via the preprovisioned-data mechanism),
    extracts it into MVN_DIR, appends the Maven environment exports, and
    optionally renders a mirror settings.xml when --maven_mirror_url is set.

    Args:
      vm: the virtual machine object to install Maven on.
    """
    vm.Install('openjdk')
    vm.Install('curl')
    # Download and extract maven
    maven_full_ver = FLAGS.maven_version
    maven_major_ver = maven_full_ver[:maven_full_ver.index('.')]
    maven_url = MVN_URL.format(maven_major_ver, maven_full_ver)
    maven_tar = maven_url.split('/')[-1]
    # will only work with preprovision_ignore_checksum
    if maven_tar not in PREPROVISIONED_DATA:
        PREPROVISIONED_DATA[maven_tar] = ''
        PACKAGE_DATA_URL[maven_tar] = maven_url
    maven_remote_path = posixpath.join(linux_packages.INSTALL_DIR, maven_tar)
    vm.InstallPreprovisionedPackageData(PACKAGE_NAME, [maven_tar],
                                        linux_packages.INSTALL_DIR)
    # --strip-components=1 drops the apache-maven-x.y.z/ top-level directory
    vm.RemoteCommand(('mkdir -p {0} && '
                      'tar -C {0} --strip-components=1 -xzf {1}').format(
                          MVN_DIR, maven_remote_path))
    java_home = _GetJavaHome(vm)
    # Set env variables for maven
    maven_env = MVN_ENV.format(java_home=java_home, maven_home=MVN_DIR)
    cmd = 'echo "{0}" | sudo tee -a {1}'.format(maven_env, MVN_ENV_PATH)
    vm.RemoteCommand(cmd)
    if FLAGS.maven_mirror_url:
        settings_local_path = data.ResourcePath(os.path.join(
            'maven', 'settings.xml.j2'))
        settings_remote_path = '~/.m2/settings.xml'
        context = {
            'maven_mirror_url': FLAGS.maven_mirror_url
        }
        vm.RemoteCommand('mkdir -p ~/.m2')
        vm.RenderTemplate(settings_local_path, settings_remote_path, context)
def merge_shopping_cart(sender, user, request, **kwargs):
    """
    Check if there are items in shopping cart before login,
    merge guest shopping cart to user cart and remove guest cart data.

    The uuid cookie expires in 7 days. If no guest-cart cookie is present
    there is nothing to merge and the handler returns immediately.
    """
    # Bug fix: the original bare ``except:`` swallowed every error (including
    # redis failures); only the missing cookie is an expected condition.
    uuid = request.COOKIES.get('uuid')
    if uuid is None:
        # no uuid found, do nothing
        return
    conn = get_redis_connection('cart')
    user_key = f'cart_{user.id}'
    guest_key = f'cart_{uuid}'
    guest_cart = conn.hgetall(guest_key)
    user_cart = conn.hgetall(user_key)
    # NOTE: this merge will overwrite the count of same product with user
    # cart data (user entries win over guest entries).
    updated_cart = {**guest_cart, **user_cart}
    # redis-py raises DataError on an empty mapping, so only write when
    # there is something to store.
    if updated_cart:
        conn.hset(user_key, mapping=updated_cart)
    conn.delete(guest_key)
def make_key(ns_sep, namespace, *names):
    """Make a redis namespaced key.

    >>> make_key(":", "YOO:HOO", "a", "b", "c") == "YOO:HOO:a:b:c"
    True
    """
    # A plain tuple is simpler than itertools.chain for this small join.
    return ns_sep.join((namespace, *names))
def test_session_env_lazy(monkeypatch, gdalenv):
    """Create an Env with AWS env vars.

    Verifies that AWS credentials picked up lazily from the environment are
    reflected in the GDAL config options of the active rasterio Env.
    """
    monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'id')
    monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'key')
    monkeypatch.setenv('AWS_SESSION_TOKEN', 'token')
    with rasterio.Env() as s:
        s.get_aws_credentials()
        assert getenv() == rasterio.env.local._env.options
        expected = {
            'aws_access_key_id': 'id',
            'aws_secret_access_key': 'key',
            'aws_session_token': 'token'}
        for k, v in expected.items():
            assert getenv()[k] == v
    # pytest would undo the monkeypatching at teardown anyway; the explicit
    # undo restores the environment before other fixture finalizers run.
    monkeypatch.undo()
def gas_strand_2pipes(method="nikuradse"):
    """Load the two-pipe strand STANET network as a pandapipes network.

    :param method: Which results should be loaded: nikuradse or prandtl-colebrook
    :type method: str, default "nikuradse"
    :return: net - STANET network converted to a pandapipes network
    :rtype: pandapipesNet

    :Example:
        >>> pandapipes.networks.simple_gas_networks.gas_strand_2pipes()
    """
    log_result_upon_loading(logger, method=method, converter="stanet")
    # "n" is accepted as shorthand for "nikuradse"; anything else falls back
    # to the Prandtl-Colebrook variant of the stored network.
    net_name = "two_pipes_N.json" if method.lower() in ["nikuradse", "n"] else "two_pipes_PC.json"
    return from_json(os.path.join(gas_stanet_path, "strand_net", net_name))
def directly_follows(logs_traces, all_activs, noise_threshold=0):
    """
    Gets the allowed directly-follows relations given the traces of the log

    Parameters
    -------------
    logs_traces
        Traces of the log (mapping from trace to its occurrence count)
    all_activs
        Occurrence count of each activity
    noise_threshold
        Noise threshold

    Returns
    --------------
    rel
        Set of directly-follows relations in the log
    """
    rel_counts = Counter()
    for trace, occurrences in logs_traces.items():
        trace_rels = Counter(trace_skel.directly_follows(list(trace)))
        # weight each relation by how often this trace occurs in the log
        for rel in trace_rels:
            trace_rels[rel] *= occurrences
        rel_counts += trace_rels
    # keep a relation only if it is frequent enough relative to the count of
    # its source activity (noise filtering)
    return {
        rel for rel, count in rel_counts.items()
        if count >= all_activs[rel[0]] * (1.0 - noise_threshold)
    }
def python_name(env_dirpath: Union[Path, str]) -> str:
    """Find the name of the Python in a virtual environment.

    Args:
        env_dirpath: The path to the root of a virtual environment (Path or str).

    Returns:
        A descriptive string (the first line of ``python --version`` output).

    Raises:
        ValueError: If the env_dirpath is not a virtual environment.
        RuntimeError: If running ``python --version`` fails.
    """
    exe = python_executable_path(env_dirpath)
    command = f"{exe} --version"
    status, output = subprocess.getstatusoutput(command)
    if status != 0:
        raise RuntimeError(f"Could not run {command}")
    # splitlines() drops line endings by default; only the first line is the
    # version string.
    return output.splitlines()[0]
def hfc(x, framesize=1024, hopsize=512, fs=44100):
    """
    Calculate HFC (High Frequency Content).

    Parameters:
        x: ndarray
            input signal
        framesize: int
            frame size in samples
        hopsize: int
            hop size in samples
        fs: int
            sampling rate
    Returns:
        result: ndarray
            HFC value per frame
    Notes:
        The only difference from the spectral centroid is whether the value
        is normalized by the spectrogram energy (HFC is not).
    """
    S, F, T = stft(x, framesize, hopsize, fs, 'hann')
    S = sp.absolute(S)
    # Frequency-weighted sum of magnitudes over each frame. (The original
    # pre-allocated a zero array that was immediately overwritten; that dead
    # code has been removed.)
    hfc_data = (F * S.T).T.sum(0)
    return hfc_data
def validate_security_requirement_object(security_requirement_object):
    """Validate a security requirement object.

    Currently a no-op placeholder that always returns ``None``.

    :param security_requirement_object: the object to validate.
    """
    # TODO: implement actual validation; intentionally a stub for now.
    pass
def acronym_buster(message):
    """
    Replace known acronyms and complain about unknown ones.

    Every acronym in the ACRONYMS dict is expanded in place. Then the first
    remaining group of all-caps words is located; if one exists, return
    "[acronym] is an acronym. I do not like acronyms. Please remove them
    from your email." Otherwise fix remaining capitalization issues.

    :param message: The message to check
    :return: new string with the acronyms replaced with full words
    :rtype: str
    """
    # Expand every known acronym to its full form.
    message = reduce(lambda msg, item: msg.replace(*item), ACRONYMS.items(), message)
    try:
        # find all matching groups with .finditer using next and get the
        # first acronym that is not allowed
        acronym = next(ACRONYM_PATTERN.finditer(message)).group(0)
        return "{} is an acronym. I do not like acronyms. Please remove them from your email.".format(
            acronym
        )
    except StopIteration:
        # no disallowed acronym remains; normalize capitalization instead
        return CAPITAL_PATTERN.sub(CAPITAL_FIX, message)
def get_backup_temp_2():
    """
    This is the third way to get the temperature.

    Returns the RFM69 radio's temperature reading, or a sentinel value:
    2222 when the radio is not connected (RuntimeError), 9999 on any other
    error.
    """
    try:
        temp = RFM69.temperature
        logging.warning("Got second backup temperature")
        return temp
    except RuntimeError:
        # the RFM69 driver raises RuntimeError when the radio is absent
        logging.error("RFM69 not connected")
        return 2222
    except Exception as error:
        logging.error(error)
        return 9999
def constructed(function):
    """A decorator function for calling when a class is constructed.

    :param function: the hook to invoke on construction.
    :return: a class decorator that stores the hook on the class.
    """

    def store_constructed(class_reference):
        """Store the constructed-hook on the class and return the class."""
        setattr(class_reference, "__deserialize_constructed__", function)
        return class_reference

    return store_constructed
def generate_functions(
    function,
    parameters,
    name,
    name_func,
    tag_dict,
    tag_func,
    docstring_func,
    summarize,
    num_passing,
    num_failing,
    key_combs_limit,
    execution_group,
    timeout,
):
    """
    Generate test cases using the given parameter context, use the name_func
    to generate the name.

    If parameters is of type ``tuple`` / ``list`` then a new testcase method
    will be created for each item.

    If parameters is of type ``dict`` (of ``tuple``/``list``), then a new
    method will be created for each item in the Cartesian product of all
    combinations of values.

    :param function: A testcase method, with extra arguments
        for parametrization.
    :type function: ``callable``
    :param parameters: Parametrization context for the test case method.
    :type parameters: ``list`` or ``tuple`` of ``dict`` or ``tuple`` / ``list``
        OR a ``dict`` of ``tuple`` / ``list``.
    :param name: Customized readable name for testcase.
    :type name: ``str``
    :param name_func: Function for generating names of parametrized testcases,
        should accept ``func_name`` and ``kwargs`` as parameters.
    :type name_func: ``callable``
    :param docstring_func: Function that will generate docstring,
        should accept ``docstring`` and ``kwargs`` as parameters.
    :type docstring_func: ``callable``
    :param tag_func: Function that will be used for generating tags via
        parametrization kwargs. Should accept ``kwargs`` as parameter.
    :type tag_func: ``callable``
    :param tag_dict: Tag annotations to be used for each generated testcase.
    :type tag_dict: ``dict`` of ``set``
    :param summarize: Flag for enabling testcase level
        summarization of all assertions.
    :type summarize: ``bool``
    :param num_passing: Max number of passing assertions
        for testcase level assertion summary.
    :type num_passing: ``int``
    :param num_failing: Max number of failing assertions
        for testcase level assertion summary.
    :type num_failing: ``int``
    :param key_combs_limit: Max number of failed key combinations on fix/dict
        summaries that contain assertion details.
    :type key_combs_limit: ``int``
    :param execution_group: Name of execution group in which the testcases
        can be executed in parallel.
    :type execution_group: ``str`` or ``NoneType``
    :param timeout: Timeout in seconds to wait for testcase to be finished.
    :type timeout: ``int``
    :return: List of functions that is testcase compliant
        (accepts ``self``, ``env``, ``result`` as arguments) and have
        unique names.
    :rtype: ``list``
    """
    if not parameters:
        # fixed grammar of the original error message ("cannot be a empty")
        raise ParametrizationError('"parameters" cannot be empty.')

    _check_name_func(name_func)

    argspec = callable_utils.getargspec(function)
    args = argspec.args[3:]  # get rid of self, env, result
    defaults = argspec.defaults or []
    # Parameters without defaults are required; the rest map to defaults.
    required_args = args[: -len(defaults)] if defaults else args
    default_args = dict(zip(args[len(required_args):], defaults))

    kwarg_list = _generate_kwarg_list(
        parameters, args, required_args, default_args
    )

    functions = [
        _generate_func(
            function=function,
            name=name,
            name_func=name_func,
            tag_func=tag_func,
            docstring_func=docstring_func,
            tag_dict=tag_dict,
            kwargs=kwargs,
        )
        for kwargs in kwarg_list
    ]

    for idx, func in enumerate(functions):
        # Users request the feature that when `name_func` set to `None`,
        # then simply append integer suffixes to the names of testcases
        if name_func is None:
            func.name = "{} {}".format(func.name, idx)
        func.summarize = summarize
        func.summarize_num_passing = num_passing
        func.summarize_num_failing = num_failing
        func.summarize_key_combs_limit = key_combs_limit
        func.execution_group = execution_group
        func.timeout = timeout

    return functions
def tdt(input_dir_path, experiment_name="Thellier", meas_file_name="measurements.txt",
spec_file_name="specimens.txt", samp_file_name="samples.txt",
site_file_name="sites.txt", loc_file_name="locations.txt",
user="", location="", lab_dec=0, lab_inc=90, moment_units="mA/m",
samp_name_con="sample=specimen", samp_name_chars=0,
site_name_con="site=sample", site_name_chars=0, volume=12.,
output_dir_path=""):
"""
converts TDT formatted files to measurements format files
Parameters
----------
input_dir_path : str
directory with one or more .tdt files
experiment: str
one of: ["Thellier", "ATRM 6 pos", "NLT"], default "Thellier"
meas_file_name : str
default "measurements.txt"
spec_file_name : str
default "specimens.txt"
samp_file_name : str
default "samples.txt"
site_file_name : str
default "sites.txt"
loc_file_name : str
default "locations.txt"
user : str
default ""
location : str
default ""
lab_dec: int
default: 0
lab_inc: int
default 90
moment_units : str
must be one of: ["mA/m", "emu", "Am^2"], default "mA/m"
samp_name_con : str or int
{1: "sample=specimen", 2: "no. of terminate characters", 3: "character delimited"}
samp_name_chars : str or int
number of characters to remove for sample name, (or delimiting character), default 0
site_name_con : str or int
{1: "site=sample", 2: "no. of terminate characters", 3: "character delimited"}
site_name_chars : str or int
number of characters to remove for site name, (or delimiting character), default 0
volume : float
volume in cc, default 12
output_dir_path : str
path for file output, defaults to input_dir_path
Returns
---------
tuple : (True if program ran else False, measurement outfile name or error message if failed)
"""
# --------------------------------------
# Read the files
#
# Database structure
# Thellier_type experiment:
#
# 1) Each file contains the data one specimen
# 2) First line is the header: "Thellier-tdt"
# 3) Second line in header inlucdes 4 fields:
# [Blab] ,[unknown_1] , [unknown_2] , [unknown_3] , [unknown_4]
# 4) Body includes 5 fields
# [specimen_name], [treatments], [moment],[meas_dec],[meas_dec
# Tretment: XXX.0 (zerofield)
# XXX.1 (infield)
# XXX.2 (pTRM check)
# XXX.3 (Tail check)
# XXX.4 (Additivity check; Krasa et al., 2003)
# XXX.5 (Original Thellier-Thellier protocol. )
# (where .5 is for the second direction and .1 in the first)
# XXX = temperature in degrees
#
#
# IMPORTANT ASSUMPTION:
# (1) lab field is always in Z direction (theta=0, phi=90)
# (2) Thermal demagnetization - NO MICROWAVE
# (3) if if XXX <50 then assuming that this is NRM (273K)
#
# -------------------------------------
#
# ATRM in six positions
#
# Tretment: XXX.0 zerofield
# XXX.1 +x
# XXX.2 +y
# XXX.3 +z
# XXX.4 -x
# XXX.5 -y
# XXX.6 -z
# XXX.7 alteration check
# IMPORTANT REMARKS:
#
# (1) If the program check if the direction of the magnetization fits the coding above
# if not, an error message will appear
# (2) Alteration ckeck can be in any direction
# (3) the order of the measurements is not important
#
def get_sample_name(specimen, sample_naming_convention):
if sample_naming_convention[0] == "sample=specimen":
sample = specimen
elif sample_naming_convention[0] == "no. of terminate characters":
n = int(sample_naming_convention[1])*-1
sample = specimen[:n]
elif sample_naming_convention[0] == "charceter delimited":
d = sample_naming_convention[1]
sample_splitted = specimen.split(d)
if len(sample_splitted) == 1:
sample = sample_splitted[0]
else:
sample = d.join(sample_splitted[:-1])
return sample
def get_site_name(sample, site_naming_convention):
if site_naming_convention[0] == "site=sample":
site = sample
elif site_naming_convention[0] == "no. of terminate characters":
n = int(site_naming_convention[1])*-1
site = sample[:n]
elif site_naming_convention[0] == "charceter delimited":
d = site_naming_convention[1]
site_splitted = sample.split(d)
if len(site_splitted) == 1:
site = site_splitted[0]
else:
site = d.join(site_splitted[:-1])
return site
## format some variables
# convert volume from cc to m^3
volume = float(volume) * 1e-6
if not output_dir_path:
output_dir_path = input_dir_path
samp_name_cons = {1: 'sample=specimen', 2: 'no. of terminate characters', 3: 'character delimited'}
if not samp_name_con:
samp_name_con = "sample=specimen"
elif samp_name_con not in samp_name_cons.values():
try:
samp_name_con = samp_name_cons.get(int(samp_name_con), 'sample=specimen')
except ValueError:
samp_name_con="sample=specimen"
if samp_name_con == 'no. of terminate characters' and not samp_name_chars:
print("-W- You have selected the sample naming convention: 'no. of terminate characters',\n but have provided the number of characters as 0.\n Defaulting to use 'sample=specimen' instead.")
samp_name_con = 'sample=specimen'
site_name_cons = {1: 'site=sample', 2: 'no. of terminate characters', 3: 'character delimited'}
if not site_name_con:
site_name_con = "site=sample"
elif site_name_con not in site_name_cons.values():
try:
site_name_con = site_name_cons.get(int(site_name_con), 'site=sample')
except ValueError:
site_name_con = "site=sample"
if site_name_con == 'no. of terminate characters' and not site_name_chars:
print("-W- You have selected the site naming convention: 'no. of terminate characters',\n but have provided the number of characters as 0.\n Defaulting to use 'site=sample' instead.")
site_name_con = 'site=sample'
Data = {}
# -----------------------------------
# First, read all files and sort data by specimen and by Experiment type
# -----------------------------------
for files in os.listdir(input_dir_path):
if files.endswith(".tdt"):
fname = os.path.join(input_dir_path, files)
print("Open file: ", fname)
fin = open(fname, 'r')
header_codes = ['labfield', 'core_azimuth',
'core_plunge', 'bedding_dip_direction', 'bedding_dip']
body_codes = ['specimen_name',
'treatment', 'moment', 'dec', 'inc']
tmp_body = []
tmp_header_data = {}
line_number = 0
continue_reading = True
line = fin.readline() # ignore first line
lines = fin.readlines()
fin.close()
for line in lines:
if "END" in line:
break
if line.strip('\n') == "":
break
this_line = line.strip('\n').split()
if len(this_line) < 5:
continue
# ---------------------------------------------------
# fix muxworthy funky data format
# ---------------------------------------------------
if len(this_line) < 5 and line_number != 0:
new_line = []
for i in range(len(this_line)):
if i > 1 and "-" in this_line[i]:
tmp = this_line[i].replace("-", " -")
tmp1 = tmp.split()
for i in range(len(tmp1)):
new_line.append(tmp1[i])
else:
new_line.append(this_line[i])
this_line = list(copy(new_line))
# -------------------------------
# Read information from Header and body
# The data is stored in a dictionary:
# Data[specimen][Experiment_Type]['header_data']=tmp_header_data --> a dictionary with header data
# Data[specimen][Experiment_Type]['meas_data']=[dict1, dict2, ...] --> a list of dictionaries with measurement data
# -------------------------------
# ---------------------------------------------------
# header
# ---------------------------------------------------
if line_number == 0:
for i in range(len(this_line)):
tmp_header_data[header_codes[i]] = this_line[i]
line_number += 1
# ---------------------------------------------------
# body
# ---------------------------------------------------
else:
tmp_data = {}
for i in range(min(len(this_line), len(body_codes))):
tmp_data[body_codes[i]] = this_line[i]
tmp_body.append(tmp_data)
# ------------
specimen = tmp_body[0]['specimen_name']
line_number += 1
if specimen not in list(Data.keys()):
Data[specimen] = {}
Experiment_Type = experiment_name
if Experiment_Type not in list(Data[specimen].keys()):
Data[specimen][Experiment_Type] = {}
Data[specimen][Experiment_Type]['meas_data'] = tmp_body
Data[specimen][Experiment_Type]['header_data'] = tmp_header_data
Data[specimen][Experiment_Type]['sample_naming_convention'] = [samp_name_con, samp_name_chars]
Data[specimen][Experiment_Type]['site_naming_convention'] = [site_name_con, site_name_chars]
Data[specimen][Experiment_Type]['location'] = location
Data[specimen][Experiment_Type]['user_name'] = user
Data[specimen][Experiment_Type]['volume'] = volume
Data[specimen][Experiment_Type]['moment_units'] = moment_units
Data[specimen][Experiment_Type]['labfield_DI'] = [lab_dec, lab_inc]
# -----------------------------------
# Convert Data{} to MagIC
# -----------------------------------
MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
specimens_list = list(Data.keys())
specimens_list.sort()
for specimen in specimens_list:
Experiment_Types_list = list(Data[specimen].keys())
Experiment_Types_list.sort()
for Experiment_Type in Experiment_Types_list:
if Experiment_Type in ["Thellier"]:
tmp_MeasRecs = []
# IMORTANT:
# phi and theta of lab field are not defined
# defaults are defined here:
phi, theta = '0.', '90.'
header_line = Data[specimen][Experiment_Type]['header_data']
experiment_treatments = []
measurement_running_number = 0
# start to make a list of the methcodes. and later will merge it to one string
methcodes = ["LP-PI-TRM"]
for i in range(len(Data[specimen][Experiment_Type]['meas_data'])):
meas_line = Data[specimen][Experiment_Type]['meas_data'][i]
# ------------------
# check if the same treatment appears more than once. If yes, assuming that the measurements is repeated twice,
# ignore the first, and take only the second one
# ------------------
if i < (len(Data[specimen][Experiment_Type]['meas_data'])-2):
Repeating_measurements = True
for key in ['treatment', 'specimen_name']:
if Data[specimen][Experiment_Type]['meas_data'][i][key] != Data[specimen][Experiment_Type]['meas_data'][i+1][key]:
Repeating_measurements = False
if Repeating_measurements == True:
"Found a repeating measurement at line %i, sample %s. taking the last one" % (
i, specimen)
continue
# ------------------
# Special treatment for first line (NRM data).
# ------------------
if i == 0:
if "." not in meas_line['treatment']:
meas_line['treatment'] = "0.0"
# if NRM is in the form of ".0" instead of "0.0"
elif meas_line['treatment'].split(".")[0] == "" and meas_line['treatment'].split(".")[1] == '0':
meas_line['treatment'] = "0.0"
# if NRM is in the form of "20.0" instead of "0.0"
elif float(meas_line['treatment'].split(".")[0]) < 50 and float(meas_line['treatment'].split(".")[-1]) == 0:
meas_line['treatment'] = "0.0"
# ------------------
# fix line in format of XX instead of XX.YY
# ------------------
if "." not in meas_line['treatment']:
meas_line['treatment'] = meas_line['treatment']+".0"
if meas_line['treatment'].split(".")[1] == "":
meas_line['treatment'] = meas_line['treatment']+"0"
# ------------------
# init names and dictionaries
# ------------------
MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
# convert from microT to Tesla
labfield = float(header_line['labfield'])*1e-6
sample = get_sample_name(
specimen, Data[specimen][Experiment_Type]['sample_naming_convention'])
site = get_site_name(
sample, Data[specimen][Experiment_Type]['site_naming_convention'])
location = Data[specimen][Experiment_Type]['location']
if location == '':
location = 'unknown'
# ------------------
# Fill data
# ------------------
# Start with S'tables and Loc Table
if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['specimen'] = specimen
SpecRec['sample'] = sample
SpecRec['volume'] = Data[specimen][Experiment_Type]['volume']
SpecRec['citations'] = "This study"
SpecRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SpecRecs.append(SpecRec)
if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['sample'] = sample
SampRec['site'] = site
SampRec['citations'] = "This study"
SampRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SampRecs.append(SampRec)
if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['site'] = site
SiteRec['location'] = location
SiteRec['citations'] = "This study"
SiteRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SiteRecs.append(SiteRec)
if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['location'] = location
LocRec['citations'] = "This study"
LocRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
LocRecs.append(LocRec)
# now the measurement Rec
MeasRec['citations'] = "This study"
# experiment is set in pmag.measurements_methods3
# MeasRec["experiments"]=""
MeasRec["specimen"] = specimen
MeasRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
MeasRec["quality"] = 'g'
MeasRec["standard"] = 'u'
MeasRec["treat_step_num"] = "%i" % measurement_running_number
MeasRec["dir_dec"] = meas_line['dec']
MeasRec["dir_inc"] = meas_line['inc']
if Data[specimen][Experiment_Type]['moment_units'] == 'mA/m':
MeasRec["magn_moment"] = "%5e" % (float(meas_line['moment'])*1e-3*float(
Data[specimen][Experiment_Type]['volume'])) # converted to Am^2
if Data[specimen][Experiment_Type]['moment_units'] == 'emu':
MeasRec["magn_moment"] = "%5e" % (
float(meas_line['moment'])*1e-3) # converted to Am^2
if Data[specimen][Experiment_Type]['moment_units'] == 'Am^2':
MeasRec["magn_moment"] = "%5e" % (
float(meas_line['moment'])) # converted to Am^2
MeasRec["meas_temp"] = '273.' # room temp in kelvin
# Date and time
## date=meas_line['Measurement Date'].strip("\"").split('-')
# yyyy=date[2];dd=date[1];mm=date[0]
## hour=meas_line['Measurement Time'].strip("\"")
# MeasRec["measurement_date"]=yyyy+':'+mm+":"+dd+":"+hour
# lab field data: distinguish between PI experiments to AF/Thermal
treatments = meas_line['treatment'].split(".")
if float(treatments[1]) == 0:
MeasRec["treat_dc_field"] = '0'
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
else:
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = Data[specimen][Experiment_Type]['labfield_DI'][0]
MeasRec["treat_dc_field_theta"] = Data[specimen][Experiment_Type]['labfield_DI'][1]
# ------------------
# Lab Treatments
# ------------------
# NRM
if float(treatments[0]) == 0 and float(treatments[1]) == 0:
MeasRec["method_codes"] = "LT-NO"
experiment_treatments.append('0')
MeasRec["treat_temp"] = '273.'
IZorZI = ""
# Zerofield step
elif float(treatments[1]) == 0:
MeasRec["method_codes"] = "LT-T-Z"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
# check if this is ZI or IZ:
for j in range(0, i):
previous_lines = Data[specimen][Experiment_Type]['meas_data'][j]
if previous_lines['treatment'].split(".")[0] == meas_line['treatment'].split(".")[0]:
if float(previous_lines['treatment'].split(".")[1]) == 1 or float(previous_lines['treatment'].split(".")[1]) == 10:
if "LP-PI-TRM-IZ" not in methcodes:
methcodes.append("LP-PI-TRM-IZ")
IZorZI = ""
else:
IZorZI = "Z"
# Infield step
elif float(treatments[1]) == 1 or float(treatments[1]) == 10:
MeasRec["method_codes"] = "LT-T-I"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
# check if this is ZI,IZ:
for j in range(0, i):
previous_lines = Data[specimen][Experiment_Type]['meas_data'][j]
if previous_lines['treatment'].split(".")[0] == meas_line['treatment'].split(".")[0]:
if float(previous_lines['treatment'].split(".")[1]) == 0:
if "LP-PI-TRM-ZI" not in methcodes:
methcodes.append("LP-PI-TRM-ZI")
IZorZI = ""
else:
IZorZI = "I"
# pTRM check step
elif float(treatments[1]) == 2 or float(treatments[1]) == 20:
MeasRec["method_codes"] = "LT-PTRM-I"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
if "LP-PI-ALT" not in methcodes:
methcodes.append("LP-PI-ALT")
# Tail check step
elif float(treatments[1]) == 3 or float(treatments[1]) == 30:
MeasRec["method_codes"] = "LT-PTRM-MD"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
if "LP-PI-BT-MD" not in methcodes:
methcodes.append("LP-PI-BT-MD")
MeasRec["treat_dc_field"] = "0"
MeasRec["treat_dc_field_phi"] = "0"
MeasRec["treat_dc_field_theta"] = "0"
# Additivity check step
elif float(treatments[1]) == 4 or float(treatments[1]) == 40:
MeasRec["method_codes"] = "LT-PTRM-AC"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
if "LP-PI-BT" not in methcodes:
methcodes.append("LP-PI-BT")
# Thellier Thellier protocol (1 for one direction and 5 for the antiparallel)
# Lab field direction of 1 is as put in the GUI dialog box
# Lab field direction of 5 is the anti-parallel direction of 1
elif float(treatments[1]) == 5 or float(treatments[1]) == 50:
MeasRec["method_codes"] = "LT-T-I"
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
MeasRec["treat_dc_field_phi"] = "%.2f" % (
(float(Data[specimen][Experiment_Type]['labfield_DI'][0])+180.) % 360.)
MeasRec["treat_dc_field_theta"] = "%.2f" % (
float(Data[specimen][Experiment_Type]['labfield_DI'][1])*-1.)
if "LP-PI-II" not in methcodes:
methcodes.append("LP-PI-II")
else:
print("-E- ERROR in file %s" % Experiment_Type)
print("-E- ERROR in treatment ",
meas_line['treatment'])
print("... exiting until you fix the problem")
# -----------------------------------
# MeasRec["method_codes"]=lab_treatment+":"+lab_protocols_string
# MeasRec["experiments"]=specimen+":"+lab_protocols_string
tmp_MeasRecs.append(MeasRec)
measurement_running_number += 1
# arrange method_codes and experiments:
method_codes = "LP-PI-TRM"
# Coe mothod
if "LP-PI-TRM-ZI" in methcodes and "LP-PI-TRM-IZ" not in methcodes and "LP-PI-II" not in methcodes:
method_codes = method_codes+":LP-PI-TRM-ZI"
if "LP-PI-TRM-ZI" not in methcodes and "LP-PI-TRM-IZ" in methcodes and "LP-PI-II" not in methcodes:
method_codes = method_codes+":LP-PI-TRM-IZ"
if "LP-PI-TRM-ZI" in methcodes and "LP-PI-TRM-IZ" in methcodes and "LP-PI-II" not in methcodes:
method_codes = method_codes+":LP-PI-BT-IZZI"
if "LP-PI-II" in methcodes:
method_codes = method_codes+":LP-PI-II"
if "LP-PI-ALT" in methcodes:
method_codes = method_codes+":LP-PI-ALT"
if "LP-PI-BT-MD" in methcodes:
method_codes = method_codes+":LP-PI-BT-MD"
if "LP-PI-BT" in methcodes:
method_codes = method_codes+":LP-PI-BT"
for i in range(len(tmp_MeasRecs)):
STRING = ":".join(
[tmp_MeasRecs[i]["method_codes"], method_codes])
tmp_MeasRecs[i]["method_codes"] = STRING
# experiment is set in pmag.measurements_methods3
# STRING=":".join([tmp_MeasRecs[i]["specimen"],method_codes])
# tmp_MeasRecs[i]["experiments"]=STRING
MeasRecs.append(tmp_MeasRecs[i])
elif Experiment_Type in ["ATRM 6 positions"]:
tmp_MeasRecs = []
header_line = Data[specimen][Experiment_Type]['header_data']
experiment_treatments = []
measurement_running_number = 0
# start to make a list of the methcodes. and later will merge it to one string
methcodes = ["LP-AN-TRM"]
for i in range(len(Data[specimen][Experiment_Type]['meas_data'])):
meas_line = Data[specimen][Experiment_Type]['meas_data'][i]
# ------------------
# check if the same treatment appears more than once. If yes, assuming that the measurements is repeated twice,
# ignore the first, and take only the second one
# ------------------
if i < (len(Data[specimen][Experiment_Type]['meas_data'])-2):
Repeating_measurements = True
for key in ['treatment', 'specimen_name']:
if Data[specimen][Experiment_Type]['meas_data'][i][key] != Data[specimen][Experiment_Type]['meas_data'][i+1][key]:
Repeating_measurements = False
if Repeating_measurements == True:
"Found a repeating measurement at line %i, sample %s. taking the last one" % (
i, specimen)
continue
# ------------------
# fix line in format of XX instead of XX.0
# ------------------
if "." not in meas_line['treatment']:
meas_line['treatment'] = meas_line['treatment']+".0"
if meas_line['treatment'].split(".")[1] == "":
meas_line['treatment'] = meas_line['treatment']+"0"
# ------------------
# init names and dictionaries
# ------------------
MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
# convert from microT to Tesla
labfield = float(header_line['labfield'])*1e-6
sample = get_sample_name(
specimen, Data[specimen][Experiment_Type]['sample_naming_convention'])
site = get_site_name(
sample, Data[specimen][Experiment_Type]['site_naming_convention'])
location = Data[specimen][Experiment_Type]['location']
# ------------------
# Fill data
# ------------------
# Start with S'tables and Loc Table
if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['specimen'] = specimen
SpecRec['sample'] = sample
SpecRec['volume'] = Data[specimen][Experiment_Type]['volume']
SpecRec['citations'] = "This study"
SpecRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SpecRecs.append(SpecRec)
if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['sample'] = sample
SampRec['site'] = site
SampRec['citations'] = "This study"
SampRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SampRecs.append(SampRec)
if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['site'] = site
SiteRec['location'] = location
SiteRec['citations'] = "This study"
SiteRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
SiteRecs.append(SiteRec)
if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['location'] = location
LocRec['citations'] = "This study"
LocRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
LocRecs.append(LocRec)
# Meas data now
MeasRec["specimen"] = specimen
MeasRec['analysts'] = Data[specimen][Experiment_Type]['user_name']
MeasRec['citations'] = "This study"
MeasRec["quality"] = 'g'
MeasRec["standard"] = 'u'
MeasRec["treat_step_num"] = "%i" % measurement_running_number
MeasRec["dir_dec"] = meas_line['dec']
MeasRec["dir_inc"] = meas_line['inc']
MeasRec["magn_moment"] = "%5e" % (float(meas_line['moment'])*1e-3*float(
Data[specimen][Experiment_Type]['volume'])) # converted to Am^2
MeasRec["meas_temp"] = '273.' # room temp in kelvin
treatments = meas_line['treatment'].split(".")
if len(treatments[1]) > 1:
treatments[1] = treatments[1][0]
MeasRec["treat_temp"] = '%8.3e' % (
float(treatments[0])+273.) # temp in kelvin
# labfield direction
if float(treatments[1]) == 0:
MeasRec["treat_dc_field"] = '0'
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
MeasRec["method_codes"] = "LT-T-Z:LP-AN-TRM"
else:
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
# alteration check as final measurement
if float(treatments[1]) == 7 or float(treatments[1]) == 70:
MeasRec["method_codes"] = "LT-PTRM-I:LP-AN-TRM"
else:
MeasRec["method_codes"] = "LT-T-I:LP-AN-TRM"
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
# atrm declination/inlclination order
tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0]
# atrm declination/inlclination order
tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90]
ipos_code = int(treatments[1])-1
# (2) using the magnetization
DEC = float(MeasRec["dir_dec"])
INC = float(MeasRec["dir_inc"])
if INC < 45 and INC > -45:
if DEC > 315 or DEC < 45:
ipos_guess = 0
if DEC > 45 and DEC < 135:
ipos_guess = 1
if DEC > 135 and DEC < 225:
ipos_guess = 3
if DEC > 225 and DEC < 315:
ipos_guess = 4
else:
if INC > 45:
ipos_guess = 2
if INC < -45:
ipos_guess = 5
# prefer the guess over the code
ipos = ipos_guess
MeasRec["treat_dc_field_phi"] = '%7.1f' % (
tdec[ipos])
MeasRec["treat_dc_field_theta"] = '%7.1f' % (
tinc[ipos])
# check it
if ipos_guess != ipos_code and treatments[1] != '7':
print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!" % (
MeasRec["specimen"], ".".join(list(treatments))))
MeasRecs.append(MeasRec)
measurement_running_number += 1
else:
print(
"-E- ERROR. sorry, file format %s is not supported yet. Please contact rshaar@ucsd.edu" % Experiment_Type)
# -------------------------------------------
# measurements.txt
# -------------------------------------------
con = cb.Contribution(output_dir_path, read_tables=[])
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts = pmag.measurements_methods3(MeasRecs, noave=False)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
con.write_table_to_file('specimens', spec_file_name)
con.write_table_to_file('samples', samp_file_name)
con.write_table_to_file('sites', site_file_name)
con.write_table_to_file('locations', loc_file_name)
meas_file = con.write_table_to_file('measurements', meas_file_name)
return True, meas_file
# ------------------------------------------- | 30,685 |
def post_multi_tag_datapoints(timeseries_with_datapoints: List[TimeseriesWithDatapoints], **kwargs):
    """Insert data into multiple timeseries.

    Entries holding more than the per-request datapoint limit are split, then
    bin-packed so each API call carries as many datapoints as allowed.

    Args:
        timeseries_with_datapoints (List[v04.dto.TimeseriesWithDatapoints]): The timeseries with data to insert.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        The JSON body of the last upload response (presumably empty on
        success — TODO confirm against the API docs).
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
    url = config.get_base_url(api_version=0.4) + "/projects/{}/timeseries/data".format(project)
    headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}
    ul_dps_limit = 100000  # max datapoints per request

    # Make sure we only work with TimeseriesWithDatapoints objects that hold
    # at most `ul_dps_limit` datapoints each.
    timeseries_with_datapoints_limited = []
    for entry in timeseries_with_datapoints:
        timeseries_with_datapoints_limited.extend(_split_TimeseriesWithDatapoints_if_over_limit(entry, ul_dps_limit))

    # Group these TimeseriesWithDatapoints if possible so that we upload as
    # much as possible in each call to the API.
    timeseries_to_upload_binned = _utils.first_fit(
        list_items=timeseries_with_datapoints_limited, max_size=ul_dps_limit, get_count=lambda x: len(x.datapoints)
    )

    # 'batch' rather than 'bin': avoid shadowing the builtin bin().
    for batch in timeseries_to_upload_binned:
        body = {
            "items": [
                {"tagId": ts_with_data.tagId, "datapoints": [dp.__dict__ for dp in ts_with_data.datapoints]}
                for ts_with_data in batch
            ]
        }
        res = _utils.post_request(url, body=body, headers=headers)

    return res.json()
def get_version(pname: str, url: str) -> str:
    """Extract the package version from the url returned by `get_url`.

    Expects urls of the form ``.../<version>/<pname>-<version>.tar.gz``.
    Raises ``ValueError`` when the url does not match that layout, and
    ``AssertionError`` when the embedded name/versions are inconsistent.
    """
    match = search(r"/(\d+\.\d+\.\d+)/(\w+(?:-\w+)?)-(\d+\.\d+\.\d+)\.tar\.gz$", url)
    # Without this guard a non-matching url surfaces as an opaque
    # "'NoneType' object is not subscriptable" TypeError.
    if match is None:
        raise ValueError("url {!r} does not look like a release tarball".format(url))
    # Assert that the package name matches.
    assert match[2] == pname
    # As a sanity check, also assert that the versions match.
    assert match[1] == match[3]
    # Return the version.
    return match[1]
def SmoothMu(trimset, smoothing):
    """Smooth mu or P(mu>thresh).

    Interpolates (flux, mu) onto a 300-point log-spaced flux grid, pads both
    ends with flat 100-point buffers, and smooths with a Gaussian filter in
    log10 space. Returns the padded flux grid and the smoothed values.
    """
    flux = trimset[0]
    mu = trimset[1]

    # Log-spaced sampling grid from 1 up to the largest flux value.
    grid_x = numpy.exp(numpy.linspace(0, numpy.log(flux.max()), 300))
    grid_y = griddata(flux, mu, grid_x, method='nearest')

    # Patch any non-finite interpolation results with the first finite value.
    invalid = grid_y * 0 != 0
    valid = grid_y * 0 == 0
    grid_y[invalid] = grid_y[valid][0]

    # Flat padding levels: mu.min() below; 1 (probability curve) or 10 above.
    upper_pad = 1. if mu.max() == 1 else 10.
    grid_y_padded = numpy.append(numpy.zeros(100) + mu.min(), grid_y)
    grid_y_padded = numpy.append(grid_y_padded, numpy.zeros(100) + upper_pad)

    # Smooth in log10 space, then transform back.
    smoothed = 10 ** gaussian_filter(numpy.log10(grid_y_padded), smoothing)

    # Extend the x grid to line up with the padded y values.
    grid_x_padded = numpy.append(numpy.arange(100) - 100, grid_x)
    grid_x_padded = numpy.append(grid_x_padded, numpy.arange(100) + grid_x.max())
    return grid_x_padded, smoothed
def spdiags(data, diags, m, n, format=None):
    """
    Return a sparse matrix built from the given diagonals.

    Parameters
    ----------
    data : array_like
        Matrix diagonals, stored row-wise.
    diags : diagonals to set
        - k = 0 the main diagonal
        - k > 0 the k-th upper diagonal
        - k < 0 the k-th lower diagonal
    m, n : int
        Shape of the result.
    format : format of the result (e.g. "csr")
        By default (format=None) an appropriate sparse matrix format is
        returned; this choice is subject to change. (Parameter name kept
        for interface compatibility even though it shadows the builtin.)

    See Also
    --------
    dia_matrix : the sparse DIAgonal format.

    Examples
    --------
    spdiags([[1,2,3,4],[1,2,3,4],[1,2,3,4]], [0,-1,2], 4, 4) produces::

        [[1, 0, 3, 0],
         [1, 2, 0, 4],
         [0, 2, 3, 0],
         [0, 0, 3, 4]]
    """
    diagonal_matrix = dia_matrix((data, diags), shape=(m, n))
    return diagonal_matrix.asformat(format)
def find_object(sha1_prefix) -> str:
    """
    Find object with given SHA-1 prefix and return path to object in object
    store, or raise ValueError if there are no objects or multiple objects
    with this prefix.
    """
    if len(sha1_prefix) < 2:
        raise ValueError("Hash Prefix must be 2 or more characters")
    # Objects are fanned out into directories named by the first two hex chars.
    obj_dir = os.path.join(".pygit", "objects", sha1_prefix[:2])
    rest = sha1_prefix[2:]
    try:
        objects = [name for name in os.listdir(obj_dir) if name.startswith(rest)]
    except FileNotFoundError:
        # A missing fan-out directory means no object with this prefix; keep
        # the documented ValueError contract instead of leaking an OSError.
        raise ValueError("Object {!r} not found".format(sha1_prefix)) from None
    if not objects:
        raise ValueError("Object {!r} not found".format(sha1_prefix))
    if len(objects) >= 2:
        raise ValueError("Multiple Objects ({}) with prefix {!r}".format(len(objects), sha1_prefix))
    return os.path.join(obj_dir, objects[0])
def list_files(path: str) -> List[str]:
    """ List files inside a directory

    Parameters
    ----------
    path
        directory path

    Returns
    -------
    list
        list of file names
    """
    # Use the scandir handle as a context manager so the OS directory handle
    # is released promptly, and DirEntry.is_file() so the cached stat result
    # is reused instead of issuing a second stat via os.path.isfile().
    with os.scandir(path) as entries:
        return [entry.name for entry in entries if entry.is_file()]
def precision_with_fixed_recall(y_true, y_pred_proba, fixed_recall):
    """ Compute precision for class 1 at a fixed recall level.

    The chosen decision threshold achieving that recall is returned as well.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred_proba : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier, should be probabilities
    fixed_recall : float
        Fixed recall, precision will be calculated with respect to this recall

    Returns
    -------
    precision_score : float
    threshold : float
    """
    if not is_valid_y_true(y_true):
        raise ValueError('y_true should not contain only zeros')
    # Largest threshold still reaching the requested recall. zip truncates to
    # the thresholds array, matching the original index-based pairing.
    _, recall_values, thresholds = precision_recall_curve(y_true, y_pred_proba)
    best_threshold = max(t for t, r in zip(thresholds, recall_values) if r >= fixed_recall)
    y_pred_binary = binarize(y_pred_proba, best_threshold)
    return precision_score(y_true, y_pred_binary), best_threshold
def sample_kollege(user, name='Miguel', crm='222'):
    """Create and return a sample Kollege record owned by *user*."""
    fields = {'user': user, 'name': name, 'crm': crm}
    return Kollege.objects.create(**fields)
def display_help():
    """Print usage information and option help on standard error."""

    def err(*args):
        # All help output goes to stderr so stdout stays clean for piping.
        print(*args, file=sys.stderr)

    err("usage: echobox [-a|--align name] [-b|--basic-char char] [-d|--debug]")
    err(" [-f|--fill-char char] [-h|--help|-?] [-i|--inter-lines number]")
    err(" [-l|--lead-lines number] [-S|--style name] [-s|--spaces number]")
    err(" [-t|--trail-lines number] [-v|--version] [--] [string ...]")
    err(" ---------------- ---------------------------------------------------")
    err(
        " -a|--align Box alignment (left, middle, center, right): %s"
        % parameters["Alignment"]
    )
    err(
        " -b|--basic-char Character to use for basic style: '%s'"
        % parameters["Basic char"]
    )
    err(
        " -f|--fill-char Character to use to fill background: '%s'"
        % parameters["Fill char"]
    )
    err(
        " -i|--inter-lines Blank lines around the text: %d"
        % parameters["Internal lines"]
    )
    err(
        " -l|--lead-lines Blank lines before the box: %d"
        % parameters["Leading lines"]
    )
    err(" -S|--style Style to use: %s" % parameters["Style"])
    err(
        " -s|--spaces Spaces around the text: %d"
        % parameters["Surrounding spaces"]
    )
    err(
        " -t|--trail-lines Blank lines after the box: %d"
        % parameters["Trailing lines"]
    )
    err(" --debug Enable debug mode")
    err(" --help|-? Print usage and this help message and exit")
    err(" --version Print version and exit")
    err(" -- Options processing terminator")
    err()
    err("Available styles: " + " ".join(STYLES.keys()))
    err()
def get_singular_user_metrics(URM_test, recommender_object: BaseRecommender, cutoff=10):
    """
    Return a pandas.DataFrame containing the precision, recall and average precision of all the users

    :param URM_test: URM to be tested on
    :param recommender_object: recommender system to be tested
    :param cutoff: the cutoff to be evaluated on
    :return: pandas.DataFrame with columns user_id, precision, recall, AP
    """
    from course_lib.Base.Evaluation.metrics import average_precision, precision, recall

    URM_test = sps.csr_matrix(URM_test)
    total_users = URM_test.shape[0]

    # Per-column accumulators for the result frame.
    records = {'user_id': [], 'precision': [], 'recall': [], 'AP': []}

    for user_id in range(total_users):
        if user_id % 10000 == 0:
            print("Evaluated user {} of {}".format(user_id, total_users))

        row_start = URM_test.indptr[user_id]
        row_end = URM_test.indptr[user_id + 1]
        if row_end - row_start <= 0:
            # Users with no test interactions are skipped entirely.
            continue

        relevant_items = URM_test.indices[row_start: row_end]
        recommended_items = recommender_object.recommend(user_id, cutoff=cutoff)
        is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)

        records['user_id'].append(user_id)
        records['precision'].append(precision(is_relevant))
        records['recall'].append(recall(is_relevant, relevant_items))
        records['AP'].append(average_precision(is_relevant, relevant_items))

    return pd.DataFrame(data=records)
def detect_license(document, cleaned=False):
    """
    Find the license template most similar to the provided `document`.

    :document: a license, whose name should be identified
    :type document: string

    :cleaned: shows whether `document` is already prepared for vectorization.

    Returns the name of the best-matching license as a string.
    """
    vectorizer = TfidfVectorizer(stop_words='english',
                                 strip_accents='unicode',
                                 use_idf=True,
                                 smooth_idf=True,
                                 norm='l2')
    names, licenses = load_license_templates()
    prepared_doc = document if cleaned else clean(document)

    # Rows of `tfidf` are documents, columns are vocabulary tokens;
    # the last row is the input document itself.
    tfidf = vectorizer.fit_transform(licenses + [prepared_doc])
    doc_vector = tfidf[-1]

    # Linear scan for the template with the highest cosine similarity.
    best_index = 0
    best_score = -1
    for idx in range(len(licenses)):
        score = cosine_similarity(doc_vector, tfidf[idx])
        if score > best_score:
            best_score = score
            best_index = idx
    return names[best_index]
def user_profile(request):
    """
    Puts user_profile into context if available.
    """
    context = {'user_profile': None}
    if request.user.is_authenticated():
        from models import Participant
        try:
            context['user_profile'] = Participant.objects.get(user__id=request.user.id)
        except ObjectDoesNotExist:
            # Authenticated users without a Participant keep a None profile.
            pass
    return context
def get_reduced_model(model: torch.nn.Module, x_sample: torch.Tensor,
                      bias: bool = True, activation: bool = True) -> torch.nn.Module:
    """
    Get 1-layer model corresponding to the firing path of the model for a specific sample.

    Folds the weights of every Linear child along the sample's active
    (ReLU-firing) units into one equivalent Linear layer. Assumes a ReLU
    nonlinearity between consecutive Linear layers (the code applies it
    explicitly; non-Linear children are skipped).

    :param model: pytorch model whose Linear children are collapsed
    :param x_sample: input sample determining which units fire
    :param bias: True if the Linear layers carry a bias term
    :param activation: True if you want to add a sigmoid activation on top
    :return: reduced model
    """
    x_sample_copy = deepcopy(x_sample)
    n_linear_layers = sum(1 for module in model.children()
                          if isinstance(module, torch.nn.Linear))

    # compute firing path
    count_linear_layers = 0
    weights_reduced = None
    bias_reduced = None
    for module in model.children():
        if not isinstance(module, torch.nn.Linear):
            continue
        weight = deepcopy(module.weight.detach())
        b = deepcopy(module.bias.detach()) if bias else None
        # linear layer
        hi = module(x_sample_copy)
        # relu activation
        ai = torch.relu(hi)
        # prune nodes that are not firing
        # (except for last layer where we don't have a relu!)
        if count_linear_layers != n_linear_layers - 1:
            weight[hi <= 0] = 0
            if bias:
                b[hi <= 0] = 0
        # fold this layer into the running single-layer equivalent:
        # W <- W_i @ W ; b <- W_i @ b + b_i
        if weights_reduced is None:
            weights_reduced = weight
            if bias:
                bias_reduced = b
        else:
            weights_reduced = torch.matmul(weight, weights_reduced)
            if bias:
                # BUG FIX: add this layer's bias vector `b`, not the boolean
                # flag `bias` (which silently added +1 to every output unit).
                bias_reduced = torch.matmul(weight, bias_reduced) + b
        # the next layer will have the output of the current layer as input
        x_sample_copy = ai
        count_linear_layers += 1

    # build reduced network
    linear = torch.nn.Linear(weights_reduced.shape[1],
                             weights_reduced.shape[0])
    state_dict = linear.state_dict()
    state_dict['weight'].copy_(weights_reduced.clone().detach())
    if bias:
        state_dict['bias'].copy_(bias_reduced.clone().detach())
    layers = [linear]
    if activation:
        layers.append(torch.nn.Sigmoid())
    model_reduced = torch.nn.Sequential(*layers)
    model_reduced.eval()
    return model_reduced
def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
    """
    Compute the cosine similarity between two vectors.

    Parameters
    ----------
    a: np.ndarray
        the first vector
    b: np.ndarray
        the second vector

    Returns
    -------
    float: the cosine similarity; 0 when either vector has zero magnitude
    """
    magnitude_a = norm(a)
    magnitude_b = norm(b)
    if magnitude_a == 0 or magnitude_b == 0:
        return 0
    return inner(a, b) / (magnitude_a * magnitude_b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.