| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def lambda_handler(event, context):
"""
Generate a pre-signed URL that allows a save file to be uploaded to S3 in the player's specified save slot. If the
slot is new, the Lambda verifies that MAX_SAVE_SLOTS_PER_PLAYER has not been reached.
Parameters:
Request Context:
custom:gk_user_id: str
The player_id to associate the save file with. This value comes from the Cognito Authorizer that validates
the API Gateway request.
Header Parameters:
metadata: str
An arbitrary Base64 encoded string to associate with the save file.
[Optional, defaults to an empty string: '']
The total size of the metadata string cannot exceed 1887 bytes (MAX_METADATA_BYTES, see docs above) and must
be Base64 encoded, otherwise the Lambda will return an error. The 2KB limit comes from an S3 limitation, and
the Base64 encoding saves space compared to S3's native behavior for non-ASCII strings:
https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#UserMetadata
The GameKit SDK handles encoding and decoding the metadata string for you; if not using the SDK, please
Base64 encode your metadata values before calling this lambda function.
Examples:
A string, representing the save slot's description:
unencoded_metadata = 'about to fight the boss 👍'
metadata = 'YWJvdXQgdG8gZmlnaHQgdGhlIGJvc3Mg8J+RjQ==' # Pass this to the lambda
A JSON blob, containing several metadata fields:
unencoded_metadata = '{"description": "about to fight the boss 👍", "total_playtime_seconds": "16200"}'
metadata = 'eyJkZXNjcmlwdGlvbiI6ICJhYm91dCB0byBmaWdodCB0aGUgYm9zcyDwn5GNIiwgInRvdGFsX3BsYXl0aW1lX3NlY29uZHMiOiAiMTYyMDAifQ==' # Pass this to the lambda
hash: str
The Base64 encoded SHA-256 hash of the file to upload.
The total size of the hash string will be 44 bytes; the SHA-256 hash itself is 32 bytes, and the Base64
encoding of it will bring the size up to 44. Base64 encoding is used to convert the SHA-256 hash from a
byte stream to an ASCII compliant string.
last_modified_epoch_time: int
The number of milliseconds since epoch of the last UTC time when the save slot was modified on the caller's
device.
Path Parameters:
slot_name: str
The slot name to use for the save file.
Limited to 512 characters; may use alphanumeric characters, dashes (-), underscores (_), and periods (.).
This lambda will return an error if a malformed slot name is provided.
If the slot_name is not occupied with another save file, the Lambda will check whether this new save file
will exceed the MAX_SAVE_SLOTS_PER_PLAYER. If it would be exceeded, the Lambda will return an error.
Query String Parameters:
time_to_live: int
The number of seconds the URL will be valid. The URL will no longer work after the time has expired.
[Optional, defaults to 120 seconds (DEFAULT_TIME_TO_LIVE_SECONDS).]
consistent_read: bool
Whether to use "Consistent Read" when querying DynamoDB.
[Optional, defaults to True (DEFAULT_CONSISTENT_READ).]
Errors:
400 Bad Request - Returned when a malformed 'slot_name' path parameter is provided.
400 Bad Request - Returned when the 'metadata' parameter exceeds 1883 bytes (MAX_METADATA_BYTES) after being
ASCII encoded.
400 Bad Request - Returned when the 'hash' parameter is not exactly 44 bytes (BASE_64_ENCODED_SHA_256_BYTES)
in size.
400 Bad Request - Returned when the save slot is new and would exceed the player's MAX_SAVE_SLOTS_PER_PLAYER.
401 Unauthorized - Returned when the 'custom:gk_user_id' parameter is missing from the request context.
"""
log_event(event)
# Get player_id from requestContext:
player_id = get_player_id(event)
if player_id is None:
return response_envelope(status_code=401)
# Get header inputs:
metadata = get_header_param(event, 'metadata', DEFAULT_METADATA)
sha_hash: str = get_header_param(event, S3_HASH_METADATA_KEY)
last_modified_epoch_time = int(get_header_param(event, 'last_modified_epoch_time'))
# Get path param inputs:
slot_name = get_path_param(event, 'slot_name')
# Get query param inputs:
time_to_live = int(get_query_string_param(event, 'time_to_live', DEFAULT_TIME_TO_LIVE_SECONDS))
consistent_read = bool(strtobool(get_query_string_param(event, 'consistent_read', DEFAULT_CONSISTENT_READ)))
# Validate inputs:
if not is_valid_primary_identifier(slot_name):
logger.error((f'Malformed slot_name: {slot_name} provided for player_id: {player_id}').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_SLOT_NAME)
if get_bytes_length(metadata) > MAX_METADATA_BYTES:
return response_envelope(status_code=400, status_message=ResponseStatus.MAX_METADATA_BYTES_EXCEEDED)
if not is_valid_base_64(metadata):
logger.error((f'Malformed metadata provided, expected a Base64 encoded string. Metadata: {metadata}').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_METADATA)
if len(sha_hash) != BASE_64_ENCODED_SHA_256_BYTES or not sha_hash.isascii():
logger.error((f'Malformed SHA-256 hash: {sha_hash} provided. Must be 44 characters and Base64 encoded.').encode(UTF_8))
return response_envelope(status_code=400, status_message=ResponseStatus.MALFORMED_HASH_SIZE_MISMATCH)
# Verify MAX_SAVE_SLOTS_PER_PLAYER won't be exceeded:
if is_new_save_slot(player_id, slot_name, consistent_read) and would_exceed_slot_limit(player_id, consistent_read):
return response_envelope(status_code=400, status_message=ResponseStatus.MAX_CLOUD_SAVE_SLOTS_EXCEEDED)
# Generate URL:
bucket_name = os.environ.get('GAMESAVES_BUCKET_NAME')
url = generate_presigned_url(
bucket_name, player_id, slot_name, metadata, sha_hash, last_modified_epoch_time, time_to_live
)
# Construct response object:
return response_envelope(
status_code=200,
response_obj={
'url': url
}
)
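# The validation helpers used above (get_bytes_length, is_valid_base_64) live elsewhere in the GameKit
# sources; a minimal sketch of how they could be implemented, assuming the metadata arrives as a str:
import base64
import binascii

def get_bytes_length(value: str) -> int:
    """Byte length of the string once UTF-8 encoded."""
    return len(value.encode('utf-8'))

def is_valid_base_64(value: str) -> bool:
    """True if the string decodes as Base64 and re-encodes to itself."""
    try:
        return base64.b64encode(base64.b64decode(value, validate=True)) == value.encode('ascii')
    except (binascii.Error, UnicodeEncodeError):
        return False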
| 5,336,400
|
def getLanguageLevel() -> dict:
"""
Takes the user input and returns the found documents as a dictionary.
:param text: String
:param language: String
:return: Dictionary
"""
text: str = request.params.get('text')
language: str = request.params.get('language')
# check API Key
if str(request.params.get('key')) != API_KEY:
response.status = 401
return {
"error": "API-KEY is wrong or missing. See https://github.com/elaisasearch/categorizer/blob/master/README.md for more information."
}
if language == "en":
return {
"result": categorizeText(text)
}
# other languages will follow in the future
else:
return {
"error": "'{}' currently isn't supported. Please use 'en' for English as language. Thank you.".format(language)
}
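# A hedged usage sketch of how a client could call this Bottle handler; the host and route path are
# placeholders (the route decorator is not shown above), so adjust them to the real deployment.
def example_get_language_level():
    import requests
    return requests.get(
        "http://localhost:8080/getLanguageLevel",  # hypothetical mount point
        params={"text": "The quick brown fox.", "language": "en", "key": "<API_KEY>"},
    ).json()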
| 5,336,401
|
def list_networks(**kwargs):
"""Lists all networks of the given compartment
Args:
**kwargs: Additional options
Keyword Args:
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
return_formatted (bool): If set to true, a list object is returned.
check_privileges (bool): Checks if the user has privileges for the
subnet
Returns:
The list of VCNs, formatted when return_formatted is True, otherwise
as a list of dicts; None if an error occurs
"""
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
return_formatted = kwargs.get("return_formatted", True)
check_privileges = kwargs.get("check_privileges", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# List the virtual networks
vcns = virtual_network.list_vcns(
compartment_id=compartment_id).data
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
try:
if network_has_subnet(
network=vcn, compartment_id=compartment_id,
config=config,
public_subnet=public_subnet,
check_privileges=check_privileges):
good_vcns.append(vcn)
except oci.exceptions.ServiceError as e:
pass
vcns = good_vcns
if return_formatted:
return format_network_listing(vcns)
else:
return oci.util.to_dict(vcns)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
| 5,336,402
|
def ed_affine_to_extended(pt):
"""Map (x, y) to (x : y : x*y : 1)."""
new_curve = EllipticCurve(pt.curve, ED_EXT_HOM_PROJ, Edwards_ExtProj_Arithm)
return new_curve((pt.x, pt.y, pt.x * pt.y, new_curve.field(1)))
| 5,336,403
|
def _download(path, url, archive_name, hash_, hash_type='md5'):
"""Download and extract an archive, completing the filename."""
full_name = op.join(path, archive_name)
remove_archive = True
fetch_archive = True
if op.exists(full_name):
logger.info('Archive exists (%s), checking hash %s.'
% (archive_name, hash_,))
fetch_archive = False
if hashfunc(full_name, hash_type=hash_type) != hash_:
if input('Archive already exists but the hash does not match: '
'%s\nOverwrite (y/[n])?'
% (archive_name,)).lower() == 'y':
os.remove(full_name)
fetch_archive = True
if fetch_archive:
logger.info('Downloading archive %s to %s' % (archive_name, path))
try:
temp_file_name, header = urlretrieve(url)
# check hash sum eg md5sum
if hash_ is not None:
logger.info('Verifying hash %s.' % (hash_,))
hashsum = hashfunc(temp_file_name, hash_type=hash_type)
if hash_ != hashsum:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, hashsum))
shutil.move(temp_file_name, full_name)
except Exception:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
raise
# _fetch_file(url, full_name, print_destination=False,
# hash_=hash_, hash_type=hash_type)
return remove_archive, full_name
| 5,336,404
|
def do_setup(experiment_folder, path_to_additional_args):
""" Setup Shell Scripts for Experiment """
additional_args = joblib.load(path_to_additional_args)
# Setup Data
logger.info("Setting Up Data")
data_args = setup_train_test_data(experiment_folder, **additional_args)
# Setup
logger.info("Saving Experiment Options per ID")
sampler_args = additional_args['sampler_args']
arg_list = dict_product(sampler_args, data_args)
options_df = setup_options(experiment_folder, arg_list)
return options_df
| 5,336,405
|
def getorgadmins(apikey, orgid, suppressprint=False):
"""
Args:
apikey: User's Meraki API Key
orgid: OrganizationId for operation to be performed against
suppressprint: Suppress the output of informational print statements
Returns:
The parsed response from the Meraki Dashboard API admins endpoint
"""
__hasorgaccess(apikey, orgid)
calltype = 'Organization'
geturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
| 5,336,406
|
def calc_recall(TP, FN):
"""
Calculate recall from TP and FN
"""
if TP + FN != 0:
recall = TP / (TP + FN)
else:
recall = 0
return recall
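# Worked example: with 8 true positives and 2 false negatives, recall = 8 / (8 + 2) = 0.8,
# and the zero-division guard returns 0 when there are no positive samples at all.
assert calc_recall(8, 2) == 0.8
assert calc_recall(0, 0) == 0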
| 5,336,407
|
def lookup_last_report_execution(job_type, work_ids=None):
"""Lookup in the database when the report/job chunk last executed
This is the expected table schema from the database (id and timestamp columns
are omitted),
---------------------------------------------------
| work_id | history                               |
---------------------------------------------------
| 1000    | {"report_A": 2019-01-11 11:22:33,     |
|         |  "report_B": 2020-01-12 02:03:44}     |
| 2000    | {"report_A": 2012-01-11 12:23:33}     |
---------------------------------------------------
The work_ids parameter is expected to contain work ids. The parameter is
named work_ids (plural) to support future changes.
Args:
job_type (str): The name of the job to check execution time for
work_ids (list): Specific work ids to check execution time for
Returns:
last_exec_min (int or None): Largest number of minutes since the last
execution for any of the work ids. None
if never executed
Examples:
Looking up the greatest time since work id 1000 executed report_B
should be 2 minutes
>>> str(datetime.utcnow())
2020-01-12 02:05:44
>>> lookup_last_report_execution("report_B", [1000])
2
Looking up the greatest time since work id 1234 executed report_B
should be None, as it was never executed
>>> print(lookup_last_report_execution("report_B", [1234]))
None
"""
# Create string ready for SQL
work_ids_string = ", ".join([str(c) for c in work_ids])
# Query database
# This returns a single number that is the latest execution for any of
# the work_ids in minutes or a single row containing 99999999
sql = f"""
SELECT
MAX(IFNULL(MINUTES_SINCE_LAST_EXEC, 99999999)) AS last_exec
FROM (
-- Calculate the time since last execution
SELECT
TIMESTAMPDIFF(
MINUTE,
STR_TO_DATE(
JSON_UNQUOTE(
JSON_EXTRACT(
history,
'$."{job_type}"')
), "%Y-%m-%d %H:%i:%s"),
CURRENT_TIMESTAMP()
) AS MINUTES_SINCE_LAST_EXEC
FROM StauLatestExecution
WHERE workId IN ({work_ids_string})
) as subq
"""
with Stau() as queue:
rtn = queue._exec(sql, {})
return rtn.get("last_exec", None)
| 5,336,408
|
def get_course_goal_options():
"""
Returns the valid options for goal keys, mapped to their translated
strings, as defined by the CourseGoal model.
"""
return {goal_key: goal_text for goal_key, goal_text in GOAL_KEY_CHOICES}
| 5,336,409
|
def to_dataframe(y):
"""
If the input is not a dataframe, convert it to a dataframe
:param y: The target variable
:return: A dataframe
"""
if not isinstance(y, pd.DataFrame):
return pd.DataFrame(y)
return y
| 5,336,410
|
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,
ignore_query=False, ignore_fragment=False):
"""
Compare two URLs and return True if they are equal, some parts of the URLs can be ignored
:param first: URL
:param second: URL
:param ignore_scheme: ignore the scheme
:param ignore_netloc: ignore the netloc
:param ignore_path: ignore the path
:param ignore_params: ignore the params
:param ignore_query: ignore the query string
:param ignore_fragment: ignore the fragment
:return: result of comparison
"""
# <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
firstp = urlparse(first)
secondp = urlparse(second)
return (
(firstp.scheme == secondp.scheme or ignore_scheme)
and (firstp.netloc == secondp.netloc or ignore_netloc)
and (firstp.path == secondp.path or ignore_path)
and (firstp.params == secondp.params or ignore_params)
and (firstp.query == secondp.query or ignore_query)
and (firstp.fragment == secondp.fragment or ignore_fragment)
)
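# Usage example: two URLs that differ only in the fragment compare equal once the fragment is ignored.
assert url_equal("https://example.com/a?x=1#top", "https://example.com/a?x=1#bottom", ignore_fragment=True)
assert not url_equal("https://example.com/a", "http://example.com/a")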
| 5,336,411
|
def yggdrasil_model_to_keras_model(
src_path: str,
dst_path: str,
input_model_signature_fn: Optional[tf_core.InputModelSignatureFn] = tf_core
.build_default_input_model_signature):
Converts a Yggdrasil model into a Keras model.
Args:
src_path: Path to input Yggdrasil Decision Forests model.
dst_path: Path to output TensorFlow Decision Forests SavedModel model.
input_model_signature_fn: A lambda that returns the
(Dense,Sparse,Ragged)TensorSpec (or structure of TensorSpec e.g.
dictionary, list) corresponding to input signature of the model. If not
specified, the input model signature is created by
"build_default_input_model_signature". For example, specify
"input_model_signature_fn" if an numerical input feature (which is
consumed as DenseTensorSpec(float32) by default) will be feed differently
(e.g. RaggedTensor(int64)).
"""
inspector = inspector_lib.make_inspector(src_path)
objective = inspector.objective()
model = CoreModel(
task=objective.task,
learner="MANUAL",
ranking_group=objective.group
if objective.task == inspector_lib.Task.RANKING else None)
model._set_from_yggdrasil_model( # pylint: disable=protected-access
inspector,
src_path,
input_model_signature_fn=input_model_signature_fn)
model.save(dst_path)
| 5,336,412
|
def test_struct(n: cython.int, x: cython.double) -> MyStruct2:
"""
>>> test_struct(389, 1.64493)
(389, 1.64493)
>>> d = test_struct.__annotations__
>>> sorted(d)
['n', 'return', 'x']
"""
assert cython.typeof(n) == 'int', cython.typeof(n)
if is_compiled:
assert cython.typeof(x) == 'double', cython.typeof(x) # C double
else:
assert cython.typeof(x) == 'float', cython.typeof(x) # Python float
a = cython.declare(MyStruct2)
a[0] = MyStruct(is_integral=True, data=MyUnion(n=n))
a[1] = MyStruct(is_integral=False, data={'x': x})
return a[0].data.n, a[1].data.x
| 5,336,413
|
def build_document(json_schema: dict) -> list:
"""
Returns a list of lines to generate a basic adoc file, with the format:
Title
A table for the data properties
A table for the data attributes and nested attributes if any
"""
lines = []
"""
Title and description of schema
"""
title = get_json_attribute(['title'], json_schema)
description = get_json_attribute(['description'], json_schema)
"""
Id and required properties of object
"""
data = get_json_attribute(['properties', 'data'], json_schema)
data_required = get_json_attribute(['required'], data)
data_properties = get_json_attribute(['properties'], data)
"""
Attributes of object
"""
attributes = get_json_attribute(['attributes'], data_properties)
required = get_json_attribute(['required'], attributes)
attribute_properties = get_json_attribute(['properties'], attributes)
"""
Relationships of object
"""
relationships = get_json_attribute(['relationships', 'properties'], data_properties)
print(relationships)
if relationships:
for relationship_name in relationships:
relationship_object = get_json_attribute([relationship_name], relationships)
relationship_required = get_json_attribute(['required'], relationship_object)
relationship_properties = get_json_attribute(['data', 'properties'], relationship_object)
if not relationship_required:
relationship_required = ''
if 'type' in relationship_properties:
relationship_type = get_json_attribute(['type', 'const'], relationship_properties)
relationship_object.update({'type': str(relationship_type)})
"""
Cleans up properties table
"""
# TODO: retrieve nested 'const' attribute from relationship to display under 'Type' in adoc table
data_type = get_json_attribute(['type', 'const'], data_properties)
if 'type' in data_properties:
data_properties.update({'type': {'type': str(data_type)}})
if 'relationships' in data_properties:
del data_properties['relationships']
del data_properties['attributes']
"""
Sets title, description, and tables
"""
lines.append(get_adoc_title(title, 3))
if description:
lines.append(description+'\n')
if data_properties:
lines.extend(get_adoc_table('Properties', ['Type', 'Description'], data_properties, data_required))
if attributes:
lines.extend(get_adoc_table('Attributes', ['Type', 'Description'], attribute_properties, required, True))
lines.append('\n')
if relationships:
lines.extend(get_adoc_table('Relationships', ['Type', 'Description'], relationships, relationship_required))
return lines
| 5,336,414
|
def test_streaming_histogram_1d(dtype, error):
"""Test the computation of streaming histogram for a 1D array."""
values = np.random.random_sample((10000, )).astype(dtype)
hist = StreamingHistogram(values, dtype=dtype, bin_count=values.size)
check_stats(hist, values, dtype, error)
assert np.all(hist.bins()["value"] == np.sort(values))
assert np.all(hist.bins()["weight"] == np.ones_like(values))
other = pickle.loads(pickle.dumps(hist))
check_stats(other, values, dtype, error)
hist = StreamingHistogram(values,
weights=np.ones(values.size),
bin_count=values.size)
check_stats(hist, values, dtype, error)
hist = StreamingHistogram(da.from_array(values, chunks=(1000, )),
bin_count=values.size)
check_stats(hist, values, dtype, error)
assert isinstance(str(hist), str)
| 5,336,415
|
def remove_potential_nonlipids_bad_esi_mode():
"""
remove_potential_nonlipids_bad_esi_mode
description:
ESI mode of the dataset is not 'pos' or 'neg'
returns:
(bool) -- test pass (True) or fail (False)
"""
dset = Dataset(os.path.join(os.path.dirname(__file__), 'real_data_1.csv'))
try:
remove_potential_nonlipids(dset)
except ValueError:
return True
return False
| 5,336,416
|
def test_no_args_workspace_configured_with_some_groups(tmp_path: Path) -> None:
"""Scenario:
* Nothing passed on the command line
* A group named 'group1' in the manifest containing foo
* A group named 'group2' in the manifest containing bar
* Workspace configured with repo_groups=[group1]
Should return foo from group1
"""
groups = {
"group1": {"repos": ["foo"]},
"group2": {"repos": ["bar"]},
}
create_manifest(tmp_path, repos=["foo", "bar"], groups=groups)
workspace = create_workspace(tmp_path, repo_groups=["group1"])
actual = resolve_repos(workspace, groups=None, all_cloned=False)
assert repo_names(actual) == ["foo"]
| 5,336,417
|
def is_oasis_db():
""" Is this likely an OASIS database? Look at the table names to see
if we have the more specific ones.
Return "yes", "no", or "empty"
"""
expect = ['qtvariations', 'users', 'examqtemplates', 'marklog', 'qtattach',
'questions', 'guesses', 'exams', 'qtemplates']
tables = public_tables()
if len(tables) == 0:
return "empty"
if set(expect).issubset(tables):
return "yes"
return "no"
| 5,336,418
|
def selection_screen(screen: pygame.Surface) -> None:
"""
Selection screen between SEARCHING, SORTING, TITLE
"""
clear_screen(screen)
border(screen)
# Labels
draw_header(screen, "Table of Content")
b1 = PButton(screen, (180, 230, 300, 50))
b1.add_text("Sorting")
b2 = PButton(screen, (180, 300, 300, 50))
b2.add_text("Searching")
buttons = [b1, b2]
# TODO: Recognize when clicked on "Searching" and go there
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
if event.type == pygame.MOUSEBUTTONUP:
x, y = event.pos
if b1.is_cursor_on((x, y), True):
sort_selection(screen)
if b2.is_cursor_on((x, y), True):
search_selection(screen)
for b in buttons:
if b.is_cursor_on(pygame.mouse.get_pos()):
b.hover()
else:
b.draw()
pygame.display.flip()
| 5,336,419
|
def make_segment(segment, discontinuity=False):
"""Create a playlist response for a segment."""
response = []
if discontinuity:
response.append("#EXT-X-DISCONTINUITY")
response.extend(["#EXTINF:10.0000,", f"./segment/{segment}.m4s"]),
return "\n".join(response)
| 5,336,420
|
def seq_aggregate_with_reducer(x, y):
"""
Sequencing function that works with the dataframe created by get_normal_frame
:param x: tuple of (aggregation_name, column, value) triples accumulated so far
:param y: tuple of the same shape holding the next row's values
:return: tuple with each value combined by the aggregation function named in its first element
"""
res = []
for i in range(0, len(x)):
res.append((x[i][0], x[i][1], get_aggregation_func_by_name(x[i][0])(x[i][2], y[i][2])))
return tuple(res)
| 5,336,421
|
def console(bot: Optional[Union[T_Base, T_Group]]=None, **kwargs) -> None:
"""
Function for direct interaction via terminal.
Useful for testing, not advised for production code.
Params:
-bot = Instance of the bot/botgroup you wish to control via console.
-kwargs = dict of global variables defined in the __main__ module to give the console access to.
"""
if kwargs:
# Define global access for variables passed to the function
for x, y in kwargs.items():
globals()[x] = y
main = __import__('__main__')
for x, y in main.__dict__.items():
if x != 'bot' and (isinstance(y, BotGroup) or isinstance(y, Base)):
# Creates local variable for the relevant variables from the __main__ module
locals()[x] = y
if x.lower() not in main.__dict__ \
or not (
isinstance(main.__dict__[x.lower()], BotGroup) or
isinstance(main.__dict__[x.lower()], Base)
):
# Alternate local variable with all lowercase as the variable name
# as long as a variable by the same name won't be imported
locals()[x.lower()] = y
while True: # Control loop
try:
a = input()
print('===============')
print(eval(a))
print('===============')
except SystemExit:
if bot:
bot.close()
for x in locals().values():
if isinstance(x, Base) or isinstance(x, BotGroup):
x.close()
return
except:
# Catch for exceptions to allow the console to continue operating.
print('>>>Exception occurred: {0}'.format(sys.exc_info()[1]))
print_tb(sys.exc_info()[2])
print('===============')
| 5,336,422
|
def from_dicts(key: str, *dicts, default: Any = None):
"""
Returns the value of key in the first dict that contains it.
If no dict matches, the default value is returned.
Return:
Any
"""
for d in dicts:
if key in d:
return d[key]
return default
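# Usage example: the first dict containing the key wins; the default is used when no dict has it.
assert from_dicts("host", {"port": 80}, {"host": "example.com"}, default="localhost") == "example.com"
assert from_dicts("user", {"port": 80}, default="anonymous") == "anonymous"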
| 5,336,423
|
def comp_play_hand(hand, word_list):
"""
Allows the computer to play the given hand, as follows:
* The hand is displayed.
* The computer chooses a word using comp_choose_word(hand, word_list).
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the computer
chooses another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when the computer has exhausted its possible choices (i.e. comp_choose_word returns None).
hand: dictionary (string -> int)
word_list: list (string)
"""
# TO DO ...
score = 0
handlen = calculate_handlen(hand)
while True:
print 'Current Hand:'
display_hand(hand)
word = comp_choose_word(hand, word_list)
print 'computer choose word:', word
if word == None:
break
if is_valid_word(word, hand, word_list):
hand = update_hand(hand, word)
earn = get_word_score(word, handlen)
score += earn
print word, 'earn', earn, 'points, Total points:', score, '\n'
else:
print 'invalid word,', word, ' please try again'
continue
| 5,336,424
|
def test_clear_objects():
"""
Checks the clear_objects method
"""
obj_storage = (
object_storage.ObjectStorage()
) # obj_storage is a wrapper object to a collection of objects
x = torch.tensor(1)
obj_storage.set_obj(x)
objs = obj_storage.current_objects() # Returns a copy of the objects in obj_storage(here:x)
assert len(objs) == 1
assert objs[x.id] == x
ret_val = obj_storage.clear_objects() # Completely removes all objects from obj_storage
objs = obj_storage.current_objects()
assert len(objs) == 0
assert ret_val == obj_storage
| 5,336,425
|
def test_update_patient(mock_app, test_client, gpx4_patients, test_node, database):
"""Test updating a patient by sending a POST request to the add endpoint with valid data"""
patient_data = gpx4_patients[1]
# Given a node with authorized token
ok_token = test_client["auth_token"]
add_node(mongo_db=mock_app.db, obj=test_client, is_client=True)
add_node(
mongo_db=mock_app.db, obj=test_node, is_client=False
) # add a test node, to perform external matching
# a matches collection without documents
assert database["matches"].find_one() is None
# and an empty patients collection
assert database["patients"].find_one() is None
# GIVEN a patient added using the add endpoint
patient_obj = {"patient": patient_data} # this is a valid patient object
response = mock_app.test_client().post(
"patient/add", data=json.dumps(patient_obj), headers=auth_headers(ok_token)
)
assert response.status_code == 200
# WHEN the patient is updated using the same add endpoint
patient_data["label"] = "modified patient label"
patient_obj = {"patient": patient_data} # this
response = mock_app.test_client().post(
"patient/add", data=json.dumps(patient_obj), headers=auth_headers(ok_token)
)
assert response.status_code == 200
# Then there should still be one patient in the database
results = database["patients"].find()
assert len(list(results)) == 1
# And the update has triggered an additional external patient matching
results = database["matches"].find()
assert len(list(results)) == 2
| 5,336,426
|
def setIndexingRules(fixed_allocations, indexer_id, blacklist_parameter=True, parallel_allocations=0, network="mainnet"):
"""
setIndexingRule via indexer agent management endpoint (default :18000).
The endpoint works with GraphQL mutations, so the mutations are sent via requests.post
calls.
returns: the IndexingRules set via the indexer agent management endpoint
"""
print("YOU ARE IN AUTOMATION MODE")
indexer_id = indexer_id.lower()
# get relevant gateway for mainnet or testnet
if network == 'mainnet':
API_GATEWAY = os.getenv('API_GATEWAY')
else:
API_GATEWAY = os.getenv('TESTNET_GATEWAY')
# get blacklisted subgraphs if wanted
if blacklist_parameter:
with open("./config.json", "r") as jsonfile:
INVALID_SUBGRAPHS = json.load(jsonfile).get('blacklist')
else:
INVALID_SUBGRAPHS = False
# set amount of parallel allocations per subgraph
parallel_allocations = parallel_allocations
# get the amount of GRT that should be allocated from the optimizer
fixed_allocation_sum = sum(list(fixed_allocations.values())) * parallel_allocations
# get relevant indexer data
indexer_data = requests.post(
API_GATEWAY,
data='{"query":"{ indexer(id:\\"' + indexer_id + '\\") { account { defaultName { name } } stakedTokens delegatedTokens allocatedTokens tokenCapacity } }"}',
headers={'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
).json()['data']['indexer']
remaining_stake = int(indexer_data['tokenCapacity']) - int(fixed_allocation_sum)
print(
f"Processing subgraphs for indexer {indexer_data['account']['defaultName']['name'] if indexer_data['account']['defaultName'] else indexer_id}")
print(f"Staked: {int(indexer_data['stakedTokens']) / 10 ** 18:,.2f}")
print(f"Delegated: {int(indexer_data['delegatedTokens']) / 10 ** 18:,.2f}")
print(f"Token Capacity: {int(indexer_data['tokenCapacity']) / 10 ** 18:,.2f}")
print(f"Currently Allocated: {int(indexer_data['allocatedTokens']) / 10 ** 18:,.2f}")
print(f"Fixed Allocation: {int(fixed_allocation_sum) / 10 ** 18:,.2f}")
print(f"Remaining Stake: {remaining_stake / 10 ** 18:,.2f}")
print('=' * 40)
if (int(indexer_data['tokenCapacity']) - int(indexer_data['allocatedTokens']) < int(fixed_allocation_sum)):
print("Not enough free stake for fixed allocation. Free to stake first")
# sys.exit()
subgraph_data = requests.post(
API_GATEWAY,
data='{"query":"{ subgraphDeployments(first: 1000) { id originalName stakedTokens signalledTokens } }"}',
headers={'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
).json()['data']['subgraphDeployments']
subgraphs = set()
invalid_subgraphs = set()
total_signal = 0
total_stake = 0
dynamic_allocation = 0
for subgraph_deployment in subgraph_data:
subgraph = base58.b58encode(bytearray.fromhex('1220' + subgraph_deployment['id'][2:])).decode("utf-8")
if INVALID_SUBGRAPHS:
if subgraph in INVALID_SUBGRAPHS:
#print(f" Skipping invalid Subgraph: {subgraph_deployment['originalName']} ({subgraph})")
invalid_subgraphs.add(subgraph)
pass
if subgraph in fixed_allocations.keys():
if fixed_allocations[subgraph] > 0:
print(
f"{subgraph_deployment['originalName']} ({subgraph}) Total Stake: {int(subgraph_deployment['stakedTokens']) / 10 ** 18:,.2f} Total Signal: {int(subgraph_deployment['signalledTokens']) / 10 ** 18:,.2f} , Ratio: {(int(subgraph_deployment['stakedTokens']) / 10 ** 18) / ((int(subgraph_deployment['signalledTokens']) + 1) / 10 ** 18)}")
subgraphs.add(subgraph)
total_signal += int(subgraph_deployment['signalledTokens'])
total_stake += int(subgraph_deployment['stakedTokens'])
else:
if subgraph in fixed_allocations.keys():
if fixed_allocations[subgraph] > 0:
print(
f"{subgraph_deployment['originalName']} ({subgraph}) Total Stake: {int(subgraph_deployment['stakedTokens']) / 10 ** 18:,.2f} Total Signal: {int(subgraph_deployment['signalledTokens']) / 10 ** 18:,.2f} , Ratio: {(int(subgraph_deployment['stakedTokens']) / 10 ** 18) / ((int(subgraph_deployment['signalledTokens']) + 1) / 10 ** 18)}")
subgraphs.add(subgraph)
total_signal += int(subgraph_deployment['signalledTokens'])
total_stake += int(subgraph_deployment['stakedTokens'])
print(f"Total Signal: {total_signal / 10 ** 18:,.2f}")
print(f"Total Stake: {total_stake / 10 ** 18:,.2f}")
print('=' * 40)
print(f"Subgraphs: {len(subgraphs)}")
print(f"Fixed: {len(set(fixed_allocations.keys()))}")
print(f"Dynamic: {len(subgraphs - set(fixed_allocations.keys()))}")
print(f"Dynamic Allocation: {dynamic_allocation / 10 ** 18:,.2f}")
print('=' * 40)
print()
# Closing Allocations via Indexer Agent Endpoint (localhost:18000), set decision_basis to never
print("NOW CLOSING ALLOCATIONS AUTOMATICALLY VIA INDEXER MANAGEMENT ENDPOINT")
active_allocations = getActiveAllocations(indexer_id = indexer_id, network = network)
allocation_ids = []  # initialised before the branch so later calls that pass allocation_ids never hit a NameError
if active_allocations:
active_allocations = active_allocations['allocations']
for allocation in active_allocations:
subgraph_hash = allocation["subgraphDeployment"]['id']
allocation_amount = allocation["allocatedTokens"]
print("CLOSING ALLOCATION FOR SUBGRAPH: " + str(subgraph_hash))
print("SUBGRAPH IPFS HASH: " + allocation['subgraphDeployment']['ipfsHash'])
print("ALLOCATION AMOUNT: " + str(allocation_amount))
setIndexingRuleQuery(deployment = subgraph_hash, decision_basis = "never", parallel_allocations = parallel_allocations,
allocation_amount = 0 )
allocation_ids.append(allocation['id'])
print("Closing Allocations amount: " + str(len(allocation_ids)))
asyncFilterAllocationEvents(indexer_id = indexer_id, allocation_ids = allocation_ids, network= network, event_type = "closing" )
# Allocating via Indexer Agent Endpoint (localhost:18000) set decision_basis to always
print("NOW RUNNING THE AUTOMATIC ALLOCATION VIA INDEXER MANAGEMENT ENDPOINT")
subgraph_deployment_ids = []
for subgraph in subgraphs:
if subgraph in fixed_allocations.keys():
if fixed_allocations[subgraph] != 0:
subgraph_hash = "0x"+base58.b58decode(subgraph).hex()[4:]
subgraph_deployment_ids.append(subgraph_hash)
allocation_amount = fixed_allocations[subgraph] / 10 ** 18
print("ALLOCATING SUBGRAPH: " + "0x"+base58.b58decode(subgraph).hex()[4:])
print("Allocation Amount: " + str(allocation_amount))
print("")
setIndexingRuleQuery(deployment = subgraph_hash, decision_basis = "always", parallel_allocations = parallel_allocations,
allocation_amount = allocation_amount)
asyncFilterAllocationEvents(indexer_id = indexer_id, allocation_ids = allocation_ids, network = network,
subgraph_deployment_ids = subgraph_deployment_ids)
| 5,336,427
|
def make_obj(f, mesh):
"""Crude export to Wavefront mesh format"""
for v in mesh.verts:
f.write("v {} {} {}\n".format(v.x, v.y, v.z))
for face in mesh.faces:
if isinstance(face, Quad):
f.write("f {} {} {} {}\n".format(face.v1, face.v2, face.v3, face.v4))
if isinstance(face, Tri):
f.write("f {} {} {}\n".format(face.v1, face.v2, face.v3))
| 5,336,428
|
def temporary_dir(chdir=True):
"""Context manager that creates a temporary directory and chdirs to it.
When the context manager exits it returns to the previous cwd
and deletes the temporary directory.
"""
d = tempfile.mkdtemp()
try:
with contextlib.ExitStack() as stack:
if chdir:
stack.enter_context(cd(d))
yield d
finally:
if os.path.exists(d):
shutil.rmtree(d)
| 5,336,429
|
def time_in_words(h, m):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem
Given the time in numerals we may convert it into words, as shown below:
----------------------------------------------
| 5:00 | -> | five o' clock |
| 5:01 | -> | one minute past five |
| 5:10 | -> | ten minutes past five |
| 5:15 | -> | quarter past five |
| 5:30 | -> | half past five |
| 5:40 | -> | twenty minutes to six |
| 5:45 | -> | quarter to six |
| 5:47 | -> | thirteen minutes to six |
| 5:28 | -> | twenty eight minutes past five |
----------------------------------------------
At minutes = 0, use o' clock. For 1 <= minutes <= 30, use past, and for 30 < minutes use to. Note the space between
the apostrophe and clock in o' clock. Write a program which prints the time in words for the input given in the
format described.
Args:
h (int): hour of the day
m (int): minutes after the hour
Returns:
str: string representation of the time
"""
time = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen",
"fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "twenty one", "twenty two",
"twenty three", "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight", "twenty nine"]
# We check for a certain set of cases:
# Case 1 - we're on the hour, so we use o' clock
if m == 0:
return "{0} o' clock".format(time[h-1])
# Case 2 - we're one minute after, so we use minute (versus minutes later on to describe the time)
if m == 1:
return "{0} minute past {1}".format(time[m-1], time[h-1])
# Case 3 - we're a quarter past the hour
if m == 15:
return "quarter past {0}".format(time[h-1])
# Case 4 - we're half past the hour
if m == 30:
return "half past {0}".format(time[h-1])
# Case 5 - we're a quarter to the next hour
if m == 45:
return "quarter to {0}".format(time[h])
# Case 6 - we check for minutes after the hour, which is until we hit minute 30
if m < 30:
return "{0} minutes past {1}".format(time[m-1], time[h-1])
# Case 7 - this covers the cases where the minutes are after 30, so we're minutes to the next hour
return "{0} minutes to {1}".format(time[59-m], time[h])
| 5,336,430
|
def swap_values_at(first_position, second_position, list):
"""
Swaps two values in-place in a list of lists.
:param first_position: A two-tuple of integers - the index of the
first value.
:param second_position: A two-tuple of integers - the index of the
second value.
:param list: A list of lists.
"""
i, j = first_position
k, l = second_position
list[i][j], list[k][l] = list[k][l], list[i][j]
| 5,336,431
|
def majorityElement(nums):
"""超过三分之一的数,最多不超过两个数"""
num1, num2 = -1, -1
count1, count2 = 0, 0
for i in range(len(nums)):
curNum = nums[i]
if curNum == num1:
count1 += 1
elif curNum == num2:
count2 += 1
elif count1 == 0:
num1 = curNum
count1 = 1
elif count2 == 0:
num2 = curNum
count2 = 1
else:
count1 -= 1
count2 -= 1
count1, count2 = 0, 0
for n in nums:
if n == num1:
count1 += 1
elif n == num2:
count2 += 1
print("num1: {}, count1: {}; num2: {}, count2: {}".format(num1, count1, num2, count2))
numLens = len(nums)
ret = []
if count1 > numLens//3:
ret.append(num1)
if count2 > numLens//3:
ret.append(num2)
return ret
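# Usage example: with n = 8, n // 3 = 2, so both 1 and 2 (three occurrences each) qualify.
assert majorityElement([1, 1, 1, 3, 3, 2, 2, 2]) == [1, 2]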
| 5,336,432
|
def calcDensHeight(T,p,z):
"""
Calculate the density scale height H_rho
Parameters
----------
T: vector (float)
temperature (K)
p: vector (float) of len(T)
pressure (pa)
z: vector (float) of len(T)
height (m)
Returns
-------
Hbar: vector (float) of len(T)
density scale height (m)
"""
dz=np.diff(z)
TLayer=(T[1:] + T[0:-1])/2.
dTdz=np.diff(T)/np.diff(z)
oneOverH=g/(Rd*TLayer) + (1/TLayer*dTdz)
Zthick=z[-1] - z[0]
oneOverHbar=np.sum(oneOverH*dz)/Zthick
Hbar = 1/oneOverHbar
return Hbar
| 5,336,433
|
def test_app_initialisation():
"""
.. test:: Additional test 1
:id: TC_LE_GROUNDWORK_0_1_12_0404
This test case checks
- if a groundwork app can be instantiated
- if the app path is set to the current working directory (APP_PATH is unset because no configuration is given)
"""
app = groundwork.App()
assert app.path == os.getcwd()
| 5,336,434
|
def resetDb(db_name):
""" Create or cleanup a user DB"""
if db_name in bw.databases:
_eprint("Db %s was here. Reseting it" % db_name)
del bw.databases[db_name]
db = bw.Database(db_name)
db.write(dict())
| 5,336,435
|
def Signal_figure(name,I,mask):
"""Plots a figure designed to show the influences of the image parameters and creates a .png image of it.
Parameters
----------
name: string
Desired name of the image.
I: array
MRI image.
mask: array
Region of interest binary mask.
Return
------
References
----------
"""
sns.set()
sns.set_style('ticks')
sns.set_context('talk')
fig=plt.figure(figsize=(20,20))
gs = fig.add_gridspec(2,2)
ax1=fig.add_subplot(gs[0, 0:1])
ax1.imshow(I,cmap='gray')
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title('Noiseless image',fontsize=40)
ax2=fig.add_subplot(gs[0, 1:2])
ax2.imshow(mask,cmap='gray')
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_title('Mask',fontsize=40)
ax3=fig.add_subplot(gs[1, 0:])
hist, bins = np.histogram(I,80)
ax3.plot(bins[:-1],hist,'k')
ax3.fill_between(bins[:-1], hist,color='black')
ax3.set_title('Noiseless image histogram',fontsize=40)
ax3.set_ylabel('Number of pixels',fontsize=40)
ax3.set_xlabel('Value',fontsize=40)
ax3.set_xlim(0,750)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
os.chdir('Figures')
plt.savefig(name+'.png')
os.chdir('..')
return None
| 5,336,436
|
async def test_zeroconf_parse_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort zeroconf flow on IPP parse error."""
mock_connection(aioclient_mock, parse_error=True)
discovery_info = MOCK_ZEROCONF_IPP_SERVICE_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "parse_error"
| 5,336,437
|
def load_many_data(filenames, clean=True, first_seconds_remove=2, bandpass_range=(5, 50)):
"""
Loads several files and cleans data if clean is True. Returns a concatenated set of data (MNE object).
"""
# TODO: check for matching channels and other errors
raw_data = []
if filenames is None:
# open tkinter dialogue
#multiple files selected at one time
root = Tk()
root.withdraw()
filenames = filedialog.askopenfilenames()
for f in filenames:
#Check sample frequencies and ask user which sfreq files they would like to look at
cur_raw = load_data(f) # current raw object
raw_data.append(cur_raw)
print("The length of raw_data is:" + str(len(raw_data)))
# print("raw_data[0] is " + str(raw_data[0]))
# print("The length of the file list is:" + str(len([PATH1 + f for f in glob.glob(PATH1 + '*.raw.fif.gz')]))) #This file list doesn't return anything
data = mne.concatenate_raws(raw_data)
if clean:
data = clean_data(data, remove=first_seconds_remove, bandpass_range=bandpass_range)
return data
| 5,336,438
|
def extract_push_target(push_target: str):
"""
Extract push target from the url configured
Workspace is optional
"""
if not push_target:
raise ValueError("Cannot extract push-target if push-target is not set.")
match_pattern = re.compile(
r"(?P<http_scheme>https|http):\/\/(?P<askanna_host>[\w\.\-\:]+)\/(?P<workspace_suuid>[\w-]+){0,1}\/{0,1}project\/(?P<project_suuid>[\w-]+)\/{0,1}" # noqa: E501
)
matches = match_pattern.match(push_target)
matches_dict = matches.groupdict()
return matches_dict
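# Usage example with a hypothetical push-target URL (host and suuid values are placeholders):
info = extract_push_target("https://askanna.example.com/wksp-1234/project/proj-5678")
assert info["workspace_suuid"] == "wksp-1234"
assert info["project_suuid"] == "proj-5678"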
| 5,336,439
|
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
authn_policy = AuthTktAuthenticationPolicy('sosecret', callback=groupfinder,
hashalg='sha512')
authz_policy = ACLAuthorizationPolicy()
memcache_server = os.environ.get('MEMCACHE_SERVERS')
settings['beaker.cache.url'] = memcache_server
config = Configurator(settings=settings,
root_factory='atv.models.RootFactory')
config.include('pyramid_chameleon')
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.add_static_view('URL',
'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('panda', '/panda/authorize_upload')
config.add_route('search', '/search')
config.add_route('searchb', '/search/')
config.add_route('answer', '/answer')
config.add_route('delete', '/delete')
config.add_route('denied', '/denied')
config.add_route('explore', '/explore')
config.add_route('exploreb', '/explore/')
config.add_route('exploretrending', '/explore/trending')
config.add_route('exploretrendingb', '/explore/trending/')
config.add_route('explorelatest', '/explore/latest')
config.add_route('explorelatestb', '/explore/latest/')
config.add_route('exploreourpicks', '/explore/ourpicks')
config.add_route('exploreourpicksb', '/explore/ourpicks/')
config.add_route('vote', '/vote')
config.add_route('deleteanswer', '/deleteanswer')
config.add_route('stream', '/i/stream')
config.add_route('streamb', '/i/stream/')
config.add_route('streamlatest', '/i/stream/latest')
config.add_route('streamlatestb', '/i/stream/latest/')
config.add_route('streamtop', '/i/stream/top')
config.add_route('streamtopb', '/i/stream/top/')
config.add_route('edit', '/i/edit')
config.add_route('editb', '/i/edit/')
config.add_route('followunfollow', '/2x4b32cp')
config.add_route('deletenotification', '/2x4b32qp')
config.add_route('chanlatest', '/{channel}/latest')
config.add_route('chanlatestb', '/{channel}/latest/')
config.add_route('chanrising', '/{channel}/top')
config.add_route('chanrisingb', '/{channel}/top/')
config.add_route('ask', '/ask')
config.add_route('signup', '/signup')
config.add_route('signupb', '/signup/')
config.add_route('login', '/login')
config.add_route('loginb', '/login/')
config.add_route('logout', '/logout')
config.add_route('logoutb', '/logout/')
config.add_route('privacy', '/privacy')
config.add_route('privacyb', '/privacy/')
config.add_route('terms', '/terms')
config.add_route('termsb', '/terms/')
config.add_route('blog', '/blog')
config.add_route('blogb', '/blog/')
config.add_route('admin', '/admin')
config.add_route('adminb', '/admin/')
config.add_route('copyright', '/copyright')
config.add_route('copyrightb', '/copyright/')
config.add_route('contact', '/contact')
config.add_route('contactb', '/contact/')
config.add_route('verify', '/verify')
config.add_route('verifyb', '/verify/')
config.add_route('reset', '/reset')
config.add_route('resetb', '/reset/')
config.add_route('ereset', '/ereset')
config.add_route('eresetb', '/ereset/')
config.add_route('verifyereset', '/ereset/{code}')
config.add_route('verifyreset', '/reset/{code}')
config.add_route('verifyemail', '/verify/{code}')
config.add_route('following', '/{channel}/following')
config.add_route('followingb', '/{channel}/following/')
config.add_route('a_history', '/{channel}/history/a')
config.add_route('a_historyb', '/{channel}/history/a/')
config.add_route('history', '/{channel}/history/q')
config.add_route('historyb', '/{channel}/history/q/')
config.add_route('question', '/{channel}/{question}')
config.add_route('questionb', '/{channel}/{question}/')
config.add_route('channel', '/{channel}')
config.add_route('channelb', '/{channel}/')
#Create WSGI app
config.scan()
return config.make_wsgi_app()
| 5,336,440
|
def dish_gain(radius, freq):
"""
Dish radar gain.
Inputs:
- radius [float]: Dish radius (m)
- freq [float]: Transmit frequency (Hz)
Outputs:
- g: Gain
"""
return 4*pi**2*radius**2/wavelen(freq)**2
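# wavelen() is assumed to be defined elsewhere in this module; a minimal sketch plus a worked example,
# assuming free-space propagation at the speed of light:
from math import pi

SPEED_OF_LIGHT = 299792458.0  # m/s

def wavelen(freq):
    """Wavelength (m) of an electromagnetic wave with frequency freq (Hz)."""
    return SPEED_OF_LIGHT / freq

# Worked example: a 1 m radius dish at 10 GHz has a wavelength of ~0.03 m, so
# dish_gain(1.0, 10e9) = 4 * pi**2 * 1**2 / 0.03**2 ~= 4.4e4, i.e. roughly 46 dBi.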
| 5,336,441
|
def test_advection_1d_constructor():
"""
Test constructor
"""
x_start = 0
x_end = 1
nx = 11
c = 1
advection_1d = Advection1D(c=c, x_start=x_start, x_end=x_end, nx=nx, t_start=0, t_stop=1, nt=11)
np.testing.assert_equal(advection_1d.x_start, x_start)
np.testing.assert_equal(advection_1d.x_end, x_end)
np.testing.assert_equal(advection_1d.nx, nx - 1)
np.testing.assert_almost_equal(advection_1d.dx, 0.1)
np.testing.assert_equal(advection_1d.x, np.linspace(x_start, x_end, nx)[0:-1])
np.testing.assert_equal(True, isinstance(advection_1d.vector_template, VectorAdvection1D))
np.testing.assert_equal(True, isinstance(advection_1d.vector_t_start, VectorAdvection1D))
np.testing.assert_equal(advection_1d.vector_t_start.get_values(),
np.exp(-np.linspace(x_start, x_end, nx)[0:-1] ** 2))
| 5,336,442
|
def _collect_files(gold_dir, system_dir, limit):
"""Return the list of files to run the comparison on."""
gold_files = os.listdir(gold_dir)
system_files = os.listdir(system_dir)
# don't assume the directory content is the same, take the intersection
fnames = sorted(list(set(gold_files).intersection(set(system_files))))
# TODO: includes a hack to avoid a file, get rid of it
fnames = [f for f in fnames[:limit] if not f.endswith('wsj_0907.tml')]
return fnames
| 5,336,443
|
def center_img(img, size=None, fill_value=255):
"""
center img in a square background
"""
h, w = img.shape[:2]
if size is None:
size = max(h, w)
shape = (size, size) + img.shape[2:]
background = np.full(shape, fill_value, np.uint8)
center_x = (size - w) // 2
center_y = (size - h) // 2
background[center_y:center_y + h, center_x:center_x + w] = img
return background
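# Usage example: a 2 x 4 grayscale image is centred in a 4 x 4 square filled with the default value 255.
padded = center_img(np.zeros((2, 4), np.uint8))
assert padded.shape == (4, 4)
assert padded[0, 0] == 255 and padded[1, 0] == 0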
| 5,336,444
|
def concat_files(*files):
"""
Concat some files together. Returns out and err to keep parity with shell commands.
Args:
*files: src1, src2, ..., srcN, dst.
Returns:
out: string
err: string
"""
out = ''
err = ''
dst_name = files[-1]
sources = [files[f] for f in range(len(files)) if f < len(files) - 1]
with open(dst_name, 'w') as dst:
for f in sources:
with open(f, 'r') as src:
for line in src:
dst.write(line)
return out, err
| 5,336,445
|
def distribution_quality( df, refdata, values, ascending, names, fig):
"""Locate the quantile position of each putative :class:`.DesingSerie`
in a list of score distributions.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param grid: Shape of the grid to plot the values in the figure (rows x columns).
:type grid: :class:`tuple` with two :class:`int`
:param refdata: Data content to use as reference.
:type refdata: :class:`~pandas.DataFrame`
:param values: Contents from the data container that are expected to be plotted.
:type values: :func:`list` of :class:`str`
:param ascending: Way the data should be sorted. :data:`True` if the score is better
when lower, :data:`False` otherwise.
:type ascending: :func:`list` of :class:`bool`
:param names: Columns to use as identifiers for the query data.
:type names: :func:`list` of :class:`str`
:param fig: Figure into which the data is going to be plotted.
:type fig: :class:`~matplotlib.figure.Figure`
:return: :class:`~matplotlib.axes.Axes`
:raises:
:ValueError: If columns are requested that do not exist in the :class:`~pandas.DataFrame` of
data **and** reference.
:ValueError: If there isn't a ``ascending`` definition for each ``value``.
:ValueError: If ``refdata`` or ``df`` are not :class:`~pandas.DataFrame`.
:ValueError: If the requested names do not exist in the input data.
.. rubric:: Example:
.. ipython::
:okwarning:
In [1]: from rstoolbox.plot import distribution_quality
...: from rstoolbox.utils import load_refdata
...: import matplotlib.pyplot as plt
...: df = load_refdata('scop')
...: qr = pd.DataFrame([['2F4V', 'C'], ['3BFU', 'B'], ['2APJ', 'C'],
...: ['2C37', 'V'], ['2I6E', 'H']],
...: columns=['pdb', 'chain'])
...: qr = qr.merge(df, on=['pdb', 'chain'])
...: refs = []
...: for i, t in qr.iterrows():
...: refs.append(df[(df['length'] >= (t['length'] - 5)) &
...: (df['length'] <= (t['length'] + 5))])
...: fig = plt.figure(figsize=(25, 6))
...: ax = distribution_quality(df=qr, refdata=refs,
...: values=['score', 'pack', 'avdegree',
...: 'cavity', 'psipred'],
...: ascending=[True, False, True, True, False],
...: names=['pdb', 'chain'], fig=fig)
...: plt.tight_layout()
@savefig distribution_quality_docs1.png width=5in
In [2]: plt.show()
In [3]: plt.close()
"""
if not isinstance(df, pd.DataFrame):
raise ValueError('Unknown data format.')
if not isinstance(refdata, (pd.DataFrame, list)):
raise ValueError('Unknown reference data format.')
if len(set(values).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the data container.")
if len(set(names).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested identifiers do not exist "
"in the data container.")
if isinstance(refdata, list):
if len(refdata) != df.shape[0]:
raise ValueError('If multiple references are provided, '
'there should be the same as queries.')
for i, x in enumerate(refdata):
if not isinstance(x, pd.DataFrame):
raise ValueError('Unknown reference {} data format.'.format(i))
if len(set(values).difference(set(list(x.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the {} reference container.".format(i))
else:
if len(set(values).difference(set(list(refdata.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the {} reference container.".format(i))
refdata = [refdata, ] * len(df.shape[0])
if len(values) != len(ascending):
raise ValueError("Number of values and orders should match.")
ax = plt.subplot2grid((1, 1), (0, 0), fig=fig)
cmap = discrete_cmap_from_colors([(144.0 / 255, 238.0 / 255, 144.0 / 255),
(135.0 / 255, 206.0 / 255, 250.0 / 255),
(255.0 / 255, 165.0 / 255, 0.0 / 255),
(205.0 / 255, 92.0 / 255, 92.0 / 255)])
data = []
labs = []
identifiers = df[names[0]].map(str)
for i in range(1, len(names)):
identifiers += '_' + df[names[i]].map(str)
df = df.reset_index(drop=True)
for i, row in df.iterrows():
data.append([])
labs.append([])
for isc, sc in enumerate(values):
qt = refdata[i][sc].quantile([.25, .5, .75])
if row[sc] <= qt[.25]:
data[-1].append(.12 if ascending[isc] else .87)
labs[-1].append('Q1' if ascending[isc] else 'Q4')
elif row[sc] <= qt[.5]:
data[-1].append(.37 if ascending[isc] else .67)
labs[-1].append('Q2' if ascending[isc] else 'Q3')
elif row[sc] <= qt[.75]:
data[-1].append(.67 if ascending[isc] else .37)
labs[-1].append('Q3' if ascending[isc] else 'Q2')
else:
data[-1].append(.87 if ascending[isc] else .12)
labs[-1].append('Q4' if ascending[isc] else 'Q1')
df = pd.DataFrame(data, columns=values, index=identifiers)
sns.heatmap(df, square=True, cmap=cmap, cbar=False, annot=pd.DataFrame(labs), fmt='s', ax=ax)
plt.setp( ax.yaxis.get_majorticklabels(), rotation=0 )
return ax
| 5,336,446
|
def join_paths(path, *paths):
"""
"""
return os.path.join(path, *paths)
| 5,336,447
|
def determine_configure_options(module):
"""
Determine configure arguments for this system.
Automatically determine configure options for this system and build
options when the explicit configure options are not specified.
"""
options = module.params['configure_options']
build_userspace = module.params['build_userspace']
build_module = module.params['build_module']
build_terminal_programs = module.params['build_terminal_programs']
build_bindings = module.params['build_bindings']
build_fuse_client = module.params['build_fuse_client']
with_transarc_paths = module.params['with_transarc_paths']
with_debug_symbols = module.params['with_debug_symbols']
with_rxgk = module.params['with_rxgk']
if options is None:
options = {'enable': [], 'disable': [], 'with': [], 'without': []}
if not build_userspace or not build_module:
module.fail_json(msg="build_userspace and build_module are false.")
if build_module:
options['enable'].append('kernel-module')
if is_linux():
options['with'].append('linux-kernel-packaging')
else:
options['disable'].append('kernel-module')
if not build_terminal_programs:
options['disable'].append('gtx')
if not build_bindings:
options['without'].append('swig')
if not build_fuse_client:
options['disable'].append('fuse-client')
if with_debug_symbols:
options['enable'].append('debug')
options['disable'].extend(['optimize', 'strip-binaries'])
if build_module:
options['enable'].append('debug-kernel')
options['disable'].append('optimize-kernel')
if with_transarc_paths:
options['enable'].append('transarc-paths')
if with_rxgk:
options['enable'].append('rxgk')
return options
| 5,336,448
|
def __test_maxwellian_solution__(collision_operator, solver):
"""
tests if df/dt = 0 if f = maxwellian
:return:
"""
f, f_out, v, dv = __run_collision_operator_test_loop__(
vshift=0.0, t_end=T_END, collision_operator=collision_operator, solver=solver
)
np.testing.assert_almost_equal(f, f_out, decimal=4)
| 5,336,449
|
def test_transform_coverage_to_coordinates(coverage, snapshot):
"""
Test that two sample coverage data sets are correctly converted to coordinates.
"""
assert transform_coverage_to_coordinates(coverage) == snapshot
| 5,336,450
|
def getElementTypeToolTip(t):
"""Wrapper to prevent loading qtgui when this module is imported"""
if t == PoolControllerView.ControllerModule:
return "Controller module"
elif t == PoolControllerView.ControllerClass:
return "Controller class"
| 5,336,451
|
def parse_dates(array):
"""Parse the valid dates in an array of strings.
"""
parsed_dates = []
for elem in array:
elem = parse_date(elem)
if elem is not None:
parsed_dates.append(elem)
return parsed_dates
| 5,336,452
|
def export_secret_to_environment(name):
"""
Add secret to envvar.
:param name: The secret key.
:return:
"""
logger.info('Adding envvar: {0}.'.format(name))
try:
value = base64.b64decode(os.environ[name])
except KeyError:
raise EcosystemTestException(
'Secret env var not set {0}.'.format(name))
if isinstance(value, bytes):
value = value.decode(encoding='UTF-8')
os.environ[name.upper()] = value
| 5,336,453
|
def app_factory(global_config, **local_config):
"""
Define an app factory method so that the concrete app is bound at runtime instead of in the configuration file.
:param global_config:
:param local_config:
:return:
"""
return MyApp()
| 5,336,454
|
def str_to_datetime(dt_str):
""" Converts a string to a UTC datetime object.
@rtype: datetime
"""
try:
return dt.datetime.strptime(
dt_str, DATE_STR_FORMAT).replace(tzinfo=pytz.utc)
except ValueError: # If dt_str did not match our format
return None
| 5,336,455
|
def quantize(img):
"""Quantize the output of model.
:param img: the input image
:type img: ndarray
:return: the image after quantize
:rtype: ndarray
"""
pixel_range = 255
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
| 5,336,456
|
def detect_os(ctx, loc="local", verbose=0):
"""
detect what type of os we are using
Usage: inv db.detect-os
"""
env = get_compose_env(ctx, loc=loc)
# Override run commands' env variables one key at a time
for k, v in env.items():
ctx.config["run"]["env"][k] = v
res_os = ctx.run("uname -s")
ctx.config["run"]["env"]["OS"] = "{}".format(res_os.stdout)
if ctx.config["run"]["env"]["OS"] == "Windows_NT":
ctx.config["run"]["env"]["DETECTED_OS"] = "Windows"
else:
ctx.config["run"]["env"]["DETECTED_OS"] = ctx.config["run"]["env"]["OS"]
if verbose >= 1:
msg = "[detect-os] Detected: {}".format(ctx.config["run"]["env"]["DETECTED_OS"])
click.secho(msg, fg=COLOR_SUCCESS)
if ctx.config["run"]["env"]["DETECTED_OS"] == "Darwin":
ctx.config["run"]["env"]["ARCHFLAGS"] = "-arch x86_64"
ctx.config["run"]["env"][
"PKG_CONFIG_PATH"
] = "/usr/local/opt/libffi/lib/pkgconfig"
ctx.config["run"]["env"]["LDFLAGS"] = "-L/usr/local/opt/openssl/lib"
ctx.config["run"]["env"]["CFLAGS"] = "-I/usr/local/opt/openssl/include"
| 5,336,457
|
def test_confusion_matrix_per_subgroup_indexed():
"""
Tests calculating confusion matrix per index-based sub-population.
Tests
:func:`fatf.utils.metrics.tools.confusion_matrix_per_subgroup_indexed`
function.
"""
incorrect_shape_error_gt = ('The ground_truth parameter should be a '
'1-dimensional numpy array.')
incorrect_shape_error_p = ('The predictions parameter should be a '
'1-dimensional numpy array.')
flat = np.array([1, 2])
square = np.array([[1, 2], [3, 4]])
with pytest.raises(IncorrectShapeError) as exin:
fumt.confusion_matrix_per_subgroup_indexed([[0]], square, square)
assert str(exin.value) == incorrect_shape_error_gt
with pytest.raises(IncorrectShapeError) as exin:
fumt.confusion_matrix_per_subgroup_indexed([[0]], flat, square)
assert str(exin.value) == incorrect_shape_error_p
mx1 = np.array([[2, 1, 0], [0, 0, 0], [0, 0, 0]])
mx2 = np.array([[2, 0, 0], [0, 0, 0], [0, 2, 1]])
mx3 = np.array([[2, 0, 1], [0, 2, 0], [1, 0, 1]])
with pytest.warns(UserWarning) as w:
pcmxs_1 = fumt.confusion_matrix_per_subgroup_indexed(
_INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, labels=[0, 1, 2])
pcmxs_2 = fumt.confusion_matrix_per_subgroup_indexed(
_INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS)
assert len(w) == 2
wmsg = ('Some of the given labels are not present in either of the input '
'arrays: {2}.')
assert str(w[0].message) == wmsg
assert str(w[1].message) == wmsg
assert len(pcmxs_1) == 3
assert len(pcmxs_2) == 3
assert np.array_equal(pcmxs_1[0], mx1)
assert np.array_equal(pcmxs_2[0], mx1)
assert np.array_equal(pcmxs_1[1], mx2)
assert np.array_equal(pcmxs_2[1], mx2)
assert np.array_equal(pcmxs_1[2], mx3)
assert np.array_equal(pcmxs_2[2], mx3)
| 5,336,458
|
def is_normalized(M, x, eps):
"""Return True if (a Fuchsian) matrix M is normalized, that
    is all the eigenvalues of its residues in x lie in the [-1/2, 1/2)
    range (in the limit eps->0). Return False otherwise.
Examples:
>>> x, e = var("x epsilon")
>>> is_normalized(matrix([[(1+e)/3/x, 0], [0, e/x]]), x, e)
True
"""
points = singularities(M, x)
for x0, p in points.items():
M0 = matrix_residue(M, x, x0)
for ev in M0.eigenvalues():
ev = limit_fixed(ev, eps, 0)
if not (Rational((-1, 2)) <= ev and ev < Rational((1, 2))):
return False
return True
| 5,336,459
|
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = f'{settings.ANALYTICS_DASHBOARD_URL}/courses/{str(course_key)}'
link = HTML("<a href=\"{0}\" rel=\"noopener\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
| 5,336,460
|
def show_datas(x_train, y_train, x_test, y_test):
"""Show shapes, values, and images."""
# Show shapes.
print('x_train', x_train.shape)
print('y_train', y_train.shape)
print('x_test', x_test.shape)
print('y_test', y_test.shape)
# Show a data value.
#print(x_train[0])
#print(y_train[0])
# Show an image.
img = x_train[0]
img = Image.fromarray(img)
#img.show()
| 5,336,461
|
def figure1_control(data1, cols):
""" Creates a data set to plot figure 1, Panel B, D, F.
Args:
- data1 (pd.DataFrame): the original data set
- cols (list): a list of column names ["agus", "bct", "bcg"]
Returns:
- df_fig1_contr (pd.DataFrame): a data set for plotting panels with controls
"""
data1["uazY"] = data1["uazY"].astype("category")
for column in cols:
data_df = data1.loc[(data1["dzagr01"] != 0) & (abs(data1["dzagr01"]) < 0.2), [column, "uazY"]].dropna()
data_df["constant"] = [1] * len(data_df.index)
y,X = patsy.dmatrices("{}~constant".format(column), data = data_df, return_type='dataframe')
ybar = y.mean()
y = y - y.groupby(data_df["uazY"]).transform('mean') + ybar
Xbar = X.mean()
X = X - X.groupby(data_df["uazY"]).transform('mean') + Xbar
reg = smp.OLS(y,X).fit()
y_hat = reg.predict()
y_hat.shape = (len(y_hat), 1)
residual = y - y_hat
data1["{}_res".format(column)] = residual
    df_fig1_contr = data1.groupby("dzagr01")[["{}_res".format(cols[0]),
                                              "{}_res".format(cols[1]),
                                              "{}_res".format(cols[2])]].mean()
df_fig1_contr.reset_index(level = 0, inplace = True)
for column in cols:
fig1_B1 = sm.ols(formula = "{}_res ~ dzagr01".format(column),
data = df_fig1_contr[(df_fig1_contr["dzagr01"] < 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
fig1_B2 = sm.ols(formula = "{}_res ~ dzagr01".format(column),
data = df_fig1_contr[(df_fig1_contr["dzagr01"] > 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
pred_B1 = fig1_B1.predict()
pred_B2 = fig1_B2.predict()
df_fig1_contr.loc[(df_fig1_contr["dzagr01"] < 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2),
"pred_{}1".format(column)] = pred_B1
df_fig1_contr.loc[(df_fig1_contr["dzagr01"] > 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2),
"pred_{}2".format(column)] = pred_B2
return df_fig1_contr
| 5,336,462
|
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the IPX800 lights."""
controller = hass.data[DOMAIN][entry.entry_id][CONTROLLER]
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
devices = hass.data[DOMAIN][entry.entry_id][CONF_DEVICES]["light"]
entities: list[LightEntity] = []
for device in devices:
if device[CONF_EXT_TYPE] == IPX:
entities.append(IpxLight(device, controller, coordinator))
        elif device[CONF_EXT_TYPE] == EXT_X8R:
entities.append(X8RLight(device, controller, coordinator))
elif device[CONF_EXT_TYPE] == EXT_XDIMMER:
entities.append(XDimmerLight(device, controller, coordinator))
elif device[CONF_EXT_TYPE] == EXT_XPWM and CONF_TYPE not in device:
entities.append(XPWMLight(device, controller, coordinator))
elif (
device[CONF_EXT_TYPE] == EXT_XPWM and device.get(CONF_TYPE) == TYPE_XPWM_RGB
):
entities.append(XPWMRGBLight(device, controller, coordinator))
elif (
device[CONF_EXT_TYPE] == EXT_XPWM
and device.get(CONF_TYPE) == TYPE_XPWM_RGBW
):
entities.append(XPWMRGBWLight(device, controller, coordinator))
async_add_entities(entities, True)
| 5,336,463
|
def GetEffectiveRightsFromAclW(acl, sid):
"""
Takes a SID instead of a trustee!
"""
_GetEffectiveRightsFromAclW = windll.advapi32.GetEffectiveRightsFromAclW
_GetEffectiveRightsFromAclW.argtypes = [PVOID, PTRUSTEE_W, PDWORD] #[HANDLE, SE_OBJECT_TYPE, DWORD, PSID, PSID, PACL, PACL, PSECURITY_DESCRIPTOR]
_GetEffectiveRightsFromAclW.restype = RaiseIfNotErrorSuccess
sid_data = sid.to_bytes()
psid = ctypes.create_string_buffer(sid_data, len(sid_data))
trustee = TRUSTEE_W()
trustee.pMultipleTrustee = 0
trustee.MultipleTrusteeOperation = 0
trustee.TrusteeForm = 0
trustee.TrusteeType = 0
trustee.ptstrName = ctypes.c_void_p(ctypes.addressof(psid))
    effective_rights_mask = DWORD(0)
    acl_data = acl.to_bytes()
    pacl = ctypes.create_string_buffer(acl_data, len(acl_data))
    res = _GetEffectiveRightsFromAclW(pacl, trustee, byref(effective_rights_mask))
    return effective_rights_mask.value
| 5,336,464
|
def store(mnemonic, opcode):
""" Create a store instruction """
ra = Operand("ra", Or1kRegister, read=True)
rb = Operand("rb", Or1kRegister, read=True)
imm = Operand("imm", int)
syntax = Syntax(["l", ".", mnemonic, " ", imm, "(", ra, ")", ",", " ", rb])
patterns = {"opcode": opcode, "ra": ra, "rb": rb, "imm": imm}
members = {
"ra": ra,
"rb": rb,
"imm": imm,
"syntax": syntax,
"patterns": patterns,
"tokens": [Orbis32StoreToken],
}
class_name = mnemonic.title()
return type(class_name, (Orbis32Instruction,), members)
| 5,336,465
|
def hlmlDeviceGetPowerUsage(device: hlml_t.HLML_DEVICE.TYPE) -> int:
""" Retrieves power usage for the device in mW
Parameters:
device (HLML_DEVICE.TYPE) - The handle for a habana device.
Returns:
power (int) - The given device's power usage in mW.
"""
global _hlmlOBJ
power = ctypes.c_uint()
fn = _hlmlOBJ.get_func_ptr("hlml_device_get_power_usage")
ret = fn(device, ctypes.byref(power))
check_return(ret)
return power.value
| 5,336,466
|
def usgs_coef_parse(**kwargs):
"""
Combine, parse, and format the provided dataframes
:param kwargs: potential arguments include:
dataframe_list: list of dataframes to concat and format
args: dictionary, used to run flowbyactivity.py ('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity specifications
"""
# load arguments necessary for function
args = kwargs['args']
# Read directly into a pandas df
df_raw = pd.read_csv(externaldatapath + "USGS_WU_Coef_Raw.csv")
# rename columns to match flowbyactivity format
df = df_raw.copy()
df = df.rename(columns={"Animal Type": "ActivityConsumedBy",
"WUC_Median": "FlowAmount",
"WUC_Minimum": "Min",
"WUC_Maximum": "Max"
})
# drop columns
df = df.drop(columns=["WUC_25th_Percentile", "WUC_75th_Percentile"])
# hardcode data
df["Class"] = "Water"
df["SourceName"] = "USGS_WU_Coef"
df["Location"] = US_FIPS
df['Year'] = args['year']
df = assign_fips_location_system(df, '2005')
df["Unit"] = "gallons/animal/day"
df['DataReliability'] = 5 # tmp
df['DataCollection'] = 5 # tmp
return df
| 5,336,467
|
def repeat_move_randomly(n, circle, window):
"""
Runs move_randomly n times using the given circle and window,
each time making 1000 random moves with 0 seconds pause after each.
Waits for a mouse click after each of the n trials.
Preconditions:
:type n: int
:type circle: rg.Circle
:type window: rg.RoseWindow
where n is nonnegative and the circle is already attached
to a canvas on the given window.
"""
for _ in range(n):
move_randomly(window, circle, 1000, 0)
window.continue_on_mouse_click()
| 5,336,468
|
def success_poly_overlap(gt_poly, res_poly, n_frame):
"""
    :param gt_poly: ground-truth polygons, [Nx8]
    :param res_poly: predicted polygons, [Nx8]
    :param n_frame: number of frames
    :return: success rate at each overlap threshold
"""
thresholds_overlap = np.arange(0, 1.05, 0.05)
success = np.zeros(len(thresholds_overlap))
iou_list = []
for i in range(gt_poly.shape[0]):
iou = poly_overlap_ratio(gt_poly[i], res_poly[i])
iou_list.append(iou)
iou_np = np.array(iou_list)
for i in range(len(thresholds_overlap)):
success[i] = np.sum(iou_np > thresholds_overlap[i]) / float(n_frame)
return success
| 5,336,469
|
def my_get_size_png(gg, height, width, dpi, limitsize):
"""
Get actual size of ggplot image saved (with bbox_inches="tight")
"""
buf = io.BytesIO()
gg.save(buf, format= "png", height = height, width = width,
dpi=dpi, units = "in", limitsize = limitsize,verbose=False,
bbox_inches="tight")
buf.seek(0)
img = Image.open(buf)
width, height = img.size
return width / dpi, height / dpi
| 5,336,470
|
def main() -> None:
"""Main function for model inference."""
args = parse_args()
assert args.format_only or args.show or args.show_dir, (
"Please specify at least one operation (save/eval/format/show the "
"results / save the results) with the argument '--format-only', "
"'--show' or '--show-dir'"
)
cfg = Config.fromfile(args.config)
if cfg.load_from is None:
cfg_name = os.path.split(args.config)[-1].replace(
"_bdd100k.py", ".pth"
)
cfg.load_from = MODEL_SERVER + cfg_name
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get("cudnn_benchmark", False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get("neck"):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get("rfp_backbone"):
if neck_cfg.rfp_backbone.get("pretrained"):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get("rfp_backbone"):
if cfg.model.neck.rfp_backbone.get("pretrained"):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True # type: ignore
samples_per_gpu = cfg.data.test.pop("samples_per_gpu", 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor( # type: ignore
cfg.data.test.pipeline # type: ignore
)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop("samples_per_gpu", 1) for ds_cfg in cfg.data.test]
)
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == "none":
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg"))
fp16_cfg = cfg.get("fp16", None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, cfg.load_from, map_location="cpu")
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if "CLASSES" in checkpoint.get("meta", {}):
model.CLASSES = checkpoint["meta"]["CLASSES"]
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(
model, data_loader, args.show, args.show_dir, args.show_score_thr
)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
)
outputs = multi_gpu_test(
model, data_loader, args.tmpdir, args.gpu_collect
)
rank, _ = get_dist_info()
if rank == 0:
if args.format_only:
dataset.convert_format(outputs, args.format_dir)
| 5,336,471
|
def dispatch(bot, update: Update):
"""
    Takes a Telegram Update and delegates to the correct
function to handle that update.
Keyword Arguments:
bot -- The overall BuzzardBot instance
update -- The raw Telegram Update
"""
print(update)
message = Message(bot, update)
text = message.text
if "/set " in text:
handle_set_command(message)
elif "/remove " in text:
handle_remove_command(message)
else:
handle_message(message)
| 5,336,472
|
def main():
"""main"""
args = sys.argv
if len(args) == 1:
arg1 = os.path.basename(args[0])
print('Usage: {} FILE'.format(arg1))
sys.exit(1)
infile = sys.argv[1]
if not os.path.isfile(infile):
print('{} is not a file'.format(infile))
sys.exit(1)
    for i, line in enumerate(open(infile).read().splitlines(), 1):
        print('{:5}: {}'.format(i, line))
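
# --- Illustrative example (not part of the original script) ---
# Typical invocation from a shell; "cat_n.py" and "notes.txt" are hypothetical names:
#   $ python cat_n.py notes.txt
#       1: first line of notes.txt
#       2: second line of notes.txt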
| 5,336,473
|
def getRnnGenerator(vocab_size,hidden_dim,input_dim=512):
"""
"Apply" the RNN to the input x
For initializing the network, the vocab size needs to be known
Default of the hidden layer is set tot 512 like Karpathy
"""
generator = SequenceGenerator(
Readout(readout_dim = vocab_size,
source_names = ["states"], # transition.apply.states ???
emitter = SoftmaxEmitter(name="emitter"),
feedback_brick = LookupFeedback(
vocab_size,
input_dim,
name = 'feedback'
),
name = "readout"
),
MySimpleRecurrent(
name = "transition",
activation = Tanh(),
dim = hidden_dim
),
weights_init = IsotropicGaussian(0.01),
biases_init = Constant(0),
name = "generator"
)
generator.push_initialization_config()
generator.transition.weights_init = IsotropicGaussian(0.01)
generator.initialize()
return generator
| 5,336,474
|
def first_true(iterable, default=False, pred=None):
"""Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item
for which pred(item) is true.
"""
# first_true([a,b,c], x) --> a or b or c or x
# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
return next(filter(pred, iterable), default)
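
# --- Illustrative example (not part of the original module) ---
# A minimal sketch of both call styles described in the docstring above.
def _example_first_true():
    assert first_true([0, "", None, 3, 7]) == 3                         # first truthy value
    assert first_true([1, 2, 3, 4], pred=lambda n: n % 2 == 0) == 2     # first item matching pred
    assert first_true([], default="fallback") == "fallback"             # default when nothing matches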
| 5,336,475
|
def display_results(final=True):
"""
Display the results using a colored barplot and event location plot
"""
if final:
st.pyplot(
barplot_colored(st.session_state.thresh_fvs, st.session_state.results)
)
st.pyplot(mk_event_location_plot(st.session_state.results))
st.success(
"The detected phone digits are "
+ "".join([str(num) for num in st.session_state.number])
)
else:
st.pyplot(st.session_state.plt, clear_figure=False)
st.pyplot(mk_event_location_plot(st.session_state.results))
st.write("".join([str(num) for num in st.session_state.number]))
| 5,336,476
|
def parseFileRefs(htmlfile, usedFiles, skipFiles, indent, trace=print):
"""
find files referenced in root, recur for html files
"""
trace('%sParsing:' % ('.' * indent), htmlfile)
parser = MyParser(usedFiles, skipFiles, indent)
text = open(htmlfile).read()
try:
parser.feed(text)
except html.parser.HTMLParseError as E:
print('==>FAILED:', E) # file's refs may be missed!
parser.close()
| 5,336,477
|
def get_length(filename):
"""
Get the length of a specific file with ffrobe from the ffmpeg library
:param filename: this param is used for the file
:type filename: str
:return: length of the given video file
:rtype: float
"""
    # use ffprobe because it is faster than alternatives (for example moviepy)
result = subprocess.run([
"ffprobe", "-v", "error", "-show_entries", "format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return float(result.stdout)
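
# --- Illustrative example (not part of the original module) ---
# A minimal sketch; "clip.mp4" is a hypothetical path and ffprobe must be
# installed and on PATH for the call to succeed.
def _example_get_length():
    duration = get_length("clip.mp4")
    print("clip.mp4 is {:.2f} seconds long".format(duration))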
| 5,336,478
|
def get_loan_by_id(link):
"""performs a GET request to the /api/loans/:{insert loanId here} endpoint """
get = requests.get(link)
print(get.text)
| 5,336,479
|
def is_File(path):
"""Takes the path of the folder as argument
Returns is the path is a of a Folder or not in bool"""
if os.path.isfile(path):
return True
else:
return False
| 5,336,480
|
def get_device_serial_no(instanceId, gwMgmtIp, fwApiKey):
"""
Retrieve the serial number from the FW.
    @param instanceId: The instance id of the FW
    @type: ```str```
    @param gwMgmtIp: The IP address of the FW
    @type: ```str```
    @param fwApiKey: Api key of the FW
    @type: ```str```
    @return The serial number of the FW, or False if it cannot be retrieved
@rtype: ```str```
"""
serial_no = None
if gwMgmtIp is None:
logger.error('Firewall IP could not be found. Can not interact with the device')
return False
logger.info('Retrieve the serial number from FW {} with IP: {}'.format(instanceId, gwMgmtIp))
cmd_show_system_info = "/api/?type=op&key={}&cmd=<show><system><info/></system></show>".format(fwApiKey)
response = execute_api_request(gwMgmtIp, 443, cmd_show_system_info)
if response['result'] == False:
        logger.error('PAN Firewall: Failed to execute the show system info command for device: {} with IP: {}'.format(instanceId, gwMgmtIp))
result = response['data'].findall(".//line")
for msg in result:
error_msg = msg.text
logger.error('Reason for failure: {}'.format(error_msg))
return False
serial_info = response['data'].findall(".//serial")
for info in serial_info:
serial_no = info.text
if not serial_no:
logger.error("Unable to retrieve the serial number from device: {} with IP: {}".format(instanceId, gwMgmtIp))
return serial_no
| 5,336,481
|
def dump_dict(dct, outpath='./dict.txt'):
""" Dump dict into file. """
with open(Path(outpath), 'w') as file:
for k in sorted(dct.keys()):
file.write('{}: {}\n'.format(k, dct[k]))
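
# --- Illustrative example (not part of the original module) ---
# A minimal sketch: keys are written in sorted order, one "key: value" per line.
def _example_dump_dict():
    dump_dict({"b": 2, "a": 1}, outpath="./example_dict.txt")
    # ./example_dict.txt now contains:
    #   a: 1
    #   b: 2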
| 5,336,482
|
def multilabel_cross_entropy(
x: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
ignore_index: int = -100,
reduction: str = 'mean'
) -> Tensor:
"""Implements the cross entropy loss for multi-label targets
Args:
x (torch.Tensor[N, K, ...]): input tensor
target (torch.Tensor[N, K, ...]): target tensor
weight (torch.Tensor[K], optional): manual rescaling of each class
        ignore_index (int, optional): specifies a target value that is ignored and does not contribute to the gradient
reduction (str, optional): reduction method
Returns:
torch.Tensor: loss reduced with `reduction` method
"""
# log(P[class]) = log_softmax(score)[class]
logpt = F.log_softmax(x, dim=1)
# Ignore index (set loss contribution to 0)
valid_idxs = torch.ones(logpt.shape[1], dtype=torch.bool, device=x.device)
if ignore_index >= 0 and ignore_index < x.shape[1]:
valid_idxs[ignore_index] = False
# Weight
if weight is not None:
# Tensor type
if weight.type() != x.data.type():
weight = weight.type_as(x.data)
logpt = logpt * weight.view(1, -1, *([1] * (x.ndim - 2))) # type: ignore[attr-defined]
# CE Loss
loss = - target * logpt
# Loss reduction
if reduction == 'sum':
loss = loss[:, valid_idxs].sum()
else:
loss = loss[:, valid_idxs].sum(dim=1)
if reduction == 'mean':
loss = loss.mean()
return loss
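
# --- Illustrative example (not part of the original module) ---
# A minimal sketch with random scores and a soft multi-label target per sample;
# shapes and values are arbitrary and chosen for demonstration only.
def _example_multilabel_cross_entropy():
    import torch
    scores = torch.randn(4, 5)      # N=4 samples, K=5 classes
    target = torch.zeros(4, 5)
    target[:, 1] = 1.0              # every sample carries class 1 ...
    target[:, 3] = 0.5              # ... plus a soft label on class 3
    loss = multilabel_cross_entropy(scores, target, reduction='mean')
    assert loss.dim() == 0          # scalar after mean reduction
    return loss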
| 5,336,483
|
def dataset_string(dataset):
"""Generate string from dataset"""
data = dataset_data(dataset)
try:
# single value
return fn.VALUE_FORMAT % data
except TypeError:
# array
if dataset.size > 1:
return fn.data_string(data)
# probably a string
return fn.shortstr('%s' % data)
| 5,336,484
|
def create_constant_value_validator(
constant_cls: Type, is_required: bool
) -> Callable[[str], bool]:
"""
Create a validator func that validates a value is one of the valid values.
Parameters
----------
constant_cls: Type
The constant class that contains the valid values.
is_required: bool
Whether the value is required.
Returns
-------
validator_func: Callable[[str], bool]
The validator func.
"""
def is_valid(value: str) -> bool:
"""
Validate that value is valid.
Parameters
----------
value: str
The value to validate.
Returns
-------
status: bool
The validation status.
"""
if value is None:
return not is_required
return value in get_all_class_attr_values(constant_cls)
return is_valid
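
# --- Illustrative example (not part of the original module) ---
# A minimal sketch with a hypothetical constants class; assumes
# `get_all_class_attr_values` collects the class attribute values ("red", "green", "blue").
def _example_constant_value_validator():
    class Color:
        RED = "red"
        GREEN = "green"
        BLUE = "blue"

    is_valid_color = create_constant_value_validator(Color, is_required=True)
    assert is_valid_color("red") is True
    assert is_valid_color("purple") is False
    assert is_valid_color(None) is False   # required, so None is rejected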
| 5,336,485
|
def process_arguments(arguments):
"""
Process command line arguments to execute VM actions.
Called from cm4.command.command
:param arguments:
"""
result = None
if arguments.get("--debug"):
pp = pprint.PrettyPrinter(indent=4)
print("vm processing arguments")
pp.pprint(arguments)
default_cloud = Config().data["cloudmesh"]["default"]["cloud"]
vm = Vm(default_cloud)
if arguments.get("list"):
result = vm.list()
elif arguments.get("create"):
# TODO: Reconcile `create` behavior here and in docopts where
# create is called with a `VMCOUNT`.
vm_name = arguments.get("VMNAME")
if vm_name is None:
vm_name = vm.new_name()
vm.create(vm_name)
result = f"Created {vm_name}"
elif arguments.get("start"):
result = vm.start(arguments.get("--vms"))
elif arguments.get("stop"):
result = vm.stop(arguments.get("--vms"))
elif arguments.get("destroy"):
result = vm.destroy(arguments.get("--vms"))
elif arguments.get("status"):
result = vm.status(arguments.get("--vms"))
elif arguments.get("publicip"):
result = vm.get_public_ips(arguments.get('--vms'))
elif arguments.get("ssh"):
# TODO
pass
elif arguments.get("run"):
# TODO
pass
elif arguments.get("script"):
# TODO
pass
return result
| 5,336,486
|
def forward_resolve(state):
"""Mark the target of a forward branch"""
target_label = state.data_stack.pop()
label(target_label)
| 5,336,487
|
def deliver_hybrid():
"""
Endpoint for submissions intended for dap and legacy systems. POST request requires the submission JSON to be
uploaded as "submission", the zipped transformed artifact as "transformed", and the filename passed in the
query parameters.
"""
logger.info('Processing Hybrid submission')
filename = request.args.get("filename")
meta = MetaWrapper(filename)
files = request.files
submission_bytes = files[SUBMISSION_FILE].read()
survey_dict = json.loads(submission_bytes.decode())
data_bytes = files[TRANSFORMED_FILE].read()
meta.set_legacy(survey_dict, data_bytes)
return process(meta, data_bytes)
| 5,336,488
|
def clear_screen():
"""Clear the screen"""
os.system("cls" if os.name == "nt" else "clear")
| 5,336,489
|
def change_path(path, dir="", file="", pre="", post="", ext=""):
    """
    Change the path ingredients with the provided directory, filename,
    prefix, postfix, and extension
    :param path: the original path
    :param dir: new directory
    :param file: filename to replace the original filename
    :param pre: prefix to be prepended to the filename
    :param post: postfix to be appended to the filename
    :param ext: new extension of the filename
    :return: the modified path
    """
    from pathlib import Path
    sep = "\\" if os.name == "nt" else "/"
    path_obj = Path(path)
    old_filename = path_obj.name.replace(path_obj.suffix, "") \
        if len(path_obj.suffix) > 0 else path_obj.name
    if len(dir) > 0:
        directory = dir
    elif path.endswith(sep):
        directory = path[:-1]
        old_filename = ""
    else:
        directory = str(path_obj.parent)
    old_extension = path_obj.suffix
    new_filename = file if len(file) > 0 else old_filename
    new_filename = pre + new_filename if len(pre) > 0 else new_filename
    new_filename = new_filename + post if len(post) > 0 else new_filename
    new_extension = "." + ext if len(ext) > 0 else old_extension
    target = directory + sep + new_filename + new_extension
    return target
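
# --- Illustrative example (not part of the original module) ---
# A minimal sketch of typical transformations, shown for a POSIX path
# (on Windows the same calls apply with backslash separators).
def _example_change_path():
    assert change_path("/data/report.csv", ext="txt") == "/data/report.txt"
    assert change_path("/data/report.csv", pre="old_", post="_v2") == "/data/old_report_v2.csv"
    assert change_path("/data/report.csv", dir="/tmp", file="copy") == "/tmp/copy.csv"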
| 5,336,490
|
def test_entities_false():
"""Test entity ID policy."""
policy = False
with pytest.raises(vol.Invalid):
ENTITY_POLICY_SCHEMA(policy)
| 5,336,491
|
def midi_to_chroma(pitch):
"""Given a midi pitch (e.g. 60 == C), returns its corresponding
chroma class value. A == 0, A# == 1, ..., G# == 11 """
return ((pitch % 12) + 3) % 12
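
# --- Illustrative example (not part of the original module) ---
# A quick check of the mapping described in the docstring: A -> 0, C -> 3.
def _example_midi_to_chroma():
    assert midi_to_chroma(57) == 0   # MIDI 57 is A3
    assert midi_to_chroma(60) == 3   # MIDI 60 is C4 (middle C)
    assert midi_to_chroma(68) == 11  # MIDI 68 is G#4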
| 5,336,492
|
def _snippet_items(snippet):
"""Return all markdown items in the snippet text.
For this we expect it the snippet to contain *nothing* but a markdown list.
We do not support "indented" list style, only one item per linebreak.
Raises SyntaxError if snippet not in proper format (e.g. contains
anything other than a markdown list).
"""
unformatted = snippet.text and snippet.text.strip()
# treat null text value as empty list
if not unformatted:
return []
# parse out all markdown list items
items = re.findall(r'^[-*+] +(.*)$', unformatted, re.MULTILINE)
# if there were any lines that didn't yield an item, assume there was
# something we didn't parse. since we never want to lose existing data
# for a user, this is an error condition.
if len(items) < len(unformatted.splitlines()):
raise SyntaxError('unparsed lines in user snippet: %s' % unformatted)
return items
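
# --- Illustrative example (not part of the original module) ---
# A minimal sketch using a stand-in object with a `.text` attribute, since the
# real snippet type comes from elsewhere in the original codebase.
def _example_snippet_items():
    from types import SimpleNamespace
    snippet = SimpleNamespace(text="- finished the report\n* reviewed two PRs\n")
    assert _snippet_items(snippet) == ["finished the report", "reviewed two PRs"]
    # Anything other than a markdown list raises SyntaxError, e.g.:
    # _snippet_items(SimpleNamespace(text="just a sentence"))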
| 5,336,493
|
def get_collection(*args, **kwargs):
""" Returns event collection schema
:param event_collection: string, the event collection from which schema is to be returned,
if left blank will return schema for all collections
"""
_initialize_client_from_environment()
return _client.get_collection(*args, **kwargs)
| 5,336,494
|
def get_tf_generator(data_source: extr.PymiaDatasource):
"""Returns a generator that wraps :class:`.PymiaDatasource` for the tensorflow data handling.
The returned generator can be used with `tf.data.Dataset.from_generator
    <https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_generator>`_ in order to build a tensorflow dataset.
Args:
data_source (.PymiaDatasource): the datasource to be wrapped.
Returns:
generator: Function that loops over the entire datasource and yields all entries.
"""
def generator():
for i in range(len(data_source)):
yield data_source[i]
return generator
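
# --- Illustrative example (not part of the original module) ---
# A minimal sketch of wiring the generator into tf.data; `datasource` is a
# hypothetical, already-constructed .PymiaDatasource, and `entry_output_types`
# must mirror the structure of one of its entries (placeholder keys such as
# {'images': tf.float32, 'labels': tf.int64}).
def _example_tf_dataset(datasource, entry_output_types):
    import tensorflow as tf
    gen = get_tf_generator(datasource)
    dataset = tf.data.Dataset.from_generator(gen, output_types=entry_output_types)
    return dataset.batch(2)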
| 5,336,495
|
def is_mechanical_ventilation_heat_recovery_active(bpr, tsd, t):
"""
Control of activity of heat exchanger of mechanical ventilation system
Author: Gabriel Happle
Date: APR 2017
:param bpr: Building Properties
:type bpr: BuildingPropertiesRow
:param tsd: Time series data of building
:type tsd: dict
:param t: time step / hour of the year
:type t: int
:return: Heat exchanger ON/OFF status
:rtype: bool
"""
if is_mechanical_ventilation_active(bpr, tsd, t)\
and has_mechanical_ventilation_heat_recovery(bpr)\
and control_heating_cooling_systems.is_heating_season(t, bpr):
# heat recovery is always active if mechanical ventilation is active (no intelligent by pass)
# this is the usual system configuration according to Clayton Miller
return True
elif is_mechanical_ventilation_active(bpr, tsd, t)\
and has_mechanical_ventilation_heat_recovery(bpr)\
and control_heating_cooling_systems.is_cooling_season(t, bpr)\
and tsd['T_int'][t-1] < tsd['T_ext'][t]:
return True
elif is_mechanical_ventilation_active(bpr, tsd, t) \
and control_heating_cooling_systems.is_cooling_season(t, bpr) \
and tsd['T_int'][t-1] >= tsd['T_ext'][t]:
# heat recovery is deactivated in the cooling case,
# if outdoor air conditions are colder than indoor (free cooling)
return False
else:
return False
| 5,336,496
|
def test_get_triup_dim():
"""Check that the function returns the correct number of nodes couples."""
cm = BiCM(np.array([[1, 0, 1], [1, 1, 1]]))
assert cm.get_triup_dim(False) == 3
assert cm.get_triup_dim(True) == 1
td = np.random.randint(low=0, high=2, size=50).reshape(5, 10)
cm = BiCM(td)
n = cm.get_triup_dim(True)
assert n == td.shape[0] * (td.shape[0] - 1) / 2
n = cm.get_triup_dim(False)
assert n == td.shape[1] * (td.shape[1] - 1) / 2
| 5,336,497
|
def init_app(app, **kwargs):
""" Performs app-initialization operations related to the current module. """
from . import errors # noqa: F401
from . import views
app.register_blueprint(views.main_blueprint)
| 5,336,498
|
async def fetch_user(user_id):
"""
Asynchronous function which performs an API call to retrieve a user from their ID
"""
    async with aiohttp.ClientSession() as session:
        async with session.get(url=str(f'{MAIN_URL}/api/user/{user_id}'),
                               headers=headers) as res:
            # Reminder : 2XX is a success
            # If unsuccessful we return the error message
            if res.status != 200:
                return await res.text()
            # However, if successful, return the json data transformed into its python equivalent
            return await res.json()
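
# --- Illustrative example (not part of the original module) ---
# A minimal sketch of driving the coroutine from synchronous code; the user id
# is a placeholder and MAIN_URL/headers come from the surrounding module.
def _example_fetch_user():
    import asyncio
    user = asyncio.run(fetch_user(42))
    print(user)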
| 5,336,499
|