content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _normalize_site_motif(motif):
"""Normalize the PSP site motif to all caps with no underscores and return
the preprocessed motif sequence and the position of the target residue
in the motif (zero-indexed)."""
no_underscores = motif.replace('_', '')
offset = motif.find(no_underscores)
respos = 7 - offset
return (no_underscores.upper(), respos) | 6758ff876b123eba86ec623753c49007baa07431 | 36,482 |
def families_vertical_correctors():
    """Return the list of vertical corrector family names."""
    families = ['CV']
    return families
from typing import Dict
from typing import Any
import json
def _serialize_content_with_header(content: Dict[str, Any]) -> bytes:
"""Writes serialized LSP message that includes a header and content."""
serialized_content = json.dumps(content).encode("utf-8")
# Each header parameter is terminated by \r\n, and the header itself is also
# terminated by \r\n.
header = (f"Content-Length: {len(serialized_content)}\r\n"
"Content-Type: application/vscode-jsonrpc;charset=utf-8\r\n"
"\r\n")
return header.encode("utf-8") + serialized_content | 5058de8a604cf626616011220b6c20dd17a61c9c | 36,484 |
import os
def get_example(name):
    """
    Retrieves the absolute file name of an example.

    :raises FileNotFoundError: if no such file exists next to this module
    """
    here = os.path.dirname(os.path.abspath(__file__))
    candidate = os.path.join(here, name)
    if os.path.exists(candidate):
        return candidate
    raise FileNotFoundError("Unable to find example '{0}'".format(name))
import random
def normalDistrib(a, b, gauss=random.gauss):
    """Return a random number in [a, b] drawn from a gaussian distribution.

    The distribution is centered on the midpoint of [a, b] with a standard
    deviation of (b - a) / 6, so by the empirical 68-95-99.7% rule roughly
    99.7% of draws (three standard deviations either side of the mean)
    land inside the interval.  The remaining ~0.3% of draws that fall
    outside are simply re-drawn, which folds them back uniformly onto the
    curve inside [a, b].

    NOTE: assumes a < b.  `gauss` may be overridden (e.g. for testing).
    """
    mean = (a + b) * 0.5
    sigma = (b - a) / 6.0
    while True:
        sample = gauss(mean, sigma)
        if a <= sample <= b:
            return sample
def read_object(self, obj):
    """
    Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes.
    :param obj: object file path, as string or Node
    """
    # Strings are resolved to Nodes; Nodes pass through untouched.
    node = obj if isinstance(obj, self.path.__class__) else self.path.find_resource(obj)
    return self(features='fake_obj', source=node, name=node.name)
from typing import Optional
import os
import sys
import itertools
def find_editor() -> Optional[str]:
    """
    Used to set cmd2.Cmd.DEFAULT_EDITOR. If EDITOR env variable is set, that will be used.
    Otherwise the function will look for a known editor in directories specified by PATH env variable.
    :return: Default editor or None
    """
    editor = os.environ.get('EDITOR')
    if not editor:
        # No explicit preference; probe for well-known editors on this platform.
        if sys.platform[:3] == 'win':
            editors = ['code.cmd', 'notepad++.exe', 'notepad.exe']
        else:
            editors = ['vim', 'vi', 'emacs', 'nano', 'pico', 'joe', 'code', 'subl', 'atom', 'gedit', 'geany', 'kate']
        # Get a list of every directory in the PATH environment variable and ignore symbolic links
        env_path = os.getenv('PATH')
        if env_path is None:
            paths = []
        else:
            paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]
        # Editors are tried in priority order: product() iterates every PATH
        # entry for editors[0] before moving on to editors[1], and so on.
        # On break, `editor` holds the matching editor *name* (not its path).
        for editor, path in itertools.product(editors, paths):
            editor_path = os.path.join(path, editor)
            if os.path.isfile(editor_path) and os.access(editor_path, os.X_OK):
                if sys.platform[:3] == 'win':
                    # Remove extension from Windows file names
                    editor = os.path.splitext(editor)[0]
                break
        else:
            # for/else: the loop exhausted without break -> nothing suitable found.
            editor = None
    return editor | 74ad6061b90848982dcc12c9e8734a6de0a91b18 | 36,491 |
def convert_to_bool(b_str: str):
    """Parse a string spelling of True/False (any capitalisation) to a bool.

    :param b_str: string spelling out True or False
    :raises TypeError: if the input is not a string
    :raises ValueError: if the string is neither True nor False
    :return: the parsed boolean
    """
    if not isinstance(b_str, str):
        raise TypeError("Input must be a string.")
    normalized = b_str.upper()
    if normalized == "TRUE":
        return True
    if normalized == "FALSE":
        return False
    raise ValueError("Please check your spelling for True or False.")
def clear_drupal_cache(db_obj, db_cur):
    """
    Clear all caches in a Drupal database by emptying every table whose
    name starts with 'cache'.

    Parameters:
        db_obj: the database connection object to use
        db_cur: the database cursor object to use

    Returns:
        True on success; False if listing the tables or any DELETE failed.
    """
    tables = db_obj.get_table_list(db_cur)
    if not tables[0]:
        return False
    cache_tables = [row[0] for row in tables[1] if row[0].startswith('cache')]
    for table_name in cache_tables:
        ok = db_obj.execute(db_cur,
                            'DELETE FROM {0};'.format(table_name),
                            has_results=False)
        if not ok:
            return False
    return True
def _as256String(rgb):
"""
Encode the given color as a 256-colors string
Parameters:
rgb (tuple): a tuple containing Red, Green and Blue information
Returns:
The encoded string
"""
redRatio = (0 if rgb[0] < 75 else (rgb[0] - 35) / 40) * 6 * 6
greenRatio = (0 if rgb[1] < 75 else (rgb[1] - 35) / 40) * 6
blueRatio = (0 if rgb[2] < 75 else (rgb[2] - 35) / 40) + 16
return "5;{}".format(int(redRatio + greenRatio + blueRatio)) | cbefff1dcb11be3e74f5b12bb02fbc897f1e5d07 | 36,495 |
def new(name, num_servings):
    """Create and return a new recipe dict with empty instruction and
    ingredient lists."""
    recipe = {'name': name, 'num_servings': num_servings}
    recipe['instructions'] = []
    recipe['ingredients'] = []
    return recipe
def label_rs_or_ps(player_data_list, label):
    """Append a RS/PS label to every per-year record.

    Args:
        player_data_list: list of per-year data rows
        label: 'RS' (regular season) or 'PS' (post season)

    Returns:
        A new list of rows, each a copy of the original with `label`
        appended.
    """
    labeled = []
    for year in player_data_list:
        row = list(year)
        row.append(label)
        labeled.append(row)
    return labeled
from datetime import datetime
def _parse_health_event(health_event):
""" Parse health Event
Args: healthEvent (raw data)
Returns: dict
"""
result = {}
wanted_items = ['service', 'arn', 'eventTypeCode', 'eventTypeCategory', 'region', 'availabilityZone', 'startTime', 'endTime',
'lastUpdatedTime', 'statusCode']
for item in wanted_items:
if item in health_event:
if isinstance(health_event[item], datetime):
result[item] = health_event[item].isoformat()
else:
result[item] = health_event[item]
#print(f'parse cloud trail event: {result}')
return result | 208fd2bc5cc1ff134e257e857f75d6012b8aa6e2 | 36,498 |
import itertools
def get_grid(args):
    """Build the full hyperparameter grid: one dict per combination.

    Args:
        args: mapping of hyperparameter name -> value or list of values.
            Scalar values are treated as single-element lists.

    Returns:
        List of dicts, one for every combination of the listed values.
        An empty mapping yields [{}].
    """
    # Normalize scalars to lists WITHOUT mutating the caller's dict
    # (the previous version rewrote `args` in place).
    normalized = {k: v if isinstance(v, list) else [v] for k, v in args.items()}
    if not normalized:
        # zip(*...) unpacking would raise on an empty mapping.
        return [{}]
    names, values = zip(*normalized.items())
    return [dict(zip(names, combo)) for combo in itertools.product(*values)]
def split_sents_with_bullet_points(sents_list):
    """Split sentences containing bullet points into separate sentences.

    Each sentence containing the bullet character is split on it; every
    non-empty part is re-prefixed with the bullet and kept only if it is
    longer than 6 characters.  Sentences without bullets pass through
    unchanged.
    """
    bullet = '•'
    result = []
    for sent in sents_list:
        if bullet in sent:
            parts = [bullet + piece for piece in sent.split(bullet) if piece]
            # Keep only fragments long enough to be real sentences.
            result.extend(part for part in parts if len(part) > 6)
        else:
            result.append(sent)
    return result
def get_atom(display, atom):
    """Intern `atom` on the given X display and return its numeric id.

    :type display: Display
    :type atom: str
    :rtype: int
    """
    atom_id = display.intern_atom(atom)
    return atom_id
def get_5_cards(deck):
    """Return a list of 5 cards dealt from a given deck."""
    return [deck.deal_card() for _ in range(5)]
def GetBuildersWithNoneMessages(statuses, failing):
    """Returns a list of failed builders with NoneType failure message.

    Args:
        statuses: A dict mapping build config names to their BuilderStatus.
        failing: Names of the builders that failed.

    Returns:
        A list of builder names.
    """
    no_message = []
    for builder in failing:
        if statuses[builder].message is None:
            no_message.append(builder)
    return no_message
def _get_salvaged(repo, ms, ctx):
    """returns a list of salvaged files

    returns empty list if config option which process salvaged files are
    not enabled

    A "salvaged" file is one the merge flagged as a removal candidate but
    which is still present in the merge commit `ctx`.
    """
    salvaged = []
    # Salvaged-file bookkeeping only exists in changeset-sidedata copy mode.
    copy_sd = repo.filecopiesmode == b'changeset-sidedata'
    # Only merge commits (more than one parent) can salvage files.
    if copy_sd and len(ctx.parents()) > 1:
        if ms.active():
            # Sorted for a deterministic result order.
            for fname in sorted(ms.allextras().keys()):
                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
                if might_removed == b'yes':
                    # Flagged for removal but still in the commit -> salvaged.
                    if fname in ctx:
                        salvaged.append(fname)
    return salvaged | ff3c1199efda706255b81de6f643323cb182e292 | 36,506 |
def str_name_value(name, value, tab=4, ljust=25):
    """
    This will return a str of name and value with uniform spacing

    :param name: str of the name (a single leading underscore is stripped)
    :param value: object serialized with str() for display
    :param tab: int of the number of spaces before the name
    :param ljust: int of the name ljust to apply to name
    :return: str of the formatted string
    """
    rep_name = (name.startswith('_') and name[1:]) or name
    pad = ' ' * tab
    try:
        # Continuation lines of a multi-line value are aligned under it.
        rep_value = str(value).replace('\n', '\n' + ' ' * (ljust + tab))
    except Exception:
        # str(value) itself failed; the old fallback called str(value)
        # again and therefore re-raised the very exception it caught.
        # Report the failure without touching `value` again.
        return pad + str("Exception in serializing %s value" % name).ljust(ljust)
    return pad + str(rep_name).ljust(ljust) + rep_value
def verbose_name(instance, field_name=None):
    """
    Returns verbose_name for a model instance or a field.

    With a truthy `field_name`, the field's verbose_name is returned;
    otherwise the model's own verbose_name.
    """
    meta = instance._meta
    if not field_name:
        return meta.verbose_name
    return meta.get_field(field_name).verbose_name
def get_unittest_setup(dir_name, file_name):
    """Setup unittest

    Builds the source-code preamble for a generated plugin-documentation
    unittest module: module docstring, imports, Savu path/log setup and
    the opening line of the TestCase class.

    :param dir_name: Directory name (used for the log file location)
    :param file_name: Plugin name; "_doc" is stripped for the log folder
        and the name is CamelCased for the TestCase class name
    :return: unittest_setup
    """
    folder_name = file_name.replace("_doc", "")
    # e.g. "my_plugin_doc" -> "MyPluginDoc" for the TestCase class name.
    unittest_name = file_name.replace('_', ' ').title().replace(' ','')
    unittest_setup='''
"""
.. module:: ''' + file_name + '''_test
  :platform: Unix
  :synopsis: unittest test for plugin restructured text file documentation
.. moduleauthor: Jessica Verschoyle

"""
import os
import sys
import unittest
import logging
import logging.config
from io import StringIO
import doc.doc_tests.doc_test_utils as dtu
import scripts.configurator_tests.savu_config_test_utils as sctu
import scripts.configurator_tests.refresh_process_lists_test as refresh
# Determine Savu base path
main_dir = \\
    os.path.dirname(os.path.realpath(__file__)).split("/Savu/")[0]
savu_base_path = f"{main_dir}/Savu/"
# Reset the args for command line input
dtu.setup_argparser()
# Start logging
logger, logger_rst = dtu.get_loggers()
fh, ch, fh_rst = dtu.setup_log_files(logger, logger_rst,
                                     "'''+f"{dir_name}/{folder_name}/"\
                                     +'''")
class '''+unittest_name+'''Test(unittest.TestCase):
'''
    return unittest_setup | c72241b8f482c2b5bd3426bcc4152dd54942cbbe | 36,512 |
def findClosestValueInBST(bst, target):
    """
    Return the value stored in the BST that is closest to `target`.

    Walks down from the root, tracking the closest value seen so far and
    descending toward the target via the BST ordering property.  Assumes
    there is only one closest value.

    :param bst: root BST node with `value`, `left` and `right` attributes
        (or None)
    :param target: integer value to approximate
    :return: the closest stored value (0 for an empty tree, matching the
        original behaviour)
    """
    if bst is None:
        return 0
    # Seed with the root's value: the previous version seeded with 0 and
    # compared the *signed* distance on one side, so it could return 0 —
    # a value not even in the tree — e.g. for all-negative trees.
    closest = bst.value
    node = bst
    while node is not None:
        # Compare absolute distances on both sides.
        if abs(target - node.value) < abs(target - closest):
            closest = node.value
        if target > node.value:
            node = node.right
        elif target < node.value:
            node = node.left
        else:
            # Exact match: nothing can be closer.
            break
    return closest
def evaluate(model, examples):
    """
    Spacy standard metric

    Runs the model's built-in evaluation over `examples` and returns the
    per-entity-type scores ('ents_per_type').

    NOTE(review): the guard rejects exactly len == 1 while the message
    says "more than 1" — a length of 0 passes the check; confirm intent.
    """
    assert len(examples) != 1, "Length of test data must be more than 1"
    return model.evaluate(examples).scores['ents_per_type'] | 9cf258795d55f08c347429bd19e973f2949f83b3 | 36,514 |
import re
from typing import OrderedDict
def _make_str_data_list(filename):
"""
read pickle_list file to make group of list of pickle files.
group id is denoted in front of the line (optional).
if group id is not denoted, group id of -1 is assigned.
example:
0:file1.pickle
0:file2.pickle
14:file3.pickle
"""
h = re.compile("([0-9]+):(.*)")
data_list = OrderedDict()
with open(filename, 'r') as fil:
for line in fil:
m = h.match(line.strip())
if m:
group_id, file_name = m.group(1), m.group(2)
else:
group_id, file_name = -1, line.strip()
if group_id not in data_list:
data_list[group_id] = []
data_list[group_id].append(file_name)
return data_list.values() | cf69c678c8cf0b544d6d811034c1efe94370e08e | 36,515 |
def get_description_data(description):
    """Read the MR description from a file path, if one was given.

    Args:
        description (str): Path to description file for the MR (may be
            empty, in which case no file is read).

    Returns:
        str: The file contents, or "" when no path was given or the file
        could not be read (a message is printed in that case).

    Raises:
        OSError: If couldn't open file for some reason.
    """
    if not description:
        return ""
    try:
        with open(description) as mr_description:
            return mr_description.read()
    except FileNotFoundError:
        print(f"Unable to find description file at {description}. No description will be set.")
    except OSError:
        print(f"Unable to open description file at {description}. No description will be set.")
    return ""
def order(request):
    """
    A possible bond order (pytest fixture: passes through request.param).
    """
    param = request.param
    return param
def table(name=None, primary_key="id", column_map=None):
    """Class decorator recording the table name the data is saved under.

    Sets __table_name__ (the class name when `name` is None),
    __primary_key__ and __column_map__ on the decorated class.
    """
    def decorate(clazz):
        clazz.__table_name__ = name if name is not None else clazz.__name__
        clazz.__primary_key__ = primary_key
        clazz.__column_map__ = column_map
        return clazz
    return decorate
import grp
def gid_to_name(gid):
    """
    Find the group name associated with a group ID.

    :param gid: The group ID (an integer).
    :returns: The group name (a string) or :data:`None` if :func:`grp.getgrgid()`
              fails to locate a group for the given ID.
    """
    try:
        entry = grp.getgrgid(gid)
    except Exception:
        return None
    return entry.gr_name
def have_binaries(packages):
    """Check if there are any binaries (executables) in the packages.

    Return: (bool) True if packages have any binaries, False otherwise
    """
    return any(
        filepath.startswith(('/usr/bin', '/usr/sbin'))
        for pkg in packages
        for filepath in pkg.files
    )
def convert_to_float(match):
    """Convert an XML float string such as '-1.0e+01' to a float.

    Python's float() accepts this format directly (optional sign on both
    mantissa and exponent).  The previous implementation stripped *every*
    '-' from the string before negating, which silently flipped negative
    exponents (e.g. '1.0e-01' became -10.0 instead of 0.1).
    """
    return float(match)
def get_best_model_name():
    """
    Return filename of best model snapshot by step

    :return: filename of best model snapshot
    :rtype: str
    """
    best_snapshot = 'network-snapshot-best.pth'
    return best_snapshot
def get_param_names(df):
    """Get the sorted parameter column names ("param_*") from df.

    Since the parameters are dynamically retrieved from the df, this
    function ensures that we have a consistent and correct mapping
    between their names and values across the callbacks.
    """
    param_cols = [col for col in df.columns if col.startswith("param_")]
    param_cols.sort()
    return param_cols
import re
def validate_email(email: str) -> str:
    """Validate an input email address.

    Arguments:
        email (str): email to validate
    Returns:
        the original email when it matches the pattern, else "invalid"
    """
    pattern = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
    return email if re.match(pattern, email) else "invalid"
def div_roundup(a, b):
    """ Return a/b rounded up to nearest integer,
    equivalent result to int(math.ceil(float(int(a)) / float(int(b))), only
    without possible floating point accuracy errors.
    """
    numerator, denominator = int(a), int(b)
    return (numerator + denominator - 1) // denominator
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel() | 6a5c37b639bbb4f0bc971c482834a47d560fd8e4 | 36,530 |
def __get_default_resource_group() -> str:
    """Get the resource group name for player account"""
    group_name = 'CloudGemPlayerAccount'
    return group_name
def version_comparator(v1: str, v2: str):
    """Compare two dotted version strings numerically.

    Returns 0 if the versions are equal, 1 if v1 > v2, -1 if v1 < v2.
    When all shared components match, the version with more components is
    considered greater.

    The previous implementation compared components as *strings*, so e.g.
    '1.10' was considered smaller than '1.9'.  Components are assumed to
    be numeric.
    """
    if v1 == v2:
        return 0
    v1_parts = [int(part) for part in v1.split(".")]
    v2_parts = [int(part) for part in v2.split(".")]
    for left, right in zip(v1_parts, v2_parts):
        if left > right:
            return 1
        if left < right:
            return -1
    # Shared prefix equal: the longer version wins.
    return 1 if len(v1_parts) > len(v2_parts) else -1
def not_infinite(instructions):
    """Return True if the program terminates (instruction pointer reaches
    the end), False if it revisits an instruction (infinite loop)."""
    seen = set()
    pointer = 0
    while pointer not in seen and pointer < len(instructions):
        seen.add(pointer)
        op, arg = instructions[pointer].split()
        # Only "jmp" moves by its argument; everything else steps forward.
        pointer += int(arg) if op == "jmp" else 1
    return pointer == len(instructions)
def matrixAsList(matrix, value=True):
    """
    Turns a matrix into a list of (row, col) coordinates whose entry
    matches the specified value.  Assumes a rectangular matrix.
    """
    return [
        (row_idx, col_idx)
        for row_idx, row in enumerate(matrix)
        for col_idx, cell in enumerate(row)
        if cell == value
    ]
def is_next_month(first_date, second_date):
    """ Return True if second_date is in following month to first_date

    The month difference is year_delta * 12 + month_delta.  The previous
    version multiplied the year delta by ``first_date.month`` instead of
    12, which only coincidentally worked for December and misclassified
    e.g. Jan 2020 -> Jan 2021 as "next month".
    """
    months_apart = (second_date.year - first_date.year) * 12 + \
        (second_date.month - first_date.month)
    return months_apart == 1
def convert_title(x) -> str:
    """Strip surrounding whitespace from the title."""
    cleaned = x.strip()
    return cleaned
def GetFileExt(path, add_dot=False):
    """ Returns the filetype extension from the given file path.

    :param str path: file path
    :param boolean add_dot: whether to append a period/dot to the returned extension
    :returns str: filetype extension (e.g: png); when the path contains no
        dot, the whole path is returned unchanged (original behaviour)
    """
    # rsplit keeps only the text after the last dot; with no dot present
    # the path itself comes back, matching the original implementation.
    ext = path.rsplit(".", 1)[-1]
    return ".{}".format(ext) if add_dot else ext
def unit_conversion(array, unit_prefix, current_prefix=""):
    """
    Convert an array or value between metric (SI) unit prefixes.

    Accepted prefixes (and scales):
        E exa 1e18, P peta 1e15, T tera 1e12, G giga 1e9, M mega 1e6,
        k kilo 1e3, '' base 1, m milli 1e-3, u micro 1e-6, n nano 1e-9,
        p pico 1e-12, f femto 1e-15, a atto 1e-18

    Parameters
    ----------
    array : ndarray
        Array (or scalar) to be converted
    unit_prefix : string
        desired unit (metric) prefix (e.g. nm would be n, ms would be m)
    current_prefix : optional, string
        current prefix of units of data (assumed to be in SI units
        by default (e.g. m or s)

    Returns
    -------
    converted_array : ndarray
        Array multiplied such as to be in the units specified

    Raises
    ------
    ValueError
        If either prefix is not one of the accepted prefixes.
    """
    prefix_scale = {
        'E': 1e18, 'P': 1e15, 'T': 1e12, 'G': 1e9, 'M': 1e6, 'k': 1e3,
        '': 1,
        'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12, 'f': 1e-15, 'a': 1e-18,
    }
    if unit_prefix not in prefix_scale:
        raise ValueError(
            "You entered {} for the unit_prefix, this is not a valid prefix".
            format(unit_prefix))
    if current_prefix not in prefix_scale:
        raise ValueError(
            "You entered {} for the current_prefix, this is not a valid prefix"
            .format(current_prefix))
    scale_factor = prefix_scale[current_prefix] / prefix_scale[unit_prefix]
    return array * scale_factor
import time
import os
import sys
def UnitTestPreamble():
    """Returns the preamble for the unit test file.

    The preamble is C++ source text (license header, includes, and a small
    user-defined Bool type used by the generated assertions) with the
    generation date and command line interpolated via %-formatting.
    """
    # A map that defines the values used in the preamble template.
    DEFS = {
        'today' : time.strftime('%m/%d/%Y'),
        'year' : time.strftime('%Y'),
        'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
    }
    return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include <gtest/gtest.h>
#include <gtest/gtest-spi.h>
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS) | f84c9a399621c89abd054460b699d3736b50aee9 | 36,544 |
import base64
def file_to_b64(filename):
    """Read an image file and return its contents base64-encoded.

    Args:
        filename: string variable containing the path and name of the
            image file on computer

    Returns:
        b64_string: string variable containing the image bytes encoded
            as a base64 string
    """
    with open(filename, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode("utf-8")
from typing import Dict
from typing import Any
from typing import Tuple
import itertools
def merge_args_and_kwargs(
    event_abi: Dict[str, Any], args: Any, kwargs: Any
) -> Tuple[Any, ...]:
    """
    Merge positional and keyword event arguments into one tuple ordered
    by the ABI's declared input order.

    Borrowed / modified from
    https://github.com/ethereum/web3.py/blob/master/web3/_utils/abi.py

    Raises TypeError when too many values are supplied, when nothing is
    supplied, when a value is given both positionally and by keyword, or
    when a keyword does not name an ABI input.
    """
    # Reject more values than the ABI declares inputs for.
    if len(args) + len(kwargs) > len(event_abi.get("inputs", [])):
        raise TypeError(
            "Incorrect argument count. Expected <= '{0}'. Got '{1}'".format(
                len(event_abi["inputs"]), len(args) + len(kwargs)
            )
        )
    if not kwargs and not args:
        raise TypeError("No kwargs or args provided.")
    # Fast path: purely positional arguments are already in ABI order.
    if not kwargs:
        return args
    # Positional args map onto the leading ABI inputs by position.
    args_as_kwargs = {
        arg_abi["name"]: arg for arg_abi, arg in zip(event_abi["inputs"], args)
    }
    # An argument supplied both positionally and by keyword is ambiguous.
    duplicate_keys = set(args_as_kwargs).intersection(kwargs.keys())
    if duplicate_keys:
        raise TypeError(
            "{fn_name}() got multiple values for argument(s) '{dups}'".format(
                fn_name=event_abi["name"], dups=", ".join(duplicate_keys)
            )
        )
    sorted_arg_names = [arg_abi["name"] for arg_abi in event_abi["inputs"]]
    unknown_kwargs = {key for key in kwargs.keys() if key not in sorted_arg_names}
    if unknown_kwargs:
        if event_abi.get("name"):
            raise TypeError(
                "{fn_name}() got unexpected keyword argument(s) '{dups}'".format(
                    fn_name=event_abi.get("name"), dups=", ".join(unknown_kwargs)
                )
            )
        # show type instead of name in the error message incase key 'name' is missing.
        raise TypeError(
            "Type: '{_type}' got unexpected keyword argument(s) '{dups}'".format(
                _type=event_abi.get("type"), dups=", ".join(unknown_kwargs)
            )
        )
    # Re-order the merged (keyword + positional-as-keyword) pairs into ABI
    # input order; after zip(*...), index 0 holds the names and index 1
    # holds the values.
    sorted_args = list(
        zip(
            *sorted(
                itertools.chain(kwargs.items(), args_as_kwargs.items()),
                key=lambda kv: sorted_arg_names.index(kv[0]),
            )
        )
    )
    if sorted_args:
        return sorted_args[1]
    else:
        return tuple() | 594c4775f25df45f42f8a6699a02f4e2c570b46a | 36,547 |
import subprocess
import re
def patch_uncrustify(changed_files):
    """Creates patch to fix formatting in a set of files.

    For each space-separated file name, runs uncrustify and diffs the
    formatted output against the original via `git diff --no-index`,
    rewriting the diff headers so each hunk applies as an a/<file> ->
    b/<file> patch.  Returns the concatenated patch text.

    SECURITY NOTE(review): file names are interpolated into a shell=True
    command string, so names containing shell metacharacters would be
    interpreted by the shell.  Safe only for trusted file lists; the
    pipeline (| git ... | tail) is why shell=True is used here.
    """
    files = changed_files.split(" ")
    patch = ''
    for file in files:
        # Pipeline: reformat -> colored diff vs the original file ->
        # drop the two `diff`/`index` header lines (tail -n+3).
        format_call = (
            'uncrustify -q -c .uncrustify.cfg -f {}'.format(file)
            + '| git --no-pager diff --color=always --no-index -- "{}" - '.format(file)
            + '| tail -n+3'
        )
        diff_result = subprocess.check_output(format_call, shell=True)
        if type(diff_result) is not str:
            diff_result = diff_result.decode("utf-8")
        # Rewrite the ---/+++ headers to reference a/<file> and b/<file>.
        diff_result = re.sub(r'---.*', '--- "a/{}"'.format(file), diff_result)
        diff_result = re.sub(r'\+\+\+.*', '+++ "b/{}"'.format(file), diff_result)
        patch += diff_result
    return patch | 48b0aad9a87f410e528f8534a00b7b27482da9f4 | 36,548 |
def second(x, y):
    """Return the second of the two arguments, ignoring the first."""
    return (x, y)[1]
def epiweek_to_month(ew):
    """
    Convert an epiweek to a month (1-13): week 40 starts month 1 and each
    group of 4 consecutive weeks maps to one month.
    """
    # Python's % always yields a non-negative result, so the explicit
    # "+ 52" of the original formulation is unnecessary.
    weeks_since_40 = (ew - 40) % 52
    return weeks_since_40 // 4 + 1
import numpy
def PIL2array(img):
    """ Convert a PIL/Pillow RGB image to a numpy uint8 array of shape
    (height, width, 3) """
    width, height = img.size
    flat = numpy.array(img.getdata(), numpy.uint8)
    return flat.reshape(height, width, 3)
import torch
def masked_function(function, *inputs, mask=None, restore_shape=True):
    """
    Apply a function only to the masked positions of the input tensors.

    :param function: The function to apply.
    :param inputs: Input tensors, shape (N* x hidden); all inputs share
        the same leading (N*) shape.
    :param mask: Mask over the N* positions (nonzero = keep), shape (N*)
        or broadcastable, optional.
    :param restore_shape: If True, scatter the outputs back into the
        original (N* x out_hidden) shape with zeros at masked-out
        positions; otherwise return only the selected rows.
    :return: The function output, re-expanded to the original shape
        unless `restore_shape` is False.
    """
    if mask is None:
        return function(*inputs)
    valid_indices = torch.nonzero(mask.view(-1)).squeeze(1)
    # remember the original shape
    original_shape = inputs[0].size()
    # Number of rows in the flattened (N*) leading dimensions.  The
    # previous version called torch.prod() on a torch.Size, which raises
    # a TypeError (torch.prod expects a Tensor).
    num_items = inputs[0].numel() // original_shape[-1]
    clean_inputs = []
    for inp in inputs:
        flat_input = inp.view(-1, original_shape[-1])
        clean_inputs.append(flat_input.index_select(0, valid_indices))
    # forward pass on the clean (selected) rows only
    clean_output = function(*clean_inputs)
    if not restore_shape:
        return clean_output
    # Scatter the outputs back; the rows we skipped stay zero (they are
    # masked anyway).
    flat_output = inputs[0].new_zeros(num_items, clean_output.size(-1))
    flat_output.index_copy_(0, valid_indices, clean_output)
    output = flat_output.view(*original_shape[:-1], clean_output.size(-1))
    return output
def html_table(header, rows):
    """Create HTML table that can be used for logging."""
    head_html = "".join(f"<th>{name}</th>" for name in header)
    body_html = "".join(
        "<tr>" + "".join(f"<td>{cell}</td>" for cell in row) + "</tr>"
        for row in rows
    )
    return f'<div class="doc"><table><tr>{head_html}</tr>{body_html}</table></div>'
import sys
import os
import subprocess
def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
'-m', 'pip',
'show', '--quiet',
name,
]
with open(os.devnull, 'w') as devnull:
status = subprocess.call(command, stderr=devnull)
return not status | 7fb0c2c512ca68fc48efa0ae625d9d33c394477a | 36,555 |
import pandas
def dummies_from_bins(df, col, bins, bin_labels, col_prefix):
    """
    Bin a numeric column and append one binary (dummy) feature per bin.

    Values of df[col] are cut into `bins` (edges must increase
    monotonically) labelled with `bin_labels`; the resulting dummy
    columns, prefixed with `col_prefix`, are concatenated onto the end of
    df, which is returned.
    """
    binned = pandas.cut(df[col], bins=bins, labels=bin_labels)
    dummies = pandas.get_dummies(binned).add_prefix(col_prefix)
    return pandas.concat([df, dummies], axis=1)
def _determine_dimensions(ideal_dimensions, current_dimensions):
"""
ideal_dimensions: dimensions we want an image to fit into (width, height)
current_dimensions: the dimensions the image currently has (width, height)
returns the dimensions the image should be resized to
"""
current_width = current_dimensions[0]
current_height = current_dimensions[1]
ideal_width = ideal_dimensions[0]
ideal_height = ideal_dimensions[1]
width_diff = current_width - ideal_width
height_diff = current_height - ideal_height
if (width_diff <= 0) and (height_diff <= 0):
return current_dimensions
if width_diff > height_diff:
return ideal_width, int((ideal_width / current_width) * current_height)
else:
return int((ideal_height / current_height) * current_width), ideal_height | 5954ca209b6d8aa12480944d53346035379cd4c7 | 36,558 |
import re
def _is_multiple_lines(line):
"""
:param line:
:return:
"""
if '=' in line:
s = line.split('=')[1].strip()
if s.startswith("{") and len(re.findall('{', s)) == len(re.findall('}', s)):
return False
elif s.startswith("\"") and len(re.findall("\"", s)) % 2 == 0:
return False
return True | 8782b6e48e863ff2f9a7ff01907473932da22281 | 36,562 |
def prepare_input(file):
    """
    Unpack a (input, target) pair and return its two components.

    :param file: two-element iterable of (input, target)
    :return: tuple of (input, target)
    """
    sample, label = file
    return sample, label
import os
def remove_ex2_local_files(base_path):
    """Delete the saved arista1..arista4 config files, ignoring any that
    do not exist."""
    for host_num in range(1, 5):
        target = f"{base_path}/eos/arista{host_num}-saved.txt"
        try:
            os.remove(target)
        except FileNotFoundError:
            # Absent files are fine: the goal is simply that they are gone.
            pass
    return None
import os
import sys
def inVirtualEnv():
    """
    Report whether the interpreter is running inside a virtualenv or a
    Conda environment.
    """
    env = os.environ
    if 'VIRTUAL_ENV' in env or 'CONDA_DEFAULT_ENV' in env:
        return True
    # Legacy virtualenv sets sys.real_prefix; venv makes base_prefix differ.
    if hasattr(sys, 'real_prefix'):
        return True
    return hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix
def aerial_raw_sample():
    """Return the path of the sample AerialImage training inputs used to
    populate an AerialImage dataset."""
    sample_dir = "tests/data/aerial/input/training/"
    return sample_dir
import argparse
def get_args():
    """Build the CLI parser for the HDF5 merge tool and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Merges several HDF5 files.')
    # Positional: one or more input files.
    arg_parser.add_argument('file', metavar='file', type=str, nargs='+',
                            help='HDF5 files to merge')
    # Output file name (required).
    arg_parser.add_argument('-o', metavar='ofile', dest='ofile', type=str,
                            nargs=1, help=('output file name'),
                            default=[None], required=True,)
    # How many merged output blocks to produce.
    arg_parser.add_argument('-m', metavar='nfiles', dest='nfiles', type=int,
                            nargs=1, help=('number of merged files (blocks)'),
                            default=[1],)
    # Optional subset of variables to merge.
    arg_parser.add_argument('-v', metavar='var', dest='vnames', type=str,
                            nargs='+',
                            help=('only merge specific vars if given, otherwise merge all'),
                            default=[],)
    # Optional compression for the merged output.
    arg_parser.add_argument('-z', metavar=None, dest='comp', type=str, nargs=1,
                            help=('compress merged file(s)'),
                            choices=('lzf', 'gzip'), default=[None],)
    # Sort input files by the number following `key` in their names.
    arg_parser.add_argument('-k', metavar='key', dest='key', type=str, nargs=1,
                            help=('sort files by numbers after `key` in file name'),
                            default=[None],)
    # Parallelism when producing multiple blocks.
    arg_parser.add_argument('-n', metavar='njobs', dest='njobs', type=int,
                            nargs=1,
                            help=('number of jobs for parallel processing when using -m'),
                            default=[1],)
    return arg_parser.parse_args()
import sys
def buffer_memory_size(rb):
    """Estimate the memory needed for a full replay buffer, extrapolating
    from the size of its last-filled entry.

    Each entry is assumed to expose .state, .action, .reward, .next_state
    and .done; the estimate is len(rb) * (shallow size of those five fields).
    Returns 0 for an empty buffer.  (The previous version used ``idx > 0``
    and therefore wrongly reported 0 for a one-element buffer.)
    """
    if len(rb) == 0:
        return 0
    last = rb[len(rb) - 1]
    fields = (last.state, last.action, last.reward, last.next_state, last.done)
    # sys.getsizeof is shallow: nested containers are not followed.
    entry_size = sum(sys.getsizeof(field) for field in fields)
    return len(rb) * entry_size
import requests
def genderize(first_name):
    """ Query genderize.io and return its JSON payload: the estimated
    probability that a first name belongs to a man or a woman.
    Example: {'count': 5856,
              'gender': 'male',
              'name': 'Alex',
              'probability': '0.87'}
    Parameters
    ----------
    first_name: str
    Returns
    -------
    query_result: dict
    """
    response = requests.get("https://api.genderize.io/",
                            params={"name": first_name})
    return response.json()
import re
def re_tokenize(s):
    """Tokenize *s* into words (apostrophes kept inside words, e.g. "don't")
    and the punctuation marks . , ! ? ; — returned as a list."""
    token_pattern = re.compile(r"[\w]+[']*[\w]+|[\w]+|[.,!?;]")
    return token_pattern.findall(s)
def import_sensors_from_device(client, device):
    """
    Fetch sensor data for a device from the Rayleigh Connect API.
    :param client: An instance of the RayleighClient class
    :param device: A Node object describing the Rayleigh device
    :return: JSON object with the device's sensors, as returned by the API
    """
    sensors = client.retrieve_sensors(device)
    return sensors
def get_years(start_time, stop_time):
    """Return the years spanned by a time period.

    Both arguments are ISO-like strings starting with "YYYY-"; the result
    is the inclusive list of year strings from the start year through the
    stop year.

    :param start_time: period start, e.g. "2019-01-01"
    :type start_time: str
    :param stop_time: period end, e.g. "2021-06-30"
    :type stop_time: str
    :return: years covered by the period
    :rtype: list of str
    """
    first_year = int(start_time.split("-")[0])
    last_year = int(stop_time.split("-")[0])
    return [str(y) for y in range(first_year, last_year + 1)]
def _add_skipped_resources_to_obj(obj, data, columns):
    """Append the ids of obj.vault's skipped resources to the (data, columns)
    tuples and return both.

    For each skipped resource at index i, a column named
    'skipped_resource_<i+1>' is added — but only when obj.vault.resources[i]
    (note: *resources*, not *skipped_resources*) has a truthy id.

    NOTE(review): the gate reads obj.vault.resources[i].id while the value
    appended comes from obj.vault.skipped_resources[i].id — the two lists are
    indexed in lockstep, which looks suspicious; confirm this cross-indexing
    is intentional.  The loop variable `s` is unused.
    """
    i = 0
    for s in obj.vault.skipped_resources:
        if obj.vault.resources[i].id:
            name = 'skipped_resource_' + str(i + 1)
            data += (obj.vault.skipped_resources[i].id,)
            columns = columns + (name,)
        i += 1
    return data, columns
def and_join(items: list[str], sep: str = ", ") -> str:
    """Join a list with a separator, inserting "and" before the final item
    for readability, e.g. ``["a", "b", "c"]`` -> ``"a, b, and c"``.

    Handles the degenerate cases the naive join got wrong: an empty list
    returns "" (previously raised IndexError) and a single item is returned
    as-is (previously came back as ", and item").
    """
    if not items:
        return ""
    if len(items) == 1:
        return str(items[0])
    return f"{sep.join(str(x) for x in items[:-1])}{sep}and {items[-1]}"
def test_same_type(iterable_obj):
"""
Utility function to test whether all elements of an iterable_obj are of the
same type. If not it raises a TypeError.
"""
# by set definition, the set of types of the elements in iterable_obj
# includes all and only the different types of the elements in iterable_obj.
# If its length is 1, then all the elements of iterable_obj are of the
# same data-type
if len(set([type(x) for x in iterable_obj])) == 1:
# all element are of the same type: test successful!
return True
else:
raise TypeError("Iterable '{}' in input has elements of heterogeneous types: {}"
.format(iterable_obj, [type(x) for x in iterable_obj])) | 25b48d11eef503f8a0fd78b648f5b6b273f0a8a9 | 36,582 |
def schur_representative_from_index(i0, i1):
    """
    Simultaneously reorder a pair of tuples to obtain the equivalent
    element of the distinguished basis of the Schur algebra.
    .. SEEALSO::
        :func:`schur_representative_indices`
    INPUT:
    - A pair of tuples of length `r` with elements in `\{1,\dots,n\}`
    OUTPUT:
    - The corresponding pair of tuples ordered correctly.
    EXAMPLES::
        sage: from sage.algebras.schur_algebra import schur_representative_from_index
        sage: schur_representative_from_index([2,1,2,2], [1,3,0,0])
        ((1, 2, 2, 2), (3, 0, 0, 1))
    """
    # Pair the entries position-wise, sort the pairs lexicographically,
    # then split back into two tuples.
    pairs = sorted(zip(i0, i1))
    left = tuple(pair[0] for pair in pairs)
    right = tuple(pair[1] for pair in pairs)
    return (left, right)
import requests
def import_web_intraday(ticker, api_key='YOUR_API_KEY'):
    """
    Query AlphaVantage (AV) for 1-minute intraday data of a ticker.
    AV limits free access:
      1. maximum of 5 unique queries per minute, 500 per 24h period
      2. intraday history capped at the past five days (current + 4)
      3. after-hours data is not available
    The response is JSON: one tick per minute during trading hours
    (09:30-16:00), each with open, close, low, high, average, volume.
    --------
    :param ticker: str; ticker of a company traded on the financial markets
    :param api_key: str; your AlphaVantage API key (the previous version
        accidentally baked the literal text ’+YOUR_API_KEY+’ — smart quotes
        included — into the URL, so the key was never sent)
    :return: decoded JSON response as a dict
    """
    # Let requests build/escape the query string instead of concatenating.
    params = {
        'function': 'TIME_SERIES_INTRADAY',
        'symbol': ticker,
        'interval': '1min',
        'apikey': api_key,
        'outputsize': 'full',
        'datatype': 'json',
    }
    response = requests.get('https://www.alphavantage.co/query', params=params)
    return response.json()
def linear_damped_SHO(t, x):
    """Right-hand side of the linearly damped harmonic oscillator:
    x' = -0.1x + 2y
    y' = -2x - 0.1y
    """
    pos, vel = x[0], x[1]
    d_pos = -0.1 * pos + 2 * vel
    d_vel = -2 * pos - 0.1 * vel
    return [d_pos, d_vel]
def command_handler(option_value):
    """Dynamically import a class from a dotted path string (``pkg.mod.Class``)
    and return a new instance of it."""
    module_path, class_name = option_value.rsplit('.', 1)
    imported = __import__(module_path, fromlist=[class_name])
    handler_cls = getattr(imported, class_name)
    return handler_cls()
def to_list(collection):
    """:yaql:toList
    Returns collection converted to list.
    :signature: collection.toList()
    :receiverArg collection: collection to be converted
    :argType collection: iterable
    :returnType: list
    .. code::
        yaql> range(0, 3).toList()
        [0, 1, 2]
    """
    # Docstring kept verbatim: yaql renders it as the function's API help.
    return [item for item in collection]
def safe_dict_set_value(tree_node, key, value):
    """Safely set (or remove) an entry on a node of a tree parsed from
    JSON or YAML.

    Parsed trees mix container and scalar nodes, so the node is checked to
    be a dictionary before touching it.  A ``value`` of None means "delete
    ``key`` if present" instead of storing None.

    Args:
        tree_node: Node to modify.
        key: Key of the entry to set or remove.
        value: Value to store; None requests removal of the entry.
    Returns:
        tree_node (possibly modified in place).
    """
    # Non-dict nodes (lists, scalars) are returned untouched.
    if not isinstance(tree_node, dict):
        return tree_node
    if value is not None:
        tree_node[key] = value
    elif key in tree_node:
        del tree_node[key]
    return tree_node
import pickle
def load(file):
    """
    Wrapper around pickle.load which accepts either a file-like object or
    a filename.

    When given a filename, the file is opened in binary mode and closed
    after loading (the previous version leaked the handle).

    Note that the NumPy binary format is not based on pickle/cPickle anymore.
    For details on the preferred way of loading and saving files, see `load`
    and `save`.
    See Also
    --------
    load, save
    """
    if isinstance(file, str):
        with open(file, "rb") as handle:
            return pickle.load(handle)
    return pickle.load(file)
from typing import List
def _create_select_cql(key_space: str, table_name: str,
column_to_select: List,
conditions_eq: dict) -> str:
"""
This function will create a general level Select query for Cassendera
"""
select_cql = "SELECT "
if column_to_select:
select_cql += ", ".join(column_to_select)
else:
select_cql += "* "
select_cql += " FROM " + key_space + "." + table_name
select_cql += " WHERE "
for key in conditions_eq:
select_cql += (key + " = ?" + " AND ")
select_cql = select_cql[:-4]
select_cql += " ALLOW FILTERING;"
return select_cql | 8c0b98bc761ae5b37bc0e24117fcf6f3dcac26fa | 36,591 |
def check_zabbixproxy(zapi, domain_name):
    """Look up the Zabbix proxy for this cluster's domain.
    :param zapi: Zabbix API client
    :param domain_name: domain name of this cluster
    :return: proxy search result (truthy when the proxy exists)
    """
    search_spec = {"host": "server-247." + domain_name}
    return zapi.proxy.get(search=search_spec)
def get_next_cursor(res):
    """Extract the next_cursor field from a Web API response object.
    All paginated Web API calls carry it under 'response_metadata';
    returns None when the metadata is absent or falsy.
    """
    metadata = res.get('response_metadata')
    return metadata.get('next_cursor', None) if metadata else None
from typing import Optional
import json
def _get_debtor_info_iri_from_config_data(config_data: str) -> Optional[str]:
"""Parse `config_data` and return `config_data['info']['iri']`."""
def get_dict_key(d, key):
return d.get(key) if isinstance(d, dict) else None
try:
config_data = json.loads(config_data)
except json.JSONDecodeError:
return None
debtor_info = get_dict_key(config_data, 'info')
debtor_info_iri = get_dict_key(debtor_info, 'iri')
return debtor_info_iri if isinstance(debtor_info_iri, str) else None | e16558f29ed47f2e4909097f133049a005fa1446 | 36,595 |
def emaAverage(y, z):
    """
    @name: emaAverage
    @requiredFunc ema
    @description: basic ema average — returns the product of the two inputs
    """
    return y * z
def hello_world(request, resource_type, id, *arg, **kwargs):
    """
    Simple Hello World used to check the pluggable-module wiring.
    :param request: incoming request
    :param resource_type: FHIR resource type
    :param id: resource id
    :param arg: ignored positional extras
    :param kwargs: ignored keyword extras
    :return: greeting string echoing request, resource_type and id
    """
    template = ("Hello World from fhir_io_mongo.views.get.hello_world: "
                "%s,{%s}[%s]")
    return template % (request, resource_type, id)
from typing import Callable
def arbitrary_call(arbitrary_args_callable: Callable[..., int]) -> int:
    """Exercise a Callable typed with arbitrary arguments: invoke it once
    positionally and once with a keyword, returning the summed results."""
    positional_result = arbitrary_args_callable("x")
    keyword_result = arbitrary_args_callable(y=2)
    return positional_result + keyword_result
def resource_to_type_name(resource):
    """Return the type/name identifier stored on a resource DBO."""
    type_name = resource.type_name
    return type_name
def get_kmers(start, end, input_alignment):
    """
    Slice every sequence of an MSA between two positions.
    :param start: int, start position (inclusive)
    :param end: int, end position (exclusive)
    :param input_alignment: dict mapping sequence id -> sequence string
    :return: dict mapping sequence id -> the kmer at [start:end]
    """
    return {seq_id: sequence[start:end]
            for seq_id, sequence in input_alignment.items()}
from typing import List
from typing import Any
def stable_sort(deck: List[Any]) -> List[Any]:
    """Sort a list of objects in place (and return it), assuming ``<`` is
    implemented for the objects in the list.

    This is a merge sort: O(n log n) average and worst case.  The sort is
    stable — on ties the element from the left (earlier) half is taken
    first, preserving the input order of equal objects.  (The previous
    version compared with ``first[i] < last[j]`` and took ``last[j]`` on
    ties, which broke stability.)

    Arguments:
        deck: List[Any]
            List of objects to be sorted
    Returns:
        List[Any]: sorted list of objects
    """
    if len(deck) > 1:
        first = stable_sort(deck[:len(deck) // 2])
        last = stable_sort(deck[(len(deck) // 2):])
        i = 0
        j = 0
        while i < len(first) or j < len(last):
            if i >= len(first):
                deck[i + j] = last[j]
                j += 1
            elif j >= len(last):
                deck[i + j] = first[i]
                i += 1
            elif last[j] < first[i]:
                # Only strictly-smaller right elements jump ahead; ties
                # fall through to the left half, keeping the sort stable.
                deck[i + j] = last[j]
                j += 1
            else:
                deck[i + j] = first[i]
                i += 1
    return deck
def iddversiontuple(afile):
    """Given an idd file path or an open binary filehandle, return the IDD
    version from the first line as a tuple of ints, e.g. (8, 9, 0).

    Returns (0,) when the first line is empty.  When *afile* is a path,
    the file is closed before returning (the previous version leaked the
    handle).  A caller-supplied handle is left open for the caller.
    """

    def versiontuple(vers):
        """Convert a dotted version string to a tuple of ints."""
        return tuple([int(num) for num in vers.split(".")])

    opened_here = False
    try:
        fhandle = open(afile, "rb")
        opened_here = True
    except TypeError:
        # afile is already a file-like object.
        fhandle = afile
    try:
        line1 = fhandle.readline()
    finally:
        if opened_here:
            fhandle.close()
    try:
        line1 = line1.decode("ISO-8859-2")
    except AttributeError:
        # Already a str (text-mode handle) — nothing to decode.
        pass
    line = line1.strip()
    if line1 == "":
        return (0,)
    # The version is the last whitespace-separated token of the first line.
    vers = line.split()[-1]
    return versiontuple(vers)
import torch
def cumulative_jitter(nodes_to_leaf, feats, strength=(1000, 300)):
"""
Apply cumulative jitter to graph.
Args:
nodes_to_leaf: indices of nodes from start node to leaf
feats: features per node
strength: strength scale for the jittering
"""
jitter = (2 * torch.rand(3) - 1) * strength[0]
for _, n in enumerate(nodes_to_leaf):
temp = torch.tensor(feats[n])[:3] + jitter
feats[n][:3] = tuple(temp.tolist())
jitter += (2 * torch.rand(3) - 1) * strength[1]
return feats | 9e07e853eab4c8e176b01cd81e40d36d61ed85dc | 36,603 |
def metric_max_over_ground_truths(f1_score_fn, bleu4_fn, prediction, ground_truths):
    """
    Score a prediction against multiple ground truths.

    Takes the maximum F1 over all ground truths, then multiplies it by a
    BLEU-4 score computed over the full ground-truth list (or 1 when no
    BLEU function is given).

    Note: a mean over ground truths was considered to weigh all answers,
    but the maximum is used.
    """
    best_f1 = max(f1_score_fn(prediction, gt) for gt in ground_truths)
    bleu = bleu4_fn(prediction, ground_truths) if bleu4_fn is not None else 1
    return best_f1 * bleu
import functools
def future_generator(func):
    """Decorates a generator that generates futures.

    The wrapped generator yields future objects; this driver sends each
    future's result back into the generator (or throws the future's
    exception into it) as soon as the future completes, via the future's
    done-callback.  The chain continues until the generator is exhausted,
    at which point it is closed.  The decorated function itself returns
    None immediately — progress happens inside the callbacks.
    """
    @functools.wraps(func)
    def call_generator(*args, **kw):
        gen = func(*args, **kw)
        try:
            # Prime the generator: get its first future.
            f = next(gen)
        except StopIteration:
            # Generator produced nothing; clean up.
            gen.close()
        else:
            def store(gen, future):
                # Attach a completion hook that resumes the generator.
                @future.add_done_callback
                def _(future):
                    try:
                        try:
                            result = future.result()
                        except Exception as exc:
                            # Propagate the failure into the generator so it
                            # can handle it; it may yield another future.
                            f = gen.throw(exc)
                        else:
                            # Feed the result in; get the next future.
                            f = gen.send(result)
                    except StopIteration:
                        gen.close()
                    else:
                        # Recurse: wait on the next yielded future.
                        store(gen, f)
            store(gen, f)
    return call_generator
def horizontal_correction(conductor, angle):
"""Equation 24, page 28"""
if conductor.stranded:
return 1 - 1.76e-6 * angle ** 2.5
else:
return 1 - 1.58e-4 * angle ** 1.5 | c1a1b39c912000e98357fac74ecd6d36da511636 | 36,606 |
import os
import dill
def get_explainer(export_dir):
    """
    Load the LIME explainer artifacts saved alongside a model export.

    Arguments:
        export_dir: str, path of the directory of the saved model
    Returns:
        explainer: lime object created while training
        dict_mapping: dict, mapping of categorical columns
        feature_names: list of feature names
    """
    asset_path = os.path.join(export_dir, 'assets.extra', 'lime_explainer')
    with open(asset_path, 'rb') as handle:
        # The three objects were dumped sequentially; load in the same order.
        explainer = dill.load(handle)
        dict_mapping = dill.load(handle)
        feature_names = dill.load(handle)
    return explainer, dict_mapping, feature_names
def countAndSay(self, n):
    """Return the n-th term of the look-and-say sequence.

    Starting from "1", each term is built by reading the previous term
    aloud as run-length counts, e.g. "1211" -> "one 1, one 2, two 1s"
    -> "111221".

    :type n: int
    :rtype: str
    """
    result = '1'
    for _ in range(n - 1):  # the first term needs no processing
        pieces = []
        prev_char = result[0]
        run_length = 0
        for ch in result:
            if ch == prev_char:
                run_length += 1
            else:
                # Run ended: emit "<count><char>" and start a new run.
                pieces.append(str(run_length) + prev_char)
                prev_char = ch
                run_length = 1
        pieces.append(str(run_length) + prev_char)
        result = ''.join(pieces)
    return result
def get_device_model(device):
    """
    Query a device for its model name via adb.
    Args:
        device: The device instance.
    Returns:
        The device model, stripped of surrounding whitespace.
    """
    raw_output = device.adb("shell", "getprop", "ro.product.model")
    return raw_output.strip()
import pprint
def pretty(data):
    """Pretty-print *data* to stdout with 2-space indentation.
    Returns None (the return value of PrettyPrinter.pprint)."""
    printer = pprint.PrettyPrinter(indent=2)
    return printer.pprint(data)
import shutil
from pathlib import Path
def get_program_path(program):
    """Get the resolved full path to a program found on the system PATH.

    Raises OSError when the program is not found.  (The previous version
    tested ``if not program`` — the argument instead of the lookup result —
    so a missing program fell through to ``Path(None)`` and raised a
    confusing TypeError.)
    """
    path = shutil.which(program)
    if path is None:
        raise OSError(f"{program} not found on system $PATH")
    return Path(path).resolve()
import os
def strip_dir_part(path, root):
    """Strip `root` part from `path`.
    >>> strip_dir_part('/home/ruby/file', '/home')
    'ruby/file'
    >>> strip_dir_part('/home/ruby/file', '/home/')
    'ruby/file'
    >>> strip_dir_part('/home/ruby/', '/home')
    'ruby/'
    >>> strip_dir_part('/home/ruby/', '/home/')
    'ruby/'
    """
    # Drop the first occurrence of root, then any single leading separator
    # left behind when root did not end with one.
    stripped = path.replace(root, '', 1)
    return stripped[1:] if stripped.startswith(os.path.sep) else stripped
def split_jaspar_id(id):
    """
    Split a JASPAR matrix ID into its base ID and version number, e.g.
    'MA0047.2' -> ('MA0047', '2').  IDs without exactly one '.' are
    returned whole with a None version.
    """
    parts = id.split('.')
    if len(parts) == 2:
        return (parts[0], parts[1])
    return (id, None)
def i_am_the_lord(legion_name: str) -> bool:
    """Report whether this node is the lord of the given legion.

    Stub implementation: always False.
    * http://uwsgi.readthedocs.io/en/latest/Legion.html#legion-api
    :param legion_name: name of the legion to check
    """
    lordship = False
    return lordship
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.