content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def genBetaModel(matshape, cen, betaparam):
    """
    Generate beta model with given parameters
    inputs
    ======
    matshape: tuple or list
        Shape of the matrix
    cen: tuple or list
        Location of the center pixel
    betaparam: dict
        Parameters of the beta function
        { "A": float,
        "r0": float,
        "theta": float,
        "beta": float,
        "majaxis": float,
        "minaxis": float,
        }
    output
    ======
    matbeta: np.ndarray
        The matrix with modelled beta values, or None when betaparam does
        not hold exactly six entries
    """
    # NOTE(review): only the *count* of keys is validated, not their names;
    # a six-key dict with wrong keys passes this guard and raises KeyError
    # below.
    if len(betaparam) != 6:
        print("There might be short of parameter.")
        return None
    # Init matbeta
    matbeta = np.zeros(matshape)
    # load paramters
    A = betaparam['A']
    r0 = betaparam['r0']
    theta = betaparam['theta']
    beta = betaparam['beta']
    majaxis = betaparam['majaxis']
    minaxis = betaparam['minaxis']
    ecc = majaxis / minaxis # axis ratio (called "eccentricity" here)
    # 1-based coordinate axes; despite the comment below, no 2-D meshgrid is
    # built -- the grid is walked by the nested loops.
    X = np.linspace(1, matshape[0], matshape[0])
    Y = np.linspace(1, matshape[1], matshape[1])
    # anti-clock rotation matrix for position angle theta (radians, as
    # np.cos/np.sin expect)
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),np.cos(theta)]])
    # Calc
    for j, x in enumerate(X):
        for i,y in enumerate(Y):
            # offset from the model centre, rotated into the ellipse frame
            x_r = x - cen[0]
            y_r = y - cen[1]
            r = np.matmul(rot, np.array([x_r, y_r]))
            # squared elliptical radius: the second rotated coordinate is
            # stretched by the axis ratio
            r = r[0]**2 + r[1]**2 * ecc**2
            # beta profile: A * (1 + (d/r0)^2)^(-|beta|)
            # NOTE(review): rows of matbeta are indexed by Y and columns by
            # X (matbeta[y, x]); confirm callers expect this orientation.
            matbeta[i, j] = A * (1 + r/r0**2)**(-np.abs(beta))
    return matbeta
def folders_to_create(search_path, dirs, base_path=""):
    """
    Recursively traverse through folder paths looking for the longest existing subpath.
    Return the dir info of the longest subpath and the directories that
    need to be created.

    Args:
        search_path: Path to resolve, either as a "/"-separated string or as
            a list of path components (the list form is used internally by
            the recursion).
        dirs: Iterable of dir-info dicts, each optionally carrying a "path"
            key.
        base_path: Longest already-matched path prefix (internal).

    Returns:
        (parent, missing): ``parent`` is the dir info of the deepest existing
        directory ({"id": "root"} when nothing matched) and ``missing`` is
        the list of components that still need to be created.
    """
    # Allow user to pass in a string, but use a list in the recursion
    if isinstance(search_path, list):
        parts = list(search_path)
    else:
        parts = search_path.strip("/").split("/")
        # shared drives don't start with a /: their first component is the
        # drive itself and seeds base_path.
        # Bug fix: this check used to run for list inputs as well, where
        # search_path.startswith() raised AttributeError.
        if base_path == "" and not search_path.startswith("/"):
            base_path = parts.pop(0)
    parent = [dr for dr in dirs if dr.get("path", "") == base_path]
    if len(parent) == 0:
        parent = {"id": "root"}
    else:
        parent = parent.pop()
    # Stop if we ran out of parts to create
    if len(parts) == 0:
        return parent, []
    base_path += "/" + parts[0]
    dirs = [dr for dr in dirs if dr.get("path", "").startswith(base_path)]
    # If there's base_path matches, then keep looking for a longer path
    if len(dirs) > 0:
        return folders_to_create(parts[1:], dirs, base_path)
    else:
        return parent, parts
def send_message(token, recipient, text):
    """Send the message text to the recipient using the Messenger API.

    Args:
        token: Page access token for the Graph API.
        recipient: Messenger user id to deliver the message to.
        text: Message body; it is decoded with 'unicode_escape' before
            sending, so it is expected to be a byte string -- a str would
            raise AttributeError on Python 3 (TODO confirm what callers pass).
    """
    r = requests.post(
        "https://graph.facebook.com/v2.6/me/messages",
        params={"access_token": token},
        data=json.dumps(
            {
                "recipient": {"id": recipient},
                "message": {"text": text.decode('unicode_escape')}
            }
        ),
        headers={'Content-type': 'application/json'}
    )
    if r.status_code != requests.codes.ok:
        # Bug fix: `print r.text` was Python 2 statement syntax (a
        # SyntaxError on Python 3); use the print() function instead.
        print(r.text)
def profile_line(image, src, dst, linewidth=1,
                 order=1, mode='constant', cval=0.0):
    """Return the intensity profile of an image measured along a scan line.
    Parameters
    ----------
    image : numeric array, shape (M, N[, C])
        The image, either grayscale (2D array) or multichannel
        (3D array, where the final axis contains the channel
        information).
    src : 2-tuple of numeric scalar (float or int)
        The start point of the scan line.
    dst : 2-tuple of numeric scalar (float or int)
        The end point of the scan line. The destination point is *included*
        in the profile, in contrast to standard numpy indexing.
    linewidth : int, optional
        Width of the scan, perpendicular to the line
    order : int in {0, 1, 2, 3, 4, 5}, optional
        The order of the spline interpolation to compute image values at
        non-integer coordinates. 0 means nearest-neighbor interpolation.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        How to compute any values falling outside of the image.
    cval : float, optional
        If `mode` is 'constant', what constant value to use outside the image.
    Returns
    -------
    return_value : array
        The intensity profile along the scan line. The length of the profile
        is the ceil of the computed length of the scan line.
    Examples
    --------
    >>> x = np.array([[1, 1, 1, 2, 2, 2]])
    >>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)])
    >>> img
    array([[0, 0, 0, 0, 0, 0],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [0, 0, 0, 0, 0, 0]])
    >>> profile_line(img, (2, 1), (2, 4))
    array([ 1.,  1.,  2.,  2.])
    >>> profile_line(img, (1, 0), (1, 6), cval=4)
    array([ 1.,  1.,  1.,  2.,  2.,  2.,  4.])
    The destination point is included in the profile, in contrast to
    standard numpy indexing.
    For example:
    >>> profile_line(img, (1, 0), (1, 6))  # The final point is out of bounds
    array([ 1.,  1.,  1.,  2.,  2.,  2.,  0.])
    >>> profile_line(img, (1, 0), (1, 5))  # This accesses the full first row
    array([ 1.,  1.,  1.,  2.,  2.,  2.])
    """
    # Sample coordinates along the line and, for linewidth > 1, along
    # parallel offsets perpendicular to it.
    perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)
    if image.ndim == 3:
        # Multichannel: interpolate each channel separately, then restack so
        # the channel axis is last again.
        pixels = [ndi.map_coordinates(image[..., i], perp_lines,
                                      order=order, mode=mode, cval=cval)
                  for i in range(image.shape[2])]
        pixels = np.transpose(np.asarray(pixels), (1, 2, 0))
    else:
        pixels = ndi.map_coordinates(image, perp_lines,
                                     order=order, mode=mode, cval=cval)
    # Average across the scan width to get one intensity per line position.
    intensities = pixels.mean(axis=1)
    return intensities
def set_out(pin, state):
    """
    Set simple digital (high/low) output
    :param pin: pin number or logical name
    :param state: state: 1/0 = True/False
    :return: verdict dict {'pin': <pin>, 'state': <state that was set>}
    """
    # __digital_out_init presumably returns a configured digital-output pin
    # object whose .value() drives the line -- confirm in its definition.
    __digital_out_init(pin).value(state)
    return {'pin': pin, 'state': state}
def execute(env):
    """Perform pre-stage tasks for running a component.

    Mutates *env* in place: derives step counts and output intervals,
    translates WMT parameter choices into TopoFlow settings, and stages the
    required input files into the current working directory.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # Number of model steps covering the requested run duration.
    env['n_steps'] = int(round(float(env['_run_duration']) / float(env['dt'])))
    env['save_grid_dt'] = float(env['dt'])
    env['save_pixels_dt'] = float(env['dt'])
    # If no pixel_file is given, let TopoFlow make one.
    if env['pixel_file'] == 'off':
        env['pixel_file'] = env['case_prefix'] + '_outlets.txt'
    # Translate the roughness choice to TopoFlow parameters; MANNING and
    # LAW_OF_WALL are mutually exclusive 0/1 flags.
    env['MANNING'] = env['roughness_option'].startswith('Manning') * 1
    env['LAW_OF_WALL'] = 1 - env['MANNING']
    assign_parameters(env, file_list)
    # Stage every listed input file into the working directory.
    # for fname in ['code_file', 'slope_file'] + file_list:
    for fname in file_list:
        src = find_simulation_input_file(env[fname])
        shutil.copy(src, os.curdir)
    # src = find_simulation_input_file(env['site_prefix'] + '.rti')
    # shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti'))
    # The flow-direction and slope grids are renamed to the site's canonical
    # <site_prefix>_*.rtg names while copying.
    src = find_simulation_input_file(env['code_file'])
    env['code_file'] = env['site_prefix'] + '_flow.rtg'
    shutil.copy(src, os.path.join(os.curdir, env['code_file']))
    src = find_simulation_input_file(env['slope_file'])
    env['slope_file'] = env['site_prefix'] + '_slope.rtg'
    shutil.copy(src, os.path.join(os.curdir, env['slope_file']))
    src = find_simulation_input_file(env['rti_file'])
    shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti'))
    # Map WMT display choices onto TopoFlow's expected tokens.
    env['A_units'] = units_map[env['A_units']]
    for var in ('LINK_FLATS', 'FILL_PITS_IN_Z0', 'LR_PERIODIC',
                'TB_PERIODIC'): env[var] = choices_map[env[var]]
    # Scalar-typed parameters are materialized as RTG grid files.
    for var in ('width', 'angle', 'roughness', 'd0', 'sinu'):
        if env[var + '_ptype'] == 'Scalar':
            scalar_to_rtg_file(var, env)
    # nval/z0val mirror the roughness parameter's type, dtype, value and file.
    for var in ('nval', 'z0val'):
        env[var + '_ptype'] = env['roughness_ptype']
        env[var + '_dtype'] = env['roughness_dtype']
        env[var] = env['roughness']
        env[var + '_file'] = env['roughness_file']
def _assert_identical_data_entity_exists(app_context, test_object):
    """Checks a specific entity exists in a given namespace.

    Switches the datastore into the namespace of *app_context*, loads the
    entity sharing *test_object*'s key, and asserts it exists with identical
    data and key id. The previously active namespace is always restored,
    even when an assertion fails.
    """
    old_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        entity_class = test_object.__class__
        # Fetch the stored counterpart by key within the target namespace.
        existing_object = entity_class().get(test_object.key())
        assert existing_object
        assert existing_object.data == test_object.data
        assert existing_object.key().id() == test_object.key().id()
    finally:
        # Restore the caller's namespace no matter what happened above.
        namespace_manager.set_namespace(old_namespace)
def get_session():
    """Define a re-usable Session object.

    The returned session carries the module-level ``auth`` credentials and
    has TLS certificate verification turned off.
    """
    configured = requests.Session()
    configured.verify = False
    configured.auth = auth
    return configured
def main():
    """Main function.

    Resolves the inventory plugin from its entry point and prints either the
    whole dynamic inventory (--list) or the hostvars of a single host.

    Raises:
        exceptions.InventoryError: if the requested host is not present in
            the inventory's hostvars.
    """
    options = get_options()
    LOG.debug("Options are %s", options)
    entry_point = get_entrypoint()
    plugin = get_plugin(entry_point)
    inventory = plugin.get_dynamic_inventory()
    if options.list:
        dumps(inventory)
    elif options.host in inventory["_meta"]["hostvars"]:
        dumps(inventory["_meta"]["hostvars"][options.host])
    else:
        raise exceptions.InventoryError(
            "Cannot find required host {0}".format(options.host))
    return os.EX_OK
def create_test_action(context, **kw):
    """Create and return a test action object.

    Builds an action via get_test_action, persists it to the DB with
    .create(), and returns the resulting Action object with appropriate
    attributes.
    """
    new_action = get_test_action(context, **kw)
    new_action.create()
    return new_action
def parse(header_array, is_paper=False):
    """Decide which version of the headers to use and map values onto them.

    Args:
        header_array: Raw header-row fields from the filing.
        is_paper: True for paper filings (version in column 1), False for
            electronic filings (version in column 2).

    Returns:
        Dict mapping canonical header names to the cleaned values found in
        ``header_array``; missing trailing fields are recorded as "".

    Raises:
        UnknownHeaderError: If no known parser matches the reported version.
    """
    if not is_paper:
        version = clean_entry(header_array[2])
        if old_eheaders_re.match(version):
            headers_list = old_eheaders
        elif new_eheaders_re.match(version):
            headers_list = new_eheaders
        else:
            raise UnknownHeaderError("Couldn't find parser for electronic version %s" % (version))
    else:
        version = clean_entry(header_array[1])
        if paper_headers_v1_re.match(version):
            headers_list = paper_headers_v1
        elif paper_headers_v2_2_re.match(version):
            headers_list = paper_headers_v2_2
        elif paper_headers_v2_6_re.match(version):
            headers_list = paper_headers_v2_6
        else:
            raise UnknownHeaderError("Couldn't find parser for paper version %s" % (version))
    headers = {}
    for i, header_name in enumerate(headers_list):
        # It's acceptable for header rows to leave off trailing delimiters,
        # so any field past the end of header_array is entered as a blank.
        this_arg = ""
        try:
            this_arg = clean_entry(header_array[i])
        except IndexError:
            pass
        headers[header_name] = this_arg
    return headers
def _GetIssueIDsFromLocalIdsCond(cnxn, cond, project_ids, services):
  """Returns global issue IDs for the local IDs referenced in the cond.

  Args:
    cnxn: connection to the SQL database.
    cond: query condition whose str_values hold issue refs like "proj:123".
    project_ids: project IDs that scope the search.
    services: connections to backend services.

  Raises:
    MalformedQuery: if a ref has no project prefix and no default project
      can be inferred.
  """
  # Get {project_name: project} for all projects in project_ids.
  ids_to_projects = services.project.GetProjects(cnxn, project_ids)
  # Bug fix: dict.itervalues() and subscripting dict.values() are Python 2
  # only; use values() and next(iter(...)) so this also runs on Python 3.
  ref_projects = {pb.project_name: pb for pb in ids_to_projects.values()}
  # Populate default_project_name if there is only one project id provided.
  default_project_name = None
  if len(ref_projects) == 1:
    default_project_name = next(iter(ref_projects.values())).project_name
  # Populate refs with (project_name, local_id) pairs.
  refs = []
  for val in cond.str_values:
    project_name, local_id = tracker_bizobj.ParseIssueRef(val)
    if not project_name:
      if not default_project_name:
        # TODO(rmistry): Support the below.
        raise MalformedQuery(
            'Searching for issues across multiple/all projects without '
            'project prefixes is ambiguous and is currently not supported.')
      project_name = default_project_name
    refs.append((project_name, int(local_id)))
  issue_ids, _misses = services.issue.ResolveIssueRefs(
      cnxn, ref_projects, default_project_name, refs)
  return issue_ids
def lookup_plex_media(hass, content_type, content_id):
    """Look up Plex media for other integrations using media_player.play_media service payloads.

    Returns a (playqueue, plex_server) tuple.

    Raises:
        HomeAssistantError: if a referenced playqueue or media item cannot
            be found on the resolved Plex server.
    """
    content = json.loads(content_id)
    # A bare integer payload is shorthand for a Plex rating key.
    if isinstance(content, int):
        content = {"plex_key": content}
        content_type = DOMAIN
    # Optional server selection; None is passed through to get_plex_server
    # when the payload names no server.
    plex_server_name = content.pop("plex_server", None)
    plex_server = get_plex_server(hass, plex_server_name)
    if playqueue_id := content.pop("playqueue_id", None):
        # Re-use an existing server-side play queue.
        try:
            playqueue = plex_server.get_playqueue(playqueue_id)
        except NotFound as err:
            raise HomeAssistantError(
                f"PlayQueue '{playqueue_id}' could not be found"
            ) from err
    else:
        # Build a new play queue from the looked-up media item.
        shuffle = content.pop("shuffle", 0)
        media = plex_server.lookup_media(content_type, **content)
        if media is None:
            raise HomeAssistantError(
                f"Plex media not found using payload: '{content_id}'"
            )
        playqueue = plex_server.create_playqueue(media, shuffle=shuffle)
    return (playqueue, plex_server)
def get_file_paths(directory, file=None):
    """
    Collect file paths from *directory*.

    When *file* is given and exists inside *directory*, only that single
    path is returned. Otherwise every regular file in the directory is
    collected, skipping 'desktop.ini'.

    :param directory: The directory where the file(s) can be found
    :param file: A file in the directory
    :return: The sorted list of collected file paths
    """
    collected = []
    if file is not None:
        candidate = os.path.join(directory, file)
        if os.path.isfile(candidate):
            collected = [candidate]
    # Fall back to scanning the whole directory when the requested file was
    # absent or no file was named.
    if not collected:
        collected = [
            os.path.join(directory, entry)
            for entry in os.listdir(directory)
            if entry != 'desktop.ini'
            and os.path.isfile(os.path.join(directory, entry))
        ]
    return sorted(collected)
def dsym_test(func):
    """Decorate the item as a dsym test.

    The wrapped test is skipped at run time when the global
    ``lldb.dont_do_dsym_test`` flag is set; if that attribute does not
    exist, the test runs normally.

    Raises:
        Exception: when applied to a TestCase class rather than a method.
    """
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@dsym_test can only be used to decorate a test method")
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            if lldb.dont_do_dsym_test:
                self.skipTest("dsym tests")
        except AttributeError:
            # Flag not defined: treat as "do run dsym tests".
            pass
        return func(self, *args, **kwargs)
    # Mark this function as such to separate them from the regular tests.
    wrapper.__dsym_test__ = True
    return wrapper
def count_meetings(signups=None, left: "datetime | None" = None, right: "datetime | None" = None) -> int:
    """ Returns the number of meetings the user has been to, between two
    date ranges. Left bound is chosen as an arbitrary date guaranteed to
    be after any 8th periods from the past year, but before any from the
    current year. Right bound is chosen to be today. """
    # can't use default arguments initialized at function definition
    # in a long-running app right would be the date of last reset not "today"
    if signups is None:
        signups = get_signups()
    if left is None:
        left = summer(get_year() - 1)
    if right is None:
        right = datetime.today()
    # Count signups whose block date falls strictly between the two bounds.
    return len(list(
        filter(lambda signup: left < ion_date(signup["block"]["date"]) < right,
               signups)))
def fake_surroundings(len_poem, size_surroundings=5):
    """
    Draw, for every verse of a poem, a random index taken from that verse's
    neighbourhood.

    :param len_poem: number of verses in the poem
    :param size_surroundings: distance from the reference verse (default 5)
    :return: list of drawn indices, one per verse
    """
    positions = np.arange(len_poem)
    # Lower bounds: clip (rank - surroundings) at zero.
    low = np.max(np.stack([np.zeros(len_poem),
                           positions - size_surroundings]), axis=0)
    # Upper bounds: clip (rank + surroundings) at the poem length.
    high = np.min(np.stack([np.repeat(len_poem, len_poem),
                            positions + size_surroundings]), axis=0)
    # One uniform draw per verse inside its personal window.
    return np.random.randint(low=low, high=high).tolist()
def interpolate_missing(sparse_list):
    """Estimate values for missing (None) samples via linear interpolation.

    Known samples keep their original values; each None entry is replaced by
    numpy.interp's linear estimate from the known samples (so leading or
    trailing gaps are clamped to the nearest known value).
    """
    filled = list(sparse_list)
    known = [(i, v) for i, v in enumerate(sparse_list) if v is not None]
    missing = [i for i, v in enumerate(sparse_list) if v is None]
    if missing:
        xs = [i for i, _ in known]
        ys = [v for _, v in known]
        for i, estimate in zip(missing, numpy.interp(missing, xs, ys)):
            filled[i] = estimate
    return filled
def remove_pipeline(name: str, version: Optional[Union[str, int]] = None, db: Session = DB_SESSION):
    """
    Removes a pipeline from the Feature Store

    Raises:
        SpliceMachineException: 404 when no pipeline with *name*/*version*
            exists; 406 when the pipeline is attached to a feature set
            (i.e. deployed) and must be undeployed first.
    """
    version = parse_version(version)
    pipelines = crud.get_pipelines(db, _filter={'name': name, 'pipeline_version': version})
    if not pipelines:
        raise SpliceMachineException(status_code=status.HTTP_404_NOT_FOUND, code=ExceptionCodes.DOES_NOT_EXIST,
                                     message=f"Pipeline {name} does not exist. Please enter a valid pipeline.")
    pipeline = pipelines[0]
    # A pipeline bound to a feature set is considered deployed and must not
    # be deleted out from under it.
    if pipeline.feature_set_id:
        raise SpliceMachineException(status_code=status.HTTP_406_NOT_ACCEPTABLE, code=ExceptionCodes.ALREADY_DEPLOYED,
                                     message=f"Cannot delete Pipeline {name} v{pipeline.pipeline_version} as it is "
                                             "already deployed. Please undeploy it first with fs.undeploy_pipeline()")
    # 'latest' resolves to the newest version actually fetched above.
    crud.delete_pipeline(db, pipeline.pipeline_id, pipeline.pipeline_version if version == 'latest' else version)
def auto_format_rtf(file_path, debug=False):
    """Replace every "\\line" control word in an .rtf file with "\\par".

    The converted text is written to a new file in the same directory with
    " MODIFIED" appended to the base name; the original file is untouched.

    :param file_path: complete path to the .rtf file to process
    :param debug: if True, progress messages are printed to the console
    :return: path of the newly written file, or None when *file_path* does
        not exist or is not an .rtf file
    """
    # Separate file name and extension for processing later.
    file_name, file_ext = os.path.splitext(os.path.basename(file_path))
    # Verify that the file exists and is .rtf before starting.
    if not (os.path.exists(file_path) and file_ext == ".rtf"):
        return None
    if debug:
        # Bug fix: these messages used .format() keyword arguments without
        # any matching placeholders, so the path/name was never displayed
        # (and "Modifiying" was misspelled).
        print("\nFile Operation Confirmed: {file_path}".format(
            file_path=file_path))
        print("    Modifying \"{filename}\".".format(
            filename=os.path.basename(file_path)))
    # Open the file and copy its data into memory.
    with open(file_path, "r") as file:
        text_data = file.read()
    if debug:
        print("    Successfully read data")
    # Replace the unwanted "\\line" with "\\par".
    # Operation performed on the entire data set instead of line by line.
    new_text_data = text_data.replace("\\line", "\\par")
    if debug:
        print("    Data format operation successful")
    # Build the output path: same directory, original name + " MODIFIED".
    new_file_name = file_name + " MODIFIED" + file_ext
    new_file = os.path.join(os.path.dirname(file_path), new_file_name)
    # Create the new file and write the converted data to it.
    with open(new_file, "w+") as file:
        file.write(new_text_data)
    if debug:
        print("    Created new file at \"{new_file}\"."
              .format(new_file=new_file))
        print("    Wrote data to \"{new_file_name}\".\n"
              .format(new_file_name=new_file_name))
    return new_file
def classifier_uncertainty(classifier: BaseEstimator, X: modALinput, **predict_proba_kwargs) -> np.ndarray:
    """
    Classification uncertainty of the classifier for the provided samples.

    Uncertainty is 1 - P(most likely class). A classifier that has not been
    fitted yet reports maximal uncertainty (1.0) for every sample.

    Args:
        classifier: The classifier for which the uncertainty is to be measured.
        X: The samples for which the uncertainty of classification is to be measured.
        **predict_proba_kwargs: Keyword arguments to be passed for the :meth:`predict_proba` of the classifier.

    Returns:
        Classifier uncertainty, which is 1 - P(prediction is correct).
    """
    try:
        class_probabilities = classifier.predict_proba(X, **predict_proba_kwargs)
    except NotFittedError:
        # An unfitted model cannot rank anything: report full uncertainty.
        return np.ones(shape=(X.shape[0], ))
    # The most probable class determines the confidence; invert it.
    return 1 - np.max(class_probabilities, axis=1)
def fetch_nature_scene_similarity(data_home: Optional[os.PathLike] = None, download_if_missing: bool = True,
                                  shuffle: bool = True, random_state: Optional[np.random.RandomState] = None,
                                  return_triplets: bool = False) -> Union[Bunch, np.ndarray]:
    """ Load the nature scene similarity dataset (odd-one-out).
    ===================   =====================
    Triplets                               3355
    Objects (Scenes)                        120
    ===================   =====================
    See :ref:`nature_vogue_dataset` for a detailed description.
    >>> dataset = fetch_nature_scene_similarity(shuffle=True)  # doctest: +REMOTE_DATA
    >>> dataset.image_label[[0, -1]].tolist()  # doctest: +REMOTE_DATA
    ['art114.jpg', 'n344019.jpg']
    >>> dataset.triplet.shape  # doctest: +REMOTE_DATA
    (3355, 3)
    Args:
        data_home : optional, default: None
            Specify another download and cache folder for the datasets. By default
            all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
        download_if_missing : optional, default=True
        shuffle: default = True
            Shuffle the order of triplet constraints.
        random_state: optional, default = None
            Initialization for shuffle random generator
        return_triplets : boolean, default=False.
            If True, returns numpy array instead of a Bunch object.
    Returns:
        dataset : :class:`~sklearn.utils.Bunch`
            Dictionary-like object, with the following attributes.
            triplet : ndarray, shape (n_triplets, 3)
                Each row corresponding odd-one-out query.
                The columns represent the odd image and two others.
            class_label : ndarray, shape (120, )
                Names of the scene images.
            DESCR : string
                Description of the dataset.
        triplets : numpy arrays (n_triplets, 3)
            Only present when `return_triplets=True`.
    Raises:
        IOError: If the data is not locally available, but download_if_missing=False
    """
    # Thin wrapper: delegate to the shared nature/vogue fetcher with the
    # 'nature' dataset selected.
    return _fetch_nature_vogue('nature', data_home, download_if_missing, shuffle, random_state, return_triplets)
def state_validation(instance):
    """Perform state validation.

    Checks that *instance* and its country are the expected model instances,
    that its ISO code resolves to an official pycountry subdivision, that
    its name matches the official subdivision name, and that the country is
    the USA.

    Raises:
        ValidationError: if any of the checks above fails.
    """
    entity = "State"
    check_instance(entity, instance)
    check_instance("Country", instance.country)
    # Note: underlying implementation may raise KeyError or return None
    try:
        official = pycountry.subdivisions.get(code=instance.iso_code)
    except KeyError:
        official = None
    if not official:
        raise ValidationError(_(invalid_iso(entity)),
                              params={'value': instance.iso_code})
    if official.name != instance.name:
        raise ValidationError(
            _(invalid_name(entity)),
            params={'value': instance.name,
                    'expected': official.name})
    if not instance.country.is_usa():
        # Bug fix: message previously read "%(country)s is an invalid for
        # %(state)s." which is ungrammatical.
        raise ValidationError(_("%(country)s is invalid for %(state)s."),
                              params={'country': instance.country.name,
                                      'state': instance.name})
def test_preserve_scalars():
    """ test the preserve_scalars decorator """
    class Test():
        @misc.preserve_scalars
        def meth(self, arr):
            return arr + 1
    t = Test()
    # A scalar argument should come back as a plain scalar ...
    assert t.meth(1) == 2
    # ... while array arguments keep their array-ness and shape.
    np.testing.assert_equal(t.meth(np.ones(2)), np.full(2, 2))
def test_empty_video_id():
    """
    Tests that an empty video id does not give a result
    """
    result = stream_to_s3("")
    # An empty id is expected to yield a falsy result rather than an upload.
    assert not result
def from_string(zma_str, one_indexed=True, angstrom=True, degree=True):
    """ read a z-matrix from a string

    :param zma_str: serialized z-matrix text parsed by ar.zmatrix.read
    :param one_indexed: passed through to automol.create.zmat.from_data
    :param angstrom: passed through to automol.create.zmat.from_data
    :param degree: passed through to automol.create.zmat.from_data
    :return: the z-matrix object built by automol
    """
    syms, key_mat, name_mat, val_dct = ar.zmatrix.read(zma_str)
    # Expand named values into a matrix parallel to name_mat; unnamed slots
    # stay None.
    val_mat = tuple(tuple(val_dct[name] if name is not None else None
                          for name in name_mat_row)
                    for name_mat_row in name_mat)
    zma = automol.create.zmat.from_data(
        syms, key_mat, val_mat, name_mat, one_indexed=one_indexed,
        angstrom=angstrom, degree=degree)
    return zma
def search(raw_query, query_type='/fast/all'):
    """
    Hit the FAST API for names.

    Args:
        raw_query: free-text query string.
        query_type: Refine type id used to pick the FAST query index.

    Returns:
        Up to three result dicts (id, name, score, match, type) sorted by
        descending fuzzy-match score; an empty list when the API call fails.
    """
    out = []
    unique_fast_ids = []
    query = text.normalize(raw_query, PY3).replace('the university of', 'university of').strip()
    query_type_meta = [i for i in refine_to_fast if i['id'] == query_type]
    if not query_type_meta:
        query_type_meta = default_query
    query_index = query_type_meta[0]['index']
    try:
        # FAST api requires spaces to be encoded as %20 rather than +.
        # NOTE(review): urllib.quote is Python 2 only (urllib.parse.quote on
        # Python 3); left as-is because the surrounding code targets py2.
        url = api_base_url + '?query=' + urllib.quote(query)
        url += '&rows=30&queryReturn=suggestall%2Cidroot%2Cauth%2cscore&suggest=autoSubject'
        url += '&queryIndex=' + query_index + '&wt=json'
        app.logger.debug("FAST API url is " + url)
        resp = requests.get(url)
        results = resp.json()
    except Exception as e:
        # Bug fix: "except Exception, e" is pre-2.6-only syntax; "as" works
        # on both Python 2.6+ and Python 3.
        app.logger.warning(e)
        return out
    for item in results['response']['docs']:
        match = False
        name = item.get('auth')
        alternate = item.get('suggestall')
        if len(alternate) > 0:
            alt = alternate[0]
        else:
            alt = ''
        fid = item.get('idroot')
        fast_uri = make_uri(fid)
        # The FAST service returns many duplicates; skip ids already seen.
        if fid in unique_fast_ids:
            continue
        unique_fast_ids.append(fid)
        # Score against both the authorized form and the first alternate
        # form, keeping the better of the two.
        score_1 = fuzz.token_sort_ratio(query, name)
        score_2 = fuzz.token_sort_ratio(query, alt)
        score = max(score_1, score_2)
        if query == text.normalize(name, PY3):
            match = True
        elif query == text.normalize(alt, PY3):
            match = True
        resource = {
            "id": fast_uri,
            "name": name,
            "score": score,
            "match": match,
            "type": query_type_meta
        }
        out.append(resource)
    # Sort by score; Refine only will handle top three matches.
    sorted_out = sorted(out, key=itemgetter('score'), reverse=True)
    return sorted_out[:3]
def all_data():
    """Aggregate passenger arrival/departure totals for 2013-2015 from
    'airtrafficstats.csv', render them to 'pass_2013-2015.svg' as a bar
    chart, and print the totals.
    """
    import csv
    import pygal as pg
    # Bug fix: the file handle was opened inline and never closed; use a
    # context manager and materialize the rows before the file closes.
    with open('airtrafficstats.csv', newline='') as csv_file:
        main_data = list(csv.reader(csv_file))
    # Keep only passenger rows (column 1 holds the traffic category).
    pass_ = [row for row in main_data if row[1] == "Passenger"]
    # Sum arrivals (column 5) and departures (column 6) per year (column 3).
    years = ('2013', '2014', '2015')
    arrivals = dict.fromkeys(years, 0)
    departures = dict.fromkeys(years, 0)
    for row in pass_:
        year = row[3]
        if year in arrivals:
            arrivals[year] += int(row[5])
            departures[year] += int(row[6])
    arrive_totals = [arrivals[y] for y in years]
    depart_totals = [departures[y] for y in years]
    graph = pg.Bar()
    graph.title = 'Passenger 2013 - 2015'
    graph.x_labels = ['2013', '2014', '2015']
    graph.add('Arrive | From', arrive_totals)
    # NOTE(review): "Deperture" is misspelled in the chart legend; kept
    # verbatim in case anything matches on the series name.
    graph.add('Deperture | To', depart_totals)
    graph.render_to_file('pass_2013-2015.svg')
    print(arrive_totals)
    print(depart_totals)
def get_jmp_addr(bb):
    """
    @param bb List of PseudoInstructions of one basic block
    @return Address of the first 'jmp_T' instruction in this basic block,
            or None when the block contains no jump
    """
    return next((inst.addr for inst in bb if inst.inst_type == 'jmp_T'), None)
def mock_publish_from_s3_to_redis_err(
        work_dict):
    """mock_publish_from_s3_to_redis_err

    Mock of the s3-to-redis publisher that always reports an ERR status.
    The payload is read from the TEST_S3_CONTENTS environment variable and
    mirrored into os.environ under the target redis key.

    :param work_dict: dictionary for driving the task
    :return: dict with 'status' (always ae_consts.ERR), 'err' (None) and
        'rec' holding the utf-8 encoded payload (or None when unset)
    """
    env_key = 'TEST_S3_CONTENTS'
    redis_key = work_dict.get(
        'redis_key',
        env_key)
    str_dict = ae_consts.ev(
        env_key,
        None)
    log.info(
        'mock_publish_from_s3_to_redis_err - '
        f'setting key={redis_key} value={str_dict}')
    data = None
    if str_dict:
        # Mirror the payload into the environment and encode it for the
        # returned record.
        os.environ[redis_key] = str_dict
        data = str_dict.encode('utf-8')
    else:
        os.environ[redis_key] = ''
        data = None
    # Error status is forced regardless of the data outcome.
    status = ae_consts.ERR
    err = None
    return {
        'status': status,
        'err': err,
        'rec': {
            'data': data
        }
    }
def open_db_conn(db_file=r'/home/openwpm/Desktop/crawl-data.sqlite'):
    """Open a connection to an SQLite database.

    :param db_file: path to the SQLite database file
    :return: an open sqlite3.Connection, or None when the connection fails
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error as e:
        # Bug fix: catch only sqlite3 errors instead of a blanket Exception,
        # so unrelated programming errors are no longer silently swallowed.
        print(e)
        return None
def test_default_stock_details_(dummy_request, db_session):
    """test query db with incorrect key

    Calling the detail view with an unknown GET key should yield a 404
    response object rather than rendering a detail page.
    """
    from ..views.entry import detail_view
    dummy_request.GET = {'sdcs': 'ss'}
    response = detail_view(dummy_request)
    from pyramid.httpexceptions import HTTPNotFound
    assert isinstance(response, HTTPNotFound)
def update_link(src: Path, dest: Path) -> None:
    """
    Create/move a symbolic link at 'dest' to point to 'src'.
    If dest is a symbolic link and it points to a different target file than
    src, it changes the link to point to src.
    If dest does not exist the link is created.
    If dest exists but is not a symbolic link, a warning is printed to
    STDERR and dest is left untouched.
    Args:
        src: link target file
        dest: link location
    """
    if dest.is_symlink():
        # Re-point the link only when it currently targets something else.
        if os.readlink(dest) != str(src):
            dest.unlink()
            dest.symlink_to(src)
    elif dest.exists():
        # A real file/directory is in the way; warn instead of clobbering it.
        print(
            f"WARNING: Cannot create sym_link: {src} -> {dest}",
            file=stderr,
        )
    else:
        dest.symlink_to(src)
def totals_per_time_frame(data_points, time_frame):
    """For a set of data points from a single CSV file, calculate the
    percent retransmissions within each consecutive time window.

    Args:
        data_points (List[List[int,int,float]]): A list of data points.
            Each data point consists of
            0: 1 if is a transmission,
            1: 1 if is a retransmission,
            2: time in seconds
        time_frame (float): width in seconds of each aggregation window

    Returns:
        List[List[float,float]]: data points of [percent retransmissions,
        window start time]; the trailing partial window is not reported.
    """
    window_start = 0
    window_end = time_frame
    results = []
    transmissions = 0
    retransmissions = 0
    i = 0
    while window_end < data_points[-1][2] and i < len(data_points):
        point = data_points[i]
        if window_start <= point[2] < window_end:
            transmissions += point[0] + point[1]
            retransmissions += point[1]
            i += 1
        else:
            # Close out the current window and advance; the data point
            # itself is re-examined against the next window.
            if transmissions > 0:
                pct = 100 * retransmissions / transmissions
            else:
                pct = 0
            results.append([pct, window_start])
            window_start = window_end
            window_end += time_frame
            transmissions = 0
            retransmissions = 0
    return results
def main():
    """Use the GitHub API to print Pull Requests with review requests."""
    token = get_token(GITHUB_API_TOKEN, GH_CONFIG_FILE, HUB_CONFIG_FILE)
    # NOTE(review): `session` is created but never used below -- confirm
    # whether get_review_requests_defaults() should receive it.
    session = github_session(token)
    data = get_review_requests_defaults()
    # Keep just the fields worth showing in the table.
    pull_requests = [
        {
            "url": pull_request["html_url"],
            "user": pull_request["user"]["login"],
            "title": pull_request["title"],
        }
        for pull_request in data["items"]
    ]
    table = tabulate(pull_requests, headers="keys", showindex=True)
    print(table)
def calc_2phase_vu(datetime_stamps, dsmx, smx, config):
    """Compute 2-phase image-based results for each view.

    For every view, multiplies the view's '%04d.hdr' daylight-matrix series
    with the sky matrix into a per-view result directory (recreated if it
    already exists), then renames the numbered output frames to their
    datetime stamps.

    :param datetime_stamps: sequence of stamp strings, one per output frame
        (assumes sorted frame order matches this sequence -- TODO confirm)
    :param dsmx: mapping of view name -> directory of '%04d.hdr' inputs
    :param smx: sky matrix used for the multiplication
    :param config: run configuration providing resdir and name
    """
    logger.info("Computing for 2-phase image-based results")
    for view in dsmx:
        opath = os.path.join(config.resdir, f'view_{config.name}_{view}')
        # Start from a clean result directory for this view.
        if os.path.isdir(opath):
            shutil.rmtree(opath)
        util.sprun(
            mtxmult.imgmult(os.path.join(dsmx[view], '%04d.hdr'), smx, odir=opath))
        ofiles = [os.path.join(opath, f) for f in sorted(os.listdir(opath))
                  if f.endswith('.hdr')]
        # Rename frame-numbered outputs to their datetime stamps.
        for idx, val in enumerate(ofiles):
            shutil.move(val, os.path.join(opath, datetime_stamps[idx]+'.hdr'))
def extract_lesional_clus(label, input_scan, scan, options):
    """
    find cluster components in the prediction
    corresponding to the true label cluster

    Thresholds the prediction, cleans it morphologically, labels connected
    components, selects the component with the highest Dice overlap against
    *label*, writes it out as a NIfTI file, and returns the binary mask.

    :param label: ground-truth lesion mask
    :param input_scan: predicted probability/segmentation volume
    :param scan: scan identifier (unused inside this function)
    :param options: dict of experiment options (t_bin, l_min, paths, ...)
    :return: binary mask of the selected cluster
    """
    t_bin = options['t_bin']
    # t_bin = 0
    l_min = options['l_min']
    # NOTE(review): output_scan is allocated but never used below.
    output_scan = np.zeros_like(input_scan)
    # threshold input segmentation
    t_segmentation = input_scan > t_bin
    # t_segmentation = input_scan > 0
    # perform morphological operations (dilation of the erosion of the input)
    morphed = binary_opening(t_segmentation, iterations=1)
    # morphed = t_segmentation
    # label connected components
    morphed = nd.binary_fill_holes(morphed, structure=np.ones((5,5,5))).astype(int)
    pred_labels, _ = nd.label(morphed, structure=np.ones((3,3,3)))
    label_list = np.unique(pred_labels)
    num_elements_by_lesion = nd.labeled_comprehension(morphed, pred_labels, label_list, np.sum, float, 0)
    # NOTE(review): `num_elements_by_lesion > l_min` is a boolean array, so
    # len(...) equals len(num_elements_by_lesion) and l_min does not filter
    # anything here -- confirm the intended minimum-size behaviour.
    Y = np.zeros((len(num_elements_by_lesion > l_min)))
    # Dice score of each candidate component against the ground truth.
    for l in range(len(num_elements_by_lesion > l_min)):
        Y[l] = dc(label, (pred_labels == l))
    clus_ind = np.where(Y == Y.max())
    # NOTE(review): np.where returns a tuple of arrays; the comparisons
    # below rely on broadcasting against that tuple -- verify this behaves
    # as intended when multiple components tie for the best Dice score.
    lesion_pred = np.copy(pred_labels)
    lesion_pred[lesion_pred != clus_ind] = 0
    lesion_pred[lesion_pred == clus_ind] = 1
    lesion_pred_out = nib.Nifti1Image(lesion_pred, np.eye(4))
    options['test_lesion_pred'] = options['experiment'] + '_' + options['test_scan'] + '_out_lesion_pred_only.nii.gz'
    lesion_pred_out.to_filename(os.path.join(options['pred_folder'], options['test_lesion_pred']))
    return lesion_pred
def glColor3fv(v):
    """
    Set the current GL color from a 3-component float sequence.

    v - seq( GLfloat, 3)
    Raises TypeError when v does not hold exactly three elements.
    """
    if len(v) != 3:
        raise TypeError(len(v), "3-array expected")
    _gllib.glColor3fv(v)
def feed_to_hdf5(feature_vector, subject_num, utterance_train_storage, utterance_test_storage, label_train_storage,
                 label_test_storage):
    """
    :param feature_vector: The feature vector for each sound file of shape: (num_frames,num_features_per_frame,num_channles.)
    :param subject_num: The subject class in 'int' format.
    :param utterance_train_storage: The HDF5 object for storing train utterance feature maps.
    :param utterance_test_storage: The HDF5 object for storing test utterance feature maps.
    :param label_train_storage: The HDF5 object for storing train label.
    :param label_test_storage: The HDF5 object for storing test label.
    :return: Each utterance will be stored in HDF5 file.
    """
    # Sliding-window parameters: each sample stacks 20 windows of 80 frames,
    # advancing 20 frames between samples.
    num_utterances_per_speaker = 20
    stride_step = 20
    utterance_length = 80
    num_frames = feature_vector.shape[0]
    num_samples = int(np.floor((num_frames - utterance_length - num_utterances_per_speaker) / float(stride_step))) + 1
    # The first 80% of the samples are intended for training.
    range_training = range(int(4 * num_samples / 5))
    # NOTE(review): the computed 80% split is immediately overridden so only
    # the first sample is used -- looks like a debugging leftover; confirm
    # before removing.
    range_training = range(1)
    for sample_index in range_training:
        # initial index of each utterance
        init = sample_index * stride_step
        utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
        for utterance_speaker in range(num_utterances_per_speaker):
            utterance[:, :, :, utterance_speaker] = feature_vector[None,
                                                    init + utterance_speaker:init + utterance_speaker + utterance_length,
                                                    :, 0]
        utterance_train_storage.append(utterance)
        label_train_storage.append((np.array([subject_num + 1], dtype=np.int32)))
    # The second half of each sound file will be used for testing on the same subject.
    range_testing = range(int(4 * num_samples / 5), int(num_samples))
    # NOTE(review): likewise overridden to a single fixed sample (index 1).
    range_testing = range(1,2)
    for sample_index in range_testing:
        # initial index of each utterance
        init = sample_index * stride_step
        utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
        for utterance_speaker in range(num_utterances_per_speaker):
            utterance[:, :, :, utterance_speaker] = feature_vector[None,
                                                    init + utterance_speaker:init + utterance_speaker + utterance_length,
                                                    :, 0]
        utterance_test_storage.append(utterance)
        label_test_storage.append((np.array([subject_num + 1], dtype=np.int32)))
def add_port_fwd(
    zone, src, dest, proto="tcp", dstaddr="", permanent=True, force_masquerade=False
):
    """
    Add a port-forwarding rule to a firewalld zone.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_port_fwd public 80 443 tcp

    force_masquerade
        when a zone is created ensure masquerade is also enabled
        on that zone.
    """
    # Forwarded traffic needs masquerading enabled on the zone to flow back.
    if force_masquerade and not get_masquerade(zone):
        add_masquerade(zone)
    forward_rule = "--zone={0} --add-forward-port=port={1}:proto={2}:toport={3}:toaddr={4}".format(
        zone, src, proto, dest, dstaddr
    )
    if not permanent:
        return __firewall_cmd(forward_rule)
    return __firewall_cmd(forward_rule + " --permanent")
def reduce_puzzle(values):
    """Repeatedly apply all constraint strategies to a Sudoku puzzle.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary once no strategy makes further progress,
        or False if any box has been emptied (puzzle unsolvable)
    """
    while True:
        solved_before = sum(1 for box in values if len(values[box]) == 1)
        # Apply each constraint-propagation strategy in turn.
        for strategy in (eliminate, only_choice, naked_twins):
            values = strategy(values)
        # An emptied box means a contradiction was reached.
        if any(len(values[box]) == 0 for box in values):
            return False
        solved_after = sum(1 for box in values if len(values[box]) == 1)
        if solved_before == solved_after:
            # Stalled: no additional boxes were solved this pass.
            return values
async def pin(msg):
    """Handle the .pin command: pin the replied-to message at the top of the chat."""
    chat = await msg.get_chat()
    # Pinning requires admin rights or chat ownership.
    if not (chat.admin_rights or chat.creator):
        await msg.edit(NO_ADMIN)
        return
    to_pin = msg.reply_to_msg_id
    if not to_pin:
        await msg.edit("`Reply to a message to pin it.`")
        return
    # "loud" notifies chat members; any other option pins silently.
    options = msg.pattern_match.group(1)
    is_silent = options.lower() != "loud"
    try:
        await msg.client(
            UpdatePinnedMessageRequest(msg.to_id, to_pin, is_silent))
    except BadRequestError:
        await msg.edit(NO_PERM)
        return
    await msg.edit("`Pinned Successfully!`")
    user = await get_user_from_id(msg.from_id, msg)
    if BOTLOG:
        # Mirror the action into the bot log channel.
        await msg.client.send_message(
            BOTLOG_CHATID, "#PIN\n"
            f"ADMIN: [{user.first_name}](tg://user?id={user.id})\n"
            f"CHAT: {msg.chat.title}(`{msg.chat_id}`)\n"
            f"LOUD: {not is_silent}")
def read_int(handle):
    """Read a little-endian signed 32-bit integer from a binary file handle.

    Args:
        handle (file): Binary file object positioned at the integer.

    Returns:
        The decoded integer value.
    """
    (value,) = struct.unpack("<i", handle.read(4))
    return value
def prem_to_av(t):
    """Premium amount credited to the account value at time t.

    Computed as the per-policy premium net of loadings multiplied by the
    number of policies in force before decrement.

    .. seealso::

        * :func:`load_prem_rate`
        * :func:`premium_pp`
        * :func:`pols_if_at`
    """
    per_policy = prem_to_av_pp(t)
    in_force = pols_if_at(t, "BEF_DECR")
    return per_policy * in_force
def remove_close(points, radius):
    """
    Cull an (n, d) point set (d = 2 or 3) so no two points are closer than radius.

    :param points: an (n, d) array-like of points
    :param radius: minimum allowed pairwise distance
    :return: (culled_points, mask) where mask marks the kept rows of points

    author: revised by weiwei
    date: 20201202
    """
    from scipy.spatial import cKDTree
    tree = cKDTree(points)
    # get the index of every pair of points closer than our radius
    pairs = tree.query_pairs(radius, output_type='ndarray')
    # how often each vertex index appears in a pair
    # this is essentially a cheaply computed "vertex degree"
    # in the graph that we could construct for connected points
    count = np.bincount(pairs.ravel(), minlength=len(points))
    # for every pair we know we have to remove one of them
    # which of the two options we pick can have a large impact
    # on how much over-culling we end up doing
    column = count[pairs].argmax(axis=1)
    # take the value in each row with the highest degree
    # there is probably better numpy slicing you could do here
    highest = pairs.ravel()[column + 2 * np.arange(len(column))]
    # mask the vertices by index
    # np.bool was removed in NumPy 1.24; the builtin bool is the correct dtype.
    mask = np.ones(len(points), dtype=bool)
    mask[highest] = False
    if tol.strict:
        # verify we actually did what we said we'd do
        test = cKDTree(points[mask])
        assert len(test.query_pairs(radius)) == 0
    return points[mask], mask
def pose_mof2mat_v1(mof, rotation_mode='euler'):
    """
    ### Out-of-Memory Issue ###
    Convert per-pixel 6DoF parameters to transformation matrices.

    NOTE(review): flattening to [B*H*W, 6] materialises one 3x3 rotation per
    pixel, which appears to be the out-of-memory issue flagged above.

    Args:
        mof: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6, H, W]
        rotation_mode: 'euler' or 'quat'; selects the rotation parameterisation.
    Returns:
        A transformation matrix -- [B, 3, 4, H, W]
    """
    bs, _, hh, ww = mof.size()
    # Flatten the spatial grid so each pixel is one 6DoF row.
    mof = mof.permute(0,2,3,1).reshape(-1,6) # [B*N, 6]
    translation = mof[:,:3].unsqueeze(-1) # [B*N, 3, 1]
    rot = mof[:,3:] # [B*N, 3]
    if rotation_mode == 'euler':
        rot_mat = euler2mat(rot) # [B*N, 3, 3]
    elif rotation_mode == 'quat':
        rot_mat = quat2mat(rot) # [B*N, 3, 3]
    # Assemble [R | t] per pixel, then restore the spatial layout.
    transform_mat = torch.cat([rot_mat, translation], dim=2) # [B*N, 3, 4]
    transform_mat = transform_mat.reshape(bs, hh, ww, 3, 4).permute(0,3,4,1,2) # [B, 3, 4, H, W]
    return transform_mat
def kmercountexact(forward_in, reverse_in='NA', returncmd=False, **kwargs):
    """
    Wrapper for kmercountexact.sh.

    :param forward_in: Forward input reads.
    :param reverse_in: Reverse input reads. Auto-detected for _R1/_R2 naming.
    :param returncmd: If True, also return the command string passed to subprocess.
    :param kwargs: Arguments forwarded to kmercountexact in parameter='argument'
        format; see the kmercountexact documentation for the full list.
    :return: (out, err) stdout/stderr strings, plus the command when returncmd.
    """
    options = kwargs_to_string(kwargs)
    # Auto-detect the mate file for the _R1/_R2 naming convention.
    mate = forward_in.replace('_R1', '_R2')
    if reverse_in == 'NA' and '_R1' in forward_in and os.path.isfile(mate):
        reverse_in = mate
    if reverse_in == 'NA':
        cmd = 'kmercountexact.sh in={} {}'.format(forward_in, options)
    else:
        cmd = 'kmercountexact.sh in={} in2={} {}'.format(forward_in, reverse_in, options)
    out, err = accessoryfunctions.run_subprocess(cmd)
    return (out, err, cmd) if returncmd else (out, err)
def update(isamAppliance, local, remote_address, remote_port, remote_facility, check_mode=False, force=False):
    """
    Update the logging configuration, applying changes only when required
    (or when force is set). Honors check_mode by reporting without applying.
    """
    desired = {
        "local": local,
        "remote_address": remote_address,
        "remote_port": remote_port,
        "remote_facility": remote_facility
    }
    change_required, warnings = _check(isamAppliance, desired)
    # Nothing to do: current configuration already matches.
    if not (force is True or change_required is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_put("Updating logging configuration attributes", module_uri, desired,
                                    requires_modules=requires_modules,
                                    requires_version=requires_versions,
                                    requires_model=requires_model)
def install_requirements():
    """Install requirements inside the virtualenv.

    Activates the project environment (via source_env) and force-reinstalls
    everything pinned in requirements.txt.
    """
    with source_env():
        run('pip install --force-reinstall -Ur requirements.txt')
def save_vocab(vocab, separate_domains, crop, output_dir, vocab_filename):
    """Save vocabulary to file.

    Args:
        vocab (list of String): the list of vocabularies.
        separate_domains (Boolean): indicate the plaintext and cipher are in
            separate domains; post-crop words are then written twice with an
            offset index so each domain gets distinct ids.
        crop (Integer): the amount of crop applied.
        output_dir (String): path to output directory (created if missing).
        vocab_filename (String): output vocabulary filename.
    """
    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair.
    os.makedirs(output_dir, exist_ok=True)
    # "with" guarantees the file is closed even if a write raises.
    with open(os.path.join(output_dir, vocab_filename), 'w') as mapping_fd:
        for i, word in enumerate(vocab):
            mapping_fd.write("{0}. {1}\n".format(i, word))
            if separate_domains and i >= crop:
                # Duplicate the word under an offset id for the second domain.
                mapping_fd.write("{0}. {1}\n".format(i + len(vocab) - crop, word))
def plot_label_per_class(y_data, LABELS):
    """Plot a bar chart of per-class label counts, annotated with class names.

    MOD: uses the LABELS dict to annotate each bar with its human-readable name.
    """
    classes = sorted(np.unique(y_data))
    fig, ax = plt.subplots(1, 1, figsize=(12, 4))
    count_plot = sns.countplot(y_data, order=classes)
    count_plot.set_title("Number of labels for each class")
    # Annotate each bar with the readable class name just above its top.
    for bar, cls in zip(count_plot.patches, classes):
        count_plot.annotate(LABELS[cls], (bar.get_x(), bar.get_height() + 0.2))
    plt.show()
def get_model_and_tokenizer(
    model_name_or_path: str,
    tokenizer_name_or_path: str,
    auto_model_type: _BaseAutoModelClass,
    max_length: int = constants.DEFAULT_MAX_LENGTH,
    auto_model_config: AutoConfig = None,
) -> Tuple[AutoModelForSequenceClassification, AutoTokenizer]:
    """Load a transformer model and its tokenizer.

    Args:
        model_name_or_path (str): model name or checkpoint path
        tokenizer_name_or_path (str): tokenizer name or path
        auto_model_type (_BaseAutoModelClass): auto model class such as
            AutoModelForSequenceClassification
        max_length (int): max text length passed to the tokenizer
        auto_model_config (AutoConfig): optional config forwarded to the model

    Returns:
        Tuple[AutoModelForSequenceClassification, AutoTokenizer]: model and tokenizer
    """
    logger.info(f"Loading model: {model_name_or_path}")
    # Forward the config only when one was supplied.
    load_kwargs = {}
    if auto_model_config:
        load_kwargs["config"] = auto_model_config
    model = auto_model_type.from_pretrained(model_name_or_path, **load_kwargs)
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path, max_length=max_length
    )
    return model, tokenizer
def get_forecast_by_coordinates(
    x: float,
    y: float,
    language: str = "en"
) -> str:
    """
    Get the weather forecast for the site closest to the coordinates (x, y).

    Nearest-site lookup is delegated to get_weather_site_by_coordinates
    (scipy kd-tree nearest neighbor).

    Parameters
    ----------
    x : float
        Longitude of the query point.
    y : float
        Latitude of the query point.
    language: str
        The language to retrieve the forecast in. Allowed values: "en", "fr".

    Returns
    -------
    str
        The XML weather forecast.
    """
    nearest_site = get_weather_site_by_coordinates(x, y)
    props = nearest_site['properties']
    return get_forecast_by_site_code(
        site_code=props['Codes'],
        province_code=props['Province Codes'],
        language=language
    )
def get_calculated_energies(stem, data=None):
    """Extract the calculated energies from the XML tree into *data*.

    :param stem: XML element containing a 'calculation' child section.
    :param data: Optional dict to populate; a new one is created when None.
    :return: dict mapping energy keys to float values.
    """
    result = {} if data is None else data
    calculation = stem.find('calculation')
    for key, path in VASP_CALCULATED_ENERGIES.items():
        # Each section's first whitespace-separated token is the numeric value.
        raw = get_text_from_section(calculation, path, key)
        result[key] = float(raw.split()[0])
    return result
def main():
    """Top-level entrypoint; returns a shell status code."""
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        handler = commands.get(argv[1]) if len(argv) >= 2 else None
        if handler is not None:
            return handler(argv[2:])
        # Fall through to top-level pip main() for everything else:
        return pip.main()
    except PipException as exc:
        return exc.error_code
def test_analyze__analyze_storage__7(zodb_storage, zodb_root):
"""It analyzes contents of `BTree`s."""
zodb_root['tree'] = BTrees.OOBTree.OOBTree()
zodb_root['tree']['key'] = b'bïnäry'
zodb_root['tree']['stuff'] = [{'këy': b'binäry'}]
zodb_root['tree']['iter'] = [u'unicode_string']
transaction.commit()
result, errors = analyze_storage(zodb_storage)
assert {
"BTrees.OOBTree.OOBTree['key'] is string": 1,
"BTrees.OOBTree.OOBTree['stuff'] is iterable": 1,
} == result
assert {} == errors | 37,155 |
def reshape(spectra):
    """Expand a packed 1-d array of spherical-harmonic modes into 2-d.

    Args:
        spectra (np.ndarray): 1 dimensional storage of 2d spherical modes

    Returns:
        np.ndarray:
            2-dimensional array of the reshaped input with zonal and meridional
            wavenumber coordinates
    """
    # A (n, 2) real array is interpreted as real/imaginary parts.
    if spectra.ndim == 2:
        spectra = spectra[:, 0] + spectra[:, 1]*1j
    if spectra.ndim != 1:
        raise ValueError('Spectra must be a 1-dimensional array')
    # Deduce truncation from shape
    trunc = find_trunc(len(spectra))
    spectra_2d = np.zeros((trunc, trunc))
    # Row i holds trunc - i packed coefficients, placed from the diagonal out.
    start = 0
    for row in range(trunc):
        width = trunc - row
        spectra_2d[row, row:trunc] = spectra[start:start + width]
        start += width
    return spectra_2d
def preprocess(save_dir, reactants, products, reaction_types=None):
    """
    Preprocess reaction data to extract graph adjacency matrices and features.

    For each reactant/product SMILES pair, builds self-looped boolean adjacency
    matrices, reorders the reactant adjacency into product atom order using the
    atom-map indices, and pickles one record per reaction into save_dir.

    :param save_dir: directory the per-reaction pickle files are written to.
    :param reactants: list of reactant SMILES strings.
    :param products: list of product SMILES strings, aligned with reactants.
    :param reaction_types: optional list of reaction-type labels, stored
        verbatim in each record.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for index in tqdm(range(len(reactants))):
        product = products[index]
        reactant = reactants[index]
        product_mol = Chem.MolFromSmiles(product)
        reactant_mol = Chem.MolFromSmiles(reactant)
        # Add self-loops so every atom is adjacent to itself.
        product_adj = Chem.rdmolops.GetAdjacencyMatrix(product_mol)
        product_adj = product_adj + np.eye(product_adj.shape[0])
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        product_adj = product_adj.astype(bool)
        reactant_adj = Chem.rdmolops.GetAdjacencyMatrix(reactant_mol)
        reactant_adj = reactant_adj + np.eye(reactant_adj.shape[0])
        reactant_adj = reactant_adj.astype(bool)
        patomidx2pmapidx = get_atomidx2mapidx(product_mol)
        rmapidx2ratomidx = get_mapidx2atomidx(reactant_mol)
        # Get indexes of reactant atoms without a mapping number
        # growing_group_index = rmapidx2ratomidx[0]
        # Get the reactant atom index list in the order of product atom index.
        order = get_order(product_mol, patomidx2pmapidx, rmapidx2ratomidx)
        target_adj = reactant_adj[order][:, order]
        # full_order = order + growing_group_index
        # reactant_adj = reactant_adj[full_order][:, full_order]
        product_bond_features = get_bond_features(product_mol)
        product_atom_features = get_atom_features(product_mol)
        rxn_data = {
            'rxn_type': reaction_types[index],
            'product_adj': product_adj,
            'product_mol': product_mol,
            'product_bond_features': product_bond_features,
            'product_atom_features': product_atom_features,
            'target_adj': target_adj,
            #'reactant_adj': reactant_adj,
            #'reactant_in_product_order': full_order,
            'reactant_mol': reactant_mol,
        }
        with open(os.path.join(save_dir, 'rxn_data_{}.pkl'.format(index)),
                  'wb') as f:
            pickle.dump(rxn_data, f)
def dict_factory(cursor, row):
    """Convert a result-row tuple into a dict keyed by column name.

    Suitable as a sqlite3 row_factory.

    :param cursor: DB cursor whose ``description`` names the columns.
    :param row: result tuple for one row.
    :return: dict mapping column name -> value.
    """
    column_names = (col[0] for col in cursor.description)
    return dict(zip(column_names, row))
def ping2(report):
    """Reply to the report's author with a pong private message."""
    author = report.author
    author.send_pm("pong2", "Hey %s! Pong!" % author.name)
def predict_batch(model, x_batch, dynamics, fast_init):
    """
    Compute the softmax prediction probabilities for a given data batch.

    Args:
        model: EnergyBasedModel
        x_batch: Batch of input tensors
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initilization
            is used for the prediction

    Returns:
        Softmax classification probabilities for the given data batch
    """
    # Initialize the neural state variables
    model.reset_state()
    # Clamp the input to the test sample, and remove nudging from ouput
    model.clamp_layer(0, x_batch.view(-1, model.dimensions[0]))
    model.set_C_target(None)
    # Generate the prediction: either a one-shot feedforward initialization
    # or full relaxation of the state variables u.
    if fast_init:
        model.fast_init()
    else:
        model.u_relax(**dynamics)
    # detach() drops the graph so the probabilities carry no gradients.
    return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
def render_openapi(api, request):
    """Prepare openapi specs.

    Builds an APISpec from the API's openapi_options, registers security
    schemes from the authorize handler's docs, and adds every route (except
    SKIP_PATH entries) as a path. Returns the spec as a plain dict.
    """
    # Setup Specs
    # NOTE(review): dict() is a shallow copy — the pops on options['info']
    # below mutate the nested dict shared with api.openapi_options; confirm
    # this function is only expected to run once per options object.
    options = dict(api.openapi_options)
    # Default server URL: the request URL trimmed down to the API prefix.
    options.setdefault('servers', [{
        'url': str(request.url.with_query('').with_path(api.prefix))
    }])
    spec = APISpec(
        options['info'].pop('title', f"{ api.app.cfg.name.title() } API"),
        options['info'].pop('version', '1.0.0'),
        options.pop('openapi_version', '3.0.0'),
        **options, plugins=[MarshmallowPlugin()])
    spec.tags = {}
    # Setup Authorization
    if api.authorize:
        _, _, schema = parse_docs(api.authorize)
        spec.options['security'] = []
        for key, value in schema.items():
            # Each documented scheme is registered and required globally.
            spec.components.security_scheme(key, value)
            spec.options['security'].append({key: []})
    # Setup Paths
    routes = api.router.routes()
    for route in routes:
        if route.path in SKIP_PATH:
            continue
        spec.path(route.path, **route_to_spec(route, spec))
    return spec.to_dict()
def apply_transformations(initial_representation: list, events: list) -> list:
    """Apply the transformations accumulated in *events* to the polygon.

    Translations are summed, rotation angles summed, and uniform scales
    multiplied across all events; the combined transform is then applied as
    scale -> rotate -> translate, all anchored at the origin.

    :param initial_representation: list of (x, y) polygon vertices.
    :param events: list of items whose "events" entries describe TRANSLATION,
        ROTATION or UNIFORM_SCALE triggers.
    :return: list of transformed polygon vertices.
    """
    scale = 1
    rot_angle = 0
    trans_vector = [0, 0]
    for item in events:
        for event in item["events"]:
            if event["type"] == "TRANSLATION":
                trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
                trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
            elif event["type"] == "ROTATION":
                rot_angle += event["trigger"]["transformation"]
            elif event["type"] == "UNIFORM_SCALE":
                scale *= event["trigger"]["transformation"]
    # Apply the combined transform: scale, then rotate, then translate.
    polygon = geometry.Polygon(initial_representation)
    s_polygon = affinity.scale(polygon,
                               xfact=scale,
                               yfact=scale,
                               origin=(0, 0))
    r_s_polygon = affinity.rotate(s_polygon,
                                  rot_angle,
                                  origin=(0, 0))
    # Use the named coordinate constants consistently with the loop above.
    t_r_s_polygon = affinity.translate(r_s_polygon,
                                       xoff=trans_vector[X_COORDINATE],
                                       yoff=trans_vector[Y_COORDINATE])
    return polygon_to_vertices_list(t_r_s_polygon)
def affine_transform(transform, points):
    """Transform an (N, 2) array of points with the given Affine object.

    :param transform: Affine transform convertible via affine_to_matrix.
    :param points: (N, 2) array of point coordinates.
    :return: (N, 2) array of transformed points.
    """
    # Lift points to homogeneous coordinates (3 x N) for the matrix product.
    ones_row = np.ones((1, points.shape[0]))
    homogeneous = np.vstack([points.T, ones_row])
    matrix = affine_to_matrix(transform)
    transformed = np.dot(matrix, homogeneous)
    # Drop the homogeneous row and restore the (N, 2) layout.
    return transformed.T[:, :2]
def disable_passwordless_registration() -> None:
    """Disable the passwordless-registration configuration option.

    Yields from tweak_config so the setting is restored afterwards
    (generator-based fixture style).
    """
    yield from tweak_config('USERS_PASSWORDLESS_REGISTRATION', False)
def get_current_time_in_millisecs():
    """Returns time in milliseconds since the Epoch.

    NOTE(review): datetime.utcnow() is deprecated in Python 3.12 in favor of
    timezone-aware datetime.now(datetime.UTC); consider migrating.
    """
    return get_time_in_millisecs(datetime.datetime.utcnow())
def _convert_named_signatures_to_signature_def(signatures):
    """Convert named signatures to object of type SignatureDef.

    Args:
        signatures: object of type manifest_pb2.Signatures()

    Returns:
        object of type SignatureDef which contains a converted version of named
        signatures from input signatures object

    Raises:
        RuntimeError: if input and output named signatures are not of type
            GenericSignature
    """
    signature_def = meta_graph_pb2.SignatureDef()
    # Only the predict input/output named signatures are converted.
    input_signature = signatures.named_signatures[
        signature_constants.PREDICT_INPUTS]
    output_signature = signatures.named_signatures[
        signature_constants.PREDICT_OUTPUTS]
    # TODO(pdudnik): what if there are other signatures? Mimic cr/140900781 once
    # it is submitted.
    if (input_signature.WhichOneof("type") != "generic_signature" or
        output_signature.WhichOneof("type") != "generic_signature"):
      raise RuntimeError("Named input and output signatures can only be "
                         "up-converted if they are generic signature. "
                         "Input signature type is %s, output signature type is "
                         "%s" % (input_signature.WhichOneof("type"),
                                 output_signature.WhichOneof("type")))
    signature_def.method_name = signature_constants.PREDICT_METHOD_NAME
    # Copy each named tensor into the SignatureDef's input/output maps.
    for key, val in input_signature.generic_signature.map.items():
      _add_input_to_signature_def(val.tensor_name, key, signature_def)
    for key, val in output_signature.generic_signature.map.items():
      _add_output_to_signature_def(val.tensor_name, key, signature_def)
    return signature_def
def release(branch=None, fork="sympy"):
    """
    Perform all the steps required for the release, except uploading

    In particular, it builds all the release files, and puts them in the
    release/ directory in the same directory as this one. At the end, it
    prints some things that need to be pasted into various places as part of
    the release.

    To test the release, push a branch to your fork on GitHub and set the fork
    option to your username.
    """
    # Start from a clean state, then fetch fresh clones of the repos.
    remove_userspace()
    gitrepos(branch, fork)
    # This has to be run locally because it itself uses fabric. I split it out
    # into a separate script so that it can be used without vagrant.
    local("../bin/mailmap_update.py")
    test_sympy()
    # Build the distributables (source tarball and docs), then stage them.
    source_tarball()
    build_docs()
    copy_release_files()
    # Smoke-test the built tarball under both Python major versions.
    test_tarball("2")
    test_tarball("3")
    compare_tar_against_git()
    print_authors()
def get_test_paths(paths, snaps):
    """
    Return *snaps* evenly spaced paths from *paths* to be tested on GLUE.

    A snaps value of -1 selects every path.
    """
    if snaps == -1:
        return paths
    # Pick the snapshot at the end of each of `snaps` equal-width intervals.
    interval = len(paths) * 1. / snaps
    return [paths[int(math.ceil(interval * i)) - 1]
            for i in range(1, snaps + 1)]
def save_games_list(games_list, location):
    """
    Save a games list. (Not implemented yet — placeholder stub.)

    games_list/tuple: (game_full_name, game_full_path, game_icon, game_dir, game_state)
    location/dir: full path.
    """
    pass
def is_success(code):
    """Return True when *code* is an HTTP 2xx success status (request was
    successfully received, understood, and accepted)."""
    return code in range(200, 300)
def is_palindrome_v3(s):
    """ (str) -> bool

    Return True if and only if s is a palindrome.

    >>> is_palindrome_v3('noon')
    True
    >>> is_palindrome_v3('racecar')
    True
    >>> is_palindrome_v3('dented')
    False
    >>> is_palindrome_v3('')
    True
    >>> is_palindrome_v3(' ')
    True
    """
    # A string is a palindrome iff it equals its own reverse.
    return s == s[::-1]
def _create_sample_and_placeholder_dataset(project, row):
    """Create the ExperimentSample and placeholder Datasets without copying data.

    Datasets are created in AWAITING_UPLOAD state; the actual read data is
    attached later by the upload flow.
    """
    # Parsing and validation.
    forward_filename = row['Read_1_Filename']
    reverse_filename = row.get('Read_2_Filename', '')
    assert forward_filename != reverse_filename
    # Now create the models.
    experiment_sample = ExperimentSample.objects.create(
        project=project, label=row['Sample_Name'])
    _create_fastq_dataset(
        experiment_sample, forward_filename, Dataset.TYPE.FASTQ1,
        Dataset.STATUS.AWAITING_UPLOAD)
    # Paired-end data gets a second placeholder dataset.
    if reverse_filename:
        _create_fastq_dataset(
            experiment_sample, reverse_filename,
            Dataset.TYPE.FASTQ2, Dataset.STATUS.AWAITING_UPLOAD)
    # Add extra metadata columns.
    _update_experiment_sample_data_for_row(experiment_sample, row,
            PRE_DEFINED_SAMPLE_UPLOAD_THROUGH_BROWSER_PARTS)
def read(fd: BinaryIO) -> Entity:
    """Parse a mug scene from the binary file object *fd*.

    Args:
        fd: File object to read from.

    Raises:
        ValueError: if the 4-byte magic header is not ``MUGS``.

    Returns:
        Root entity.
    """
    magic = fd.read(4)
    if magic != b'MUGS':
        raise ValueError("not a valid mug file format")
    return read_recursive(fd)
def transform(x, y):
    """
    Map each y value through 2 ** y_i, linearizing data assumed to come from
    a logarithmic relationship. The x vector is accepted for interface
    compatibility but does not affect the result.
    """
    return [2 ** value for value in y]
def _GRIsAreEnabled():
    """Returns True if GRIs are enabled.

    GRIs are on when either the enable_gri property is set or the
    resource_completion_style property is 'gri'.
    """
    return (properties.VALUES.core.enable_gri.GetBool() or
            properties.VALUES.core.resource_completion_style.Get() == 'gri')
def record_images(root_folder: Union[str, Path]):
    """Use opencv to record timestamped images from the webcam.

    Frames are read on a background CVReader thread and drained from a queue;
    each drained frame is written as a PNG into a timestamped subfolder of
    ``root_folder``/data. ESC or 'q' quits; ENTER or SPACE starts a new
    subfolder. Blocks until the user quits.
    """
    # Setup paths
    if not root_folder:
        root_folder = Path.cwd()
    root_folder = create_path(root_folder, 'data')
    current_subfolder = create_path(root_folder, dt_name(datetime.now()))
    print('Starting in new folder:', current_subfolder)
    # Setup Window
    cv2.namedWindow(__file__)
    # Setup queue and thread
    image_queue = queue.Queue()
    cvreader = CVReader(image_queue=image_queue)
    cvreader_thread = Thread(target = cvreader.run)
    cvreader_thread.start()
    while True:
        # Get frame
        frame = None
        # Drain the queue: every queued frame is written to disk, but only
        # the last one drained is displayed below.
        while not image_queue.empty():
            try:
                (frame, dt) = image_queue.get(block=True, timeout=1)
            except queue.Empty:
                frame = None
                continue
            if frame is not None:
                # Store frame
                img_name = current_subfolder.joinpath('frame_{}.png'.format(dt_name(dt)))
                cv2.imwrite(str(img_name), frame)
        # Only the last frame in a queue is shown (else the queue will grow)
        if frame is not None:
            # Show frame
            cv2.imshow(__file__, frame)
        # User interfaction
        key = cv2.waitKey(33)  # ~30 fps UI polling
        if key == -1:
            # No key pressed
            continue
        elif key in (27, ord('q')):
            # Quit (ESC, q)
            cvreader.terminate()
            print('Quit with:', key)
            break
        elif key in (13, 32):
            # Start in new folder (ENTER, SPACE)
            current_subfolder = create_path(root_folder, dt_name(datetime.now()))
            print('Starting in new folder:', current_subfolder)
    cv2.destroyAllWindows()
    # Wait for the reader thread to finish after terminate() was requested.
    cvreader_thread.join()
def fatorial(n, show=False):
    """
    -> Compute the factorial of a number.

    :param n: the number whose factorial is computed.
    :param show: (optional) also print the expanded product, e.g. "5 x 4 x 3 x 2 x 1 = ".
    :return: n! as an int (1 for n <= 0).

    Note: the previous implementation accumulated n*(n-1) per iteration,
    yielding (n+1)*n*(n-1) — only coincidentally equal to n! for n == 5.
    """
    print('-' * 35)
    resultado = 1
    # Multiply n, n-1, ..., 1; an empty range (n <= 0) leaves 0! == 1.
    for fator in range(n, 0, -1):
        resultado *= fator
    if show:
        # Print the expanded product without the result, matching the
        # original output format ("n x ... x 1 = ").
        for contando in range(n, 0, -1):
            print(contando, end='')
            if contando > 1:
                print(' x ', end='')
        print(' = ', end='')
    return resultado
def start_job(job, hal_id, refGenome, opts):
    """Set up the structure of the pipeline.

    Reroots the HAL species tree at refGenome, optionally prunes it to the
    requested target genomes, then recursively wires up merge/lift jobs from
    the leaves toward the reference. Returns the promise of the final MAF
    export job's result.
    """
    hal = hal_id
    # Newick representation of the HAL species tree.
    newick_string = get_hal_tree(hal)
    job.fileStore.logToMaster("Newick string: %s" % (newick_string))
    tree = newick.loads(newick_string)[0]
    rerooted = reroot_tree(tree, refGenome)
    job.fileStore.logToMaster("Rerooted newick string: %s" % (newick.dumps([rerooted])))
    if opts.targetGenomes is not None:
        # We don't need the alignment to all genomes, just a subset.
        prune_tree(rerooted, opts.targetGenomes)
        job.fileStore.logToMaster("Pruned newick string: %s" % newick.dumps(rerooted))
    def setup_jobs(node):
        """Recursively set up jobs for this node and its children."""
        prev_data = [setup_jobs(child) for child in node.descendants]
        # At this point all of the jobs for the lower parts of the tree have been set up.
        lifted_data = [prev_lifted for _, prev_lifted in prev_data]
        merge_job = job.wrapJobFn(merge_blocks_job, node.name, [n.name for n in node.descendants], lifted_data, hal_id, opts)
        # The merge for this node runs only after all child lifts complete.
        for prev_job, _ in prev_data:
            prev_job.addFollowOn(merge_job)
        if node.is_leaf:
            job.addChild(merge_job)
        if node.ancestor is None:
            return merge_job.rv()
        else:
            # Find out whether we have to lift up or down: compare this
            # node's parent in the rerooted tree against the original tree.
            original_node = find_node_by_name(tree, node.name)
            if original_node.ancestor is None or node.ancestor.name != original_node.ancestor.name:
                lift_down_job = merge_job.addFollowOnJobFn(lift_job, 'down', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
                return lift_down_job, lift_down_job.rv()
            else:
                lift_up_job = merge_job.addFollowOnJobFn(lift_job, 'up', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
                return lift_up_job, lift_up_job.rv()
    blocks_on_ref = setup_jobs(rerooted)
    all_genomes = [node.name for node in tree.walk()]
    return job.addFollowOnJobFn(maf_export_job, hal, all_genomes, blocks_on_ref, opts).rv()
def user_base(username):
    """Base path of user files: the user root from BASE joined with *username*."""
    return os.path.join(BASE['user'], username)
def write_html_columns(file):
    """Write all HTML kanban columns to *file* in display order."""
    # (css color class, column title, Things3 task getter)
    columns = (
        ("color1", "Backlog", THINGS3.get_someday),
        ("color8", "Grooming", THINGS3.get_cleanup),
        ("color5", "Upcoming", THINGS3.get_upcoming),
        ("color3", "Waiting", THINGS3.get_waiting),
        ("color4", "Inbox", THINGS3.get_inbox),
        ("color2", "MIT", THINGS3.get_mit),
        ("color6", "Today", THINGS3.get_today),
        ("color7", "Next", THINGS3.get_anytime),
    )
    for color, title, fetch_tasks in columns:
        write_html_column(color, file, title, fetch_tasks())
def main(event, context):
    # pylint: enable=unused-argument
    """
    Request a competition's leaderboard from Kaggle and publish it into a
    Pub/Sub topic.
    """
    data = decode(event["data"])
    competition = data["competition"]
    top = data.get("top")
    requested_at = datetime.now().isoformat()
    rows = _transform_leaderboard(
        _extract_leaderboard(competition), competition, requested_at)
    # Optionally keep only the top N entries before materializing.
    if top:
        rows = islice(rows, top)
    rows = list(rows)
    print(f"Publishing {len(rows)} messages to {TOPIC}...")
    for chunk in chunks(rows, chunksize=100):
        to_topic(chunk, TOPIC_NAME)
def get_args(arg_input):
    """Parse TPU-creation command-line arguments.

    Parameters
    -------------
    arg_input : list, shape (n_nargs,)
        contains list of arguments passed to function

    Returns
    -------------
    args : namespace
        contains namespace with keys and values for each parser argument
    """
    parser = argparse.ArgumentParser(description='tpu creation script')
    parser.add_argument(
        '--name',
        type=str,
        default='tpu',
        help='Name to use for tpu vm',
    )
    parser.add_argument(
        '--zone',
        type=str,
        default='europe-west4-a',
        help='zone',
    )
    parser.add_argument(
        '--version',
        type=str,
        default='tpu-vm-pt-1.11',
        help='software version to load',
    )
    parser.add_argument(
        '--accelerator-type',
        type=str,
        default='v3-8',
        help='accelerator type. Eg v3-8, v2-8',
    )
    parser.add_argument(
        '--project',
        type=str,
        default='trc-generative',
        help='gcloud project name',
    )
    parser.add_argument(
        '-n',
        '--number_of_tpus',
        type=int,
        default=1,
        # Previous help text ("Minimum number of atleast_tags required.") was
        # a copy-paste error unrelated to this argument.
        help='Number of TPU VMs to create.',
    )
    args = parser.parse_args(arg_input)
    return args
def cleanup_databases():
    """
    Returns:
        bool: admin_client fixture should ignore any existing databases at
            start of test and clean them up. Always False here: existing
            databases are left alone.
    """
    return False
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Registers the FunctionHandler under <base_url>/function, starts the
    default scheduler's continuous loop, and recovers previously stored jobs.

    Args:
        nb_server_app (NotebookApp): handle to the Notebook webserver instance.
    """
    nb_server_app.log.info('serverless module enabled!')
    web_app = nb_server_app.web_app
    # Prepend the base_url so that it works in a jupyterhub setting
    base_url = web_app.settings['base_url']
    base_url = url_path_join(base_url, 'function')
    # Start the background scheduling loop before requests can arrive.
    default_scheduler.run_continuously()
    # Routes: /function/<name> and the bare /function endpoint.
    handlers = [('{base}/(?P<function>[^?/]+)'.format(base=base_url),
                 FunctionHandler,
                 {"app": nb_server_app}
                 ),
                ('{base}'.format(base=base_url),
                 FunctionHandler,
                 {"app": nb_server_app}
                 )
                ]
    nb_server_app.log.info(handlers)
    recover_jobs(nb_server_app)
    web_app.add_handlers('.*$', handlers)
def _round_down(rough_value, increment, minimum=None, maximum=None):
"""Utility method for rounding a value down to an increment.
Args:
rough_value: A float. The initial value to be rounded.
increment: The increment to round down to.
minimum: Optional minimum value, default is increment.
maximum: Optional maximum value, default is the max int.
Returns:
An integer formed by rounding rough_value down to the nearest positive
number of increments while staying between minimum and maximum.
"""
if not minimum:
minimum = increment
if not maximum:
maximum = sys.maxint
rounded_value = rough_value - (rough_value % increment)
return int(min(maximum, max(minimum, rounded_value))) | 37,185 |
def document_index_list(request, document_id):
    """
    Show a list of indexes where the current document can be found.

    Checks global view/indexing permissions; on failure falls back to
    per-object access filtering of the index instance nodes.
    """
    document = get_object_or_404(Document, pk=document_id)
    object_list = []
    queryset = document.indexinstancenode_set.all()
    try:
        # TODO: should be AND not OR
        Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_VIEW, PERMISSION_DOCUMENT_INDEXING_VIEW])
    except PermissionDenied:
        # No global permission: restrict to nodes the user can access.
        queryset = AccessEntry.objects.filter_objects_by_access(PERMISSION_DOCUMENT_INDEXING_VIEW, request.user, queryset)
    for index_instance in queryset:
        # Render each node as a breadcrumb trail row.
        object_list.append(get_breadcrumbs(index_instance, single_link=True, include_count=True))
    return render_to_response('generic_list.html', {
        'title': _(u'indexes containing: %s') % document,
        'object_list': object_list,
        'hide_link': True,
        'object': document
    }, context_instance=RequestContext(request))
def spot_centroid(regions):
    """Extract the centroid of every region proposal.

    Args:
        regions (regionprops): List of region proposals (skimage.measure).

    Returns:
        list: Centroids of regionprops.
    """
    return list(map(lambda region: region.centroid, regions))
def FindChromeSrcFromFilename(filename):
    """Searches for the root of the Chromium checkout.

    Simply checks parent directories until it finds .gclient and src/.

    Args:
        filename: (String) Path to source file being edited.

    Returns:
        (String) Path of 'src/', or None if unable to find.
    """
    curdir = os.path.normpath(os.path.dirname(filename))
    while True:
        # A checkout root is a dir named src containing DEPS, with either a
        # sibling .gclient or its own .git.
        found = (os.path.basename(os.path.realpath(curdir)) == 'src'
                 and PathExists(curdir, 'DEPS')
                 and (PathExists(curdir, '..', '.gclient')
                      or PathExists(curdir, '.git')))
        if found:
            return curdir
        parent = os.path.normpath(os.path.join(curdir, '..'))
        if parent == curdir:
            # Reached the filesystem root without finding a checkout.
            return None
        curdir = parent
def _merge_uuids(
    before: "definitions.Configuration",
    after: "definitions.Configuration",
):
    """Copy matching UUIDs from *before* onto *after* for continuity."""
    after.data["_uuid"] = before.uuid
    # Index every alias of every old target so new targets can match by name.
    by_name = {name: target for target in before.targets for name in target.names}
    for target in after.targets:
        for name, candidate in by_name.items():
            if name in target.names:
                _merge_target_uuids(candidate, target)
                break
def add_parse_cmd(parser):
    """Register the Zeek script parsing subcommand on an argparse parser.

    Installs cmd_parse() as the parser's run_cmd default and adds the
    arguments the parse command understands.
    """
    parser.set_defaults(run_cmd=cmd_parse)
    parser.add_argument(
        '--concrete', '-c',
        action='store_true',
        help='report concrete syntax tree (CST) instead of AST')
    parser.add_argument(
        'script',
        metavar='FILE',
        nargs='?',
        help='Zeek script to parse. ' + FILE_HELP)
def one_election_set_reg(request, election):
    """
    Set whether registration for this election is open.

    Only public elections can be changed; private ones are left untouched.
    Always redirects to the pretty voters list afterwards.
    """
    if not election.private_p:
        # 'open_p' arrives as "0"/"1" in the query string.
        election.openreg = bool(int(request.GET['open_p']))
        election.save()
    redirect_url = settings.SECURE_URL_HOST + reverse(
        voters_list_pretty, args=[election.uuid])
    return HttpResponseRedirect(redirect_url)
def test_duplicate_subjects():
    """Verify that multiple triplets sharing a subject are all retrievable."""
    manifest = chrome_manifest("""
        foo bar abc
        foo bar def
        foo bam test
        oof rab cba
    """)
    by_subject = list(manifest.get_entries('foo'))
    assert len(by_subject) == 3
    by_subject_predicate = list(manifest.get_entries('foo', 'bar'))
    assert len(by_subject_predicate) == 2
    exact = list(manifest.get_entries('foo', 'bar', 'abc'))
    assert len(exact) == 1
def overrides(conf, var):
    """Merge the entries of named sub-dictionaries into ``conf`` in place.

    ``var`` may be a single key or a list of keys. For each key present in
    ``conf``, the key/value pairs of ``conf[key]`` are copied onto the top
    level of ``conf``, overriding any entries with the same name.

    :param conf: configuration dictionary to mutate
    :param var: key or list of keys naming sub-dictionaries of ``conf``
    :return: the mutated ``conf``
    """
    # Normalize the single-key case so one loop handles both shapes.
    keys = var if isinstance(var, list) else [var]
    for key in keys:
        if key in conf:
            for sub_key, sub_value in conf[key].items():
                conf[sub_key] = sub_value
    return conf
def annual_to_daily_rate(rate, trading_days_in_year=TRADING_DAYS_IN_YEAR):
    """
    Convert an annual rate of return into its per-day equivalent.

    Delegates the conversion to subdivide_rate.

    :param rate: the annual rate of return
    :param trading_days_in_year: optional, trading days in year (default = 252)
    :return: the daily rate
    """
    daily_rate = subdivide_rate(rate, trading_days_in_year)
    return daily_rate
def make_hashable(data):
    """Recursively convert *data* into a hashable equivalent.

    Dictionaries become tuples of ``(key, hashable_value)`` pairs sorted by
    item, so two equal dicts always produce the same result regardless of
    insertion order. Lists and tuples become tuples of hashable items.
    Anything else is returned unchanged.

    :param object data: the object to hash
    :return: a hashable object
    :rtype: object
    """
    if isinstance(data, dict):
        ordered_items = sorted(data.items())
        return tuple((key, make_hashable(value)) for key, value in ordered_items)
    if isinstance(data, (list, tuple)):
        return tuple(make_hashable(element) for element in data)
    return data
def reset_singularity_version_cache() -> None:
    """Clear cwltool's cached Singularity flavor and version (for testing)."""
    # The two cache slots are independent; order of reset does not matter.
    cwltool.singularity._SINGULARITY_FLAVOR = ""
    cwltool.singularity._SINGULARITY_VERSION = None
def prepare_model_parameters(
    parameters: Dict[str, FloatOrDistVar], data: DataFrame,
    beta_fun, splines, spline_power
) -> Tuple[Dict[str, FloatLike], Dict[str, NormalDistVar]]:
    """Prepares model input parameters and returns independent and dependent parameters

    Splits ``parameters`` into fitted (gvar) and fixed parameters, and shifts
    the simulation start backwards in time so it begins with only exposed
    people.

    :param parameters: mix of fixed values and GVar distributions
    :param data: observed time series; its index supplies the date range
    :param beta_fun: name of the contact-rate model (e.g. "flexible_beta")
    :param splines: spline knot locations for the flexible beta model
    :param spline_power: power of the spline basis
    :return: ``(xx, pp)`` — independent (fixed) and dependent (fitted)
        parameter dictionaries
    """
    # Set up fit parameters
    ## Dependent parameters which will be fitted (anything that is a GVar)
    pp = {key: val for key, val in parameters.items() if isinstance(val, GVar)}
    ## Independent model meta parameters (everything else)
    xx = {key: val for key, val in parameters.items() if key not in pp}
    # This part ensures that the simulation starts with only exposed persons
    ## E.g., we shift the simulation backwards such that exposed people start to
    ## become infected
    # NOTE(review): expon.ppf's second positional argument is `loc`, not
    # `scale` — confirm 1 / incubation_days.mean is intended as a location
    # shift rather than the rate/scale of the exponential.
    xx["offset"] = int(
        expon.ppf(0.99, 1 / pp["incubation_days"].mean)
    )  # Enough time for 99% of exposed to become infected
    # pp["logistic_x0"] += xx["offset"]
    xx['beta_fun'] = beta_fun
    xx['knots'] = splines
    xx['spline_power'] = spline_power
    ## Store the actual first day and the actual last day
    xx["day0"] = data.index.min()
    xx["day-1"] = data.index.max()
    ## And start earlier in time: prepend `offset` days before day0
    xx["dates"] = date_range(
        xx["day0"] - timedelta(xx["offset"]), freq="D", periods=xx["offset"]
    ).union(data.index)
    # initialize the spline parameters on the flexible beta: one gvar per
    # knot, all seeded from the pen_beta prior
    if xx['beta_fun'] == "flexible_beta":
        pp['beta_splines'] = gvar([pp['pen_beta'].mean for i in range(len(xx['knots']))],
                                  [pp['pen_beta'].sdev for i in range(len(xx['knots']))])
        # The logistic parameterization is unused in flexible-beta mode.
        pp.pop("pen_beta")
        pp.pop('logistic_k')
        pp.pop('logistic_x0')
        pp.pop('logistic_L')
    ## Thus, all compartment but exposed and susceptible are 0
    for key in ["infected", "recovered", "icu", "vent", "hospital"]:
        xx[f"initial_{key}"] = 0
    # Back out the initial exposed count from observed hospitalizations,
    # market share, and the hospitalization probability.
    pp["initial_exposed"] = (
        xx["n_hosp"] / xx["market_share"] / pp["hospital_probability"]
    )
    xx["initial_susceptible"] -= pp["initial_exposed"].mean
    return xx, pp
def paper_features_to_author_features(
    author_paper_index, paper_features):
  """Computes author features as the mean of their papers' features.

  Args:
    author_paper_index: sparse row matrix with NUM_AUTHORS rows; each row's
      `.indices` lists the papers belonging to that author.
    paper_features: array with NUM_PAPERS rows of per-paper features.

  Returns:
    Array of shape [NUM_AUTHORS, feature_dim] holding, per author, the mean
    feature vector over that author's papers.
  """
  assert paper_features.shape[0] == NUM_PAPERS
  assert author_paper_index.shape[0] == NUM_AUTHORS
  feature_dim = paper_features.shape[1]
  author_features = np.zeros(
      [NUM_AUTHORS, feature_dim], dtype=paper_features.dtype)
  for author_index in range(NUM_AUTHORS):
    papers = author_paper_index[author_index].indices
    # Accumulate in float32 regardless of the storage dtype.
    author_features[author_index] = paper_features[papers].mean(
        axis=0, dtype=np.float32)
    if author_index % 10000 == 0:
      logging.info("%d/%d", author_index, NUM_AUTHORS)
  return author_features
def set_chart_time_horizon(request) -> JsonResponse:
    """
    Set the x-axis (time horizon) of a chart.

    API Call:
        /set_chart_time_horizon?
            monitor_name=<monitor name>&
            value=<time horizon to set>

    :param request: HTTP request that expects a 'monitor_name' and 'value' argument. 'value' represents the new time horizon to be set. Valid time horizons are: 'day', 'week', 'month', 'year', or an integer representing a number of most recent hours to display.
    :return: The new value after being set or the old value if it was not set.
    """
    args = _parse_args(request, 'monitor_name', 'value')
    result = MonitorServiceManager().set_value(
        args.get('monitor_name'),
        'charting_time_horizon',
        args.get('value'))
    return JsonResponse(result, safe=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.