| content (string) | sha1 (string) | id (int64) |
|---|---|---|
import os
import logging
def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype="float32"):
"""Dataset loader with preprocessing."""
train_dir = os.path.join(root, "train")
train_transform, val_transform = get_imagenet_transforms(data_shape, dtype)
    logging.info("Loading image folder %s, this may take a while...", train_dir)
train_dataset = ImageFolderDataset(train_dir, transform=train_transform)
train_data = DataLoader(
train_dataset, batch_size, shuffle=True, last_batch="discard", num_workers=num_workers
)
val_dir = os.path.join(root, "val")
if not os.path.isdir(os.path.expanduser(os.path.join(root, "val", "n01440764"))):
user_warning = (
"Make sure validation images are stored in one subdir per category, a helper script is"
" available at https://git.io/vNQv1"
)
raise ValueError(user_warning)
    logging.info("Loading image folder %s, this may take a while...", val_dir)
val_dataset = ImageFolderDataset(val_dir, transform=val_transform)
val_data = DataLoader(val_dataset, batch_size, last_batch="keep", num_workers=num_workers)
return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)
|
6f7324023d21b840f2bb7903b546de01232d0950
| 3,647,900
|
from os import sep, walk
from os.path import getsize, isfile, join
def path_content_to_string(path):
"""Convert contents of a directory recursively into a string for easier comparison."""
lines = []
prefix_len = len(path + sep)
for root, dirs, files in walk(path):
for dir_ in dirs:
full_path = join(root, dir_)
relative_path = full_path[prefix_len:]
size = 0
type_ = "dir"
hash_ = "0"
line = "{},{},{},{}".format(relative_path, type_, size, hash_)
lines.append(line)
for filename in files:
full_path = join(root, filename)
relative_path = full_path[prefix_len:]
size = getsize(full_path)
type_ = "file" if isfile(full_path) else "dir"
hash_ = get_md5(full_path)
line = "{},{},{},{}".format(relative_path, type_, size, hash_)
lines.append(line)
lines = sorted(lines)
return "\n".join(lines)
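# The helper get_md5 is assumed above; a minimal sketch that hashes file contents
# with hashlib (the project's actual implementation may differ):
import hashlib

def get_md5(file_path):
    """Return the hex MD5 digest of a file's contents, read in chunks."""
    md5 = hashlib.md5()
    with open(file_path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()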
|
10d350eb866642f52e350b76f01de2b7e0ff6a5d
| 3,647,901
|
def get_evts(rslt, a_params):
"""Return start and end times of candidate replay events."""
# get PC firing rates
## PC spks
spks_pc = rslt.spks[:, :rslt.p['N_PC']]
## smoothed instantaneous firing rate avg'd over PCs
fr_pc = smooth(spks_pc.sum(axis=1) / (rslt.dt * rslt.p['N_PC']), a_params['SMOOTH_FR'])
# get start and end time idxs when PC FR is above threshold
starts, ends = get_segments(fr_pc >= a_params['EVT_DTCN_TH'])
# convert to time
starts = starts.astype(float) * rslt.dt
ends = ends.astype(float) * rslt.dt
# remove too-short gaps btwn events
if len(starts) > 0:
starts, ends = remove_short_gaps(starts, ends, a_params['MIN_GAP_DUR'])
# remove too-short events
if len(starts) > 0:
starts, ends = remove_short_evts(starts, ends, a_params['MIN_EVT_DUR'])
# remove all events that start before min start time
if len(starts):
mask = starts > a_params['MIN_START']
starts = starts[mask]
ends = ends[mask]
    # remove final event if it runs into the end of the simulation
if len(ends) and ends[-1] >= rslt.ts[-1]:
starts = starts[:-1]
ends = ends[:-1]
return starts, ends
|
c8c6867588d72f97dd687dbe17b7494bc534fa1e
| 3,647,902
|
from typing import List
from typing import Dict
def get_threatfeed_command(client: Client, threatfeed_id: int = None):
"""
Retrieves the current list of threatFeed objects already configured in the system
:param threatfeed_id: The id of the ThreatFeed object.
:param client: Vectra Client
"""
raw_response = client.http_request(url_suffix=f'threatFeeds/{threatfeed_id}' if threatfeed_id else 'threatFeeds')
count = demisto.get(raw_response, 'meta.count')
if count == 0:
return "Couldn't find any results", {}, raw_response
res = raw_response.get('threatFeeds') # type: ignore
feeds: List[Dict] = [res] if not isinstance(res, List) else sorted(res, key=lambda h: h.get('id')) # type: ignore
for feed in feeds:
feed.update(feed.get('defaults')) # type: ignore
headers = ['id', 'name', 'certainty', 'category', 'duration', 'indicatorType']
readable_output = tableToMarkdown(name='Rules table', t=feeds, headers=headers)
context = []
for feed in feeds:
context.append(createContext(
{
'ID': feed.get('id'),
'Name': feed.get('name'),
'Duration': feed.get('duration'),
'Category': feed.get('category'),
'Certainty': feed.get('certainty'),
'Data': feed.get('data'),
'IndicatorType': feed.get('indicatorType'),
}, removeNull=True)
)
outputs = {'Vectra.ThreatFeed(val.ID==obj.ID)': context}
return readable_output, outputs, raw_response
|
7f0b37a724720aea73170d3575ed5b08dec7ea85
| 3,647,903
|
def email_subscribe_pending_confirm(hexdomain):
"""Send a confirmation email for a user."""
domain = tools.parse_domain(hexdomain)
if domain is None:
flask.abort(400, 'Malformed domain or domain not represented in hexadecimal format.')
hide_noisy = bool(flask.request.form.get('hide_noisy'))
email_address = flask.request.form['email_address']
if email_address.strip() == '':
return flask.redirect('/email/subscribe/{}/0?hide_noisy={}'.format(
hexdomain,
hide_noisy
))
verify_code = tools.random_id()
verify_url = flask.request.url_root + 'email/verify/{}'.format(verify_code)
email_body = email_tools.render_email(
'confirm.html',
domain=domain,
verify_url=verify_url
)
repository.propose_subscription(
verify_code,
email_address,
domain,
hide_noisy
)
emailer.send(
email_address, 'Please verify your subscription', email_body
)
return flask.render_template('www/email/pending_verify.html', domain=domain)
|
9932554a3349e3cf1ecd958d15dd762f787f61c7
| 3,647,904
|
def getTrackIds(sp, username, playlist, offset=0):
"""
Returns the ids of the tracks contained in a playlist
:param sp:
A spotipy.Spotify object to be used for the request.
:param username:
        The username of the user whose playlists you want to retrieve.
:param playlist:
        The playlist (dict) from which the tracks are retrieved.
:param offset:
Do not worry about this parameter, it is used for recursion.
:returns:
A list containing all the ids of the tracks that are in the playlist.
"""
limit = 100
fields = "items(track(id)), total"
api_response = sp.user_playlist_tracks(username,
playlist["id"], fields, limit=limit, offset=offset)
track_ids = [x["track"]["id"] for x in api_response["items"]]
if api_response["total"] > limit + offset:
next_page = getTrackIds(sp, username, playlist, offset + limit)
for item in next_page:
track_ids.append(item)
return track_ids
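# Hypothetical usage sketch (client id/secret, username, and playlist name are
# placeholders, not real values); the playlist dict is looked up via user_playlists.
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials

sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(
    client_id="YOUR_CLIENT_ID", client_secret="YOUR_CLIENT_SECRET"))
playlists = sp.user_playlists("some_username")["items"]
favourites = next(p for p in playlists if p["name"] == "Favourites")
track_ids = getTrackIds(sp, "some_username", favourites)
print(len(track_ids), "tracks found")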
|
5b4e621022f49137b7fd4547bf5ab4efe92b4515
| 3,647,905
|
def children_of_head(element: Element):
"""
    get children elements of the head element
:param element:
:return:
"""
if element is None:
return []
    head_xpath = '//head'
    head_element = element.xpath(head_xpath)
    if head_element:
        head_element.__class__ = Element
        return descendants(head_element, True)
return []
|
90b47d1c0c3f04231ea5dade3f7e9288339eef71
| 3,647,906
|
def network(name, nodes):
"""nodes: [ NodeMeta, ... ]"""
return NetworkMeta(name=name, nodes=nodes)
|
5c0394ae2a31b83ac6889a4b973f51c1cdb1a0d9
| 3,647,907
|
def condensed_to_cosine(condensed_format):
"""Get mhd direction cosine for this condensed format axis"""
axis = Axis.from_condensed_format(condensed_format)
return permutation_to_cosine(axis.dim_order, axis.dim_flip)
|
25b3f0d63a84fa687bf4b238b53288fb8f64918b
| 3,647,908
|
def get_plants_for_species(item):
"""Get list of plants for a species."""
if item is None or not item or item['name'] is None:
return
@cached('species_list_{}.json'.format(item['name']),
directory='../../data/wikipedia')
def get():
def table(dom):
# We need to switch to table format - the wikipedia articles
# are inconsistent.
rows = dom.find('.mw-parser-output .wikitable tr')
if not rows:
return
headings = [h.text.strip() for h in rows[0]]
for row in rows[1:]:
row_data = {}
tds = row.findall('td')
if tds is None:
continue
for i, td in enumerate(tds):
try:
row_data[headings[i]] = td.text or None
except IndexError:
continue
data.append(row_data)
data = []
url = 'https://en.wikipedia.org{}'.format(item['link'])
_, dom = get_dom(url)
# Try to be specific, but broaden scope if none found.
if 'bamboo' in item['name']:
table(dom)
else:
links = dom.find('.mw-parser-output ul li a')
if not links:
links = dom.find('.mw-parser-output ol li a')
if not links:
links = dom.find('.mw-parser-output li a')
if links:
for link in links:
if link.text is None:
continue
# Reference links embedded within the lists.
if any([
# External link is invalid
link.get('href', '').startswith('http'),
# Anchors, invalid link
link.get('href', '').startswith('#'),
# Not real links/text
link.text.startswith('['),
link.text == '^',
link.text.startswith('\\'),
]):
continue
data.append(dict(name=link.text, link=link.get('href')))
else:
table(dom)
return data
return get()
|
ed9522fd97ac101a6040f0485d06b1b88a834060
| 3,647,909
|
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
"""
    Given a password, a stored hash, and a salt, this function verifies that the password matches the stored hash.
    Args:
        - ``password``: The password to check.
        - ``password_hash``: The stored hash to compare against.
        - ``salt``: The salt used when the stored hash was generated.
Returns:
- ``bool``
"""
candidate_hash = generate_password_hash(password, salt, N, r, p, buflen)
return safe_str_cmp(password_hash, candidate_hash)
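# The companion helpers generate_password_hash and safe_str_cmp are assumed above;
# a minimal sketch under the assumption that scrypt is the underlying KDF (the real
# helpers may use a dedicated scrypt library instead of hashlib):
import hashlib
import hmac

def generate_password_hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
    """Derive an scrypt hash (bytes) for the given password and salt."""
    salt_bytes = salt.encode() if isinstance(salt, str) else salt
    return hashlib.scrypt(password.encode(), salt=salt_bytes,
                          n=N, r=r, p=p, dklen=buflen, maxmem=2 ** 26)

def safe_str_cmp(a, b):
    """Constant-time comparison to avoid timing side channels."""
    return hmac.compare_digest(a, b)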
|
1e3a75235b11c45746cabf03dcf05b88e610c02f
| 3,647,910
|
import numpy as np
from numpy import linalg
def _dB_calc(J_field, x, y, z):
    """ Calculates the magnetic field at a point due to a current.
Args:
J_field (VectorField): Vector field describing the current
that the magnetic field is generated from.
x: The x coordinate of the point in the magnetic field.
y: The y coordinate of the point in the magnetic field.
z: The z coordinate of the point in the magnetic field.
Returns:
        tuple (u,v,w): A tuple with the components of the magnetic field
        at the point (x,y,z).
"""
B = (0, 0, 0)
for coordinates, mag in J_field.vec_field.items():
biot_savart_constant = 10 ** (-7)
distance = (x - coordinates[0], y - coordinates[1],
z - coordinates[2])
distanceMag = linalg.norm(distance)
distanceUnit = (distance[0] / distanceMag,
distance[1] / distanceMag,
distance[2] / distanceMag)
        # Biot-Savart: dB is proportional to (J dV x r_hat) / r^2, so the current
        # vector is crossed with the unit displacement, not the coordinates
        crossProduct = np.cross(mag, distanceUnit)
dB = (biot_savart_constant*crossProduct) / (distanceMag**2)
B = np.add(B, dB)
return B
|
0a66c59b4ece95c4f683a842b63e80d2a13a697a
| 3,647,911
|
import math
import matplotlib.pyplot as plt
def create_mpl_subplot(images, color=True):
"""create mpl subplot with all images in list.
    even when color is set to False, RGB images will still be shown in color,
    since the grayscale colormap only applies to single-channel images.
:param images: the list of images to plot
:type images: cv2 image
:param color: whether to plot in color or grayscale, defaults to True
:type color: boolean
:return: the complete plot
:rtype: mpl plot
"""
if not color:
plt.set_cmap('gray')
n = math.ceil(math.sqrt(len(images)))
i = 1
for img in images:
plt.subplot(n, n, i)
plt.imshow(img)
plt.xticks([]), plt.yticks([])
i += 1
return plt
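# Hypothetical usage: arrange a few random grayscale images in a square grid.
import numpy as np

example_images = [np.random.rand(32, 32) for _ in range(4)]
plot = create_mpl_subplot(example_images, color=False)
plot.show()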
|
259c352438c19c4639d78ef67aeb9af0271d6ade
| 3,647,912
|
def filter_subclasses(superclass, iter):
    """Returns an iterable of class objects which are subclasses of `superclass`, filtered from a source iterable.
    :param superclass: The superclass to filter against
    :param iter: The source iterable of classes to filter
    :return: An iterable of classes which are subclasses of `superclass`
"""
return filter(lambda klass: issubclass(klass, superclass), iter)
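# Usage sketch: keep only the Exception subclasses from a mixed list of classes.
classes = [ValueError, dict, KeyError, list]
assert list(filter_subclasses(Exception, classes)) == [ValueError, KeyError]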
|
2a891835379dfa3661d781d0c1860b650df013f0
| 3,647,913
|
import psycopg2
from psycopg2 import sql
def get_database_table_column_name(_conn: psycopg2.extensions.connection,
                                   _table: str) -> list:
"""
Taken from:
https://kb.objectrocket.com/postgresql/get-the-column-names-from-a-postgresql-table-with-the-psycopg2-python-adapter-756 # noqa
defines a function that gets the column names from a PostgreSQL table.
"""
# declare an empty list for the column names
columns = []
# declare cursor objects from the connection
col_cursor = _conn.cursor()
# concatenate string for query to get column names
# SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'some_table'; # noqa
col_names_str = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE "
col_names_str += "table_name = '{}';".format(_table)
# print the SQL string
# print("\ncol_names_str:", col_names_str)
try:
sql_object = sql.SQL(
# pass SQL statement to sql.SQL() method
col_names_str
).format(
# pass the identifier to the Identifier() method
sql.Identifier(_table)
)
# execute the SQL string to get list with col names in a tuple
col_cursor.execute(sql_object)
        # get the tuple elements from the list
col_names = (col_cursor.fetchall())
# print list of tuples with column names
# print("\ncol_names:", col_names)
# iterate list of tuples and grab first element
for tup in col_names:
# append the col name string to the list
columns += [tup[0]]
# close the cursor object to prevent memory leaks
col_cursor.close()
except Exception as err:
print("get_columns_names ERROR:", err)
# return the list of column names
return columns
|
9486e75792e2b7a63db589727a621fd648213487
| 3,647,914
|
import asyncio
from functools import wraps
def retry(*exceptions, retries=3, cooldown=5, verbose=True):
"""
Decorate an async function to execute it a few times before giving up.
Hopes that problem is resolved by another side shortly.
Args:
exceptions (Tuple[Exception]) : The exceptions expected during function execution
retries (int): Number of retries of function execution.
cooldown (int): Seconds to wait before retry.
verbose (bool): Specifies if we should log about not successful attempts.
"""
def wrap(func):
@wraps(func)
async def inner(*args, **kwargs):
retries_count = 0
while True:
try:
result = await func(*args, **kwargs)
except exceptions as err:
retries_count += 1
if retries_count > retries:
raise ValueError(
func.__qualname__, args, kwargs) from err
if cooldown:
await asyncio.sleep(cooldown)
else:
return result
return inner
return wrap
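# Usage sketch (the flaky_fetch coroutine is hypothetical): retry a coroutine that
# intermittently raises ConnectionError, waiting one second between attempts.
@retry(ConnectionError, retries=2, cooldown=1)
async def flaky_fetch():
    ...  # e.g. an HTTP call that sometimes fails

# asyncio.run(flaky_fetch())  # raises ValueError once the retries are exhausted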
|
fea7b786815e2aabedf37e8011485eda3c989fe7
| 3,647,915
|
def process_student(filename_or_URL):
"""calls mark_student on one student HTML file
Creates a BeautifulSoup object and calls mark_student.
If the filename_or_URL starts with "https://", attempt to get Firefox cookies
before reading from the URL.
Parameters:
----------
filename_or_URL: either a local filename, or a URL
Returns:
--------
return-value of mark_student
"""
if filename_or_URL[0:8] == "https://":
cookiejar=get_cookie_jar()
soup=soup_from_URL(filename_or_URL, cookiejar)
else:
soup=soup_from_file(filename_or_URL)
#for q in list_questions(soup):
# print(q, "mark=",mark_question(q))
return mark_student(soup)
|
185d396f4005954fcc26b0c8ab3c9711b511c611
| 3,647,916
|
def find_CH2OH_in_chain(atoms, cycles):
""" this function finds terminal CH2OH that C is not in a cycle
H
'
O(6)
' H
' /
R---C(5)---H
"""
end_carbon_indices = []
end_carbon_indices_atom_list = {}
    for atom_index in range(len(atoms)):
        name = atoms[atom_index].get_atom_name()
        if name != 'C' or is_in_a_cycle(cycles, atom_index):
            continue
        nghs_c5 = atoms[atom_index].get_ngh()
        nums_c5, nghs_list_c5 = parse_atom_nghs(nghs_c5, ['H', 'C', 'O'])
        if nums_c5['H'] == 2 and nums_c5['O'] == 1:
            o6_index = nghs_list_c5['O'][0]
            nghs_o6 = atoms[o6_index].get_ngh()
            nums_o6, nghs_list_o6 = parse_atom_nghs(nghs_o6, ['H', 'C', 'O'])
            if len(nghs_o6) == 2 and nums_o6['H'] == 1 and nums_o6['C'] == 1:
                end_carbon_indices.append(atom_index)
                end_carbon_indices_atom_list[atom_index] = []
                for ngh in nghs_c5:
                    ngh_index = ngh[0]
                    if ngh_index not in end_carbon_indices_atom_list[atom_index]:
                        end_carbon_indices_atom_list[atom_index].append(ngh_index)
                for ngh in nghs_o6:
                    ngh_index = ngh[0]
                    if ngh_index not in end_carbon_indices_atom_list[atom_index]:
                        end_carbon_indices_atom_list[atom_index].append(ngh_index)
    return end_carbon_indices, end_carbon_indices_atom_list
|
55dbb4767c905fd90f5085b4fcea08e80bf43902
| 3,647,917
|
import os
def get_gradient_descent_query(COO=True, parameter=None):
"""
Generates the query for solving the logistic regression problem
    :param COO: boolean indicating if the data are in the COO format
:param parameter: dictionary containing number of iterations, features, regularization parameter and step width
:return:
"""
iterations = parameter.get('iterations', 10)
features = parameter.get('features', 10)
regularization = parameter.get('regularization', 2)
step_width = parameter.get('step_width', 0.001)
if COO:
with open(os.path.join('queries', f'demo_gradient_descent_COO.sql')) as f:
query = f.read().format(
iterations=iterations,
regularization=regularization,
step_width=step_width
)
else:
# create format strings
n_features = features
weights = ",".join([f"w{i + 1}" for i in range(n_features)])
features = ",".join([f"f{i + 1}" for i in range(n_features)])
floats = ",".join(["0.0::float"] * n_features)
features_times_weight = "+".join([f"f{i + 1}*w{i + 1}" for i in range(n_features)])
temp_with_intercept = ",".join([f"t{i + 1}" for i in range(n_features + 1)])
# may have to change the regularization parameter
sum_feature_times_val = ",".join([f"{regularization}*SUM(f{i + 1}*val)" for i in range(n_features)])
g_with_intercept = ",".join([f"g{i + 1}" for i in range(n_features + 1)])
# may have to change the step size
weight_minus_temp_with_intercept = ",".join(
[f"w{i + 1}-t{i + 1}" for i in range(n_features)]) + f",intercept-t{n_features + 1}"
weight_times_reg_with_intercept = ",".join([f"w{i + 1}-{step_width}*g{i + 1}" for i in
range(n_features)]) + f",intercept-{step_width}*g{n_features + 1}"
weight_comma_text = "||".join([f"w{i + 1}::text||','" for i in range(n_features)])
# load the file and replace everything specific for the model
with open(os.path.join('queries', f'demo_gradient_descent_db-friendly.sql')) as f:
query = f.read().format(
iterations=iterations,
weights=weights,
features=features,
features_times_weight=features_times_weight,
temp_with_intercept=temp_with_intercept,
floats=floats,
sum_feature_times_val=sum_feature_times_val,
g_with_intercept=g_with_intercept,
weight_minus_temp_with_intercept=weight_minus_temp_with_intercept,
weight_times_reg_with_intercept=weight_times_reg_with_intercept,
weight_comma_text=weight_comma_text,
step_width=step_width,
regularization=regularization
)
return query
|
5326100e05d5e8cc83359af592dc0f8c36c1bbb5
| 3,647,918
|
def keep_point(p, frame):
"""
p: TrackedPoint instance
frame: image (numpy array)
"""
if not p.in_bounds():
return False
if p.coasted_too_long():
return False
if p.coasted_too_far():
return False
return True
|
7f51b9f15ac8befe07b463875b9245194aebbef0
| 3,647,919
|
from typing import Dict
from typing import List
def sqrt(node: NodeWrapper,
params: Dict[str, np.ndarray],
xmap: Dict[str, XLayer]) -> List[XLayer]:
"""ONNX Sqrt to XLayer Sqrt conversion function"""
logger.info("ONNX Sqrt -> XLayer Sqrt")
assert len(node.get_outputs()) == 1
name = node.get_outputs()[0]
bottoms = node.get_inputs()
iX = xmap[bottoms[0]] # NCHW
X = px.ops.sqrt(
op_name=px.stringify(name),
in_xlayers=[iX],
onnx_id=name)
return [X]
|
711dfe71eaf337c75acd07bf3f09ca8a7c090fa4
| 3,647,920
|
def get_testcase_desc(suite, testcase_name):
"""
Return the description of the testcase with the given name of the
given testsuite.
    Remove trailing line returns if applicable; they look nasty
in the reports (text and otherwise)
"""
desc = getattr(suite, testcase_name).__doc__
return strings.format_description(desc.rstrip()) if desc else ""
|
1a97e02047d42f76328cc55debe8006bcfb80a43
| 3,647,921
|
def slave_freq_one_pc(args):
"""Wrapper to be able to use Pool"""
return args, freq_one_pc(*args)
|
0627685181cbec45564066ea9e29601fc3717257
| 3,647,922
|
def base10_to_base26_alph(base10_no):
"""Convert base-10 integer to base-26 alphabetic system.
This function provides a utility to write pdb/psf files such that it can
add many more than 9999 atoms and 999 residues.
Parameters
----------
base10_no: int
The integer to convert to base-26 alphabetic system
Returns
-------
str
The converted base-26 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return _to_base(base10_no, base=26)
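# The helper mbuild.conversion._to_base is assumed above; a minimal sketch of one
# plausible base-26 alphabetic conversion (digit 0 maps to 'A'; the real helper's
# digit convention may differ):
def _to_base_sketch(number, base=26):
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if number == 0:
        return alphabet[0]
    digits = []
    while number > 0:
        number, remainder = divmod(number, base)
        digits.append(alphabet[remainder])
    return "".join(reversed(digits))

# _to_base_sketch(27) == 'BB' under this convention (1*26 + 1).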
|
67aed6602c6813702416310518c892f02fdb58ef
| 3,647,923
|
import pickle
import numpy as np
def train(model, X, y, name: str):
"""
train a model on the given training set and optionally save it to disk
:param model: the model to train
:param X: the sample images, list of numpy arrays (greyscale images)
:param y: the target labels, list of strings (kanji)
:param name: name of the model used to save it on disk, or None if it is not to be saved
:return: the trained model
"""
# reshape X to 2d
X = np.asarray(X)
X = X.reshape((X.shape[0], -1))
print("fitting on {} samples".format(len(y)))
# train the model
print("begin fitting")
model.fit(X, y)
print("done fitting")
# optionally save trained model
if name is not None:
with open("trained_{}.pkl".format(name), 'wb') as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
return model
|
9b5e4e03b25d7692a233370dd2db1fd2435365e0
| 3,647,924
|
import sys
def create_heterodyne_parser():
"""
Create the argument parser.
"""
description = """\
A script to heterodyne raw gravitational-wave strain data based on the \
expected evolution of the gravitational-wave signal from a set of pulsars."""
parser = BilbyArgParser(
prog=sys.argv[0],
description=description,
ignore_unknown_config_file_keys=False,
allow_abbrev=False,
)
parser.add("--config", type=str, is_config_file=True, help="Configuration ini file")
parser.add(
"--version",
action="version",
version="%(prog)s {version}".format(version=cwinpy.__version__),
)
parser.add(
"--periodic-restart-time",
default=14400,
type=int,
help=(
"Time after which the job will be self-evicted with code 130. "
"After this, condor will restart the job. Default is 14400s. "
"This is used to decrease the chance of HTCondor hard evictions."
),
)
parser.add(
"--overwrite",
action="store_true",
default=False,
        help=(
            "Set this flag to make sure any previously generated heterodyned "
            'files are overwritten. By default the analysis will "resume" '
            "from where it left off (by checking whether output files, as set "
            'using "--output" and "--label" arguments, already exist), such '
            "as after forced Condor eviction for checkpointing purposes. "
            "Therefore, this flag needs to be given explicitly (the "
            "default is False) if you do not want to use resume and instead "
            "want to overwrite existing files."
),
)
dataparser = parser.add_argument_group("Data inputs")
dataparser.add(
"--starttime",
required=True,
type=int,
help=("The start time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--endtime",
required=True,
type=int,
help=("The end time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--stride",
default=3600,
type=int,
        help=(
            "The number of seconds to stride through the data (i.e., this "
            "number of seconds of data will be read in one go). Defaults "
"to 3600."
),
)
dataparser.add(
"--detector",
required=True,
type=str,
help=("The name of the detectors for which the data is to be heterodyned."),
)
dataparser.add(
"--frametype",
type=str,
help=(
'The "frame type" name of the data to be heterodyned. If this '
"is not given the correct data set will be attempted to be found "
"using the channel name."
),
)
dataparser.add(
"--channel",
required=True,
type=str,
help=(
'The "channel" within the gravitational-wave data file(s) '
'(either a GW frame ".gwf", or HDF5 file) containing the strain '
"data to be heterodyned. The channel name should contain the "
"detector name prefix as the first two characters followed by a "
'colon, e.g., "L1:GWOSC-4KHZ_R1_STRAIN"'
),
)
dataparser.add(
"--host",
type=str,
help=(
"The server name for finding the gravitational-wave data files. "
'Use "datafind.ligo.org:443" for open data available via CVMFS. '
"To use open data available from the GWOSC use "
'"https://www.gw-openscience.org".'
),
)
dataparser.add(
"--outputframecache",
type=str,
help=(
"If given this should give a file path to which a list of "
"gravitational-wave data file paths, as found by the code, will "
"be written. If not given then the file list will not be output."
),
)
dataparser.add(
"--appendframecache",
action="store_true",
default=False,
help=(
"If writing out the frame cache to a file, set this to True to "
"append to the file rather than overwriting. Default is False."
),
)
dataparser.add(
"--framecache",
help=(
"Provide a pregenerated cache of gravitational-wave files, either "
"as a single file, or a list of files. Alternatively, you can "
"supply a directory containing the files (which will be "
"searched recursively for gwf and then hdf5 files), which should "
'be used in conjunction with the "frametype" argument. If giving '
"a list, this should be in the form of a Python list, surrounded "
"by quotation marks, e.g., \"['file1.lcf','file2.lcf']\"."
),
)
dataparser.add(
"--heterodyneddata",
        help=(
            "A string, or dictionary of strings, containing the full file "
            "path, or directory path, pointing to the location of "
"pre-heterodyned data. For a single pulsar a file path can be "
"given. For multiple pulsars a directory containing heterodyned "
"files (in HDF5 or txt format) can be given provided that within "
"it the file names contain the pulsar names as supplied in the "
'file input with "--pulsarfiles". Alternatively, a dictionary '
"can be supplied, keyed on the pulsar name, containing a single "
"file path or a directory path as above. If supplying a "
"directory, it can contain multiple heterodyned files for a each "
"pulsar and all will be used. If giving a dictionary it should be "
"surrounded by quotation marks."
),
)
segmentparser = parser.add_argument_group("Analysis segment inputs")
segmentparser.add(
"--segmentlist",
help=(
"Provide a list of data segment start and end times, as "
"list/tuple pairs in the list, or an ASCII text file containing "
"the segment start and end times in two columns. If a list, this "
"should be in the form of a Python list, surrounded by quotation "
'marks, e.g., "[(900000000,900086400),(900100000,900186400)]".'
),
)
segmentparser.add(
"--includeflags",
help=(
"If not providing a segment list then give a string, or list of "
"strings, giving the data DQ flags that will be used to generate "
"a segment list. Lists should be surrounded by quotation marks, "
"e.g., \"['L1:DMT-ANALYSIS_READY:1']\"."
),
)
segmentparser.add(
"--excludeflags",
        help=(
            "A string, or list of strings, giving the data DQ flags to "
            "exclude when generating a segment list. Lists should be surrounded by "
"quotation marks."
),
)
segmentparser.add(
"--outputsegmentlist",
type=str,
help=(
"If generating a segment list it will be output to the file "
"specified by this argument."
),
)
segmentparser.add(
"--appendsegmentlist",
action="store_true",
default=False,
help=(
"If generating a segment list set this to True to append to the "
'file specified by "--outputsegmentlist" rather than '
"overwriting. Default is False."
),
)
segmentparser.add("--segmentserver", type=str, help=("The segment database URL."))
pulsarparser = parser.add_argument_group("Pulsar inputs")
pulsarparser.add(
"--pulsarfiles",
action="append",
help=(
"This specifies the pulsars for which to heterodyne the data. It "
"can be either i) a string giving the path to an individual "
"pulsar Tempo(2)-style parameter file, ii) a string giving the "
"path to a directory containing multiple Tempo(2)-style parameter "
"files (the path will be recursively searched for any file with "
            'the extension ".par"), iii) a list of paths to individual '
            "pulsar parameter files, iv) a dictionary containing paths to "
            "individual pulsar parameter files keyed to their names. If "
            "instead, pulsar names are given rather than parameter files it "
            "will attempt to extract an ephemeris for those pulsars from the "
            "ATNF pulsar catalogue. If such ephemerides are available then "
            "they will be used (notification will be given when this is "
            "the case). If providing a list or dictionary it should be "
"surrounded by quotation marks."
),
)
pulsarparser.add(
"--pulsars",
action="append",
help=(
"You can analyse only particular pulsars from those specified by "
'parameter files found through the "--pulsarfiles" argument by '
"passing a string, or list of strings, with particular pulsars "
"names to use."
),
)
outputparser = parser.add_argument_group("Data output inputs")
outputparser.add(
"--output",
help=(
"The base directory into which the heterodyned results will be "
"output. To specify explicit directory paths for individual "
"pulsars this can be a dictionary of directory paths keyed to the "
'pulsar name (in which case the "--label" argument will be used '
"to set the file name), or full file paths, which will be used in "
            'place of the "--label" argument. If not given then the current '
"working directory will be used."
),
)
outputparser.add(
"--label",
help=(
"The output format for the heterodyned data files. These can be "
'format strings containing the keywords "psr" for the pulsar '
'name, "det" for the detector, "freqfactor" for the rotation '
'frequency scale factor used, "gpsstart" for the GPS start '
'time, and "gpsend" for the GPS end time. The extension should '
'be given as ".hdf", ".h5", or ".hdf5". E.g., the default '
'is "heterodyne_{psr}_{det}_{freqfactor}_{gpsstart}-{gpsend}.hdf".'
),
)
heterodyneparser = parser.add_argument_group("Heterodyne inputs")
heterodyneparser.add(
"--filterknee",
type=float,
        help=(
            "The knee frequency (Hz) of the low-pass filter applied after "
            "heterodyning the data. This should only be given when "
            "heterodyning raw strain data and not if re-heterodyning processed "
"data. Default is 0.5 Hz."
),
)
heterodyneparser.add(
"--resamplerate",
type=float,
required=True,
help=(
"The rate in Hz at which to resample the data (via averaging) "
"after application of the heterodyne (and filter if applied)."
),
)
heterodyneparser.add(
"--freqfactor",
type=float,
        help=(
            "The factor applied to the pulsar's rotational parameters when "
"defining the gravitational-wave phase evolution. For example, "
"the default value of 2 multiplies the phase evolution by 2 under "
"the assumption of a signal emitted from the l=m=2 quadrupole "
"mode of a rigidly rotating triaxial neutron star."
),
)
heterodyneparser.add(
"--crop",
type=int,
help=(
"The number of seconds to crop from the start and end of data "
"segments to remove filter impulse effects and issues prior to "
"lock-loss. Default is 60 seconds."
),
)
heterodyneparser.add(
"--includessb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal due to "
"Solar System motion and relativistic effects (e.g., Roemer, "
"Einstein, and Shapiro delay) during the heterodyne."
),
)
heterodyneparser.add(
"--includebsb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal "
"due to binary system motion and relativistic effects during the "
'heterodyne. To use this "--includessb" must also be set.'
),
)
heterodyneparser.add(
"--includeglitch",
action="store_true",
default=False,
help=(
"Set this flag to include removing the effects of the phase "
"evolution of any modelled pulsar glitches during the heterodyne."
),
)
heterodyneparser.add(
"--includefitwaves",
action="store_true",
default=False,
help=(
"Set this to True to include removing the phase evolution of a "
"series of sinusoids designed to model low-frequency timing noise "
"in the pulsar signal during the heterodyne."
),
)
heterodyneparser.add(
"--usetempo2",
action="store_true",
default=False,
help=(
"Set this to True to use Tempo2 (via libstempo) to calculate the "
"signal phase evolution. For this to be used v2.4.2 or greater of "
"libstempo must be installed. When using Tempo2 the "
'"--earthephemeris", "--sunephemeris" and "--timeephemeris" '
"arguments do not need to be supplied. This can only be used when "
"running the full heterodyne in one stage, but not for "
're-heterodyning previous data, as such all the "--include..." '
"arguments will be assumed to be True."
),
)
ephemerisparser = parser.add_argument_group("Solar system ephemeris inputs")
ephemerisparser.add(
"--earthephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Earth. The dictionary must be supplied within quotation marks, "
"e.g., \"{'DE436':'earth_DE436.txt'}\". If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
ephemerisparser.add(
"--sunephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Sun. If a pulsar requires a specific ephemeris that is not "
"provided in this dictionary, then the code will automatically "
"attempt to find or download the required file if available."
),
)
ephemerisparser.add(
"--timeephemeris",
help=(
"A dictionary, keyed to time system name, which can be either "
'"TCB" or "TDB", pointing to the location of a file containing '
"that ephemeris for that time system. If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
cfparser = parser.add_argument_group("Configuration inputs")
cfparser.add(
"--cwinpy-heterodyne-dag-config-file",
        help=(
            "A path to the cwinpy_heterodyne_dag configuration file can be "
            "supplied if it has been used to set up the heterodyne job."
),
)
return parser
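# Hypothetical usage sketch (the argument values below are placeholders, not a working
# configuration): build the parser and parse only the required arguments.
parser = create_heterodyne_parser()
args = parser.parse_args([
    "--starttime", "1000000000",
    "--endtime", "1000086400",
    "--detector", "H1",
    "--channel", "H1:GWOSC-4KHZ_R1_STRAIN",
    "--resamplerate", "0.0166",
])
print(args.detector, args.resamplerate)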
|
8928c52126a157338a9fa0d67f12f20d0facf05f
| 3,647,925
|
def run_phage_boost(genecalls, model_file, verbose):
"""
Run phage boost
:param model_file: The model file that is probably something like model_delta_std_hacked.pickled.silent.gz
:param genecalls: The pandas data frame of gene calls
:param verbose: more output
:return:
"""
# rolling params
period = 20
win_type = 'parzen'
min_periods = 1
# region finding params
threshold = 0.9
length = 10
gaps = 5
neighbouring = 0
alpha = 0.001
# calculate features from gene calls
if verbose:
message("Calculating features", "GREEN")
df = calculate_features(genecalls)
# load model
model, feats, feats_, limit = read_model_from_file(model_file)
# transform data
df = get_predictions.get_deltas(df[feats_])
if verbose:
message("Transforming gene predictions to regions", "GREEN")
# transform single gene predictions to regions
newgenecalls, nphages, res = predict(model, genecalls, df,
feats, period, win_type,
min_periods, limit, threshold,
length, gaps, neighbouring, alpha)
return res
|
28592977483d092cf67e7eb7bbd98b911044084b
| 3,647,926
|
from datetime import datetime, timedelta
def get_wishlist_confirmation_time():
"""Return whether user can confirm his wishlist or not
No request params.
"""
try:
confirmation_time = g.user.get_wishlist_confirmation_time()
can_confirm = datetime.now() - confirmation_time > timedelta( days = 1 ) if confirmation_time is not None else True
return data_response( { 'can_confirm' : can_confirm } )
except AuthorizationError:
        return error_response( 'Neuspješno dohvaćanje vremena zadnjeg potvrđivanja: Nedozvoljena mogućnost.', 403 )  # "Failed to fetch the time of the last confirmation: action not permitted."
# except:
# return error_response( 'Neuspješno dohvaćanje vremena zadnjeg potvrđivanja.' )
|
89c2fbe9a3801805194dbf41274ba348a87954b1
| 3,647,927
|
def get_bprop_npu_clear_float_status(self):
"""Grad definition for `NPUClearFloatStatus` operation."""
def bprop(x, out, dout):
return (zeros_like(x),)
return bprop
|
8e0733a9d6294e507bb99f3536cf1898137a0f3b
| 3,647,928
|
import pathlib
def path_to_filename(path, with_suffix=True):
"""Get filename from path.
Parameters
==========
path : str
Path to retrieve file name from e.g. '/path/to/image.png'.
with_suffix : bool
Whether to include the suffix of file path in file name.
Returns
=======
str
The file name of the path e.g. 'image.png'
or 'image' if `with_suffix` is false.
"""
p = pathlib.Path(path)
if with_suffix:
return str(p.name)
else:
return str(p.with_suffix("").name)
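# Usage sketch:
assert path_to_filename("/path/to/image.png") == "image.png"
assert path_to_filename("/path/to/image.png", with_suffix=False) == "image"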
|
45ecfb6e263e65de7165a69eda99bc8de2a157f4
| 3,647,929
|
def encode3(Married):
"""
    This function encodes marital status ('Yes'/'No') to either 1 or 0.
    """
    return 1 if Married == 'Yes' else 0
|
3be5ca3b773e5ded6fe8ec834bc0d99af68bf9e6
| 3,647,930
|
def pool_init_price(token0, token1, tick_upper, tick_lower, liquidity_delta,
token0_decimals, token1_decimals):
"""
    Infer the pool's initial price from the token amounts deposited when the
    position was first minted.
    :param token0: Amount of token0 deposited.
    :param token1: Amount of token1 deposited.
    :param tick_upper: Upper tick of the position.
    :param tick_lower: Lower tick of the position.
    :param liquidity_delta: Liquidity minted. Can get from etherscan.io using the txn hash
        (check the logs).
    :param token0_decimals: Decimals of token0.
    :param token1_decimals: Decimals of token1.
    :return: The pool's initial price.
"""
if (token0 == 0) or (token1 == 0):
raise ValueError('Tick range does not span the initial price.')
sqrt_price_lower = tick_to_sqrt_price(tick_lower)
sqrt_price_upper = tick_to_sqrt_price(tick_upper)
# adjust tokens if different decimal conventions are used
token0_multiplier = 10.0 ** max(token1_decimals - token0_decimals, 0)
token1_multiplier = 10.0 ** max(token0_decimals - token1_decimals, 0)
token0 = token0 / token0_multiplier
token1 = token1 / token1_multiplier
# formula 6.29
sqrt_price = token1 / liquidity_delta + sqrt_price_lower
# formula 6.30
calc_token0 = liquidity_delta * (1 / sqrt_price - 1 / sqrt_price_upper)
# verify that the calculated price satisfies formula 6.30
assert np.isclose(token0, calc_token0, atol=1e-12, rtol=1e-8), (
f'Calculated token0 {calc_token0:,.4f} does not match input '
f'token0 {token0:,.4f}.'
)
return sqrt_price ** 2
|
ababdf4d569a8856a196dfd0a3fa83fbd3ab8e52
| 3,647,931
|
def rle_encoding(img, mask_val=1):
"""
Turns our masks into RLE encoding to easily store them
and feed them into models later on
https://en.wikipedia.org/wiki/Run-length_encoding
Args:
img (np.array): Segmentation array
mask_val (int): Which value to use to create the RLE
Returns:
RLE string
"""
dots = np.where(img.T.flatten() == mask_val)[0]
run_lengths = []
prev = -2
for b in dots:
        if b > prev + 1:
            run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return ' '.join([str(x) for x in run_lengths])
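# Usage sketch: pixels are flattened column-major (img.T.flatten()) and the RLE is
# emitted as 1-based "start length" pairs.
import numpy as np

mask = np.array([[0, 1, 1],
                 [0, 0, 1]])
assert rle_encoding(mask) == '3 1 5 2'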
|
8639094ea57138212a73b179eed593e248363314
| 3,647,932
|
import asyncio
def alt(*ops, priority=False, default=_Undefined):
"""
alt(*ops, priority=False, default=Undefined)
Returns an awaitable representing the first and only channel operation to finish.
Accepts a variable number of operations that either get from or put to a
channel and commits only one of them. If no `default` is provided, then
only the first op to finish will be committed. If `default` is provided and
none of the `ops` finish immediately, then no operation will be committed
and `default` will instead be used to complete the returned awaitable.
Args:
ops: Operations that either get from or put to a channel.
A get operation is represented as simply a channel to get from.
A put operation is represented as an iterable of the form
``[channel, val]``, where `val` is an item to put onto `channel`.
priority: An optional bool. If True, operations will be tried in order.
If False, operations will be tried in random order.
default: An optional value to use in case no operation finishes
immediately.
Returns:
An awaitable that evaluates to a tuple of the form ``(val, ch)``.
If `default` is not provided, then `val` will be what the first
successful operation returned and `ch` will be the channel used in that
operation. If `default` is provided and none of the operations complete
immediately, then the awaitable will evaluate to
``(default, 'default')``.
Raises:
ValueError: If `ops` is empty or contains both a get and put operation
to the same channel.
RuntimeError: If the calling thread has no running event loop.
See Also:
:func:`b_alt`
"""
flag = create_flag()
future = FlagFuture(flag)
ret = _alts(flag, future_deliver_fn(future), ops, priority, default)
if ret is not None:
asyncio.Future.set_result(future, ret)
return future
|
e26660938b760e9f3e2b43375c26ee1a2e946056
| 3,647,933
|
def make_class_dictable(
cls,
exclude=constants.default_exclude,
exclude_underscore=constants.default_exclude_underscore,
fromdict_allow_pk=constants.default_fromdict_allow_pk,
include=None,
asdict_include=None,
fromdict_include=None,
):
"""Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exists.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class
"""
setattr(cls, "dictalchemy_exclude", exclude)
setattr(cls, "dictalchemy_exclude_underscore", exclude_underscore)
setattr(cls, "dictalchemy_fromdict_allow_pk", fromdict_allow_pk)
setattr(cls, "asdict", asdict)
setattr(cls, "fromdict", fromdict)
setattr(cls, "__iter__", iter)
setattr(cls, "dictalchemy_include", include)
setattr(cls, "dictalchemy_asdict_include", asdict_include)
setattr(cls, "dictalchemy_fromdict_include", fromdict_include)
return cls
|
87a0ed0b0baa1449396921c3651c9d2ef4549f35
| 3,647,934
|
def async_request_config(
hass,
name,
callback=None,
description=None,
description_image=None,
submit_caption=None,
fields=None,
link_name=None,
link_url=None,
entity_picture=None,
):
"""Create a new request for configuration.
    Will return an ID to be used for subsequent calls.
"""
if link_name is not None and link_url is not None:
description += f"\n\n[{link_name}]({link_url})"
    if description_image is not None:
        description += f"\n\n![Description image]({description_image})"
if (instance := hass.data.get(_KEY_INSTANCE)) is None:
instance = hass.data[_KEY_INSTANCE] = Configurator(hass)
request_id = instance.async_request_config(
name, callback, description, submit_caption, fields, entity_picture
)
if DATA_REQUESTS not in hass.data:
hass.data[DATA_REQUESTS] = {}
hass.data[DATA_REQUESTS][request_id] = instance
return request_id
|
f3c8ee70b3b51debeb404660a35491b07c78170e
| 3,647,935
|
def get_blueprint_docs(blueprints, blueprint):
"""Returns doc string for blueprint."""
doc_string = blueprints[blueprint].__doc__
return doc_string
|
8a334a9ddd1ff5fe844821152f4312b2db0e9da5
| 3,647,936
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
def getColorPalatte(image, num, show_chart=False):
"""
    Returns the most prevalent colors of an image
arguments:
image - image to sample colors from
num - number of colors to sample
show_chart - show a visual representation of the colors selected
"""
modified_image = np.array(image)
modified_image = cv2.resize(
modified_image, (600, 400), interpolation=cv2.INTER_AREA
)
modified_image = modified_image.reshape(-1, 3)
clf = KMeans(n_clusters=num)
labels = clf.fit_predict(modified_image)
counts = Counter(labels)
# sort to ensure correct color percentage
counts = dict(sorted(counts.items()))
center_colors = clf.cluster_centers_
center_colors = np.rint(center_colors)
center_colors = center_colors.astype(int)
center_colors = [tuple(color) for color in center_colors]
# We get ordered colors by iterating through the keys
ordered_colors = [center_colors[i] for i in counts.keys()]
hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]
rgb_colors = [ordered_colors[i] for i in counts.keys()]
if show_chart:
plt.figure(figsize=(10, 6))
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.subplot(1, 2, 2)
plt.pie(counts.values(), labels=hex_colors, colors=hex_colors)
plt.show()
return rgb_colors
|
9eaa125cefb1b23161479eaf2e2765ebb58bcd9e
| 3,647,937
|
import numpy
import sklearn.metrics
import sklearn.model_selection
import sklearn.preprocessing
import sklearn.svm
def run_classifier(data, labels, shuffle=False, nfolds=8, scale=True,
                   clf=None, verbose=False):
"""
run classifier for a single dataset
"""
features=data
if scale:
features=sklearn.preprocessing.scale(features)
if shuffle:
numpy.random.shuffle(labels)
    if not clf:
        clf = sklearn.svm.SVC(C=C)  # C is assumed to be a module-level constant
    skf = sklearn.model_selection.StratifiedKFold(nfolds, shuffle=True)
pred=numpy.zeros(labels.shape[0])
for train, test in skf.split(features,labels):
clf.fit(features[train,:],labels[train])
pred[test]=clf.predict(features[test,:])
if verbose:
print(clf.best_params_)
acc=sklearn.metrics.accuracy_score(labels, pred)
return acc
|
3a479971040131cb05f7441112ad0e951b8374f2
| 3,647,938
|
def merge_sort(linked_list):
"""
Sorts a linked list in ascending order
- Recursively divide the linked list into sublist containing a single node
- Repeatedly merge the sublist to produce sorted sublist until one remains
Returns a sorted linked list
Takes O(kn log n) time
"""
if linked_list.size() == 1:
return linked_list
elif linked_list.head is None:
return linked_list
left_half, right_half = split(linked_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
|
07dfee0cb5bdcddb688431f00aeb0520f1d2ed1c
| 3,647,939
|
def is_binary(file_path):
""" Returns True if the file is binary """
with open(file_path, 'rb') as fp:
data = fp.read(1024)
if not data:
return False
if b'\0' in data:
return True
return False
|
2df56f93d4e31220a580bf1e659c3c51b96260d2
| 3,647,940
|
def convert_host_names_to_ids(session, instanceList):
"""Look up ID of each instance on Amazon. Returns a list of IDs."""
idList = []
for i in instanceList:
instId = aws.instanceid_lookup(session, i)
if instId is not None:
idList.append(instId)
return idList
|
128d3d4a5e5e0729b477687f665abac43d29aef9
| 3,647,941
|
def handle_over_max_file_size(error):
    """
    Error handler for werkzeug.exceptions.RequestEntityTooLarge.
    Args:
        error: the raised exception.
    Returns:
        str: a message indicating that the uploaded file exceeded the size limit.
    """
    print("werkzeug.exceptions.RequestEntityTooLarge: " + str(error))
    return 'result : file size is over the limit.'
|
2bbdc1e38dea46ac08c314b3962ed63063578021
| 3,647,942
|
from typing import Mapping
import logging
import urllib.request
def _load_from_url(url: str,
                   chinese_only=False) -> Mapping[str, DictionaryEntry]:
    """Reads the dictionary from a remote URL
"""
logging.info('Opening the dictionary remotely')
with urllib.request.urlopen(url) as dict_file:
data = dict_file.read().decode('utf-8')
return _load_dictionary(data.splitlines(), chinese_only)
|
b496db0b767c17476ecbdc7cab89b962f19a4510
| 3,647,943
|
def get_images():
"""
Canned response for glance images list call
"""
return images
|
3f26e3e0527c0885cfff3470e5d40baf19b3ca82
| 3,647,944
|
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
letters = 'abcdefghijklmnopqrstuvwxyz'
index = [s.index(l) for l in letters if s.count(l) == 1]
return min(index) if len(index) > 0 else -1
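# Usage sketch (the function is written as an unbound method, so pass None for self):
assert firstUniqChar(None, "leetcode") == 0      # 'l' is the first non-repeating letter
assert firstUniqChar(None, "loveleetcode") == 2  # 'v' is the first non-repeating letter
assert firstUniqChar(None, "aabb") == -1         # no unique letter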
|
8b42b281c9e80cf89fb9952a0fe7c60c5270c210
| 3,647,945
|
def get_form_class_for_class(klass):
"""
A helper function for creating a model form class for a model on the fly. This is used with models (usually
part of an inheritance hierarchy) which define a function **get_editable_fields** which returns an iterable
of the field names which should be placed in the form.
"""
meta_dict = dict(model=klass)
if hasattr(klass, 'get_editable_fields'):
meta_dict['fields'] = klass.get_editable_fields()
meta = type('Meta', (),meta_dict)
modelform_class = type('modelform', (forms.ModelForm,), {"Meta": meta})
return modelform_class
|
12fcdcf9a3155e718bab28b30b466824ad425508
| 3,647,946
|
def dict_remove_key(d, key, default=None):
"""
removes a key from dict __WITH__ side effects
Returns the found value if it was there (default=None). It also modifies the original dict.
"""
return d.pop(key, default)
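# Usage sketch: the key is popped from the original dict (side effect) and its value returned.
d = {'a': 1, 'b': 2}
assert dict_remove_key(d, 'a') == 1
assert d == {'b': 2}
assert dict_remove_key(d, 'missing', default=0) == 0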
|
47bd0edf2bbeb9bad5c696d289c69d2d9eba6a1b
| 3,647,947
|
from typing import Optional
from numpy import ndarray
def momentum(snap: Snap, mask: Optional[ndarray] = None) -> ndarray:
"""Calculate the total momentum vector on a snapshot.
Parameters
----------
snap
The Snap object.
mask
Mask the particle arrays. Default is None.
Returns
-------
ndarray
The total momentum as a vector (px, py, pz).
"""
mass: ndarray = snap['mass']
vel: ndarray = snap['velocity']
if mask is None:
return (mass * vel).sum(axis=0)
return (mass * vel)[mask].sum(axis=0)
|
022f58ed494fb381e650ec0f61ed8d75704b846c
| 3,647,948
|
import types
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor num_epochs times and then raises an OutOfRange error.
Args:
tensor: Any Tensor.
num_epochs: An integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or OutOfRange.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=types.int64)
epochs = variables.Variable(zero64, name="epochs")
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
|
82fa475bf4fe0f63a66c5718dc2a0336b887b3d6
| 3,647,949
|
def hex_machine(emit):
"""
State machine for hex escaped characters in strings
Args:
emit (callable): callback for parsed value (number)
Returns:
callable: hex-parsing state machine
"""
left = 4
num = 0
def _hex(byte_data):
nonlocal num, left
if 0x30 <= byte_data <= 0x39: # 0-9
i = byte_data - 0x30
elif 0x61 <= byte_data <= 0x66: # a-f
i = byte_data - 0x57
elif 0x41 <= byte_data <= 0x46: # A-F
i = byte_data - 0x37
else:
raise Exception(
"Invalid hex char in string hex escape: " + hex(byte_data))
left -= 1
num |= i << (left * 4)
if left:
return _hex
return emit(num)
return _hex
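# Usage sketch: feed the four hex digits of "0041" one byte at a time; the final
# transition calls emit with the decoded value (0x0041 == 65, i.e. 'A').
decoded = []
state = hex_machine(decoded.append)
for byte in b"0041":
    state = state(byte)
assert decoded == [65]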
|
39232fdaf3c0ae19154e28307fb7f1254133dc94
| 3,647,950
|
import re
def isbns(self, key, value):
"""Translates isbns fields."""
_isbns = self.get("identifiers", [])
for v in force_list(value):
subfield_u = clean_val("u", v, str)
isbn = {
"value": clean_val("a", v, str) or clean_val("z", v, str),
"scheme": "ISBN",
}
if not isbn["value"]:
raise IgnoreKey("identifiers")
if subfield_u:
volume = re.search(r"(\(*v[.| ]*\d+.*\)*)", subfield_u)
if volume:
volume = volume.group(1)
subfield_u = subfield_u.replace(volume, "").strip()
existing_volume = self.get("volume")
if existing_volume:
raise ManualImportRequired(subfield="u")
self["volume"] = volume
# WARNING! vocabulary document_identifiers_materials
material = mapping(
IDENTIFIERS_MEDIUM_TYPES,
subfield_u,
field=key, subfield="u"
)
if material:
isbn.update({"material": material})
if isbn not in _isbns:
_isbns.append(isbn)
return _isbns
|
6db2f27733155e33e64b2d2ffba621deda86808d
| 3,647,951
|
import requests
def create_user(name, age, occupation):
"""
Function to post a new user.
Parameters
----------
name : str
Name of the user.
age : int
Age of the user.
occupation : str
Occupation of the user.
Returns
-------
message : str
request_status : int
HTTP response status code.
`400` "User already exists"
`201` "Created User `name`"
Examples
--------
>>> create_user(name = "micha", age= 28, occupation = 'PhD Student')
"Created User micha", 201
"""
# create a user
user = dict(
name = name,
age = age,
occupation = occupation,
)
# post it (as shortcut)
resp = requests.post("{}/user/{}".format(server,name), json=user)
if resp.status_code == 400:
return "User already exists", resp.status_code
elif resp.status_code == 201:
return "Created User {}".format(name), resp.status_code
else:
raise ApiError("Some unexpected ERROR code: {}".format(resp.status_code))
|
7e7a9a1071fd28a10beeaaf3922eaf36533334f8
| 3,647,952
|
import torch
def gauss_dataset(dim, size=1e6):
"""
    Creates a dataset of randomly sampled Gaussian noise.
    Sampling a batch of size bsize from the returned dataset yields a (bsize, dim) tensor.
"""
def samplef(bsize):
return torch.randn(bsize, dim)
ret = SampleDataset(samplef, size=size)
return ret
|
224640cff465b7e73d091a799498f3282d309b4e
| 3,647,953
|
def nightwatch_environment(request): # convenience spelling
"""Run tests against this environment (staging, production, etc.)"""
return request.config.getoption('--nightwatch-environment')
|
dc284660e062abf1b74a327e4b045cf79a64ee3a
| 3,647,954
|
def get_hrs(pid_arg):
"""
Pulls all recorded heart rate data for a patient from the database
Args:
pid_arg: patient_id to pull heart rate data for
Returns:
list: containing all recorded heart rates
"""
u5 = User.objects.raw({"_id": pid_arg}).first()
return u5.heart_rate
|
48794e2b94359a81d05d435feb0cf39e52142ca1
| 3,647,955
|
def resolve(match, *objects):
"""Given an array of objects and a regex match, this function returns the first
    matched group if it exists in one of the objects, otherwise returns the original
    string fully matched by the regex.
Example: if regex = \\\.([a-z]) and string = test\.abc, then
    the match = {group0: \.abc, group1: abc}. Assuming one object:
- obj = {abc: def}, then we return 'def'
- obj = {test: value}, then we return \.abc
Args:
objects (array[dict]): the array of objects we use to look up the key in match.group(1)
match: the regex match object
Returns:
str: the value of the matched group(1) in the first object found if exists, otherwise
returns the fully matched string.
"""
for obj in objects:
if obj is not None and match.group(1) in obj:
return str(obj[match.group(1)])
return match.group(0)
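# Usage sketch with re.sub: "\.abc" resolves to the value of "abc" when the key is
# present in one of the objects, and is left untouched otherwise.
import re

pattern = re.compile(r'\\\.([a-z]+)')
resolved = re.sub(pattern, lambda m: resolve(m, {'abc': 'def'}), r'test\.abc')
assert resolved == 'testdef'
unresolved = re.sub(pattern, lambda m: resolve(m, {'other': 'value'}), r'test\.abc')
assert unresolved == r'test\.abc'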
|
52f59fb5248ba635866fcd59a549067c3984e460
| 3,647,956
|
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.bus
import azlmbr
def Collider_CollisionGroupsWorkflow():
# type: () -> None
"""
Summary:
Runs an automated test to ensure PhysX collision groups dictate whether collisions happen or not.
The test has two phases (A and B) for testing collision groups under different circumstances. Phase A
is run first and upon success Phase B starts.
Level Description:
    Entities can be divided into 2 groups for the two phases, A and B. Each phase has identical entities with the exception
    of Terrain, where Terrain_A has a collision group/layer set for demo_group1/demo1 and Terrain_B has a collision
group/layer set for demo_group2/demo2.
    Each Phase has two boxes, Box_1 and Box_2, where each box has its collision group/layer set to its number
(1 or 2). Each box is positioned just above the Terrain with gravity enabled.
    All entities for Phase B are deactivated by default. If Phase A is set up and executed successfully its
entities are deactivated and Phase B's entities are activated and validated before running the Phase B test.
Expected behavior:
    When Phase A starts, its two boxes should fall toward the terrain. Once the boxes' behavior is validated the
entities from Phase A are deactivated and Phase B's entities are activated. Like in Phase A, the boxes in Phase B
    should fall towards the terrain. If all goes as expected Box_1_A and Box_2_B should collide with the terrain, and
    Box_2_A and Box_1_B should fall through the terrain.
Test Steps:
0) [Define helper classes and functions]
1) Load the level
2) Enter game mode
3) Retrieve and validate entities
4) Phase A
a) set up
b) execute test
c) log results (deactivate Phase A entities)
5) Phase B
a) set up (activate Phase B entities)
b) execute test
c) log results
6) close editor
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
- The level for this test uses two PhysX Terrains and must be run with cmdline argument "-autotest_mode"
to suppress the warning for having multiple terrains.
:return: None
"""
# ******* Helper Classes ********
# Phase A's test results
class PhaseATestData:
total_results = 2
box_1_collided = False
box_1_fell_through = True
box_2_collided = False
box_2_fell_through = False
box_1 = None
box_2 = None
terrain = None
box_1_pos = None
box_2_pos = None
terrain_pos = None
@staticmethod
# Quick check for validating results for Phase A
def valid():
return (
PhaseATestData.box_1_collided
and PhaseATestData.box_2_fell_through
and not PhaseATestData.box_1_fell_through
and not PhaseATestData.box_2_collided
)
# Phase B's test results
class PhaseBTestData:
total_results = 2
box_1_collided = False
box_1_fell_through = False
box_2_collided = False
box_2_fell_through = True
box_1 = None
box_2 = None
terrain = None
box_1_pos = None
box_2_pos = None
terrain_pos = None
@staticmethod
# Quick check for validating results for Phase B
def valid():
return (
not PhaseBTestData.box_1_collided
and not PhaseBTestData.box_2_fell_through
and PhaseBTestData.box_1_fell_through
and PhaseBTestData.box_2_collided
)
# **** Helper Functions ****
# ** Validation helpers **
# Attempts to validate an entity based on the name parameter
def validate_entity(entity_name, msg_tuple):
# type: (str, (str, str)) -> EntityId
entity_id = general.find_game_entity(entity_name)
Report.critical_result(msg_tuple, entity_id.IsValid())
return entity_id
# Attempts to retrieve an entity's initial position and logs result
def validate_initial_position(entity_id, msg_tuple):
# type: (EntityId, (str, str)) -> azlmbr.math.Vector3
# Attempts to validate and return the entity's initial position.
# logs the result to Report.result() using the tuple parameter
pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", entity_id)
valid = not (pos is None or pos.IsZero())
entity_name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", entity_id)
Report.critical_result(msg_tuple, valid)
Report.info_vector3(pos, "{} initial position:".format(entity_name))
return pos
    # ** Phase completion checks **
# Checks if we are done collecting data for phase A
def done_collecting_results_a():
# type: () -> bool
# Update positions
PhaseATestData.box_1_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_1
)
PhaseATestData.box_2_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_2
)
# Check for boxes to fall through terrain
if PhaseATestData.box_1_pos.z < PhaseATestData.terrain_pos.z:
PhaseATestData.box_1_fell_through = True
else:
PhaseATestData.box_1_fell_through = False
if PhaseATestData.box_2_pos.z < PhaseATestData.terrain_pos.z:
PhaseATestData.box_2_fell_through = True
else:
PhaseATestData.box_2_fell_through = False
results = 0
if PhaseATestData.box_1_collided or PhaseATestData.box_1_fell_through:
results += 1
if PhaseATestData.box_2_collided or PhaseATestData.box_2_fell_through:
results += 1
return results == PhaseATestData.total_results
# Checks if we are done collecting data for phase B
def done_collecting_results_b():
# type: () -> bool
# Update positions
PhaseBTestData.box_1_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_1
)
PhaseBTestData.box_2_pos = azlmbr.components.TransformBus(
azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_2
)
# Check for boxes to fall through terrain
if PhaseBTestData.box_1_pos.z < PhaseBTestData.terrain_pos.z:
PhaseBTestData.box_1_fell_through = True
else:
PhaseBTestData.box_1_fell_through = False
if PhaseBTestData.box_2_pos.z < PhaseBTestData.terrain_pos.z:
PhaseBTestData.box_2_fell_through = True
else:
PhaseBTestData.box_2_fell_through = False
results = 0
if PhaseBTestData.box_1_collided or PhaseBTestData.box_1_fell_through:
results += 1
if PhaseBTestData.box_2_collided or PhaseBTestData.box_2_fell_through:
results += 1
return results == PhaseBTestData.total_results
# **** Event Handlers ****
    # Collision event handler for Phase A
def on_collision_begin_a(args):
# type: ([EntityId]) -> None
collider_id = args[0]
if (not PhaseATestData.box_1_collided) and PhaseATestData.box_1.Equal(collider_id):
Report.info("Box_1_A / Terrain_A collision detected")
PhaseATestData.box_1_collided = True
if (not PhaseATestData.box_2_collided) and PhaseATestData.box_2.Equal(collider_id):
Report.info("Box_2_A / Terrain_A collision detected")
PhaseATestData.box_2_collided = True
# Collision event handler for Phase B
def on_collision_begin_b(args):
# type: ([EntityId]) -> None
collider_id = args[0]
if (not PhaseBTestData.box_1_collided) and PhaseBTestData.box_1.Equal(collider_id):
Report.info("Box_1_B / Terrain_B collision detected")
PhaseBTestData.box_1_collided = True
if (not PhaseBTestData.box_2_collided) and PhaseBTestData.box_2.Equal(collider_id):
Report.info("Box_2_B / Terrain_B collision detected")
PhaseBTestData.box_2_collided = True
TIME_OUT = 1.5
# 1) Open level
helper.init_idle()
helper.open_level("Physics", "Collider_CollisionGroupsWorkflow")
# 2) Enter game mode
helper.enter_game_mode(Tests.enter_game_mode)
# 3) Retrieve and validate entities
PhaseATestData.box_1 = validate_entity("Box_1_A", Tests.box_1_a_valid)
PhaseATestData.box_2 = validate_entity("Box_2_A", Tests.box_2_a_valid)
PhaseATestData.terrain = validate_entity("Terrain_Entity_A", Tests.terrain_a_valid)
PhaseBTestData.box_1 = validate_entity("Box_1_B", Tests.box_1_b_valid)
PhaseBTestData.box_2 = validate_entity("Box_2_B", Tests.box_2_b_valid)
PhaseBTestData.terrain = validate_entity("Terrain_Entity_B", Tests.terrain_b_valid)
# Make sure Phase B objects are disabled
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.terrain)
# 4) *********** Phase A *****************
# 4.a) ** Set Up **
Report.info(" **** Beginning Phase A **** ")
# Locate Phase A entities
PhaseATestData.box_1_pos = validate_initial_position(PhaseATestData.box_1, Tests.box_1_a_pos_found)
PhaseATestData.box_2_pos = validate_initial_position(PhaseATestData.box_2, Tests.box_2_a_pos_found)
PhaseATestData.terrain_pos = validate_initial_position(PhaseATestData.terrain, Tests.terrain_a_pos_found)
# Assign Phase A event handler
handler_a = azlmbr.physics.CollisionNotificationBusHandler()
handler_a.connect(PhaseATestData.terrain)
handler_a.add_callback("OnCollisionBegin", on_collision_begin_a)
# 4.b) Execute Phase A
if not helper.wait_for_condition(done_collecting_results_a, TIME_OUT):
Report.info("Phase A timed out: make sure the level is set up properly or adjust time out threshold")
# 4.c) Log results for Phase A
Report.result(Tests.box_1_a_did_collide_with_terrain, PhaseATestData.box_1_collided)
Report.result(Tests.box_1_a_did_not_pass_through_terrain, not PhaseATestData.box_1_fell_through)
Report.info_vector3(PhaseATestData.box_1_pos, "Box_1_A's final position:")
Report.result(Tests.box_2_a_did_pass_through_terrain, PhaseATestData.box_2_fell_through)
Report.result(Tests.box_2_a_did_not_collide_with_terrain, not PhaseATestData.box_2_collided)
Report.info_vector3(PhaseATestData.box_2_pos, "Box_2_A's final position:")
if not PhaseATestData.valid():
Report.info("Phase A failed test")
# Deactivate entities for Phase A
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.terrain)
# 5) *********** Phase B *****************
# 5.a) ** Set Up **
Report.info(" *** Beginning Phase B *** ")
# Activate entities for Phase B
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_1)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_2)
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.terrain)
# Initialize positions for Phase B
PhaseBTestData.box_1_pos = validate_initial_position(PhaseBTestData.box_1, Tests.box_1_b_pos_found)
PhaseBTestData.box_2_pos = validate_initial_position(PhaseBTestData.box_2, Tests.box_2_b_pos_found)
PhaseBTestData.terrain_pos = validate_initial_position(PhaseBTestData.terrain, Tests.terrain_b_pos_found)
# Assign Phase B event handler
handler_b = azlmbr.physics.CollisionNotificationBusHandler()
handler_b.connect(PhaseBTestData.terrain)
handler_b.add_callback("OnCollisionBegin", on_collision_begin_b)
# 5.b) Execute Phase B
if not helper.wait_for_condition(done_collecting_results_b, TIME_OUT):
Report.info("Phase B timed out: make sure the level is set up properly or adjust time out threshold")
# 5.c) Log results for Phase B
Report.result(Tests.box_1_b_did_not_collide_with_terrain, not PhaseBTestData.box_1_collided)
Report.result(Tests.box_1_b_did_pass_through_terrain, PhaseBTestData.box_1_fell_through)
Report.info_vector3(PhaseBTestData.box_1_pos, "Box_1_B's final position:")
Report.result(Tests.box_2_b_did_not_pass_through_terrain, not PhaseBTestData.box_2_fell_through)
Report.result(Tests.box_2_b_did_collide_with_terrain, PhaseBTestData.box_2_collided)
Report.info_vector3(PhaseBTestData.box_2_pos, "Box_2_B's final position:")
if not PhaseBTestData.valid():
Report.info("Phase B failed test")
# 6) Exit Game mode
helper.exit_game_mode(Tests.exit_game_mode)
Report.info(" **** TEST FINISHED ****")
|
6463d4543a771a50709712012650c804b365fe81
| 3,647,957
|
import time
from datetime import datetime
from pytz import timezone  # assumed: pytz supplies the timezone(zone) lookup used below
def _timestamp(zone="Europe/Istanbul") -> int:
"""Return timestamp of now."""
return int(time.mktime(datetime.now(timezone(zone)).timetuple()))
|
871c1dcba8b6f581097c2e24d34903c00034fa03
| 3,647,958
|
def sumReplacements(tex, functionName):
"""
Search tex file for the keyString "\\apisummary{" and its matching
parenthesis. All text between will be processed such that there are no
consecutive spaces, no tabs, and unnecessary "\\n". The text will then
have all the macros replaced and put back into its corresponding place
in the text file.
The strings "./ sectionStart" and "./ sectionEnd" are appending at the
beginning and end of the processed text, respectively, for differenti-
ation between the text with sections and "dangling text".
These strings will not appear in the manpage as any line that begins
with a period will be treated as a comment.
"""
startOfText = tex.find("\\apisummary{")
endOfText = findMatchingBrace(tex, tex.find("{", startOfText))
sectionText = cleanText(tex[startOfText:endOfText])
tex = tex[:startOfText] + \
"./ sectionStart\n" + \
".SH NAME\n" + functionName + " \- " \
+ sectionText + "\n" + \
"./ sectionEnd\n" + tex[endOfText+1:]
tex = tex.replace("\\apisummary{", "")
return tex
|
8b2ed7bec78c6f2fa03c1308cc4a8fcdfbfa6f8d
| 3,647,959
|
import argparse
def parse_args(args):
"""
Parse command line parameters
Parameters
----------
args : list
command line parameters as list of strings
Returns
-------
argparse.Namespace : obj
command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Create mesh and linear system of a PDE via Galerkins method."
)
parser.add_argument(
"-f",
"--file",
dest="data_path",
help="filepath to save data at",
default="../../data/Galerkins_method/",
type=str,
)
parser.add_argument(
"-r",
"--resolutions",
dest="resolutions",
help="Mesh resolutions.",
default=[6, 128],
        nargs="+",
        type=int,
)
return parser.parse_args(args)
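# Illustrative usage (added sketch, not part of the original module): parse an
# explicit argument list instead of sys.argv; the path below is a hypothetical example.
if __name__ == "__main__":
    print(parse_args([]))
    print(parse_args(["-f", "/tmp/galerkin_data/"]))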
|
c87a3dbb37b84076ac4d1cf3506a69abaac2c968
| 3,647,960
|
from typing import Union
from typing import List
from typing import Optional
def path(path: Union[str, List[str]], *, disable_stage_removal: Optional[bool] = False):
"""Validate the path in the event against the given path(s).
The following APIErrorResponse subclasses are used:
PathNotFoundError: When the path doesn't match.
Args:
path: A path literal or list of path literals to validate against.
disable_stage_removal (bool): preserve the original path with stage.
"""
return _get_decorator(
validate_path, path=path, disable_stage_removal=disable_stage_removal
)
|
eff9b153e90e3d657733c5c83b13c77aef21395f
| 3,647,961
|
def get_last_id(statefile):
"""Retrieve last status ID from a file"""
debug_print('Getting last ID from %s' % (statefile,))
try:
f = open(statefile,'r')
id = int(f.read())
f.close()
except IOError:
debug_print('IOError raised, returning zero (0)')
return 0
debug_print('Got %d' % (id,))
return id
|
4fa95ce2672b19359a8e6a25407c4b2480e23db4
| 3,647,962
|
import math
import torch
def inference_fn(trained_model,
remove,
fixed_params,
overwrite_fixed_params=False,
days_of_purchases=710,
days_of_clicks=710,
lifespan_of_items=710,
**params):
"""
Function to run inference inside the hyperparameter loop and calculate metrics.
Parameters
----------
trained_model:
Model trained during training of hyperparameter loop.
remove:
Percentage of data removed. See src.utils_data for more details.
fixed_params:
All parameters used during training of hyperparameter loop. See src.utils_data for more details.
overwrite_fixed_params:
If true, training parameters will overwritten by the parameters below. Can be useful if need to test the model
on different parameters, e.g. that includes older clicks or purchases.
days_of_purchases, days_of_clicks, lifespan_of_items:
All parameters that can overwrite the training parameters. Only useful if overwrite_fixed_params is True.
params:
All other parameters used during training.
Returns
-------
recall:
Recall on the test set. Relevant to compare with recall computed on hyperparametrization test set (since
parameters like 'remove' and all overwritable parameters are different)
Saves to file
-------------
Metrics computed on the test set.
"""
# Import parameters
if isinstance(fixed_params, str):
path = fixed_params
fixed_params = read_data(path)
class objectview(object):
def __init__(self, d):
self.__dict__ = d
fixed_params = objectview(fixed_params)
if 'params' in params.keys():
# if isinstance(params['params'], str):
path = params['params']
params = read_data(path)
# Initialize data
data_paths = DataPaths()
fixed_params.remove = remove
if overwrite_fixed_params:
fixed_params.days_of_purchases = days_of_purchases
fixed_params.days_of_clicks = days_of_clicks
fixed_params.lifespan_of_items = lifespan_of_items
data = DataLoader(data_paths, fixed_params)
# Get graph
valid_graph = create_graph(
data.graph_schema,
)
valid_graph = assign_graph_features(valid_graph,
fixed_params,
data,
**params,
)
dim_dict = {'user': valid_graph.nodes['user'].data['features'].shape[1],
'item': valid_graph.nodes['item'].data['features'].shape[1],
'out': params['out_dim'],
'hidden': params['hidden_dim']}
all_sids = None
if 'sport' in valid_graph.ntypes:
dim_dict['sport'] = valid_graph.nodes['sport'].data['features'].shape[1]
all_sids = np.arange(valid_graph.num_nodes('sport'))
# get training and test ids
(
train_graph,
train_eids_dict,
valid_eids_dict,
subtrain_uids,
valid_uids,
test_uids,
all_iids,
ground_truth_subtrain,
ground_truth_valid,
all_eids_dict
) = train_valid_split(
valid_graph,
data.ground_truth_test,
fixed_params.etype,
fixed_params.subtrain_size,
fixed_params.valid_size,
fixed_params.reverse_etype,
fixed_params.train_on_clicks,
fixed_params.remove_train_eids,
params['clicks_sample'],
params['purchases_sample'],
)
(
edgeloader_train,
edgeloader_valid,
nodeloader_subtrain,
nodeloader_valid,
nodeloader_test
) = generate_dataloaders(valid_graph,
train_graph,
train_eids_dict,
valid_eids_dict,
subtrain_uids,
valid_uids,
test_uids,
all_iids,
fixed_params,
num_workers,
all_sids,
embedding_layer=params['embedding_layer'],
n_layers=params['n_layers'],
neg_sample_size=params['neg_sample_size'],
)
num_batches_test = math.ceil((len(test_uids) + len(all_iids)) / fixed_params.node_batch_size)
# Import model
if isinstance(trained_model, str):
path = trained_model
trained_model = ConvModel(valid_graph,
params['n_layers'],
dim_dict,
params['norm'],
params['dropout'],
params['aggregator_type'],
fixed_params.pred,
params['aggregator_hetero'],
params['embedding_layer'],
)
trained_model.load_state_dict(torch.load(path, map_location=device))
if cuda:
trained_model = trained_model.to(device)
trained_model.eval()
with torch.no_grad():
embeddings = get_embeddings(valid_graph,
params['out_dim'],
trained_model,
nodeloader_test,
num_batches_test,
cuda,
device,
params['embedding_layer'],
)
for ground_truth in [data.ground_truth_purchase_test, data.ground_truth_test]:
precision, recall, coverage = get_metrics_at_k(
embeddings,
valid_graph,
trained_model,
params['out_dim'],
ground_truth,
all_eids_dict[('user', 'buys', 'item')],
fixed_params.k,
True, # Remove already bought
cuda,
device,
fixed_params.pred,
params['use_popularity'],
params['weight_popularity'],
)
sentence = ("TEST Precision "
"{:.3f}% | Recall {:.3f}% | Coverage {:.2f}%"
.format(precision * 100,
recall * 100,
coverage * 100))
print(sentence)
save_txt(sentence, data_paths.result_filepath, mode='a')
return recall
|
3fa306d97d4db7cf5b321b6284c5ab75ff108845
| 3,647,963
|
import random
import scipy
def initialize_mean_variance(args):
"""Initialize the current mean and variance values semi-intelligently.
Inspired by the kmeans++ algorithm: iteratively choose new centers from the data
by weighted sampling, favoring points that are distant from those already chosen
"""
X = args.X.reshape(args.X.shape[0] * args.X.shape[1], args.X.shape[2])
# kmeans++ inspired choice
centers = [random.choice(X)]
min_dists = scipy.array([distance(centers[-1], x) for x in X])
for l in range(1, args.K):
weights = min_dists * min_dists
new_center = weighted_sample(zip(weights, X), 1).next()
centers.append(new_center)
min_dists = scipy.fmin(min_dists, scipy.array([distance(centers[-1], x) for x in X]))
means = scipy.array(centers)
# for the variance, get the variance of the data in this cluster
variances = []
for c in centers:
idxs = tuple(i for i, (x, m) in enumerate(zip(X, min_dists)) if distance(c, x) == m)
v = scipy.var(X[idxs, :], axis=0)
variances.append(v)
variances = scipy.array(variances) + args.pseudocount
#import pdb; pdb.set_trace()
#for k in range(args.K):
# print sp.sqrt(variances[k,:])
variances[variances < .1] = .1
return means, variances
|
98808bc7ab069c1ea7ca8e05b6dd27275d6c0f09
| 3,647,964
|
def verify_file_checksum(path, expected_checksum):
"""Verifies the sha256 checksum of a file."""
actual_checksum = calculate_file_checksum(path)
return actual_checksum == expected_checksum
|
519d58892a122d5bc7850cb21ca047c152ef4183
| 3,647,965
|
import decimal
def float_to_str(f, p=20):
""" 将给定的float转换为字符串,而无需借助科学计数法。
@param f 浮点数参数
@param p 精读
"""
if type(f) == str:
f = float(f)
ctx = decimal.Context(p)
d1 = ctx.create_decimal(repr(f))
return format(d1, 'f')
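# Illustrative usage (added example, not from the original source): small floats
# are rendered in plain decimal form instead of scientific notation.
if __name__ == "__main__":
    print(float_to_str(1.5e-9))  # 0.0000000015
    print(float_to_str("0.1"))   # 0.1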
|
551ab2f58b48e4005d8b5a85a7eb096e4e749d23
| 3,647,966
|
from typing import List
from ast import ClassDef
def get_classes(parsed) -> List[ClassDef]:
"""Returns classes identified in parsed Python code."""
return [
element
for element in parsed.body
if isinstance(element, ClassDef)
]
|
e339899eb1dd039c9a708bf39f2fafa527d15882
| 3,647,967
|
def get_steps(r):
"""Clone OSA."""
nextsteps = []
nextsteps.append(
steps.SimpleCommandStep(
'git-clone-osa',
('git clone %s/openstack/openstack-ansible '
'/opt/openstack-ansible'
% r.complete['git-mirror-openstack']),
**r.kwargs
)
)
nextsteps.append(
steps.KwargsStep(
'kwargs-osa',
r,
{
'cwd': '/opt/openstack-ansible',
'env': {
'ANSIBLE_ROLE_FETCH_MODE': 'git-clone',
'ANSIBLE_DEBUG': _ansible_debug(r),
'ANSIBLE_KEEP_REMOTE_FILES': '1'
}
},
**r.kwargs
)
)
if utils.is_ironic(r):
nextsteps.append(
steps.KwargsStep(
'kwargs-ironic',
r,
{
'env': {
'BOOTSTRAP_OPTS': 'nova_virt_type=ironic'
}
},
**r.kwargs
)
)
if r.complete['enable-ceph'] == 'yes':
if r.complete['osa-branch'] in ['stable/mitaka',
'stable/newton']:
# This isn't implemented for these releases
pass
else:
nextsteps.append(
steps.KwargsStep(
'kwargs-ceph',
r,
{
'env': {
'SCENARIO': 'ceph'
}
},
**r.kwargs
)
)
return nextsteps
|
00daddf13256b2cb244aa72ad3f37d8fe1b03cc5
| 3,647,968
|
import numpy as np
from tqdm import tqdm
def create_index(
corpus_f: str,
model_name_or_path: str,
output_f: str,
mode: str = "sent2vec",
batch_size: int = 64,
use_cuda: bool = False,
):
"""Given a corpus file `corpus_f` and a sent2vec model `sent2vec_f`, convert the sentences in
the corpus (line-by-line) to vector representations, normalise them (L2norm), and add them
to a Flat FAISS index. Finally, save the index to `output_f`.
:param corpus_f: path to the corpus file, with one sentence per line
:param model_name_or_path: path to the binary sent2vec model (when mode=="sent2vec") or model name of the stransformer to use
:param output_f: path to save the FAISS index to
:param mode: whether to use "sent2vec" or "stransformers" (sentence-transformers)
:param batch_size: batch_size to use to create sent2vec embeddings or sentence-transformers embeddings
:param use_cuda: whether to use GPU when using sentence-transformers
:return: the created FAISS index
"""
if not FAISS_AVAILABLE:
raise ImportError(
"Faiss not installed. Please install the right version before continuing. If you have a "
"CUDA-enabled device and want to use GPU acceleration, you can `pip install faiss-gpu`."
" Otherwise, install faiss-cpu. For more, see https://github.com/facebookresearch/faiss"
)
if mode == "sent2vec":
if not SENT2VEC_AVAILABLE:
raise ImportError(
"Requested 'sent2vec', but module not installed. Install the right version from"
" https://github.com/epfml/sent2vec"
)
try:
model = sent2vec.Sent2vecModel()
except AttributeError as exc:
raise AttributeError(
"'sent2vec' does not have attribute Sent2vecModel. You may have uninstalled an"
" incorrect version of sent2vec. The correct version can be found here:"
" https://github.com/epfml/sent2vec"
) from exc
logger.info(f"Loading sent2vec model of {model_name_or_path}")
model.load_model(model_name_or_path, inference_mode=True)
hidden_size = model.get_emb_size()
elif mode == "stransformers":
if not STRANSFORMERS_AVAILABLE:
raise ImportError(
"Requested 'stransformers', but module not installed. Please install the library"
" before continuing. https://github.com/UKPLab/sentence-transformers#installation"
)
logger.info(f"Loading SentenceTransformer model {model_name_or_path}")
model = SentenceTransformer(model_name_or_path, device="cuda" if use_cuda else "cpu")
hidden_size = model.encode(["This is a test ."]).shape[1]
else:
raise ValueError("'mode' must be 'sent2vec' or 'stransformers'")
logger.info(f"Creating empty index with hidden_size {hidden_size:,}...")
# We want to do cosine similarity search, so we use inner product as suggested here:
# https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
index = faiss.index_factory(hidden_size, "Flat", faiss.METRIC_INNER_PRODUCT)
vecs = []
n_lines = get_n_lines(corpus_f)
logger.info("Converting corpus into vectors. This can take a while...")
batch = []
with open(corpus_f, encoding="utf-8") as fhin:
for line_idx, line in tqdm(enumerate(fhin, 1), total=n_lines, unit="line"):
line = line.rstrip()
if line:
batch.append(line)
if len(batch) == batch_size or line_idx == n_lines:
if mode == "sent2vec":
# Normalize vectors for cosine distance as suggested here:
# https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
vecs.extend(model.embed_sentences(batch))
else:
vecs.extend(model.encode(batch, batch_size=batch_size, show_progress_bar=False))
batch = []
logger.info(f"Number of entries: {len(vecs)}")
logger.info("Normalizing vectors...")
sent_vecs = np.array(vecs)
# normalize_L2 works in-place so do not assign
faiss.normalize_L2(sent_vecs)
logger.info("Adding vectors to index...")
index.add(sent_vecs)
logger.info(f"Saving index to {output_f}...")
faiss.write_index(index, output_f)
return index
|
b757d55ecf3001cad2ad285f476a391cb013d8f4
| 3,647,969
|
import inspect
def _convert_and_call(function, *args, **kwargs):
"""
Use annotation to convert args and kwargs to the correct type before calling function
If __annotations__ is not present (py2k) or empty, do not perform any conversion.
This tries to perform the conversion by calling the type (works for int,str).
If calling the type results in an error, no conversion is performed.
"""
args = list(args)
if PY3K:
argspec = inspect.getfullargspec(function)
annot = argspec.annotations
log.debug("Function's annotations are: %s", annot)
for i, arg in enumerate(argspec.args):
i=i-1 # cls/ self does not count
if arg in annot:
log.debug("For arg %s: i=%s, args=%s", arg, i, args)
if i<len(args):
args[i]=_try_convert(args[i], annot[arg])
elif arg in kwargs:
kwargs[arg]=_try_convert(kwargs[arg], annot[arg])
else:
log.debug("No annotation present for %s", arg)
log.debug("Calling %s with args=%s, kwargs=%s", function.__name__, args, kwargs)
return function(*args, **kwargs)
|
27892f4afa66d4e2b977c5ca64155758bedd5f76
| 3,647,970
|
import importlib.util
def import_module(name, path):
"""
correct way of importing a module dynamically in python 3.
:param name: name given to module instance.
:param path: path to module.
:return: module: returned module instance.
"""
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
|
d78dc5bc9d3a121c53bdd3bc44ad57378976eb28
| 3,647,971
|
def response_ssml_text_and_prompt(output, endsession, reprompt_text):
""" create a Ssml response with prompt """
return {
'outputSpeech': {
'type': 'SSML',
'ssml': "<speak>" + output + "</speak>"
},
'reprompt': {
'outputSpeech': {
'type': 'SSML',
'ssml': "<speak>" + reprompt_text + "</speak>"
}
},
'shouldEndSession': endsession
}
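# Illustrative usage (added example, not from the original source): build a short
# SSML answer that keeps the session open and sets a reprompt.
if __name__ == "__main__":
    print(response_ssml_text_and_prompt("Hello there.", False, "Are you still there?"))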
|
7cfa6b245bb80a29b10f3b972d1e9eb68377e836
| 3,647,972
|
import re
def getAreaQuantityQuantUnit(words):
"""
from training data:
count perc cum_sum cum_perc
kind_c
hectare 7 58.333333 7 58.333333
acre 2 16.666667 9 75.000000
meter 1 8.333333 10 83.333333
square-foot 1 8.333333 11 91.666667
square-meter 1 8.333333 12 100.000000
"""
allWords = ' '.join(words)
unitKind = 'hectare'
quant = None
units = ['hectare', 'acre', 'euro', 'meter', 'square-foot', 'square-meter' ]
for u in units:
if u in allWords : unitKind=u; break
if 'square foot' in allWords : unitKind='square-foot'
if 'square feet' in allWords : unitKind='square-foot'
if 'square meter' in allWords : unitKind='square-meter'
m = re.search(r'([0-9,\.]+)', allWords.lower())
    if m:
        quant = m.group(1)
        quant = quant.replace(",", "")
        if quant in ('', '.'):
            quant = None
        else:
            quant = float(quant)
if not quant:
q = text2int(allWords)
if q:
quant = q
else:
m = text2int(allWords)
if m:
quant *= m
if not quant:
quant = 1
quant = ('%f' % quant).rstrip('0').rstrip('.')
return quant, unitKind
#subGraph['attrDict_p'] = attrDict_p
|
10397a73042469a949fa6dbf70e8bba406cf510c
| 3,647,973
|
from typing import Optional
from typing import List
import pwd
import grp
from subprocess import STDOUT, check_output
def add_user(
username: str,
password: Optional[str] = None,
shell: str = "/bin/bash",
system_user: bool = False,
primary_group: str = None,
secondary_groups: List[str] = None,
uid: int = None,
home_dir: str = None,
) -> str:
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
Arguments:
username: Username to create
password: Password for user; if ``None``, create a system user
shell: The default shell for the user
system_user: Whether to create a login or system user
primary_group: Primary group for user; defaults to username
secondary_groups: Optional list of additional groups
uid: UID for user being created
home_dir: Home directory for user
Returns:
The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
if uid:
user_info = pwd.getpwuid(int(uid))
logger.info("user '%d' already exists", uid)
return user_info
user_info = pwd.getpwnam(username)
logger.info("user with uid '%s' already exists", username)
return user_info
except KeyError:
logger.info("creating user '%s'", username)
cmd = ["useradd", "--shell", shell]
if uid:
cmd.extend(["--uid", str(uid)])
if home_dir:
cmd.extend(["--home", str(home_dir)])
if password:
cmd.extend(["--password", password, "--create-home"])
if system_user or password is None:
cmd.append("--system")
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(["-g", primary_group])
if secondary_groups:
cmd.extend(["-G", ",".join(secondary_groups)])
cmd.append(username)
check_output(cmd, stderr=STDOUT)
user_info = pwd.getpwnam(username)
return user_info
|
17e9cc717f5ff63e65df202e05de88e703a9cf03
| 3,647,974
|
def tokenize_and_align(tokenizer, words, cased=False):
"""Splits up words into subword-level tokens."""
words = ["[CLS]"] + list(words) + ["[SEP]"]
basic_tokenizer = tokenizer.basic_tokenizer
tokenized_words = []
for word in words:
word = tokenization.convert_to_unicode(word)
word = basic_tokenizer._clean_text(word)
if word == "[CLS]" or word == "[SEP]":
word_toks = [word]
else:
if not cased:
word = word.lower()
word = basic_tokenizer._run_strip_accents(word)
word_toks = basic_tokenizer._run_split_on_punc(word)
tokenized_word = []
for word_tok in word_toks:
tokenized_word += tokenizer.wordpiece_tokenizer.tokenize(word_tok)
tokenized_words.append(tokenized_word)
assert len(tokenized_words) == len(words)
return tokenized_words
|
d6bd3fa2523b0f2422d6d0c2c87ac2637462542a
| 3,647,975
|
def _vagrant_format_results(line):
"""Extract fields from vm status line.
:param line: Status line for a running vm
:type line: str
:return: (<vm directory path>, <vm status>)
:rtype: tuple of strings
"""
line_split = line.split()
return (line_split[-1], line_split[-2],)
|
78788572e6b695696621775c28ae8b3a1e577ee3
| 3,647,976
|
import cv2  # legacy OpenCV 2.x API: cv2.cv.BoxPoints is used below
import numpy as np
def rect_to_xys(rect, image_shape):
"""Convert rect to xys, i.e., eight points
    The `image_shape` is used to make sure all points returned are valid, i.e., within the image area
"""
h, w = image_shape[0:2]
def get_valid_x(x):
if x < 0:
return 0
if x >= w:
return w - 1
return x
def get_valid_y(y):
if y < 0:
return 0
if y >= h:
return h - 1
return y
rect = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
points = cv2.cv.BoxPoints(rect)
points = np.int0(points)
for i_xy, (x, y) in enumerate(points):
x = get_valid_x(x)
y = get_valid_y(y)
points[i_xy, :] = [x, y]
points = np.reshape(points, -1)
return points
|
a706007dc1651f1b8ce3c35b355b5b02915158e9
| 3,647,977
|
from typing import Optional
def determine_aws_service_name(
request: Request, services: ServiceCatalog = get_service_catalog()
) -> Optional[str]:
"""
Tries to determine the name of the AWS service an incoming request is targeting.
:param request: to determine the target service name of
:param services: service catalog (can be handed in for caching purposes)
:return: service name string (or None if the targeting service could not be determined exactly)
"""
signing_name, target_prefix, operation, host, path = _extract_service_indicators(request)
candidates = set()
# 1. check the signing names
if signing_name:
signing_name_candidates = services.by_signing_name(signing_name)
if len(signing_name_candidates) == 1:
# a unique signing-name -> service name mapping is the case for ~75% of service operations
return signing_name_candidates[0]
# try to find a match with the custom signing name rules
custom_match = custom_signing_name_rules(signing_name, path)
if custom_match:
return custom_match
# still ambiguous - add the services to the list of candidates
candidates.update(signing_name_candidates)
# 2. check the target prefix
if target_prefix and operation:
target_candidates = services.by_target_prefix(target_prefix)
if len(target_candidates) == 1:
# a unique target prefix
return target_candidates[0]
# still ambiguous - add the services to the list of candidates
candidates.update(target_candidates)
# exclude services where the operation is not contained in the service spec
for service_name in list(candidates):
service = services.get(service_name)
if operation not in service.operation_names:
candidates.remove(service_name)
else:
# exclude services which have a target prefix (the current request does not have one)
for service_name in list(candidates):
service = services.get(service_name)
if service.metadata.get("targetPrefix") is not None:
candidates.remove(service_name)
if len(candidates) == 1:
return candidates.pop()
# 3. check the path
if path:
# iterate over the service spec's endpoint prefix
for prefix, services_per_prefix in services.endpoint_prefix_index.items():
if path.startswith(prefix):
if len(services_per_prefix) == 1:
return services_per_prefix[0]
candidates.update(services_per_prefix)
# try to find a match with the custom path rules
custom_path_match = custom_path_addressing_rules(path)
if custom_path_match:
return custom_path_match
# 4. check the host (custom host addressing rules)
if host:
custom_host_match = custom_host_addressing_rules(host)
if custom_host_match:
return custom_host_match
# 5. check the query / form-data
values = request.values
if "Action" in values and "Version" in values:
# query / ec2 protocol requests always have an action and a version (the action is more significant)
query_candidates = services.by_operation(values["Action"])
if len(query_candidates) == 1:
return query_candidates[0]
for service in list(query_candidates):
service_model = services.get(service)
if values["Version"] != service_model.api_version:
# the combination of Version and Action is not unique, add matches to the candidates
query_candidates.remove(service)
if len(query_candidates) == 1:
return query_candidates[0]
candidates.update(query_candidates)
# 6. check the legacy rules in the end
legacy_match = legacy_rules(request)
if legacy_match:
return legacy_match
LOG.warning("could not uniquely determine service from request, candidates=%s", candidates)
if signing_name:
return signing_name
if candidates:
return candidates.pop()
return None
|
c29cc59324c3bf946cdfcd936b3f523feb657fda
| 3,647,978
|
def file_senzing_info():
"""#!/usr/bin/env bash
# --- Main --------------------------------------------------------------------
SCRIPT_DIR="$( cd "$( dirname "${{BASH_SOURCE[0]}}" )" >/dev/null 2>&1 && pwd )"
PROJECT_DIR="$(dirname ${{SCRIPT_DIR}})"
source ${{SCRIPT_DIR}}/docker-environment-vars.sh
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
COLUMN_WIDTH_1=${{#SENZING_PROJECT_NAME}}
COLUMN_WIDTH=$((${{COLUMN_WIDTH_1}}+16))
DOCKER_CONTAINERS=(
"${{SENZING_DOCKER_CONTAINER_NAME_SENZING_API_SERVER}};${{SENZING_DOCKER_PORT_SENZING_API_SERVER}};senzing/senzing-api-server:${{SENZING_DOCKER_IMAGE_VERSION_SENZING_API_SERVER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SENZING_DEBUG}};----;senzing/senzing-debug:${{SENZING_DOCKER_IMAGE_VERSION_SENZING_DEBUG}}"
"${{SENZING_DOCKER_CONTAINER_NAME_JUPYTER}};${{SENZING_DOCKER_PORT_JUPYTER}};senzing/jupyter:${{SENZING_DOCKER_IMAGE_VERSION_JUPYTER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_PHPPGADMIN}};${{SENZING_DOCKER_PORT_PHPPGADMIN_HTTP}};senzing/phppgadmin:${{SENZING_DOCKER_IMAGE_VERSION_PHPPGADMIN}}"
"${{SENZING_DOCKER_CONTAINER_NAME_PORTAINER}};${{SENZING_DOCKER_PORT_PORTAINER}};portainer/portainer:${{SENZING_DOCKER_IMAGE_VERSION_PORTAINER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_POSTGRES}};${{SENZING_DOCKER_PORT_POSTGRES}};postgres:${{SENZING_DOCKER_IMAGE_VERSION_POSTGRES}}"
"${{SENZING_DOCKER_CONTAINER_NAME_QUICKSTART}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/web-app-demo:${{SENZING_DOCKER_IMAGE_VERSION_WEB_APP_DEMO}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SQLITE_WEB}};${{SENZING_DOCKER_PORT_SENZING_SQLITE_WEB}};coleifer/sqlite-web:${{SENZING_DOCKER_IMAGE_VERSION_SQLITE_WEB}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SSHD}};${{SENZING_DOCKER_PORT_SSHD}};senzing/sshd:${{SENZING_DOCKER_IMAGE_VERSION_SSHD}}"
"${{SENZING_DOCKER_CONTAINER_NAME_STREAM_LOADER}};----;senzing/stream-loader:${{SENZING_DOCKER_IMAGE_VERSION_STREAM_LOADER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_STREAM_PRODUCER}};----;senzing/stream-producer:${{SENZING_DOCKER_IMAGE_VERSION_STREAM_PRODUCER}}"
"${{SENZING_DOCKER_CONTAINER_NAME_SWAGGERAPI_SWAGGER_UI}};${{SENZING_DOCKER_PORT_SENZING_SWAGGERAPI_SWAGGER_UI}};swaggerapi/swagger-ui:${{SENZING_DOCKER_IMAGE_VERSION_SWAGGERAPI_SWAGGER_UI}}"
"${{SENZING_DOCKER_CONTAINER_NAME_ENTITY_SEARCH_WEB_APP}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/entity-search-web-app:${{SENZING_DOCKER_IMAGE_VERSION_ENTITY_SEARCH_WEB_APP}}"
"${{SENZING_DOCKER_CONTAINER_NAME_WEB_APP_DEMO}};${{SENZING_DOCKER_PORT_ENTITY_SEARCH_WEB_APP}};senzing/web-app-demo:${{SENZING_DOCKER_IMAGE_VERSION_WEB_APP_DEMO}}"
"${{SENZING_DOCKER_CONTAINER_NAME_XTERM}};${{SENZING_DOCKER_PORT_XTERM}};senzing/xterm:${{SENZING_DOCKER_IMAGE_VERSION_XTERM}}"
"${{SENZING_DOCKER_CONTAINER_NAME_RABBITMQ}};${{SENZING_DOCKER_PORT_RABBITMQ_UI}};bitnami/rabbitmq:${{SENZING_DOCKER_IMAGE_VERSION_RABBITMQ}}"
)
echo "${{SENZING_HORIZONTAL_RULE}}"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} senzing-info.sh {environment_version} ({environment_updated})"
if [[ ( -n "$(command -v jq)" ) ]]; then
G2_BUILD_VERSION_FILE=${{PROJECT_DIR}}/g2BuildVersion.json
if [ -f "${{PROJECT_DIR}}/g2/g2BuildVersion.json" ]; then
G2_BUILD_VERSION_FILE=${{PROJECT_DIR}}/g2/g2BuildVersion.json
fi
SENZING_VERSION_API=$(jq --raw-output ".VERSION" ${{G2_BUILD_VERSION_FILE}})
SENZING_VERSION_DATA=$(jq --raw-output ".DATA_VERSION" ${{G2_BUILD_VERSION_FILE}})
echo "${{SENZING_HORIZONTAL_RULE:0:2}} senzing api: ${{SENZING_VERSION_API}} data: ${{SENZING_VERSION_DATA}}"
fi
echo "${{SENZING_HORIZONTAL_RULE:0:2}}"
for DOCKER_CONTAINER in ${{DOCKER_CONTAINERS[@]}};
do
IFS=";" read -r -a CONTAINER_DATA <<< "${{DOCKER_CONTAINER}}"
CONTAINER_NAME="${{CONTAINER_DATA[0]}} "
CONTAINER_PORT="${{CONTAINER_DATA[1]}}"
CONTAINER_VERSION="${{CONTAINER_DATA[2]}}"
if [ "$( docker container inspect -f '{{{{.State.Status}}}}' ${{CONTAINER_NAME}} 2>/dev/null )" == "running" ]; then
printf "${{SENZING_HORIZONTAL_RULE:0:2}} %-${{COLUMN_WIDTH}}s ${{GREEN}}up${{NC}} http://${{SENZING_DOCKER_HOST_IP_ADDR}}:${{CONTAINER_PORT}} ${{CONTAINER_VERSION}}\\n" ${{CONTAINER_NAME}}
else
printf "${{SENZING_HORIZONTAL_RULE:0:2}} %-${{COLUMN_WIDTH}}s ${{RED}}down${{NC}} http://${{SENZING_DOCKER_HOST_IP_ADDR}}:${{CONTAINER_PORT}} ${{CONTAINER_VERSION}}\\n" ${{CONTAINER_NAME}}
fi
done
echo "${{SENZING_HORIZONTAL_RULE:0:2}}"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} For more information:"
echo "${{SENZING_HORIZONTAL_RULE:0:2}} ${{SENZING_REFERENCE_URL}}#senzing-info"
echo "${{SENZING_HORIZONTAL_RULE}}"
"""
return 0
|
02cd47b34b4353034de5d00804fdfcc0ede7794b
| 3,647,979
|
from typing import Any
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator[Domain] = hass.data[DOMAIN][entry.entry_id]
return {
"creation_date": coordinator.data.creation_date,
"expiration_date": coordinator.data.expiration_date,
"last_updated": coordinator.data.last_updated,
"status": coordinator.data.status,
"statuses": coordinator.data.statuses,
"dnssec": coordinator.data.dnssec,
}
|
3aa4e17c646367721ffc266c31f66cd9f81c26fe
| 3,647,980
|
from typing import Dict
def binary_to_single(param_dict: Dict[str, float], star_index: int) -> Dict[str, float]:
"""
Function for converting a dictionary with atmospheric parameters
of a binary system to a dictionary of parameters for one of the
two stars.
Parameters
----------
param_dict : dict
Dictionary with the atmospheric parameters of both stars. The
keywords end either with ``_0`` or ``_1`` that correspond with
``star_index=0`` or ``star_index=1``.
star_index : int
Star index (0 or 1) that is used for the parameters in
``param_dict``.
Returns
-------
dict
Dictionary with the parameters of the selected star.
"""
new_dict = {}
for key, value in param_dict.items():
if star_index == 0 and key[-1] == "0":
new_dict[key[:-2]] = value
elif star_index == 1 and key[-1] == "1":
new_dict[key[:-2]] = value
elif key in ["teff", "logg", "feh", "c_o_ratio", "fsed", "radius", "distance"]:
new_dict[key] = value
return new_dict
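# Illustrative usage (added example, not from the original source): the parameter
# values below are made up and only show how the "_0"/"_1" suffixes are resolved.
if __name__ == "__main__":
    example = {"teff_0": 3500.0, "teff_1": 3200.0, "radius_0": 1.2, "radius_1": 0.8, "distance": 10.0}
    print(binary_to_single(example, star_index=0))
    # -> {'teff': 3500.0, 'radius': 1.2, 'distance': 10.0}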
|
21099162ffe83715892abf82660e35ee98e02930
| 3,647,981
|
def welcome():
"""List all available api routes."""
return (
"""Available Routes:
/api/v1.0/precipitation
Convert the query results to a dictionary using date as the key and prcp as the value.
Return the JSON representation of your dictionary.
/api/v1.0/stations
Return a JSON list of stations from the dataset.
/api/v1.0/tobs
Return a JSON list of temperature observations (TOBS) for the previous year.
/api/v1.0/start_date
/api/v1.0/start_date/end_date
Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
"""
)
|
83fcd43ff8dddd0596232dfb4420525bc592b583
| 3,647,982
|
import re
def armenian_input_latin(field, text):
"""
Prepare a string from one of the query fields for subsequent
processing: replace latin characters with Armenian equivalents.
"""
if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
return text
textTrans = ''
for c in re.findall('.[\'_]+|.', text):
try:
c = dictLat2Arm[c]
except KeyError:
try:
c = dictLat2Arm[c.lower()].upper()
except KeyError:
pass
textTrans += c
return textTrans
|
94764ec931a4469ea0dca39a70880b41345ab7cf
| 3,647,983
|
from typing import Union
from typing import Tuple
from typing import Callable
from typing import Optional
import types
def df_style_cell(*styles: Union[
Tuple[Callable[['cell'], bool], 'style'],
Tuple['cell', 'style'],
Callable[['cell'], Optional['style']],
]) -> Callable[['cell'], 'style']:
"""
Shorthand for df.style.applymap(...). Example usage:
df.style.applymap(df_style_cell(
(lambda x: 0 < x < 1, 'color: red'),
(0, 'color: green'),
lambda x: 'background: %s' % to_rgb_hex(x),
))
"""
def f(x):
y = None
for style in styles:
if isinstance(style, tuple) and isinstance(style[0], types.FunctionType) and style[0](x):
y = style[1]
elif isinstance(style, tuple) and x == style[0]:
y = style[1]
elif isinstance(style, types.FunctionType):
y = style(x)
if y:
break
return y or ''
return f
|
e45d1b17ecd3bfe6bf05ba70e7ef0c8dc4b99a81
| 3,647,984
|
import numpy as np
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = np.minimum(dw, np.log(1000. / 16.))
dh = np.minimum(dh, np.log(1000. / 16.))
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
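# Illustrative usage (added example, not from the original source): with unit
# weights, all-zero deltas reproduce the input box exactly.
if __name__ == "__main__":
    example_boxes = np.array([[10.0, 10.0, 49.0, 29.0]])
    example_deltas = np.zeros((1, 4))
    print(bbox_transform(example_boxes, example_deltas))  # [[10. 10. 49. 29.]]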
|
90e4cb394a12cbb73ce0dea85557b8195f04a961
| 3,647,985
|
def get_item_tds(item_id):
"""
    Method connects to the ILS to retrieve item information and generates
    HTML table cells with the information.
:param item_id: Item id
:rtype: HTML string
"""
item_bot = ItemBot(opac_url=ils_settings.OPAC_URL,item_id=item_id)
output_html = "<td>{0}</td><td>{1}</td><td>{2}</td>".format(item_bot.status(),
item_bot.location(),
item_bot.callnumber())
return mark_safe(output_html)
|
866e364257aae174a7ddecdaa94f5af1e9cbfcca
| 3,647,986
|
def query_ports(session, switch_resource_id, **kwargs):
"""
Retrieve multiple :class:`Port` objects from database.
switch_resource_id is optional
# TODO: Implement and document query_ports() correctly
"""
# TODO: Add csv output option to query_ports()
# Check all arguments before querying
Port.check_params(**kwargs)
# Query
pts = session.query(Port)
# Filter by switch if given
if switch_resource_id is not None:
sw = query_switch(session, switch_resource_id)
if sw is None:
raise ValueError('Given switch does not exist')
pts = pts.filter_by(_switch_id = sw._switch_id)
# Filter
for key, val in kwargs.items():
if key == 'vlans':
for vlan in val:
pts = pts.filter(Port._vlans.contains(vlan))
else:
raise NotImplementedError('query_ports() is not yet implemented')
return pts.all()
|
1e673024ab2da742d023f6ab1d1211a2f86c8a3b
| 3,647,987
|
def build_data_request(mac, request_type='current', interval=1, units='english'):
"""
Creates RainWise API request for Recent Data based on station mac, format (optional), and units (optional)
"""
# Check if interval requested is valid interval
if interval not in [1, 5, 10, 15, 30, 60]:
raise ValueError('Invalid Request: Parameter interval must be 1, 5, 10, 15, 30, or 60')
# Check if units requested are valid units
if units.lower() not in ['english', 'metric']:
raise ValueError('Invalid Request: Parameter units must be english or metric')
# Build request URL for current conditions
if request_type == 'current':
return f'http://api.rainwise.net/main/v1.4/get-data.php?mac={mac}&format=json'
# Build request URL for recent data
elif request_type == 'recent':
return f'http://api.rainwise.net/main/v1.4/get-recent.php?mac={mac}&interval={interval}&units={units}&format=json'
    raise ValueError("Invalid Request: Parameter request_type must be either 'current' or 'recent'")
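# Illustrative usage (added example, not from the original source): the MAC
# address below is a made-up placeholder.
if __name__ == "__main__":
    print(build_data_request("0123456789AB"))
    print(build_data_request("0123456789AB", request_type="recent", interval=15, units="metric"))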
|
733c20f5c67fe2c630427bfb70ab563df111558c
| 3,647,988
|
import pandas as pd
def load_acs_access_to_car() -> pd.DataFrame:
"""Function to merge the two files for the QOL outputs and do some standard renaming. Because
these are QOL indicators they remain in the same csv output with columns indicating year"""
df_0812 = pd.read_excel(
"./resources/ACS_PUMS/EDDT_ACS2008-2012.xlsx",
sheet_name="ACS08-12",
dtype={"Geog": str},
)
df_1519 = pd.read_excel(
"./resources/ACS_PUMS/EDDT_ACS2015-2019.xlsx",
sheet_name="ACS15-19",
dtype={"Geog": str},
)
df = pd.merge(df_0812, df_1519, on="Geog", how="left")
df = df.filter(regex="Geog|Wk16p|CWCar")
df = df.replace(
{
"Geog": {
"Bronx": "BX",
"Brooklyn": "BK",
"Manhattan": "MN",
"Queens": "QN",
"Staten Island": "SI",
"NYC": "citywide",
}
}
)
df.set_index("Geog", inplace=True)
return df
|
fa6d606ebeef142417f1ac47c18947d0de08065b
| 3,647,989
|
import os
def canonicalize(top_dir):
"""
Canonicalize filepath.
"""
return os.path.realpath(top_dir)
|
ad0eb534bed1ad656820de776a1845161bdafced
| 3,647,990
|
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates
def elastic_transform(image, alpha, sigma, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
assert len(image.shape) == 2
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
return map_coordinates(image, indices, order=1).reshape(shape)
|
e01660ecf753d7c33aa66786cf9f9db3b94cef49
| 3,647,991
|
import time
def convert_epoch_to_mysql_timestamp(epoch_timestamp):
"""
Converts a given epoch timestamp in seconds to the MySQL datetime format.
:param epoch_timestamp: The timestamp as seconds since epoch time
:return: The MySQL timestamp string in the format 'Y-m-d HH:MM:SS'
:rtype: str
"""
try:
epoch = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch_timestamp))
return epoch
except Exception as e:
print(e)
return None
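# Illustrative usage (added example, not from the original source): zero seconds
# since the epoch is the Unix epoch in UTC.
if __name__ == "__main__":
    print(convert_epoch_to_mysql_timestamp(0))  # 1970-01-01 00:00:00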
|
15647a816e638e7668e2e830ebc4f1c6fdb2f030
| 3,647,992
|
import six
def _ensure_eventlet(func):
"""Decorator that verifies we have the needed eventlet components."""
@six.wraps(func)
def wrapper(*args, **kwargs):
if not _utils.EVENTLET_AVAILABLE or greenthreading is None:
raise RuntimeError('Eventlet is needed to wait on green futures')
return func(*args, **kwargs)
return wrapper
|
4193b8d68ae45c13a3a88b1e4c7caba5572f16cf
| 3,647,993
|
import os
import time
import shutil
from general_functions import LogMessage
import filecmp
def ReplaceOldWithNewFile(orig_file='', new_temp_file=''):
"""
Compare original file and the new temp file ( contents and permissions).
If they are the same, just remove the temp version. ( maybe not needed, handled in calling function)
    If they are different, back up the original and then replace the orig_file with the new_temp_file.
Return code values:
0: No changes made
1: Changes made
"""
## Well I've broken this by moving it to another module.
## I need to read up on logging, and logging config files
# https://docs.python.org/3.8/howto/logging.html#configuring-logging
# If file exists,
try:
type(logMessage) ## check if logMessage is already set up
except:
logMessage=LogMessage() # test just using default log file
if os.path.exists(orig_file):
#content_matches=filecmp.cmp(orig_file,new_temp_file)
#permission_matches=os.stat(orig_file).st_mode == os.stat(new_temp_file).st_mode
#user_matches=os.stat(orig_file).st_uid == os.stat(new_temp_file).st_uid
#group_matches=os.stat(orig_file).st_gid == os.stat(new_temp_file).st_gid
orig_permission=os.stat(orig_file).st_mode
new_permission=os.stat(new_temp_file).st_mode
orig_user=os.stat(orig_file).st_uid
new_user=os.stat(new_temp_file).st_uid
orig_group=os.stat(orig_file).st_gid
new_group=os.stat(new_temp_file).st_gid
content_matches=filecmp.cmp(orig_file,new_temp_file)
permission_matches=orig_permission == new_permission
user_matches=orig_user == new_user
group_matches=orig_group == new_group
logMessage.info( 'Checking file ' + orig_file + '. content_matches:' + str(content_matches) + '; permission_matches:' + str(permission_matches) + '; user_matches:' + str(user_matches) + '; group_matches:' + str(group_matches))
if content_matches and permission_matches and user_matches and group_matches:
logMessage.info(orig_file + ' is unchanged.')
os.remove(new_temp_file)
return 0
else:
logMessage.info( orig_file + 'Permission: ' + str(orig_permission) + ', owner: ' + str(orig_user) + ', group :' + str(orig_group) )
logMessage.info( new_temp_file + 'Permission: ' + str(new_permission) + ', owner: ' + str(new_user) + ', group :' + str(new_group) )
# backup the original file
t = time.localtime()
backupfile=orig_file + time.strftime('%Y%m%d_%H%M%S', t)
shutil.copyfile(orig_file,backupfile)
else:
logMessage.info(orig_file + ' - does not exist. Creating new file.')
## Only got to here if does not match (ie new or different)
logMessage.info(orig_file + ' - has been amended. ( to match ' + new_temp_file + ' )')
#shutil.copyfile(new_temp_file, orig_file)
shutil.move(new_temp_file, orig_file)
## On some test servers this doesn't end up with correct ownership and permissions. So adding this to get round it
try:
os.chown(orig_file,new_user,new_group)
except PermissionError:
        logMessage.error('Unable to set ownership on ' + orig_file + '. Trying to change to chown ' + str(new_user) + ':' + str(new_group))
try:
os.chmod(orig_file,new_permission)
except:
        logMessage.error('Unable to set permissions on ' + orig_file + '. Trying to change to chmod ' + orig_file + ' ' + str(new_permission))
return 1
|
37799a6e8ed94f93a100911a32b2a146142171a2
| 3,647,994
|
def check_public_key(pk):
""" Checks if a given string is a public (or at least if it is formatted as if it is).
:param pk: ECDSA public key to be checked.
:type pk: hex str
:return: True if the key matches the format, raise exception otherwise.
:rtype: bool
"""
prefix = pk[0:2]
l = len(pk)
if prefix not in ["02", "03", "04"]:
raise Exception("Wrong public key format.")
if prefix == "04" and l != 130:
raise Exception(
"Wrong length for an uncompressed public key: " + str(l))
elif prefix in ["02", "03"] and l != 66:
raise Exception("Wrong length for a compressed public key: " + str(l))
else:
return True
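# Illustrative usage (added example, not from the original source): a dummy
# 33-byte compressed key (66 hex chars); only the format is checked, not whether
# the point actually lies on the curve.
if __name__ == "__main__":
    print(check_public_key("02" + "ab" * 32))  # True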
|
120b3e88a96db45e5e4df0996414448da8b84462
| 3,647,995
|
def empty_tree(input_list):
"""Recursively iterate through values in nested lists."""
for item in input_list:
if not isinstance(item, list) or not empty_tree(item):
return False
return True
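# Illustrative usage (added example, not from the original source): only nested
# lists count as "empty"; any non-list leaf makes the tree non-empty.
if __name__ == "__main__":
    print(empty_tree([[], [[], []]]))  # True
    print(empty_tree([[], [0]]))       # False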
|
1dceb351aac4db23b57394a531db38a3edf61a8c
| 3,647,996
|
def validate_config_params(optimo_url, version, access_key):
"""Validates and normalizes the parameters passed to
:class:`optimo.api.OptimoAPI` constructor.
:param optimo_url: string url of the optimoroute's service
:param version: ``int`` or ``str`` denoting the API version
:param access_key: string access key provided by optimoroute
:return: ``tuple`` of the, possibly adjusted, passed parameters.
:raises OptimoError: On providing incomplete or invalid config data
"""
if not optimo_url or not isinstance(optimo_url, basestring):
raise OptimoError("'optimo_url' must be a url string")
validate_url(optimo_url)
if not version or not isinstance(version, basestring) or not \
version.startswith('v'):
raise OptimoError("'version' must be a string denoting the API version "
"you want to use('v1', 'v2', etc")
if not access_key or not isinstance(access_key, basestring):
raise OptimoError("'access_key' must be the string access key provided "
"to you by optimoroute")
return optimo_url, version, access_key
|
94056115d999a7e6e97cd343f2fd40ae8f99a6d9
| 3,647,997
|
from collections import deque
def deque_and_stack():
"""Solution to exercise R-6.14.
Repeat the previous problem using the deque D and an initially empty
stack S.
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
0. Initial state Deque [1, 2, 3, 4, 5, 6, 7, 8]
Stack []
1. popright() 4 nums to S Deque [1, 2, 3, 4]
from D Stack [8, 7, 6, 5]
2. popright() 1 num from D, Deque [4, 1, 2, 3]
addleft() that num to D Stack [8, 7, 6, 5]
3. addright() 1 nums to D Deque [4, 1, 2, 3, 5]
from S Stack [8, 7, 6]
4. popleft() 1 num from D, Deque [1, 2, 3, 5, 4]
addright() that num to D Stack [8, 7, 6]
5. addright() 3 nums to D Deque [1, 2, 3, 5, 4, 6, 7, 8]
from S Stack []
"""
deq = deque([1, 2, 3, 4, 5, 6, 7, 8])
stack = ArrayStack()
for _ in range(4):
stack.push(deq.pop()) # Step 1
deq.appendleft(deq.pop()) # Step 2
deq.append(stack.pop()) # Step 3
deq.append(deq.popleft()) # Step 4
for _ in range(3):
deq.append(stack.pop()) # Step 5
return deq, stack
|
012b6d5916247c34688749d08156a65c5f9b5634
| 3,647,998
|
import re
def apply_subst(name, user):
"""
user.username forced in lowercase (VMware Horizon)
"""
name = re.sub(r'_SCIPER_DIGIT_', user.sciper_digit, name)
name = re.sub(r'_SCIPER_', user.sciper, name)
name = re.sub(r'_USERNAME_', user.username.lower(), name)
name = re.sub(r'_HOME_DIR_', user.home_dir, name)
name = re.sub(r'_GROUPNAME_', user.groupname, name)
name = re.sub(r'_DOMAIN_', user.domain, name)
name = re.sub(r'_UID_', user.uid, name)
name = re.sub(r'_GID_', user.gid, name)
name = re.sub(r'_FSTYPE_', user.automount_fstype, name)
name = re.sub(r'_HOST_', user.automount_host, name)
name = re.sub(r'_PATH_', user.automount_path, name)
name = re.sub(r'_OPTIONS_', user.automount_options, name)
return name
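# Illustrative usage (added example, not from the original source): the user
# record below is a hypothetical stand-in for the real user object.
if __name__ == "__main__":
    from types import SimpleNamespace
    demo_user = SimpleNamespace(
        sciper="123456", sciper_digit="6", username="JDoe", home_dir="/home/jdoe",
        groupname="staff", domain="example.org", uid="1000", gid="1000",
        automount_fstype="nfs", automount_host="files.example.org",
        automount_path="/export/jdoe", automount_options="rw")
    print(apply_subst("_USERNAME_@_DOMAIN_:_HOME_DIR_", demo_user))  # jdoe@example.org:/home/jdoe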
|
b2df5630cc63ecf0e8468e2eb19019ec4bd9ad2a
| 3,647,999
|