content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def train(x_mat: ndarray, k: int, *, max_iters: int = 10, initial_centroids: Iterable = None, history: bool = False):
    """Run k-means clustering.

    :param x_mat: feature matrix; m rows are samples, n columns are features
    :param k: number of clusters
    :param max_iters: maximum number of iterations
    :param initial_centroids: initial cluster centers; when not provided, k
        distinct samples are picked at random
    :param history: whether to also return the per-iteration centroid history
    :return: the final centroids; a row vector with each sample's centroid
        index; and, when ``history`` is True, the list of centroids computed
        at every iteration
    :raises ValueError: if centroids must be sampled and k exceeds the number
        of samples
    """
    x_mat = __t.r2m(x_mat)
    m, n = x_mat.shape
    if initial_centroids is None:
        if k > m:
            raise ValueError('k (%d) cannot exceed the number of samples (%d)' % (k, m))
        # Pick k distinct random samples as the starting centroids.
        rand_indices = np.arange(0, m)
        np.random.shuffle(rand_indices)
        initial_centroids = x_mat[rand_indices[:k], :]
    if not isinstance(initial_centroids, ndarray):
        initial_centroids = np.asarray(initial_centroids)
    idx = None
    centroids_history = None
    if history:
        centroids_history = [initial_centroids]
    # Standard Lloyd iteration: assign samples, then recompute centers.
    for _ in range(max_iters):
        idx = find_closest(x_mat, initial_centroids)
        initial_centroids = compute_centroids(x_mat, idx)
        if history:
            centroids_history.append(initial_centroids)
    if history:
        return initial_centroids, idx, centroids_history
    else:
        return initial_centroids, idx
| 25,500
|
def download4(url, user_agent='wswp', num_retries=2):
    """Download *url* and return the page HTML, or None on failure.

    Sends a custom User-agent header ("wswp": web scraping with python)
    and retries 5XX server errors up to ``num_retries`` times.
    """
    # Ported from Python 2 (urllib2 / print statements) to Python 3.
    import urllib.error
    import urllib.request
    print('Downloading:', url)
    headers = {'User-agent': user_agent}
    request = urllib.request.Request(url, headers=headers)
    try:
        html = urllib.request.urlopen(request).read()
    except urllib.error.URLError as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download4(url, user_agent, num_retries - 1)
    return html
| 25,501
|
def test_intersectionAndUnion_3classes() -> None:
    """
    Pairs (0,0), (1,1) and (2,2) each match exactly once, so the
    intersection histogram is [1,1,1].
    Union cannot be derived without knowing where the sets intersect:
    {0} union {0} -> {0}
    {0} union {1} -> {0,1}
    {2} union {2} -> {2}
    {1} union {1} -> {1}
    which yields class counts [2,2,1].
    """
    prediction = np.array([[2, 0], [1, 0]])
    ground_truth = np.array([[2, 0], [1, 1]])
    # Histograms contain the number of samples falling into each class bin.
    inter, union, tgt = iou_utils.intersectionAndUnion(
        prediction, ground_truth, K=3, ignore_index=255
    )
    for histogram in (inter, union, tgt):
        assert histogram.shape == (3,)
    assert np.allclose(inter, np.array([1, 1, 1]))
    assert np.allclose(tgt, np.array([1, 2, 1]))
    assert np.allclose(union, np.array([2, 2, 1]))
| 25,502
|
def testv1():
    """Runs the unit tests without test coverage."""
    suite = unittest.TestLoader().discover('./tests/api/v1', pattern='test*.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    # Shell convention: 0 on success, 1 on any failure/error.
    return 0 if outcome.wasSuccessful() else 1
| 25,503
|
def test_container_add_constructed_component(model_with_container: MockModel):
    """Adding a fresh Component via += wires up id, model and parent."""
    container = model_with_container.empty_container
    new_component = Component(name="Component")
    container += new_component
    assert new_component in container.components
    assert new_component.id != ""
    assert new_component.model is model_with_container
    assert new_component.parent is container
| 25,504
|
def test_api_create_article(api):
    """Test Apple Create Article API.

    Posts an article (article JSON + metadata + two images) through a mocked
    `requests.request` and verifies the multipart/form-data body layout,
    the target URL and the HMAC Authorization header.
    """
    response = api.create_article(
        {"title": "A Title"},
        {"key1": "value1"},
        {'image1.jpg': 'FFFDASFAFADADFA',
         'image2.jpg': 'AFFDASFAFADADFA'},
    )
    # `api` is assumed to be a fixture with requests mocked out — the repr of
    # the mocked response carries the chained call name.  TODO confirm fixture.
    assert "name='request().json()'" in repr(response)
    req_call = requests.request
    assert req_call.call_count == 1
    req_args = req_call.call_args[0]
    req_kw = req_call.call_args[1]
    assert req_args[0] == 'POST'
    assert req_args[1] == 'https://news-api.apple.com/channels/FAKE_CHANNEL/articles'
    assert 'Authorization' in req_kw['headers']
    assert 'HHMAC; key=FAKE_ID; signature=' in req_kw['headers']['Authorization']
    data_lines = req_kw['data'].split(b'\r\n')
    # Each part has 5 lines:
    # boundary, Content-Type, Content-Disposition, blank, body data.
    # Four parts (metadata, article.json, two images) => 20 lines,
    # plus the closing boundary => 21.
    assert len(data_lines) == 21
    # Part 1: metadata JSON.
    assert data_lines[1] == b'Content-Type: application/json'
    assert data_lines[2] == b'Content-Disposition: form-data; filename=metadata; size=18'
    assert data_lines[4] == ensure_binary(json.dumps({"key1": "value1"}), 'utf8')
    # Part 2: the article body.
    assert data_lines[6] == b'Content-Type: application/json'
    assert data_lines[7] == b'Content-Disposition: form-data; filename=article.json; size=20'
    assert data_lines[9] == ensure_binary(json.dumps({"title": "A Title"}), 'utf8')
    # Parts 3 and 4: the two images.
    assert data_lines[11] == b'Content-Type: image/jpeg'
    assert data_lines[12] == b'Content-Disposition: form-data; filename=image1.jpg; size=15'
    assert data_lines[14] == b'FFFDASFAFADADFA'
    assert data_lines[16] == b'Content-Type: image/jpeg'
    assert data_lines[17] == b'Content-Disposition: form-data; filename=image2.jpg; size=15'
    assert data_lines[19] == b'AFFDASFAFADADFA'
| 25,505
|
def test_API_tilejson(app, event):
    """Test /tilejson.json route."""
    # NOTE(review): this re-import shadows the `app` fixture parameter —
    # presumably intentional so the real handler is exercised; confirm.
    from cogeo_tiler.handler import app
    urlqs = urllib.parse.urlencode([("url", cog_path)])
    # 1) Missing `url` query parameter -> handler reports an error.
    event["path"] = "/tilejson.json"
    res = app(event, {})
    assert res["statusCode"] == 500
    headers = res["headers"]
    assert headers["Content-Type"] == "application/json"
    body = json.loads(res["body"])
    assert "url" in body["errorMessage"]
    # 2) Valid request with tile_scale=2 -> @2x tile URL template.
    event["path"] = "/tilejson.json"
    event["queryStringParameters"] = {"url": cog_path, "tile_scale": "2"}
    res = app(event, {})
    assert res["statusCode"] == 200
    headers = res["headers"]
    assert headers["Content-Type"] == "application/json"
    body = json.loads(res["body"])
    assert body["name"] == "cog.tif"
    assert body["tilejson"] == "2.1.0"
    assert body["tiles"][0] == (
        f"https://somewhere-over-the-rainbow.com/{{z}}/{{x}}/{{y}}@2x?{urlqs}"
    )
    assert len(body["bounds"]) == 4
    assert len(body["center"]) == 3
    assert body["minzoom"] == 6
    assert body["maxzoom"] == 8
    # test with tile_format -> extension appended to the template.
    event["path"] = "/tilejson.json"
    event["queryStringParameters"] = {"url": cog_path, "tile_format": "jpg"}
    res = app(event, {})
    assert res["statusCode"] == 200
    headers = res["headers"]
    assert headers["Content-Type"] == "application/json"
    body = json.loads(res["body"])
    assert body["tiles"][0] == (
        f"https://somewhere-over-the-rainbow.com/{{z}}/{{x}}/{{y}}@1x.jpg?{urlqs}"
    )
    # test with kwargs -> extra query params are URL-encoded into the template.
    event["path"] = "/tilejson.json"
    event["queryStringParameters"] = {"url": cog_path, "rescale": "-1,1"}
    res = app(event, {})
    assert res["statusCode"] == 200
    headers = res["headers"]
    assert headers["Content-Type"] == "application/json"
    body = json.loads(res["body"])
    assert body["tiles"][0] == (
        f"https://somewhere-over-the-rainbow.com/{{z}}/{{x}}/{{y}}@1x?rescale=-1%2C1&{urlqs}"
    )
| 25,506
|
def get_third_order_displacements(cell,
                                  symmetry,
                                  is_plusminus='auto',
                                  is_diagonal=False):
    """Create displacement dataset
    Note
    ----
    Atoms 1, 2, and 3 are defined as follows:
       Atom 1: The first displaced atom. Third order force constant
               between Atoms 1, 2, and 3 is calculated.
       Atom 2: The second displaced atom. Second order force constant
               between Atoms 2 and 3 is calculated.
       Atom 3: Force is measured on this atom.
    Parameters
    ----------
    cell : PhonopyAtoms
        Supercell
    symmetry : Symmetry
        Symmetry of supercell
    is_plusminus : str or bool, optional
        Type of displacements, plus only (False), always plus and minus (True),
        and plus and minus depending on site symmetry ('auto').
    is_diagonal : bool, optional
        Whether allow diagonal displacements of Atom 2 or not
    Returns
    -------
    dict
        Data structure is like:
        {'natom': 64,
         'cutoff_distance': 4.000000,
         'first_atoms':
          [{'number': atom1,
            'displacement': [0.03, 0., 0.],
            'second_atoms': [ {'number': atom2,
                               'displacement': [0., -0.03, 0.],
                               'distance': 2.353},
                              {'number': ... }, ... ] },
           {'number': atom1, ... } ]}
    """
    positions = cell.get_scaled_positions()
    # Column-vector convention: lattice vectors are the columns of `lattice`.
    lattice = cell.get_cell().T
    # Least displacements of first atoms (Atom 1) are searched by
    # using respective site symmetries of the original crystal.
    # 'is_diagonal=False' below is made intentionally to expect
    # better accuracy.
    disps_first = get_least_displacements(symmetry,
                                          is_plusminus=is_plusminus,
                                          is_diagonal=False)
    symprec = symmetry.get_symmetry_tolerance()
    dds = []
    for disp in disps_first:
        # Each entry packs [atom_index, dx, dy, dz].
        atom1 = disp[0]
        disp1 = disp[1:4]
        site_sym = symmetry.get_site_symmetry(atom1)
        dds_atom1 = {'number': atom1,
                     'direction': disp1,
                     'second_atoms': []}
        # Reduced site symmetry at the first atom with respect to
        # the displacement of the first atoms.
        reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
        # Searching orbits (second atoms) with respect to
        # the first atom and its reduced site symmetry.
        second_atoms = get_least_orbits(atom1,
                                        cell,
                                        reduced_site_sym,
                                        symprec)
        for atom2 in second_atoms:
            dds_atom2 = get_next_displacements(atom1,
                                               atom2,
                                               reduced_site_sym,
                                               lattice,
                                               positions,
                                               symprec,
                                               is_diagonal)
            # Record the shortest periodic-image distance between the pair;
            # [0] takes the first of the equivalent minimal vectors.
            min_vec = get_equivalent_smallest_vectors(atom1,
                                                      atom2,
                                                      cell,
                                                      symprec)[0]
            min_distance = np.linalg.norm(np.dot(lattice, min_vec))
            dds_atom2['distance'] = min_distance
            dds_atom1['second_atoms'].append(dds_atom2)
        dds.append(dds_atom1)
    return dds
| 25,507
|
def test_atomic_positive_integer_enumeration_3_nistxml_sv_iv_atomic_positive_integer_enumeration_4_4(mode, save_output, output_format):
    """
    Type atomic/positiveInteger is restricted by facet enumeration.

    Binds the NIST schema/instance pair and checks the generated bindings
    round-trip correctly (driven by the mode/save_output/output_format
    fixtures).
    """
    assert_bindings(
        schema="nistData/atomic/positiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-positiveInteger-enumeration-4.xsd",
        instance="nistData/atomic/positiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-positiveInteger-enumeration-4-4.xml",
        class_name="NistschemaSvIvAtomicPositiveIntegerEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
| 25,508
|
def get_result_type(action):
    """Gets the corresponding ROS action result type.
    Args:
        action: ROS action name.
    Returns:
        Result message type. None if not found.
    """
    msg_type = rostopic.get_topic_type("{}/result".format(action))[0]
    if msg_type is None:
        # No such topic: honor the documented "None if not found" contract
        # (the old code crashed with a TypeError on the slice below).
        return None
    # Replace the trailing 'ActionResult' with 'Result'.
    return msg_type[:-len("ActionResult")] + "Result"
| 25,509
|
def get_turbulence(sequence):
    """
    Computes turbulence for a given sequence, based on `Elzinga & Liefbroer's 2007 definition <https://www.researchgate.net/publication/225402919_De-standardization_of_Family-Life_Trajectories_of_Young_Adults_A_Cross-National_Comparison_Using_Sequence_Analysis>`_ which is also implemented in the `TraMineR <http://traminer.unige.ch/doc/seqST.html>`_ sequence analysis library.

    Combines the number of distinct subsequences (phi) with the variance of
    the spell durations.

    NOTE(review): `statistics.variance` needs at least two spells — a
    constant sequence (single spell) raises StatisticsError.  Confirm
    callers guarantee >= 2 spells.

    Example
    --------
    >>> sequence = [1,1,2,2,3]
    >>> ps.get_turbulence(sequence)
    5.228...
    """
    phi = get_ndistinct_subsequences(sequence)
    #print('phi', phi)
    # Spell durations: lengths of the runs of identical states.
    state_durations = [value for key, value in get_spells(sequence)]
    #print('durations', state_durations)
    #print('mean duration', statistics.mean(state_durations))
    variance_of_state_durations = statistics.variance(state_durations)
    #print('variance', variance_of_state_durations)
    tbar = statistics.mean(state_durations)
    # Maximum attainable duration variance given the sequence length.
    maximum_state_duration_variance = (len(sequence) - 1) * (1 - tbar) ** 2
    #print('smax', maximum_state_duration_variance)
    top_right = maximum_state_duration_variance + 1
    bot_right = variance_of_state_durations + 1
    turbulence = math.log2(phi * (top_right / bot_right))
    #print('turbulence', turbulence)
    return turbulence
| 25,510
|
def reflect(array, holder=1):
    """
    Return a copy of a 2D np array mirrored across the y-axis.
    Args:
        array: array to reflect (first column holds x coordinates)
        holder: optimizer-friendly switch; values <= 0.5 leave the copy
            unreflected.
    Returns:
        The (possibly) reflected copy; the input array is never modified.
    """
    mirrored = array.copy()
    if holder > 0.5:
        mirrored[:, 0] *= -1
    return mirrored
| 25,511
|
def create_and_return_logger(logger_name, filename="log"):
    """
    Build a logger that writes both to stdout and to a timestamped file
    under ./logs (created on demand).
    Accepts: Logger name for the calling script, and a filename for the log.
    Returns: Logger object and Log file path.
    """
    LOG_FILE_DIR = os.getcwd() + "/logs"
    os.makedirs(LOG_FILE_DIR, exist_ok=True)
    LOG_FILE = f"{LOG_FILE_DIR}/(unknown)_{datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')}.log"
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(processName)s %(message)s"
    )
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    # Drop stale handlers so repeated calls don't duplicate output.
    logger.handlers.clear()
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(f"{LOG_FILE}")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger, LOG_FILE
| 25,512
|
def _get_all_entries(entry_list, keep_top_dir):
    """
    Return the list of entries (files, directories) that should be copied.
    With keep_top_dir set, the top-level entries are used verbatim; otherwise
    directories are replaced by their direct children.
    """
    localized = [path.local(entry) for entry in entry_list]
    if keep_top_dir:
        return localized
    collected = []
    for entry in localized:
        if entry.isdir():
            collected.extend(entry.listdir())
        else:
            collected.append(entry)
    return collected
| 25,513
|
def process_exclude_items(exclude_items=None):
    """
    Process the exclude items to get list of directories to NOT be scanned
    :param exclude_items: iterable of raw entries; blank lines and '#'
        comments are skipped.  Defaults to no items.
    :return: a list of directories to not be scanned if any, otherwise an empty list
    """
    # A mutable default ([]) is a classic Python pitfall; use None instead.
    if exclude_items is None:
        exclude_items = []
    logger.debug("Parsing exclude items ...")
    parsed_list = []
    for item in exclude_items:
        item = item.strip()
        if not item or item.startswith('#'):
            continue
        exclude_item = os.path.normpath(item).replace('//', '/')
        if os.path.exists(exclude_item):
            # ignore the exclude_item if its not a full directory path
            if exclude_item == '/':
                # Found / in exclude list. No need to get the other items because / trumps all
                logger.debug("Found root directory in the exclude list. Expanding it to all toplevel directories ...")
                parsed_list = get_toplevel_dirs()
                break
            elif not exclude_item.startswith('/'):
                logger.debug("Skipping partial directory path '%s' ...", exclude_item)
                continue
            else:
                parsed_list.append(exclude_item)
        else:
            logger.debug("Skipping missing item '%s' ...", exclude_item)
    if not parsed_list:
        logger.debug("No items specified to be excluded")
    else:
        # Remove any duplicates and any children of parent directories before returning
        parsed_list = remove_child_items(sorted(list(set(parsed_list))))
    logger.debug("Exclude items: %s", parsed_list)
    return parsed_list
| 25,514
|
def absolute_name_scope(scope, reuse=tf.AUTO_REUSE):
    """Builds an absolute tf.name_scope relative to the current_scope.
    This is helpful to reuse nested name scopes.
    E.g. The following will happen when using regular tf.name_scope:
      with tf.name_scope('outer'):
        with tf.name_scope('inner'):
          print(tf.constant(1))  # Will print outer/inner/Const:0
      with tf.name_scope('outer'):
        with tf.name_scope('inner'):
          print(tf.constant(1))  # Will print outer/inner_1/Const:0
    With absolute_name_scope:
      with absolute_name_scope('outer'):
        with absolute_name_scope('inner'):
          print(tf.constant(1))  # Will print outer/inner/Const:0
      with absolute_name_scope('outer'):
        with absolute_name_scope('inner'):
          print(tf.constant(1))  # Will print outer/inner/Const_1:0

    NOTE(review): despite the name_scope wording above, this returns a
    tf.variable_scope context manager — confirm that is intended.  The
    trailing '/' is what makes TF treat the scope name as absolute.
    """
    current_scope = tf.get_default_graph().get_name_scope()
    if not current_scope:
        if scope.endswith('/'):
            # Already absolute-form; pass through unchanged.
            scope = tf.variable_scope(scope, reuse=reuse)
        else:
            scope = tf.variable_scope('{}/'.format(scope), reuse=reuse)
    else:
        # Re-anchor under the scope we are already inside of.
        scope = tf.variable_scope('{}/{}/'.format(current_scope, scope), reuse=reuse)
    return scope
| 25,515
|
def regina_edge_orientation_agrees(tet, vert_pair):
    """
    Given tet and an ordered pair of (regina) vert nums of that tet, does this ordering
    agree with regina's ordering of the verts of that edge of the triangulation

    NOTE(review): the final comparison is list == vert_pair, which is always
    False if callers pass a tuple — presumably vert_pair is a list; confirm.
    """
    edge_num = vert_pair_to_edge_num[tuple(vert_pair)]
    # faceMapping(1, e) gives the permutation sending edge-endpoint indices
    # 0 and 1 to tet vertex numbers.
    mapping = tet.faceMapping(1, edge_num)
    map_order = [mapping[0], mapping[1]]
    # Sanity check: both orderings must involve the same two vertices.
    assert set(map_order) == set(vert_pair)
    return map_order == vert_pair
| 25,516
|
async def populate_challenge(
    challenge_status: str = "process",
    is_public: bool = True,
    user_id: Optional[UUID] = USER_UUID,
    challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
    """Populate challenge for routes testings.

    Creates (or fetches) a user, track, playlist and a Challenge whose
    end/vote timestamps encode the requested lifecycle stage:
    "process" (default) — both deadlines in the future;
    "vote" — challenge over, voting still open;
    "end" — both deadlines in the past.
    """
    if not user_id:
        # No fixed id requested: generate a fresh user.
        user_id = uuid4()
    user: User = await populate_user(user_id=user_id)
    track, _ = await Track.get_or_create(test_track_info)
    await populate_playlist()
    # Defaults correspond to the "process" stage.
    challenge_end = datetime.utcnow() + timedelta(days=1)
    vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "vote":
        challenge_end = datetime.utcnow() - timedelta(days=1)
        vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "end":
        challenge_end = datetime.utcnow() - timedelta(days=2)
        vote_end = datetime.utcnow() - timedelta(days=1)
    challenge, _ = await Challenge.get_or_create(
        id=challenge_id,
        name="test",
        challenge_end=challenge_end,
        vote_end=vote_end,
        is_public=is_public,
        owner=user,
        track=track,
    )
    # The owner always participates in their own challenge.
    await challenge.participants.add(user)
    return challenge
| 25,517
|
def is_common_prefix(words: List[str], length: int) -> bool:
    """Binary Search helper: do all words share the first word's
    `length`-character prefix?"""
    candidate: str = words[0][:length]
    return all(word.startswith(candidate) for word in words[1:])
| 25,518
|
def freq_upsample(s, upsample):
    """ padding in frequency domain, should be used with ifft so that
    signal is upsampled in time-domain.
    Args:
        s : frequency domain signal
        upsample : an integer indicating factor of upsampling.
    Returns:
        padded signal
    """
    if upsample == 1:
        return s
    assert isinstance(upsample, int) and upsample > 1
    length = len(s)
    if length % 2 == 0:
        # Even length: the Nyquist bin is split evenly between the two
        # halves of the padded spectrum.
        # NOTE: integer division — `length / 2` is a float in Python 3 and
        # would crash when used as a slice index.
        half = length // 2
        return upsample * np.concatenate(
            (s[:half], np.array([s[half] / 2.0]),
             np.zeros(length * (upsample - 1) - 1),
             np.array([s[half] / 2.0]), s[half + 1:]))
    else:
        half = length // 2 + 1
        return upsample * np.concatenate(
            (s[:half], np.zeros(length * (upsample - 1)), s[half:]))
| 25,519
|
def test_eager_fill():
    """
    Test Fill op is callable
    """
    op = data_trans.Fill(3)
    result = op([4, 5, 6, 7])
    assert np.array_equal(result, np.array([3, 3, 3, 3]))
| 25,520
|
def get_cryptowatch_exchanges():
    """
    -> cryptowat.ch
    for information purposes
    :return:
    """
    currency_at_exchange = {}
    for fpath in glob.glob("../input/currency_info/" + "*.json"):
        with open(fpath, 'r') as fin:
            data = json.load(fin)
        # Collect the unique exchanges listing this currency.
        exchanges = {market['exchange'] for market in data['result']['markets']['base']}
        currency_at_exchange[data['result']['symbol']] = list(exchanges)
    save_as_json(output=currency_at_exchange, as_array=False, filename="../input/currency_at_exchange.json")
| 25,521
|
def test_parse_06():
    """Parse key with no values."""
    parsed = hexaparse([':::key1', 'value1', 'value2', ':::key2'])
    assert parsed == [{'key1': ['value1', 'value2'], 'key2': []}]
| 25,522
|
def write_pc(filename, xyz, rgb=None):
    """
    write into a ply file
    ref.:https://github.com/loicland/superpoint_graph/blob/ssp%2Bspg/partition/provider.py
    """
    if rgb is None:
        # Default to white; len() works for plain lists as well as arrays.
        rgb = np.full((len(xyz), 3), 255, dtype=np.int32)
    if not isinstance(xyz, (np.ndarray, np.generic)):
        xyz = np.array(xyz, np.float32)
    fields = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
              ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    vertices = np.empty(len(xyz), dtype=fields)
    for axis in range(3):
        vertices[fields[axis][0]] = xyz[:, axis]
        vertices[fields[axis + 3][0]] = rgb[:, axis]
    PlyData([PlyElement.describe(vertices, 'vertex')], text=True).write(filename)
| 25,523
|
def streaming_parsing_covering(groundtruth_categories,
                               groundtruth_instances,
                               predicted_categories,
                               predicted_instances,
                               num_classes,
                               max_instances_per_category,
                               ignored_label,
                               offset,
                               normalize_by_image_size=True,
                               name=None):
  """Aggregates the covering across calls with different input tensors.
  See tf.metrics.* functions for comparable functionality and usage.
  Args:
    groundtruth_categories: A 2D uint16 tensor of groundtruth category labels.
    groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels.
    predicted_categories: A 2D uint16 tensor of predicted category labels.
    predicted_instances: A 2D uint16 tensor of predicted instance labels.
    num_classes: Number of classes in the dataset as an integer.
    max_instances_per_category: The maximum number of instances for each class
      as an integer or integer tensor.
    ignored_label: The class id to be ignored in evaluation as an integer or
      integer tensor.
    offset: The maximum number of unique labels as an integer or integer tensor.
    normalize_by_image_size: Whether to normalize groundtruth region areas by
      image size. If True, groundtruth instance areas and weighted IoUs will be
      divided by the size of the corresponding image before accumulated across
      the dataset.
    name: An optional variable_scope name.
  Returns:
    coverings: A tensor of shape `[3, num_classes]`, where (1) per class
      coverings, (2) per class sum of weighted IoUs, and (3) per class sum of
      groundtruth region areas are saved in the perspective rows.
    update_ops: List of operations that update the running overall parsing
      covering.
  Raises:
    RuntimeError: If eager execution is enabled.
  """
  # This is a TF1-style streaming metric; it builds graph state and cannot
  # run eagerly.
  if tf.executing_eagerly():
    raise RuntimeError('Cannot aggregate when eager execution is enabled.')
  input_args = [
      tf.convert_to_tensor(groundtruth_categories, tf.uint16),
      tf.convert_to_tensor(groundtruth_instances, tf.uint16),
      tf.convert_to_tensor(predicted_categories, tf.uint16),
      tf.convert_to_tensor(predicted_instances, tf.uint16),
      tf.convert_to_tensor(num_classes, tf.int32),
      tf.convert_to_tensor(max_instances_per_category, tf.int32),
      tf.convert_to_tensor(ignored_label, tf.int32),
      tf.convert_to_tensor(offset, tf.int32),
      tf.convert_to_tensor(normalize_by_image_size, tf.bool),
  ]
  return_types = [
      tf.float64,
      tf.float64,
  ]
  with tf.variable_scope(name, 'streaming_parsing_covering', input_args):
    # The per-batch statistics are computed in Python via py_func; only the
    # running totals live in TF variables.
    covering_results = tf.py_func(
        _parsing_covering_helper, input_args, return_types, stateful=False)
    weighted_iou_per_class, gt_area_per_class = tuple(covering_results)
    total_weighted_iou_per_class, updated_weighted_iou_per_class = (
        _running_total(
            weighted_iou_per_class, [num_classes],
            name='weighted_iou_per_class_total'))
    total_gt_area_per_class, updated_gt_area_per_class = _running_total(
        gt_area_per_class, [num_classes], name='gt_area_per_class_total')
    # Safe divide: classes with zero groundtruth area get covering 0.
    covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class,
                                             total_gt_area_per_class)
    coverings = tf.stack([
        covering_per_class,
        total_weighted_iou_per_class,
        total_gt_area_per_class,
    ],
                         axis=0)
    update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class]
  return coverings, update_ops
| 25,524
|
def get_rxn_lookup(medObj:Union[m.Medication, m.LocalMed, m.NDC]):
    """
    DEPRECATED
    Lookup RxCUI for codes from a different source
    :param medObj: medication object whose non-RxNorm CUIs should be resolved
    :return: (success_count, errors) — number of sources with at least one
        resolved code, and a dict {source: [unresolved codes]} (empty list
        when nothing failed)
    """
    if isinstance(medObj, m.RxCUI):
        # Already an RxNorm concept — nothing to look up.
        smores_error('TBD')
        return 0, []
    success_count, errors = 0, []
    # All linked CUIs except RXNORM itself and parent links, with objects.
    non_rxc_dict = medObj.get_cui_all(omit=['PARENT', 'RXNORM'], inc_obj=True)
    _e = {}
    if len(non_rxc_dict) > 0:
        for src in non_rxc_dict:
            _src_e = []   # codes from this source that failed to resolve
            _src_s = 0    # number of successful resolutions for this source
            for medC, medO in non_rxc_dict[src].items():
                rxc_l = medO.get_linked_cui('RXNORM')
                for _o in rxc_l:
                    if _o is None:
                        _src_e.append(medC)
                    else:
                        _src_s += 1
                        medObj.add_cui(_o)
            # A source counts as one success regardless of how many codes hit.
            success_count += 1 if _src_s > 0 else 0
            if len(_src_e) > 0:
                _e[src] = _src_e
    if len(_e) > 0:
        errors = _e
    return success_count, errors
| 25,525
|
def log(ctx: ContextObject, number: int, since: str, until: str, author: str, revision_range: str, paths: Tuple[click.Path]) -> None:
    """Show commit log.

    REVISION_RANGE optional branch, tag or hash to start viewing log from. If of the form <hash>..<hash> only show log
    for given range

    PATHS optional list of paths. If given, only show commits which affected the given paths
    """
    end = None
    if not revision_range:
        start = ctx.nessie.get_default_branch()
    elif ".." in revision_range:
        start, end = revision_range.split("..")
    else:
        start = revision_range
    log_result = show_log(ctx.nessie, start, number, since, until, author, end, paths)
    if ctx.json:
        click.echo(CommitMetaSchema().dumps(log_result, many=True))
    else:
        click.echo_via_pager(_format_log_result(entry) for entry in log_result)
| 25,526
|
def identity(dims):
    """
    Create an identity linear operator
    :param dims: array of dimensions
    :return: identity operator built by ``identity_create`` (helper defined
        elsewhere in this module)
    """
    # Normalize scalar/1-D input to the expected dimension structure first.
    dims = expand_dims(dims)
    return identity_create(dims)
| 25,527
|
def _gen_samples_2d(enn_sampler: testbed_base.EpistemicSampler,
                    x: chex.Array,
                    num_samples: int,
                    categorical: bool = False) -> pd.DataFrame:
  """Generate posterior samples at x (not implemented for all posterior)."""
  rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
  frames = []
  for seed in range(num_samples):
    net_out = enn_sampler(x, next(rng))
    if categorical:
      y = jax.nn.softmax(net_out)[:, 1]
    else:
      y = net_out[:, 0]
    frames.append(
        pd.DataFrame({'x0': x[:, 0], 'x1': x[:, 1], 'y': y, 'seed': seed}))
  return pd.concat(frames)
| 25,528
|
def rules(r_index, c_index, lives, some_board, duplicate_board):
    """Apply Conway's Rules to one cell of a board
    Args:
        r_index (int): Current row index
        c_index (int): Current column index
        lives (int): Number of ALIVE cells around current position
        some_board (List of lists of strings): Board used to determine rule
        duplicate_board (List of lists of strings): Board used to apply rule
    Returns:
        [List of lists of strings]: Board used to apply rule (modified board)
    """
    cell = some_board[r_index][c_index]
    if cell == ALIVE and (lives < 2 or lives > 3):
        # Under- or over-population kills a live cell.
        duplicate_board[r_index][c_index] = DEAD
    elif cell != ALIVE and lives == 3:
        # Exactly three live neighbours revive a dead cell.
        duplicate_board[r_index][c_index] = ALIVE
    return duplicate_board
| 25,529
|
def readfq(filehandle):
    """Fastq iterator.
    Args:
        filehandle (file): open file handle
    Yields:
        Fastq
    """
    # Drop blank lines and strip line endings before grouping.
    fqclean = (x.strip("\r\n") for x in filehandle if x.strip())
    while True:
        rd = [x for x in islice(fqclean, 4)]
        if not rd:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; a plain return ends iteration.
            return
        # A record must be exactly 4 non-empty lines.
        assert all(rd) and len(rd) == 4
        yield Fastq(rd)
| 25,530
|
async def test_form_import(hass):
    """Test we get the form with import source."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    harmonyapi = _get_mock_harmonyapi(connect=True)
    # Patch out the Harmony hub API and the component setup hooks so the
    # flow can run without hardware or network.
    with patch(
        "homeassistant.components.harmony.util.HarmonyAPI", return_value=harmonyapi,
    ), patch(
        "homeassistant.components.harmony.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.harmony.async_setup_entry", return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                "host": "1.2.3.4",
                "name": "friend",
                "activity": "Watch TV",
                "delay_secs": 0.9,
                "activity_notify": True,
                "unique_id": "555234534543",
            },
        )
    assert result["result"].unique_id == "555234534543"
    assert result["type"] == "create_entry"
    assert result["title"] == "friend"
    # Note: "unique_id" is intentionally absent from the entry data below.
    assert result["data"] == {
        "host": "1.2.3.4",
        "name": "friend",
        "activity": "Watch TV",
        "delay_secs": 0.9,
        "activity_notify": True,
    }
    # It is not possible to import options at this time
    # so they end up in the config entry data and are
    # used a fallback when they are not in options
    await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
| 25,531
|
def get_describe_tasks(cluster_name, tasks_arns):
    """Get information about a list of tasks."""
    response = ecs_client().describe_tasks(cluster=cluster_name, tasks=tasks_arns)
    # Default to an empty list when the response has no "tasks" key.
    return response.get("tasks", [])
| 25,532
|
def train(log_path, radar_product, eval_increment=5,
          num_iterations=2500, checkpoint_frequency=100, lr=.0001,
          model_name=utils.ML_Model.Shallow_CNN, dual_pol=True,
          high_memory_mode=False, num_temporal_data=0):
    """Train the shallow CNN model on a single radar product.
    Args:
        log_path: The location of the save directory. The model checkpoints,
            model weights, and the tensorboard events are all saved in this
            directory.
        radar_product: The radar product the model is training on. This should
            be a value of type utils.Radar_Products.
        eval_increment: How frequently the model prints checks validation result
        num_iterations: The number of training iterations the model will run.
        checkpoint_frequency: How many training iterations should the model
            perform before saving out a checkpoint of the model training.
        lr: The learning rate of the model, this value must be between 0 and 1.
            e.g. .1, .05, .001
        model_name: Select the model to train. Must be of type utils.ML_Model
        dual_pol: True if data training on dual polarization radar data, false
            when training on legacy data.
        high_memory_mode: True if training in high memory mode. High memory
            mode reduces the amount of IO operations by keeping all the data in
            memory during training. Not recommended for computers with fewer
            than 8 GB of memory.
        num_temporal_data: Number of temporal products on either side of the
            sample for the temporal model (input depth is 2 * n + 1).
    """
    # Ported from Python 2: print statements -> print(), e.message -> str(e).
    save_file = ml_utils.KERAS_SAVE_FILE.format(radar_product.fullname, '{}')
    checkpoint_path = log_path + ml_utils.CHECKPOINT_DIR
    if not os.path.exists(checkpoint_path):
        os.makedirs(os.path.dirname(checkpoint_path))
    # Pick the batch generator / network matching the requested model.
    if model_name == utils.ML_Model.Shallow_CNN:
        batch_generator = BatchGenerator.Single_Product_Batch_Generator(
            ml_label_csv=settings.LABEL_CSV,
            ml_split_csv=settings.ML_SPLITS_DATA,
            high_memory_mode=high_memory_mode)
        model = keras_model.build_model(inputDimensions=(240, 240, 1), lr=lr)
    elif model_name == utils.ML_Model.Shallow_CNN_All:
        batch_generator = BatchGenerator.Multiple_Product_Batch_Generator(
            ml_label_csv=settings.LABEL_CSV,
            ml_split_csv=settings.ML_SPLITS_DATA,
            high_memory_mode=high_memory_mode)
        model = keras_model.build_model(inputDimensions=(240, 240, 4), lr=lr)
    else:
        batch_generator = BatchGenerator.Temporal_Batch_Generator(
            ml_label_csv=settings.LABEL_CSV,
            ml_split_csv=settings.ML_SPLITS_DATA,
            high_memory_mode=False)
        model = keras_model.build_model(
            inputDimensions=(240, 240, num_temporal_data * 2 + 1), lr=lr)
    # Setup callbacks
    callback = TensorBoard(log_path)
    callback.set_model(model)
    train_names = ['train_loss', 'train_accuracy']
    val_names = ['val_loss', 'val_accuracy']
    progress_string = '{} Epoch: {} Loss: {} Accuracy {}'
    for batch_no in range(num_iterations):
        try:
            x, y, _ = batch_generator.get_batch(
                ml_set=utils.ML_Set.training,
                dualPol=dual_pol,
                radar_product=radar_product,
                num_temporal_data=num_temporal_data)
            train_logs = model.train_on_batch(x, y)
            print(progress_string.format(utils.ML_Set.training.fullname,
                                         batch_no,
                                         train_logs[0], train_logs[1]))
            ml_utils.write_log(callback, train_names, train_logs, batch_no)
        except Exception as e:
            # Best-effort training loop: log the failed batch and continue.
            print(str(e))
        if (batch_no % eval_increment == 0):
            model.save_weights(log_path + save_file.format(''))
            try:
                x_, y_, _ = batch_generator.get_batch(
                    ml_set=utils.ML_Set.validation,
                    dualPol=dual_pol,
                    radar_product=radar_product,
                    num_temporal_data=num_temporal_data)
                val_logs = model.test_on_batch(x_, y_)
                ml_utils.write_log(callback, val_names, val_logs, batch_no)
                print(progress_string.format(utils.ML_Set.validation.fullname,
                                             batch_no,
                                             val_logs[0], val_logs[1]))
            except Exception as e:
                print(str(e))
        if batch_no % checkpoint_frequency == 0 \
                or batch_no == num_iterations - 1:
            model.save_weights(
                os.path.join(checkpoint_path, save_file.format(batch_no)))
    # NOTE(review): saves to the CWD-relative, still-templated file name —
    # confirm this final save is intentional.
    model.save_weights(save_file)
| 25,533
|
def make_bcc110(latconst=1.0):
    """
    Make a cell of bcc structure with z along [110].
    """
    system = NAPSystem(specorder=_default_specorder)
    # Orthorhombic cell: y and z are sqrt(2) long relative to x.
    system.set_lattice(latconst,
                       np.array([1.0, 0.0, 0.0]),
                       np.array([0.0, 1.414, 0.0]),
                       np.array([0.0, 0.0, 1.414]))
    species = _default_specorder[0]
    positions = [[0.00, 0.00, 0.00],
                 [0.00, 0.50, 0.50],
                 [0.50, 0.50, 0.00],
                 [0.50, 0.00, 0.50]]
    velocities = [[0., 0., 0.] for _ in range(4)]
    forces = [[0., 0., 0.] for _ in range(4)]
    system.add_atoms([species] * 4, positions, velocities, forces)
    return system
| 25,534
|
def test_config(ctx):
    """
    Sanity-check the AWS setup: that the required fields are filled in,
    boto works, and the configured s3 bucket is readable and writable
    (also by the indicated role).
    """
    sts = boto3.client("sts")
    account_id = sts.get_caller_identity()["Account"]
    print("The accountID is ", account_id)
    # make sure the bucket exists
    #config = pywren.wrenconfig.default()
| 25,535
|
def mode_mods_to_int(mode: str) -> int:
    """Converts mode_mods (str) to mode_mods (int)."""
    # NOTE: This is a temporary function to convert the leaderboard mode to an int.
    # It will be removed when the site is fully converted to use the new
    # stats table.
    known_modes = (
        'vn_std', 'vn_taiko', 'vn_catch', 'vn_mania',
        'rx_std', 'rx_taiko', 'rx_catch',
        'ap_std'
    )
    try:
        return known_modes.index(mode)
    except ValueError:
        # Unrecognised mode strings fall back to 0 (vanilla standard).
        return 0
| 25,536
|
def sender(sim, cable):
    """A process which randomly generates messages."""
    while True:
        # wait for next transmission
        sim.sleep(5)
        message = 'Sender sent this at %d' % sim.now
        cable.put(message)
| 25,537
|
def ntu_tranform_skeleton(test):
    """
    Canonicalise a skeleton sequence into a body-centred coordinate frame.

    Builds an orthonormal basis from the first frame — origin at joint 0,
    first axis along joint 0 -> joint 1, second axis from the (orthogonalised)
    joint 16 -> joint 12 vector — and re-expresses all 25 joints of every
    frame in that basis. Frames flagged as containing a missing joint are
    dropped. (Joint indices follow the NTU 25-joint layout — presumably
    spine/hip joints; confirm against the dataset docs.)

    :param test: frames of skeleton within a video sample; array-like of
        shape (n_frames, 75), i.e. 25 joints x (x, y, z) per frame.
    :return: list of retained, transformed frames (each row of length 75).
    """
    remove_frame = False
    test = np.asarray(test)
    transform_test = []
    # Origin of the new frame: joint 0 of the first frame.
    d = test[0, 0:3]
    # First basis vector: unit vector from joint 0 to joint 1.
    v1 = test[0, 1 * 3:1 * 3 + 3] - test[0, 0 * 3:0 * 3 + 3]
    v1 = v1 / np.linalg.norm(v1)
    # Second direction: joint 16 -> joint 12; nudged off exact zero to
    # avoid dividing by a zero norm below.
    v2_ = test[0, 12 * 3:12 * 3 + 3] - test[0, 16 * 3:16 * 3 + 3]
    if np.equal(np.sum(v2_), 0):
        v2_ += 1e-6
    # Gram-Schmidt step: remove the v1 component so v2 is orthogonal to v1.
    proj_v2_v1 = np.dot(v1.T, v2_) * v1 / np.linalg.norm(v1)
    v2 = v2_ - np.squeeze(proj_v2_v1)
    v2 = v2 / np.linalg.norm(v2)
    # Third basis vector completes the right-handed system.
    v3 = np.cross(v2, v1) / np.linalg.norm(np.cross(v2, v1))
    v1 = np.reshape(v1, (3, 1))
    v2 = np.reshape(v2, (3, 1))
    v3 = np.reshape(v3, (3, 1))
    # Rotation matrix whose columns are the new basis vectors.
    R = np.hstack([v2, v3, v1])
    for i in range(test.shape[0]):
        xyzs = []
        for j in range(25):
            # NOTE(review): `.all() == 0` is True whenever ANY coordinate of
            # the joint is zero, not only when the whole joint is zero. If
            # the intent is "joint missing (all zeros)", this should be
            # `not test[i][j * 3:j * 3 + 3].any()` — confirm before changing.
            if test[i][j * 3:j * 3 + 3].all() == 0:
                remove_frame = True
                break
            # Change of basis: R^-1 @ (joint - origin).
            xyz = np.squeeze(np.matmul(np.linalg.inv(R), np.reshape(test[i][j * 3:j * 3 + 3] - d, (3, 1))))
            xyzs.append(xyz)
        if not remove_frame:
            xyzs = np.reshape(np.asarray(xyzs), (-1, 75))
            transform_test.append(xyzs)
        else:
            remove_frame = False
    transform_test = np.squeeze(np.asarray(transform_test))
    return transform_test.tolist()
| 25,538
|
def request_to_dataframe(UF):
    """Fetch the postal-code (CEP) ranges for a Brazilian state.

    Pages through the Correios search results for state *UF* (two-letter
    state code) using a rotating proxy pool, retrying on proxy/timeout
    errors, and concatenates every 50-row result page into one DataFrame.

    :param UF: two-letter state abbreviation (e.g. "SP").
    :return: pandas.DataFrame with the state's CEP ranges.
    """
    # Try to load the proxy list. If after several attempts it still doesn't
    # work, raise an exception and quit.
    proxy_pool = proxy_list_to_cycle()
    # Initial values for the post request's pagination parameters.
    pagini = 1
    pagfim = 50
    count = 1
    while True:
        # Random sleep times to decrease the chances of being blocked.
        num1 = random.randint(2,5)
        time.sleep(num1)
        try:
            # Select the next proxy from the pool.
            proxy = next(proxy_pool)
            print(f"Proxy atual: {proxy}")
            # Build the POST fields for the current page. On the first page
            # the "Bairro", "qtdrow", "pagini" and "pagfim" fields are not
            # considered by the server, and the accumulator DataFrame is
            # created here.
            if count == 1:
                post_fields = {"UF":UF, "Localidade":""}
                full_dataframe = pd.DataFrame()
            else:
                post_fields = {"UF": UF, "Localidade":"**", "Bairro":"", "qtdrow":"50", "pagini":str(pagini),"pagfim": str(pagfim)}
            # Make the POST request through the selected proxy.
            request = make_post_request(post_fields, proxy)
            # Extract the CEP-range table from the HTML. On the first page
            # the content sits at index 1 of the page content; on later
            # pages it sits at index 0.
            if count == 1:
                UF_table = request_text_to_table(request = request, page_content_index = 1)
            else:
                UF_table = request_text_to_table(request = request, page_content_index = 0)
        except requests.exceptions.ProxyError:
            # Bad proxy: report it and retry with the next one.
            print("")
            print(f"Error with the proxy: {proxy}")
            print(f"Proxies left: {proxy_pool}")
            print("Tentando novamente")
            print("")
            continue
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as err:
            # Server too slow / unreachable: retry the same page.
            print("")
            print('Servidor demorando muito')
            print("Tentando novamente")
            print("")
            continue
        except Exception as e:
            # Any other failure: rebuild the proxy pool and retry.
            print("")
            print(e)
            proxy_pool = proxy_list_to_cycle()
            continue
        # Turn the extracted table into a DataFrame.
        current_page_df = table_to_df(UF_table)
        # Concatenate this page's DataFrame onto the accumulator.
        full_dataframe = pd.concat([full_dataframe, current_page_df])
        print(f"Total de dados coletados sobre o Estado {UF}: {full_dataframe.shape[0]} ")
        # Stop paging once a short page signals the last page for this state.
        # NOTE(review): pages hold 50 rows but this tests `< 49` — presumably
        # intentional slack, verify against the site's paging behaviour.
        if current_page_df.shape[0] < 49:
            print(f"Última página do estado:{UF}")
            break
        # Advance the pagination window and the page counter.
        pagini += 50
        pagfim += 50
        count = count + 1
    return full_dataframe
| 25,539
|
def read_sto_mot_file(filename):
    """
    Read a .sto or .mot file exported from OpenSim.

    ----------
    filename: path
        Path of the file which has to be read.
    Returns
    -------
    dict
        Maps each column name (taken from the line that follows the
        ``endheader`` marker) to a list of float values.
    """
    data = {}
    data_row = []
    first_line = ()
    end_header = False
    # BUG FIX: the original opened the literal string "(unknown)" instead of
    # the `filename` argument, so the function could never read its input.
    with open(filename, "rt") as f:
        reader = csv.reader(f)
        for idx, row in enumerate(reader):
            if len(row) == 0:
                pass
            elif row[0][:9] == "endheader":
                # Column names are on the line right after the header block.
                end_header = True
                first_line = idx + 1
            elif end_header is True and row[0][:9] != "endheader":
                # Values are tab-separated; csv.reader leaves the whole
                # line in row[0] since there are no commas.
                row_list = row[0].split("\t")
                if idx == first_line:
                    names = row_list
                else:
                    data_row.append(row_list)
    # Convert column-wise: first row creates the list, later rows append.
    for r in range(len(data_row)):
        for col in range(len(names)):
            if r == 0:
                data[f"{names[col]}"] = [float(data_row[r][col])]
            else:
                data[f"{names[col]}"].append(float(data_row[r][col]))
    return data
| 25,540
|
def verify_cef_labels(device, route, expected_first_label, expected_last_label=None, max_time=90,
                      check_interval=10):
    """ Verify first and last label on route
        Polls `show ip cef <route>` until every matching outgoing label
        contains `expected_first_label` (and, when given, the SID equals
        `expected_last_label`), or until `max_time` expires.
        Args:
            device ('obj'): Device object
            route ('str'): Route address
            expected_first_label ('str'): Expected first label
            expected_last_label ('str'): Expected last label
            max_time ('int'): Max time in seconds checking output
            check_interval ('int'): Interval in seconds of each checking
        Return:
            True/False
        Raises:
            None
    """
    # Dq query path into the parsed 'show ip cef' structure; the '(?P<val>.*)'
    # leaf captures each outgoing-interface entry as a dict.
    reqs = R(
        [
            'vrf',
            '(.*)',
            'address_family',
            '(.*)',
            'prefix',
            '(.*{}.*)'.format(route),
            'nexthop',
            '(.*)',
            'outgoing_interface',
            '(.*)',
            '(?P<val>.*)'
        ]
    )
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        result = True
        out = None
        try:
            out = device.parse('show ip cef {}'.format(route))
        except SchemaEmptyParserError:
            # Empty output: treat as "not ready yet" and retry.
            out = None
        if not out:
            result = False
            log.info('Could not get information about show ip cef {}'.format(route))
            timeout.sleep()
            continue
        found = find([out], reqs, filter_=False, all_keys=True)
        if found:
            keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                        source=found)
            # Every matched entry must carry the expected label(s); a single
            # mismatch fails this polling round.
            for item in keys:
                first_label = item.get('val',{}).get('outgoing_label', None)
                if first_label and str(expected_first_label) not in str(first_label):
                    result = False
                if expected_last_label:
                    sid = item.get('val',{}).get('sid', None)
                    if str(expected_last_label) != str(sid):
                        result = False
            if result:
                return True
        timeout.sleep()
    return False
| 25,541
|
def fcat(*fs):
    """Concatenate a sequence of farrays.
    The variadic *fs* input is a homogeneous sequence of functions or arrays.
    Functions are appended as-is; farrays contribute all of their flattened
    elements. Anything else raises TypeError.
    """
    flattened = []
    for entry in fs:
        if isinstance(entry, boolfunc.Function):
            flattened.append(entry)
        elif isinstance(entry, farray):
            flattened.extend(entry.flat)
        else:
            raise TypeError("expected Function or farray")
    return farray(flattened)
| 25,542
|
def plot_2images(phi1, phi2):
    """Plot two images from MNIST.

    Args:
        phi1: MNIST image, either 2-D with shape (28, 28) or flat with
            shape (784,). phi2 is assumed to have the same shape.
        phi2: MNIST image.
    Returns:
        None; draws the two images onto a new matplotlib figure.
        (28, 28) inputs are stacked vertically, (784,) inputs are placed
        side by side; other shapes are silently ignored, matching the
        original behaviour.
    """
    def _draw(ax, phi):
        # Shared per-panel rendering: 28x28 greyscale stretched wide, no ticks.
        ax.imshow(np.reshape(phi, (28, 28)), cmap='Greys', vmin=-1, vmax=1,
                  extent=[0, 100, 0, 1], aspect=100)
        ax.set_xticks([])
        ax.set_yticks([])

    if phi1.shape == (28, 28):
        # Already-2D images: two rows, one column.
        f, axarr = plt.subplots(2)
    elif phi1.shape == (784,):
        # Flattened images: one row, two columns.
        f, axarr = plt.subplots(1, 2)
    else:
        return
    _draw(axarr[0], phi1)
    _draw(axarr[1], phi2)
|
def get_path_to_spix(
    name: str,
    data_directory: str,
    thermal: bool,
    error: bool = False,
    file_ending: str = "_6as.fits",
) -> str:
    """Build the path to a galaxy's spectral-index FITS file.

    Args:
        name (str): Name of the galaxy
        data_directory (str): dr2 data directory
        thermal (bool): select the non-thermal variant of the map
        error (bool): select the error map
        file_ending (str, optional): File ending. Defaults to "_6as.fits".

    Returns:
        str: path of the form
            "<data_directory>/magnetic/<name>/<name>_spix[_non_thermal][_error]<file_ending>"
    """
    suffix = ""
    if thermal:
        suffix += "_non_thermal"
    if error:
        suffix += "_error"
    return f"{data_directory}/magnetic/{name}/{name}_spix{suffix}{file_ending}"
| 25,544
|
def hexpos(nfibres, diam):
    """
    Return a list of [x, y] positions for a classic packed hexagonal IFU
    configuration. Coordinates are truncated to 2 decimals; any fibres
    beyond the largest complete hexagon remain at [nan, nan].
    """
    positions = [[np.nan, np.nan] for _ in range(nfibres)]
    # Largest complete hexagon not exceeding nfibres: 3*l^2 - 3*l + 1 fibres.
    rings = 1
    filled = 1
    while filled < nfibres:
        rings += 1
        filled = 3 * rings ** 2 - 3 * rings + 1
    if filled != nfibres:
        rings -= 1
        filled = 3 * rings ** 2 - 3 * rings + 1
    nextra = nfibres - filled  # leftover fibres keep their nan placeholders
    idx = 0
    row_count = 2 * rings - 1          # fibres in the central row
    row_x0 = (-row_count // 2) * diam  # x of the central row's first fibre
    for i in range(row_count):
        x = row_x0 + diam * i
        positions[idx] = [int(x * 100) / 100, 0.]
        idx += 1
    half_step = 0.5 * diam
    row_sep = diam * np.sqrt(3. / 4.)
    for ring in range(1, rings):
        # Each row above/below the centre has one fewer fibre and is
        # shifted inward by half a diameter; fill both +y and -y rows.
        row_count -= 1
        row_x0 += half_step
        y = row_sep * ring
        for j in range(row_count):
            x = row_x0 + diam * j
            positions[idx] = [int(x * 100) / 100, int(y * 100) / 100]
            positions[idx + 1] = [int(x * 100) / 100, -int(y * 100) / 100]
            idx += 2
    return positions
| 25,545
|
def parse_object_properties(html):
    """
    Extract key-value pairs from the HTML markup.

    Looks at every <p class="list-group-item-text"> element; the key comes
    from its 'data-name' attribute, the value from its stripped text.
    """
    if isinstance(html, bytes):
        html = html.decode('utf-8')
    soup = BeautifulSoup(html, "html5lib")
    properties = {}
    for paragraph in soup.find_all('p', {'class': "list-group-item-text"}):
        if 'data-name' in paragraph.attrs:
            properties[paragraph.attrs['data-name']] = paragraph.get_text().strip()
    return properties
| 25,546
|
def send_email(subject, sender, recipients, text_body, html_body):
    """Send an email on a background thread.
    Args:
        subject: subject
        sender: sender
        recipients: recipients
        text_body: text body
        html_body: html body
    """
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    # Pass the concrete app object (not the proxy) into the worker thread.
    app = current_app._get_current_object()
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
| 25,547
|
def test_warped_vrt_add_alpha(dsrec, path_rgb_byte_tif):
    """A VirtualVRT has the expected VRT properties."""
    with rasterio.Env() as env:
        with rasterio.open(path_rgb_byte_tif) as src:
            # NOTE(review): the WarpedVRT is never closed — presumably fine
            # for this test's lifetime, but `with WarpedVRT(...)` is the
            # usual pattern; confirm before changing dsrec expectations.
            vrt = WarpedVRT(src, crs=DST_CRS, add_alpha=True)
            # One open dataset recorded while the source is open.
            records = dsrec(env)
            assert len(records) == 1
            assert "2 N GTiff" in records[0]
            assert vrt.dst_crs == CRS.from_string(DST_CRS)
            assert vrt.src_nodata == 0.0
            assert vrt.dst_nodata is None
            assert vrt.tolerance == 0.125
            assert vrt.resampling == Resampling.nearest
            assert vrt.warp_extras == {"init_dest": "NO_DATA"}
            # add_alpha=True appends a 4th (alpha) band to the 3 RGB bands.
            assert vrt.count == 4
            assert vrt.mask_flag_enums == (
                [MaskFlags.per_dataset, MaskFlags.alpha],
            ) * 3 + (
                [MaskFlags.all_valid],
            )
        # After the source closes, the record set reflects one dataset.
        records = dsrec(env)
        assert len(records) == 1
        assert "1 N GTiff" in records[0]
| 25,548
|
async def test_reconcile_patch_failed(clean_booked_grace_time_mock, report_mock, respx_mock):
    """
    Check that when patch to /license/reconcile response status_code is not 200, should raise exception.
    """
    # Stub the backend PATCH endpoint to answer with a 400.
    respx_mock.patch("/lm/api/v1/license/reconcile").mock(
        return_value=Response(
            status_code=400,
        )
    )
    # Provide a non-empty report so reconcile() actually attempts the PATCH.
    report_mock.return_value = [{"foo": "bar"}]
    with pytest.raises(LicenseManagerBackendConnectionError):
        await reconcile()
    # The grace-time cleanup must still run even when the PATCH fails.
    clean_booked_grace_time_mock.assert_awaited_once()
| 25,549
|
def rgb2hex(r, g, b, normalised=False):
    """Convert an RGB triplet to a hexadecimal colour string.

    :param r,g,b: red/green/blue channel values; integers in [0, 255], or
        floats in [0, 1] when *normalised* is True.
    :param normalised: if True, channels are denormalised to [0, 255] first.
    :return: a hex version of the RGB 3-tuple, e.g. ``'#0000FF'``.
    .. doctest::
        >>> from colormap.colors import rgb2hex
        >>> rgb2hex(0,0,255, normalised=False)
        '#0000FF'
        >>> rgb2hex(0,0,1, normalised=True)
        '#0000FF'
    .. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
        , :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
        :func:`hls2rgb`
    """
    if normalised:
        r, g, b = _denormalise(r, g, b, mode="rgb")
    channels = [int(c) for c in (r, g, b)]
    for channel in channels:
        check_range(channel, 0, 255)
    return '#%02X%02X%02X' % tuple(channels)
| 25,550
|
def test_hookrelay_registry(pm):
    """Verify hook caller instances are registered by name onto the relay
    and can be likewise unregistered."""
    class Api:
        @hookspec
        def hello(self, arg):
            "api hook 1"
    # Registering the spec exposes a `hello` caller on the relay.
    pm.add_hookspecs(Api)
    hook = pm.hook
    assert hasattr(hook, "hello")
    assert repr(hook.hello).find("hello") != -1
    class Plugin:
        @hookimpl
        def hello(self, arg):
            return arg + 1
    plugin = Plugin()
    pm.register(plugin)
    # One registered impl -> one result in the list returned by the caller.
    out = hook.hello(arg=3)
    assert out == [4]
    # Specs that were never declared do not appear on the relay.
    assert not hasattr(hook, "world")
    pm.unregister(plugin)
    # After unregistering, the caller remains but yields no results.
    assert hook.hello(arg=3) == []
| 25,551
|
def flac_stream_file(filename: str, frames_to_read: int = 1024,
                     seek_frame: int = 0) -> Generator[array.array, None, None]:
    """Streams the flac audio file as interleaved 16 bit signed integer sample arrays segments.
    This uses a fixed chunk size and cannot be used as a generic miniaudio decoder input stream.
    Consider using stream_file() instead.

    :param filename: path to the .flac file
    :param frames_to_read: PCM frames decoded per yielded chunk
    :param seek_frame: optional PCM frame to seek to before streaming
    :raises DecodeError: if the file cannot be opened/decoded or the seek fails
    """
    filenamebytes = _get_filename_bytes(filename)
    flac = lib.drflac_open_file(filenamebytes, ffi.NULL)
    if not flac:
        raise DecodeError("could not open/decode file")
    if seek_frame > 0:
        result = lib.drflac_seek_to_pcm_frame(flac, seek_frame)
        if result <= 0:
            raise DecodeError("can't seek")
    try:
        # `with` on the cffi-owned buffer releases the C memory when the
        # generator finishes (cffi release/context-manager support).
        with ffi.new("drflac_int16[]", frames_to_read * flac.channels) as decodebuffer:
            buf_ptr = ffi.cast("drflac_int16 *", decodebuffer)
            while True:
                num_samples = lib.drflac_read_pcm_frames_s16(flac, frames_to_read, buf_ptr)
                if num_samples <= 0:
                    # End of stream (or decode failure) -> stop iterating.
                    break
                # num_samples frames x 2 bytes x channels of valid data.
                buffer = ffi.buffer(decodebuffer, num_samples * 2 * flac.channels)
                samples = _create_int_array(2)  # presumably a 16-bit int array.array — confirm helper
                samples.frombytes(buffer)
                yield samples
    finally:
        # Always release the decoder, even if the consumer abandons us.
        lib.drflac_close(flac)
| 25,552
|
def main():
    """Run the main job."""
    cliffs = ee.FeatureCollection(definitions.EE_CLIFF_FOOTPRINTS)
    cliffs = cliffs.map(get_data)
    # export results to drive for local download
    drive_task = ee.batch.Export.table.toDrive(
        collection=cliffs,
        description='exporting cliff data to drive',
        fileFormat='CSV',
        folder='earth-engine',
        fileNamePrefix='cliff_data'
    )
    # export results to ee asset
    asset_task = ee.batch.Export.table.toAsset(
        collection=cliffs,
        description='exporting cliff data as asset',
        assetId=definitions.EE_CLIFFS
    )
    # call ee.batch.Task.list() to see current status of exports
    for task in (drive_task, asset_task):
        task.start()
| 25,553
|
def creates_user(page_users, new_user) -> None:
    """I create a new user."""
    page_users.set_user(new_user)
    # Fill the fields
    fill_action = FillUserAction(_page=page_users)
    fill_action.fill_name().fill_password().confirm_password()
    # Drop the fill action before starting the next one (may run __del__).
    del fill_action
    # Create
    create_action = CreateUserAction(_page=page_users)
    create_action.click()
| 25,554
|
def model_choices_from_protobuf_enum(protobuf_enum):
    """Build Django-style choices from a protobuf enum.

    Protobuf ``items()`` yields (name, number) pairs, which is the opposite
    order Django requires, so each pair is flipped to (number, name).
    """
    return [(number, label) for label, number in protobuf_enum.items()]
| 25,555
|
def load_boxes_and_labels(cfg, mode):
    """
    Loading boxes and labels from csv files.
    Args:
        cfg (CfgNode): config.
        mode (str): 'train', 'val', or 'test' mode.
    Returns:
        all_boxes (dict): a dict which maps from `video_name` and
            `frame_sec` to a list of `box`. Each `box` is a
            [`box_coord`, `box_labels`] where `box_coord` is the
            coordinates of box and 'box_labels` are the corresponding
            labels for the box.
    """
    # Ground-truth lists are only used for training; predictions are used
    # for both training and evaluation.
    gt_lists = cfg.AVA.TRAIN_GT_BOX_LISTS if mode == "train" else []
    pred_lists = (
        cfg.AVA.TRAIN_PREDICT_BOX_LISTS
        if mode == "train"
        else cfg.AVA.TEST_PREDICT_BOX_LISTS
    )
    ann_filenames = [
        os.path.join(cfg.AVA.ANNOTATION_DIR, filename)
        for filename in gt_lists + pred_lists
    ]
    # Parallel flags: True for each ground-truth file, False for predictions.
    ann_is_gt_box = [True] * len(gt_lists) + [False] * len(pred_lists)
    detect_thresh = cfg.AVA.DETECTION_SCORE_THRESH
    all_boxes = {}
    count = 0
    unique_box_count = 0
    for filename, is_gt_box in zip(ann_filenames, ann_is_gt_box):
        with PathManager.open(filename, "r") as f:
            for line in f:
                # CSV row layout (per the indices used below): video_name,
                # frame_sec, x1, y1, x2, y2, label[, score] — presumably the
                # AVA annotation format; confirm against the dataset docs.
                row = line.strip().split(",")
                # When we use predicted boxes to train/eval, we need to
                # ignore the boxes whose scores are below the threshold.
                if not is_gt_box:
                    score = float(row[7])
                    if score < detect_thresh:
                        continue
                video_name, frame_sec = row[0], int(row[1])
                # Only select frame_sec % 4 = 0 samples for validation if not
                # set FULL_TEST_ON_VAL.
                if (
                    mode == "val"
                    and not cfg.AVA.FULL_TEST_ON_VAL
                    and frame_sec % 4 != 0
                ):
                    continue
                # Box with format [x1, y1, x2, y2] with a range of [0, 1] as float.
                box_key = ",".join(row[2:6])
                box = list(map(float, row[2:6]))
                label = -1 if row[6] == "" else int(row[6])
                # Lazily create the per-video dict covering all valid frames.
                if video_name not in all_boxes:
                    all_boxes[video_name] = {}
                    for sec in AVA_VALID_FRAMES:
                        all_boxes[video_name][sec] = {}
                if box_key not in all_boxes[video_name][frame_sec]:
                    all_boxes[video_name][frame_sec][box_key] = [box, []]
                    unique_box_count += 1
                # The same box can carry several labels across files/rows.
                all_boxes[video_name][frame_sec][box_key][1].append(label)
                if label != -1:
                    count += 1
    for video_name in all_boxes.keys():
        for frame_sec in all_boxes[video_name].keys():
            # Save in format of a list of [box_i, box_i_labels].
            all_boxes[video_name][frame_sec] = list(
                all_boxes[video_name][frame_sec].values()
            )
    logger.info(
        "Finished loading annotations from: %s" % ", ".join(ann_filenames)
    )
    logger.info("Detection threshold: {}".format(detect_thresh))
    logger.info("Number of unique boxes: %d" % unique_box_count)
    logger.info("Number of annotations: %d" % count)
    return all_boxes
| 25,556
|
def test_registrationform_userprofile_disable_csrf(app_with_userprofiles_csrf,
                                                   form_test_data):
    """App with CSRF enabled and UserProfile, test if reg. form removes it."""
    # Use the first configured OAuth remote app as the registration source.
    remote_apps = current_oauthclient.oauth.remote_apps
    first_remote_app = list(remote_apps.values())[0]
    filled_form = _fill_form(app_with_userprofiles_csrf,
                             create_csrf_disabled_registrationform,
                             form_test_data,
                             remote=first_remote_app)
    # The profile sub-form must exist but carry no CSRF token, and the
    # top-level form must be token-free as well.
    assert 'profile' in filled_form
    assert 'csrf_token' not in filled_form.profile
    _assert_no_csrf_token(filled_form)
| 25,557
|
def kmeans(boxes, k):
    """
    Group into k clusters the BB in boxes.
    http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans
    :param boxes: The BB in format Nx4 where (x1,y1,x2,y2)
    :param k: the number of clusters.
    :return: k clusters with the element indexes of each clusters.
    """
    model = KMeans(n_clusters=k).fit(boxes)
    pred = model.labels_
    # One independent list per cluster. The original used `[[]] * k`, which
    # aliases a single list k times; it only avoided the aliasing bug by
    # rebuilding the list on every element (`indexes[v] + [i]`, quadratic).
    # Appending in place is linear and produces the same result.
    indexes = [[] for _ in range(k)]
    for i, cluster in enumerate(pred):
        indexes[cluster].append(i)
    return indexes
| 25,558
|
def observation_min_max_in_hex_grid_json(request: HttpRequest):
    """Return the min, max observations count per hexagon, according to the zoom level. JSON format.
    This can be useful to dynamically color the grid according to the count

    Query parameters: `zoom` (int) plus the species/dataset/date/area
    filters decoded by `filters_from_request`.
    Response: `{"min": <int|null>, "max": <int|null>}`.
    """
    zoom = extract_int_request(request, "zoom")
    species_ids, datasets_ids, start_date, end_date, area_ids = filters_from_request(
        request
    )
    # Wrap the shared aggregated-grid fragment and reduce it to min/max.
    sql_template = readable_string(
        Template(
            """
    WITH grid AS ($jinjasql_fragment_aggregated_grid)
    SELECT MIN(count), MAX(count) FROM grid;
    """
        ).substitute(
            jinjasql_fragment_aggregated_grid=JINJASQL_FRAGMENT_AGGREGATED_GRID
        )
    )
    sql_params = {
        # Hexagon size is derived from the zoom level lookup table.
        "hex_size_meters": ZOOM_TO_HEX_SIZE[zoom],
        "grid_extent_viewport": False,
        "species_ids": species_ids,
        "datasets_ids": datasets_ids,
        "area_ids": area_ids,
    }
    # Date bounds are optional; only add them when supplied.
    if start_date:
        sql_params["start_date"] = start_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
    if end_date:
        sql_params["end_date"] = end_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
    # JinjaSql renders the template into parameterized SQL (no injection).
    j = JinjaSql()
    query, bind_params = j.prepare_query(sql_template, sql_params)
    with connection.cursor() as cursor:
        cursor.execute(query, bind_params)
        r = cursor.fetchone()
        return JsonResponse({"min": r[0], "max": r[1]})
| 25,559
|
def is_path(value, default="", expand=None):
    """Parse a value as a path
    Parameters
    ----------
    value:
        raw value; "None"/None yields None, "" falls back to *default*
    default:
        substitute used when *value* is the empty string
    expand:
        expandvars and expandhome on loaded path
        **Warning: expand currently can't work with interpolation**
    """
    # TODO: fix interpolation and expand !
    if str(value) == "None":
        return None
    path = default if value == "" else value
    if expand and isinstance(path, str):
        path = os.path.expandvars(os.path.expanduser(path))
    return path
| 25,560
|
def find_suites():
    """
    Return a dict of suitename and path, e.g.
    {"heat_equation": /home/safl/bechpress/suites/cpu/heat_equation.py"}

    Returns:
        tuple: (suitesdir, suites) where suitesdir is the directory reported
        by `bp-info --suites` and suites maps suite name -> file path.
    Raises:
        Exception: if `bp-info` writes anything to stderr.
    """
    p = subprocess.Popen(
        ["bp-info", "--suites"],
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE,
        # BUG FIX: on Python 3, Popen pipes yield bytes; the bytes suitesdir
        # then made `os.walk` yield bytes paths and the str/bytes join below
        # raised TypeError. Decoding to text restores the intended behaviour.
        universal_newlines = True
    )
    out, err = p.communicate()
    suitesdir = out.strip()
    if err:
        raise Exception("Error when trying to find suites-dir.")
    suites = {}
    for root, dirs, files in os.walk(suitesdir):
        for filename in files:
            # Skip package markers and anything that is not a Python file.
            if "__init__" in filename:
                continue
            if not filename.endswith(".py"):
                continue
            suitepath = os.path.join(root, filename)
            suitename = os.path.splitext(filename)[0]
            suites[suitename] = suitepath
    return (suitesdir, suites)
| 25,561
|
def test_tag_retention_without_tag(event, glsc):
    """tag retention should be null if tags is null"""
    choices = get_event_model_form_choices(event)
    event_dict = create_event_dict(event)
    # Retention without any tag type selected is an invalid combination.
    event_dict["fish_tags"] = None
    event_dict["tag_ret"] = 50
    form = StockingEventForm(event_dict, choices=choices, user=glsc)
    status = form.is_valid()
    assert status is False
    # The validation error is form-wide, so it lands under '__all__'.
    msg = "At least one Fish Tag Type must be selected if Tag Retention is provided."
    assert msg in form.errors["__all__"]
| 25,562
|
def parseHtml(html):
    """
    Parse an HTML source string with BeautifulSoup.

    Parameters
    ----------
    html : str
        HTML source string.

    Returns
    -------
    soup : BeautifulSoup
        Parsed BeautifulSoup object (built with the 'html.parser' backend).
    """
    return BeautifulSoup(html, 'html.parser')
| 25,563
|
def test_str():
    """test Parameter.__str__()"""
    # Positional parameter (showkey=False): only the value is rendered.
    node = Parameter(wraptext("1"), wraptext("foo"), showkey=False)
    assert "foo" == str(node)
    # Named parameter (default showkey): rendered as "name=value".
    node2 = Parameter(wraptext("foo"), wraptext("bar"))
    assert "foo=bar" == str(node2)
| 25,564
|
def am_score(probs_data, probs_gen):
    """
    Calculate AM Score.

    AM = KL(mean class distribution of real data || mean class distribution
    of generated data) + mean per-sample entropy of the generated
    predictions.

    Args:
        probs_data: (n_samples, n_classes) class probabilities for real data.
        probs_gen: (n_samples, n_classes) class probabilities for generated
            data.

    Returns:
        float: the AM score (lower is better).
    """
    mean_data = np.mean(probs_data, axis=0)
    mean_gen = np.mean(probs_gen, axis=0)
    entropy_gen = np.mean(entropy(probs_gen, axis=1))
    # `entropy(p, q)` computes KL(p || q). The result variable is renamed so
    # it no longer shadows this function's own name.
    score = entropy(mean_data, mean_gen) + entropy_gen
    return score
| 25,565
|
def load_image(path_image, size=None, bgr_mean=(103.939, 116.779, 123.68)):
    """
    Loads and pre-processes an image for the SalGAN model.

    args:
        path_image: abs path to image
        size: (height, width) to input to the network (if not specified,
            uses the SalGAN predefined SALGAN_RESIZE)
        bgr_mean: mean values (BGR, matching cv2.imread channel order) to
            subtract from the image. Default is now an immutable tuple so
            the shared default can never be mutated between calls
            (mutable-default-argument pitfall); lists still work.
    returns:
        torch float tensor with processed image, shape (C, H, W)
        original size (H, W) of the image
    """
    image = cv2.imread(path_image)  # BGR format
    H, W, C = image.shape
    if size is None:
        size = SALGAN_RESIZE
    # cv2.resize takes (width, height), hence the reversed size tuple.
    image = cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
    image = image.astype(np.float32)
    image -= np.array(bgr_mean)
    # convert to torch Tensor
    image = torch.FloatTensor(image)
    # swap channel dimensions: HWC -> CHW
    image = image.permute(2, 0, 1)
    return image, (H, W)
| 25,566
|
def GetWsdlNamespace(version):
    """ Get the wsdl namespace URN for the given API version. """
    return "urn:{}".format(serviceNsMap[version])
| 25,567
|
def is_PC(parcels):
    """
    0/1 dummy for Pinal County ('PC' in the county column).
    """
    in_pinal = parcels.county == 'PC'
    return in_pinal.astype(int)
| 25,568
|
def test_create_default_instance_through_method():
    """ This test creates the default instance with the default plugins, several times for profiling purposes """
    # Optionally wrap the parser factory with a profiler, if available.
    parse_with_default_method2 = try_to_annotate_with_profile(parse_with_default_method)
    # TODO one day use pytest benchmark instead
    start = perf_counter()
    for i in range(0,100):
        parse_with_default_method2()
    elapsed = perf_counter() - start
    # Coarse regression guard: 100 constructions must finish within 5s.
    assert elapsed < 5
| 25,569
|
def check_password(password: str) -> int:
    """Use Have I Been Pwned to determine whether a password is bad.
    If the request fails, this function will assume the password is fine, but
    log an error so that administrators can diagnose it later.
    :param password: The password to validate.
    :return: A positive integer indicating the number of times the password has
        been found in a breach. Zero is good, >0 is bad.
    """
    # k-anonymity scheme: only the first 5 hex chars of the SHA-1 digest are
    # sent; the API returns every suffix in that bucket.
    sha1_hash = hashlib.sha1()
    sha1_hash.update(password.encode("utf-8"))
    digest = sha1_hash.hexdigest()
    digest = digest.upper()
    try:
        # A timeout keeps a slow/unreachable API from hanging the caller
        # (requests has no default timeout).
        response = requests.get(
            "https://api.pwnedpasswords.com/range/" + digest[0:5], timeout=10
        )
    except requests.RequestException:
        # BUG FIX: the docstring promises fail-open on request failure, but
        # connection errors used to propagate. Log and treat as fine.
        import logging
        logging.getLogger(__name__).exception("Have I Been Pwned lookup failed")
        return 0
    if response.status_code != 200:
        # The docs say this shouldn't happen, but just in case.
        return 0
    return suffix_in_text(digest[5:], response.text)
| 25,570
|
def rqpos(A):
    """
    RQ decomp. of A, with phase convention such that R has only positive
    elements on the main diagonal.
    If A is an MPS tensor (d, chiL, chiR), it is reshaped and
    transposed appropriately
    before the throughput begins. In that case, Q will be a tensor
    of the same size, while R will be a chiL x chiL matrix.

    NOTE(review): the positivity convention is presumably enforced inside
    rqmat/qrmat — it is not visible here; confirm in those helpers.
    """
    Ashp = A.shape
    # Plain matrix input: defer entirely to the matrix routine.
    if len(Ashp) == 2:
        return rqmat(A)
    elif len(Ashp) != 3:
        # NOTE(review): invalid rank only prints and then falls through to
        # fuse_right, which will likely fail anyway — raising ValueError here
        # would be clearer; confirm no caller relies on the print.
        print("A had invalid dimensions, ", A.shape)
    # MPS tensor: fuse the physical and right indices -> (chiL, d*chiR).
    A = fuse_right(A) #chiL, d*chiR
    # NOTE(review): assumes qrmat returns its factors in (R, Q) order for
    # this unpacking to match the docstring — verify against qrmat.
    R, Q = qrmat(A, mode="economic")
    # Restore Q to the original (d, chiL, chiR) tensor layout.
    Q = unfuse_right(Q, Ashp)
    return (Q, R)
| 25,571
|
def rate_text(chunk, rating):
    """
    Update the given rating with what is found in the given text.
    Returns None.
    """
    for raw_word in chunk.split():
        # Keep only the alphanumeric characters of each word.
        word = ''.join(c for c in raw_word if c.isalnum())
        rating['__total__'] += 1
        # Credit every language that collected this word.
        for lang in COLLECTED_DIGESTED.get(word, []):
            rating[lang] += 1
| 25,572
|
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    show_model_arch=True,
                    print_keys=True):
    """ Note that official pre-trained models use `GroupNorm` in backbone.

    Loads a checkpoint into `model`, remapping legacy CenterNet-style key
    names (bbox_head deconv/shortcut/hm/wh layers) onto the current
    neck/head layout before loading.

    Args:
        model: target module (may be a DataParallel-style wrapper).
        filename (str): path to the checkpoint file.
        map_location: passed to `torch.load`.
        strict (bool): passed to `load_state_dict`.
        logger: optional logger forwarded to `load_state_dict`.
        show_model_arch (bool): print the model before loading.
        print_keys (bool): print every key remapping performed.

    Returns:
        The raw checkpoint object as loaded from disk.

    Raises:
        IOError: if `filename` is not a file.
        RuntimeError: if no state_dict can be found in the checkpoint.
    """
    if not osp.isfile(filename):
        raise IOError('{} is not a checkpoint file'.format(filename))
    checkpoint = torch.load(filename, map_location=map_location)
    # get state_dict from checkpoint
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError(
            'No state_dict found in checkpoint file {}'.format(filename))
    # strip prefix of state_dict
    # Legacy checkpoints start with 'backbone.'; rewrite their bbox_head
    # keys onto the current neck/head naming scheme.
    if list(state_dict.keys())[0].startswith('backbone.'):
        state_dict = {}
        for k, v in checkpoint['state_dict'].items():
            new_k = k
            if 'bbox_head.' in new_k:
                if 'deconv_layers' in new_k:
                    # DCN upsampling blocks: conv/bn pairs become dcn/dcn_bn.
                    new_k = new_k.replace("conv_offset_mask.", "conv_offset.")
                    new_k = new_k.replace("bbox_head.deconv_layers.", "neck.upsamples.")
                    if '.0.0.' in new_k:
                        new_k = new_k.replace(".0.0.", ".0.dcn.")
                    if '.0.1.' in new_k:
                        new_k = new_k.replace(".0.1.", ".0.dcn_bn.")
                    if '.1.0.' in new_k:
                        new_k = new_k.replace(".1.0.", ".1.dcn.")
                    if '.1.1.' in new_k:
                        new_k = new_k.replace(".1.1.", ".1.dcn_bn.")
                    if '.2.0.' in new_k:
                        new_k = new_k.replace(".2.0.", ".2.dcn.")
                    if '.2.1.' in new_k:
                        new_k = new_k.replace(".2.1.", ".2.dcn_bn.")
                if '.shortcut_layers.' in new_k:
                    new_k = new_k.replace("bbox_head.shortcut_layers.", "neck.shortcuts.")
                    new_k = new_k.replace(".layers.", ".")
                if '.hm.' in new_k:
                    # Heatmap head -> center-heatmap head.
                    new_k = new_k.replace(".hm.", ".ct_hm_head.")
                if '.wh.' in new_k:
                    # Width/height head -> center-wh head.
                    new_k = new_k.replace(".wh.", ".ct_wh_head.")
            if print_keys:
                print('> key = ', k, ' -> ', new_k)
            state_dict[new_k] = v
    if show_model_arch:
        print('> model = ', model)
    # load state_dict
    # Unwrap DataParallel-style containers before loading.
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint
| 25,573
|
def basic_image_2():
    """
    A 20x20 uint8 array with a square (3x3) feature of ones at rows/cols 2..4.
    Equivalent to results of rasterizing basic_geometry with all_touched=True.
    Borrowed from rasterio/tests/conftest.py
    Returns
    -------
    numpy ndarray
    """
    canvas = np.zeros((20, 20), dtype=np.uint8)
    canvas[2:5, 2:5] = 1
    return canvas
| 25,574
|
def save_tiled_tsdf_comparison_image(out_path, good_case_sdfs, bad_case_sdfs, vertical_tile_count=4, padding_width=1,
                                     scale=2):
    """
    Render good cases (left half) and bad cases (right half) as a tiled
    image of canonical/live TSDF pairs and write it to disk.

    :param out_path: path (directory + filename) where to save the image
    :param good_case_sdfs: a list of tuples in form (canonical_tsdf, live_tsdf, max_warp_coordinate)
    :param bad_case_sdfs: a list of tuples in form (canonical_tsdf, live_tsdf, max_warp_coordinate)
    :param vertical_tile_count: number of group rows (and, per half, columns)
    :param padding_width: width between groups
    :param scale: integer >= 1, factor for visually scaling the tsdf fields up
    :return: None (writes the image via cv2.imwrite)
    """
    # Assumes all tiles are square and have equal size!
    vertical_tile_count = vertical_tile_count
    horizontal_tile_count = vertical_tile_count
    group_border_width = 1  # drawn as lines, not rectangles, so currently cannot be changed from 1
    tile_height = tile_width = good_case_sdfs[0][0].shape[0] * scale
    group_width = tile_width * 2 + group_border_width * 3
    group_height = tile_height + group_border_width * 2
    canvas_height = ((vertical_tile_count + 1) * padding_width + vertical_tile_count * group_height)
    canvas_width = ((horizontal_tile_count + 2) * padding_width + horizontal_tile_count * group_width)
    canvas_dimensions = (canvas_height, canvas_width, 3)
    canvas = np.zeros(canvas_dimensions, dtype=np.uint8)
    group_border_color = (34, 240, 12)  # BGR green, per cv2 convention
    # draws an regular arrangement of groups of tiles, 2 tiles horizontally in each group, one for canonical and one for
    # live TSDF fields corresponding to the same case
    def make_half(x_offset, sdfs):
        i_case = 0
        for group_x in range(horizontal_tile_count // 2):
            for group_y in range(vertical_tile_count):
                if i_case < len(sdfs):
                    # Render both fields and mark the max-warp coordinate.
                    canonical_sdf_image = mark_point_on_sdf_image(sdf_field_to_image(sdfs[i_case][0], scale=scale),
                                                                  sdfs[i_case][2], scale=scale)
                    live_sdf_image = mark_point_on_sdf_image(sdf_field_to_image(sdfs[i_case][1], scale=scale),
                                                             sdfs[i_case][2], scale=scale)
                    i_case += 1
                    # fill in tsdfs (canonical left, live right within a group)
                    pixel_y_start = group_y * group_height + (group_y + 1) * padding_width + group_border_width
                    pixel_y_end = pixel_y_start + tile_height
                    pixel_x_start = x_offset + group_x * group_width + (group_x + 1) * \
                                    padding_width + group_border_width
                    pixel_x_end = pixel_x_start + tile_width
                    canvas[pixel_y_start:pixel_y_end, pixel_x_start:pixel_x_end] = canonical_sdf_image
                    pixel_x_start = pixel_x_end + 1
                    pixel_x_end = pixel_x_start + tile_width
                    canvas[pixel_y_start:pixel_y_end, pixel_x_start:pixel_x_end] = live_sdf_image
                # fill in group borders ----------------------------------------------------------------------------
                # order:
                #    ______4______
                #   |      |      |
                # 1 |      |2     | 3
                #   |______|______|
                #       5
                # --------------------------------------------------------------------------------------------------
                # 1
                pixel_y_start = group_y * group_height + (group_y + 1) * padding_width
                pixel_y_end = pixel_y_start + group_height
                pixel_x = x_offset + group_x * group_width + (group_x + 1) * padding_width
                canvas[pixel_y_start:pixel_y_end, pixel_x] = group_border_color
                # 2
                pixel_x = pixel_x + tile_width + group_border_width
                canvas[pixel_y_start:pixel_y_end, pixel_x] = group_border_color
                # 3
                pixel_x = pixel_x + tile_width + group_border_width
                canvas[pixel_y_start:pixel_y_end, pixel_x] = group_border_color
                # 4
                pixel_x_start = x_offset + group_x * group_width + (group_x + 1) * padding_width
                pixel_x_end = pixel_x_start + tile_width * 2 + group_border_width * 3
                pixel_y = group_y * group_height + (group_y + 1) * padding_width
                canvas[pixel_y, pixel_x_start:pixel_x_end] = group_border_color
                # 5
                pixel_y = pixel_y + group_height - 1
                canvas[pixel_y, pixel_x_start:pixel_x_end] = group_border_color
    # Good cases occupy the left half of the canvas, bad cases the right.
    make_half(0, good_case_sdfs)
    make_half(canvas_width // 2, bad_case_sdfs)
    cv2.imwrite(out_path, canvas)
| 25,575
|
def download(url, verbose, user_agent='wswp', num_retries=2, decoding_format='utf-8', timeout=5):
    """
    Function to download contents from a given url

    Input:
        url: str
            string with the url to download from
        user_agent: str
            Default 'wswp'
        num_retries: int
            Number of times to retry downloading
            if there is a 5XX server error
        verbose: bool
            Print out url and errors
        decoding_format: str
            Default "utf-8"
        timeout: int
            Seconds to wait for the server before giving up
    Output:
        returns: str
            string with contents of given url
    Raises:
        IOError: when the download ultimately fails.
    """
    if verbose:
        print('Downloading:', url)
    headers = {'User-agent': user_agent}
    request_obj = request.Request(url, headers=headers)
    try:
        with request.urlopen(request_obj, timeout=timeout) as response:
            html = response.read()
    except error.URLError as e:
        if verbose:
            print('Download error:', e.reason)
        # BUG FIX: `num_retries` was documented but dead (retry code was
        # commented out). Retry transient 5XX server errors as promised.
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            return download(url, verbose, user_agent=user_agent,
                            num_retries=num_retries - 1,
                            decoding_format=decoding_format, timeout=timeout)
        raise IOError(e.reason)
    return html.decode(decoding_format)
| 25,576
|
def split_checkbox_responses(dict, keys, delimiter=";", prefix=" ", padding="\n"):
    """Break out delimiter-separated checkbox responses into indented (or
    prefixed) lines, in place.

    Arguments:
        dict (dictionary) : dictionary on which to do this substitution
            (parameter keeps its historical name for keyword compatibility)
        keys (list) : list of keys to be so replaced
        delimiter (string, optional) : checkbox item delimiter on input
        prefix (string, optional) : initial padding
        padding (string, optional) : terminal padding
    """
    for key in keys:
        raw = dict[key]
        if raw.strip() == "":
            # prevent spurious "".split(";") => [""]
            dict[key] = ""
        else:
            dict[key] = "".join(
                "{}{}{}".format(prefix, item, padding)
                for item in raw.split(delimiter)
            )
|
def _write_deform(model: Union[BDF, OP2Geom], name: str,
                  loads: List[AEROS], ncards: int,
                  op2_file, op2_ascii, endian: bytes, nastran_format: str='nx') -> int:
    """
    Writes DEFORM records to the op2 file and its ascii mirror.

    (104, 1, 81)
    NX 2019.2
    Word Name Type Description
    1 SID I Deformation set identification number
    2 EID I Element number
    3 D RS Deformation

    Returns the number of bytes reported by ``write_header``.
    """
    key = (104, 1, 81)
    nfields = 3
    # Two ints (sid, eid) followed by one float (deformation).
    record = Struct(endian + b'iif')
    nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)
    for load in loads:
        entry = [load.sid, load.eid, load.deformation]
        assert None not in entry, entry
        op2_ascii.write(f'  DEFORM data={entry}\n')
        op2_file.write(record.pack(*entry))
    return nbytes
| 25,578
|
def polygonize(geometries, **kwargs):
    """Creates polygons formed from the linework of a set of Geometries.

    The input array is treated as the edges of a planar graph; any
    geometry type may be supplied, and only its constituent lines and
    rings are used.  Lines or rings that do not close a polygon produce
    an empty GeometryCollection, and duplicate segments are ignored.

    The polygons are returned inside a GeometryCollection.  Individual
    Polygons can be obtained using ``get_geometry`` (single polygon) or
    ``get_parts`` (array of polygons).  MultiPolygons can be constructed
    from the output using
    ``pygeos.multipolygons(pygeos.get_parts(pygeos.polygonize(geometries)))``.

    Parameters
    ----------
    geometries : array_like
        An array of geometries.
    axis : int
        Axis along which the geometries are polygonized.
        The default is to perform a reduction over the last dimension
        of the input array. A 1D array results in a scalar geometry.
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Returns
    -------
    GeometryCollection or array of GeometryCollections

    See Also
    --------
    get_parts, get_geometry
    polygonize_full

    Examples
    --------
    >>> lines = [
    ...     Geometry("LINESTRING (0 0, 1 1)"),
    ...     Geometry("LINESTRING (0 0, 0 1)"),
    ...     Geometry("LINESTRING (0 1, 1 1)"),
    ... ]
    >>> polygonize(lines)
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))>
    """
    collection = lib.polygonize(geometries, **kwargs)
    return collection
| 25,579
|
def slerp(input_latent1, input_latent2, interpolation_frames=100):
    """Spherical linear interpolation ("slerp", amazingly enough).

    Parameters
    ----------
    input_latent1, input_latent2 : NumPy arrays
        Two arrays which will be interpolated between.
    interpolation_frames : int, optional
        Number of frames returned during interpolation.

    Returns
    -------
    list
        List of vectors of size interpolation_frames
    """
    # Coincident endpoints would make omega ~ 0 and the sin(omega)
    # denominator vanish, so just repeat the endpoint.
    if np.allclose(input_latent1, input_latent2):
        return [input_latent2] * interpolation_frames
    # The angle between the two (fixed) vectors is loop-invariant:
    # compute it once instead of once per frame.
    omega = np.arccos(np.dot(input_latent1 / np.linalg.norm(input_latent1),
                             input_latent2 / np.linalg.norm(input_latent2)))
    so = np.sin(omega)
    output_latents = []
    for idx in range(interpolation_frames):
        # val sweeps [0, 1) across the requested number of frames.
        val = float(idx) / interpolation_frames
        output_latents.append(
            np.sin((1.0 - val) * omega) / so * input_latent1
            + np.sin(val * omega) / so * input_latent2)
    return output_latents
| 25,580
|
def parent_version_config():
    """Return a configuration for an experiment."""
    metadata = {
        "user": "corneauf",
        "datetime": datetime.datetime.utcnow(),
        "user_args": ["--x~normal(0,1)"],
    }
    config = dict(
        _id="parent_config",
        name="old_experiment",
        version=1,
        algorithms="random",
        metadata=metadata,
    )
    # Fill in the space field from the user args (backward-compat shim).
    backward.populate_space(config)
    return config
| 25,581
|
def sanitize_k8s_name(name):
    """From _make_kubernetes_name

    sanitize_k8s_name cleans and converts the names in the workflow:
    lowercase the input, replace every run of characters outside
    [-0-9a-z] with a dash, collapse repeated dashes, and trim dashes
    from both ends.
    """
    lowered = name.lower()
    dashed = re.sub('[^-0-9a-z]+', '-', lowered)
    collapsed = re.sub('-+', '-', dashed)
    return collapsed.strip('-')
| 25,582
|
def handle_connect(event):
    """Connect events occur when a device is responding to a MDM command. They
    contain the raw responses from the device.

    https://developer.apple.com/enterprise/documentation/MDM-Protocol-Reference.pdf
    """
    # b64decode returns bytes, so the membership test must use a bytes
    # literal; a str needle would raise TypeError under Python 3.
    xml = base64.b64decode(event['acknowledge_event']['raw_payload'])
    if b'InstalledApplicationList' in xml:
        app.logger.info(xml)
| 25,583
|
def test_dt_tz_column_naive_input_pg(pg_dt):
    """Included here as a way of showing that this mimics postgres' behavior"""
    naive_dt = datetime(2018, 1, 1, 5, 0, 0)
    insert_stmt = DTTable.__table__.insert().values(id=1, dt_tz=naive_dt)
    pg_dt.execute(insert_stmt)
    stored = pg_dt.execute(sqlalchemy.select([dt_table.c.dt_tz])).scalar()
    # The naive value is expected to come back UTC-aware.
    assert stored == datetime(2018, 1, 1, 5, 0, 0, tzinfo=utc)
| 25,584
|
def main():
    """ Process command line arguments and run x86 """
    runner = X86Run()
    return runner.Run()
| 25,585
|
def gen_key(uid, section='s'):
    """
    Generate store key for own user
    """
    key = 'cs:{}:{}'.format(section, uid)
    return key.encode()
| 25,586
|
def convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,
                          box_width: float, voxel_width: float) -> np.ndarray:
    """Converts atom coordinates to an i,j,k grid index.

    The atom's coordinates are shifted by half the box width (so the box
    corner sits at the origin), divided by the voxel width, and floored
    to integer voxel indices.

    Parameters
    -----------
    coordinates: np.ndarray
        Array with coordinates of all atoms in the molecule, shape (N, 3).
    atom_index: int
        Index of an atom in the molecule.
    box_width: float
        Size of the box in Angstroms.
    voxel_width: float
        Size of a voxel in Angstroms

    Returns
    -------
    indices: np.ndarray
        A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates
        of specified atom.
    """
    shifted = coordinates[atom_index] + box_width / 2.0
    indices = np.floor(shifted / voxel_width).astype(int)
    voxels_per_side = box_width / voxel_width
    # Warn (but still return) when the atom falls outside the box.
    if ((indices < 0) | (indices >= voxels_per_side)).any():
        logger.warning('Coordinates are outside of the box (atom id = %s,'
                       ' coords xyz = %s, coords in box = %s' %
                       (atom_index, coordinates[atom_index], indices))
    return indices
| 25,587
|
def get_toolset_url():
    """URL of a platform specific Go toolset archive."""
    # TODO(vadimsh): Support toolset for cross-compilation.
    machine_to_arch = {
        'amd64': 'x86-64',
        'x86_64': 'x86-64',
        'i386': 'x86-32',
        'x86': 'x86-32',
    }
    arch = machine_to_arch.get(platform.machine().lower())
    variant = TOOLSET_VARIANTS.get((sys.platform, arch))
    if not variant:
        # TODO(vadimsh): Compile go lang from source.
        raise Failure('Unrecognized platform')
    return '%s/%s.%s' % (DOWNLOAD_URL_PREFIX, TOOLSET_VERSION, variant)
| 25,588
|
def plot_det_curve(y_true_arr, y_pred_proba_arr, labels_arr, pos_label=None, plot_thres_for_idx=None,
                   log_wandb=False):
    """Function for plotting DET curve

    Args:
        y_true_arr (list/np.array): list of all GT arrays
        y_pred_proba_arr (list/np.array): list of all predicted probabilities
        labels_arr (list/np.array): list of labels
        pos_label (str, optional): What is the label of the positive class.
            Defaults to None.
        plot_thres_for_idx (int, optional): If given, the best threshold
            (GMean) is plotted for the DET curve at this index.
            Defaults to None.
        log_wandb (bool, optional): If true, figure is logged to W&B.
            Defaults to False.

    Returns:
        plt.Figure, plt.Axes: The tuple of figure and axes
    """
    fig, ax = plt.subplots(figsize=(12, 8))
    # One DET curve (plotted in normal-deviate space) per model.
    for idx, (y_true, y_pred_proba) in enumerate(zip(y_true_arr, y_pred_proba_arr)):
        fpr, fnr, _ = det_curve(
            y_true, y_pred_proba[:, 1], pos_label=pos_label)
        auc_score = auc(fpr, fnr)
        ax.plot(norm.ppf(fpr), norm.ppf(fnr),
                label=f'{labels_arr[idx]} (AUC - {round(auc_score, 3)})')
    if plot_thres_for_idx is not None:
        y_true = y_true_arr[plot_thres_for_idx]
        y_pred_proba = y_pred_proba_arr[plot_thres_for_idx]
        _, best_idx = get_best_threshold_gmean(
            y_true, y_pred_proba, pos_label=pos_label)
        fpr, fnr, _ = det_curve(
            y_true, y_pred_proba[:, 1], pos_label=pos_label)
        # Mark the best-threshold point in the same color as its curve.
        ax.plot([norm.ppf(fpr[best_idx])], [norm.ppf(fnr[best_idx])], '-o',
                c=ax.lines[plot_thres_for_idx].get_color(),
                label=f'Best {labels_arr[plot_thres_for_idx]} Threshold (GMean)')
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('False Negative Rate')
    ax.set_title('DET Curve')
    ax.legend()
    ax.grid()
    # Tick positions are probabilities mapped through the inverse
    # normal CDF; labels stay in percent.
    ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
    tick_locations = norm.ppf(ticks)
    tick_labels = [
        '{:.0%}'.format(s) if (100 * s).is_integer() else '{:.1%}'.format(s)
        for s in ticks
    ]
    ax.set_xticks(tick_locations)
    ax.set_xticklabels(tick_labels)
    ax.set_yticks(tick_locations)
    ax.set_yticklabels(tick_labels)
    if log_wandb:
        wandb.log({"det_curve": [wandb.Image(fig)]})
        plt.close(fig)
    return fig, ax
| 25,589
|
def harmonic_separation(audio, margin=3.0):
    """
    Wraps librosa's `harmonic` function, and returns a new Audio object.

    Note that this folds to mono.

    Parameters
    ---------
    audio : Audio
        The Audio object to act on.
    margin : float
        The larger the margin, the larger the separation.
        The default is `3.0`.
    """
    mono_samples = librosa.to_mono(audio.raw_samples)
    harmonic_samples = librosa.effects.harmonic(mono_samples, margin=margin)
    return Audio(raw_samples=harmonic_samples, sample_rate=audio.sample_rate)
| 25,590
|
def _apply_op_flag_vals_for_opdef(
    opdef,
    user_flag_vals,
    force_flags,
    op_cmd,
    args,
    resource_flagdefs,
    op_flag_vals,
):
    """Applies opdef and user-provided flags to `op_flag_vals`.

    Also applies resolved resource flag defs per flag vals
    `resource_flagdefs`.

    Attempts to resolve operation runs and use resolve run short
    IDs as applicable flag values.

    Opdef is used to provide missing default values, coerce flag vals,
    and validate vals. Opdef-provided flag vals are added to op flag
    vals only if they are not already in op flags, or if they are in
    user-provided flags. This maintains existing values (e.g. from a
    restart) unless a user explicitly provides a flag value.

    op_cmd is modified to include CmdFlag with arg-skip=yes for
    resolved run IDs provided a flag isn't defined for the resolved
    resource name. These flag values are used by Guild to resolve
    resources and should not be included in flag args unless the a
    flag def is explicitly provided.
    """
    opdef_flag_vals, opdef_resource_flagdefs = _flag_vals_for_opdef(
        opdef, user_flag_vals, force_flags
    )
    resource_flagdefs.extend(opdef_resource_flagdefs)
    _apply_default_dep_runs(opdef, op_cmd, args, opdef_flag_vals)
    # Keep existing op flag values (e.g. from a restart) unless the
    # user explicitly supplied a value or the flag is new.
    for name, val in opdef_flag_vals.items():
        if name in user_flag_vals or name not in op_flag_vals:
            op_flag_vals[name] = val
| 25,591
|
def merge(
    left,
    right,
    how: str = "inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index: bool = False,
    right_index: bool = False,
    sort: bool = False,
    suffixes=("_x", "_y"),
    copy: bool = True,
    indicator: bool = False,
    validate=None,
):  # noqa: PR01, RT01, D200
    """
    Merge DataFrame or named Series objects with a database-style join.
    """
    # A named Series is promoted to a one-column DataFrame so that
    # DataFrame.merge can handle both cases uniformly.
    if isinstance(left, Series):
        if left.name is None:
            raise ValueError("Cannot merge a Series without a name")
        left = left.to_frame()
    if not isinstance(left, DataFrame):
        raise TypeError(
            f"Can only merge Series or DataFrame objects, a {type(left)} was passed"
        )
    return left.merge(
        right,
        how=how,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        sort=sort,
        suffixes=suffixes,
        copy=copy,
        indicator=indicator,
        validate=validate,
    )
| 25,592
|
def generate_modal(title, callback_id, blocks):
    """
    Generate a modal view object using Slack's BlockKit

    :param title: Title to display at the top of the modal view
    :param callback_id: Identifier used to help determine the type of modal view in future responses
    :param blocks: Blocks to add to the modal view
    :return: View object (Dictionary)
    """
    def _plain_text(text):
        # Slack plain_text element with emoji rendering disabled.
        return {"type": "plain_text", "text": text, "emoji": False}

    return {
        "type": "modal",
        "callback_id": callback_id,
        "title": _plain_text(title),
        "submit": _plain_text("Submit"),
        "close": _plain_text("Cancel"),
        "blocks": blocks,
    }
| 25,593
|
def dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):
    """
    Apply Sobel x and y, compute the direction of the gradient,
    and threshold it.

    Returns a uint8 binary image where pixels whose gradient direction
    (radians) falls inside ``thresh`` are set to 255.
    """
    # Gradients in x and y.
    grad_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Direction of the gradient from the absolute derivatives.
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    # Mask of pixels whose direction lies within the threshold band.
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 255
    return mask.astype(np.uint8)
| 25,594
|
def test_notimplemented_method(name, page_class):
    """All methods raise NotImplementedError"""
    instance = page_class()
    method = getattr(instance, name)
    with pytest.raises(NotImplementedError):
        method()
| 25,595
|
def seconds_to_time( time ):
    """
    Format a duration as a short "XmYs" / "Xs" string.

    Accepts a number of seconds or a timedelta (negative durations are
    clamped to zero) and returns e.g. "1m30s" or "45s".
    """
    if not time:
        return "0s"
    from datetime import datetime, timedelta
    if isinstance( time, timedelta ) or isinstance( time, datetime ):
        # NOTE(review): the datetime branch looks broken — datetime has
        # no .days/.seconds attributes — so only timedelta inputs can
        # actually take this path; confirm intended semantics.
        if time.days < 0:
            diff = timedelta( )
        else:
            diff = time
    else:
        diff = timedelta( seconds = int(time if time >= 0 else 0) )
    second_diff = diff.seconds
    if second_diff < 0:
        second_diff = 0
    if second_diff > 60:
        # Integer division: plain "/" under Python 3 produced float
        # minutes, e.g. "1.5m30s" instead of "1m30s".
        return "%sm%ss" % ( str( second_diff // 60 ), ( second_diff % 60 ) )
    else:
        return "%ss" % second_diff
| 25,596
|
def bq_use_legacy_sql():
    """
    Returns BIGQUERY_LEGACY_SQL if env is set, defaulting to 'TRUE'.
    """
    flag = os.environ.get('BIGQUERY_LEGACY_SQL')
    return flag if flag is not None else 'TRUE'
| 25,597
|
def load_txt_into_set(path, skip_first_line=True):
    """Load a txt file (one value per line) into a set."""
    file = open_file_dir_safe(path)
    with file:
        if skip_first_line:
            # Discard the header line before collecting values.
            file.readline()
        return {line.strip() for line in file}
| 25,598
|
def failed(obj) -> bool:
    """Returns True if ``obj`` is an instance of ``Fail``."""
    return isinstance(obj, Fail)
| 25,599
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.