| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import psycopg2
from flask import current_app
def get_publishers():
""" Fetch and return all registered publishers."""
url = current_app.config['DATABASE']
with psycopg2.connect(url) as conn:
with conn.cursor() as cur:
cur.execute("SELECT * FROM userrole WHERE is_publisher = %s ORDER BY reg_date DESC;", ('true',))
res = cur.fetchall()
return res
|
ed6f95ea576e97f6c38074803d447de4db9b8d40
| 3,652,300
|
import curses
def acs_map():
"""call after curses.initscr"""
# can this mapping be obtained from curses?
return {
ord(b'l'): curses.ACS_ULCORNER,
ord(b'm'): curses.ACS_LLCORNER,
ord(b'k'): curses.ACS_URCORNER,
ord(b'j'): curses.ACS_LRCORNER,
ord(b't'): curses.ACS_LTEE,
ord(b'u'): curses.ACS_RTEE,
ord(b'v'): curses.ACS_BTEE,
ord(b'w'): curses.ACS_TTEE,
ord(b'q'): curses.ACS_HLINE,
ord(b'x'): curses.ACS_VLINE,
ord(b'n'): curses.ACS_PLUS,
ord(b'o'): curses.ACS_S1,
ord(b's'): curses.ACS_S9,
ord(b'`'): curses.ACS_DIAMOND,
ord(b'a'): curses.ACS_CKBOARD,
ord(b'f'): curses.ACS_DEGREE,
ord(b'g'): curses.ACS_PLMINUS,
ord(b'~'): curses.ACS_BULLET,
ord(b','): curses.ACS_LARROW,
ord(b'+'): curses.ACS_RARROW,
ord(b'.'): curses.ACS_DARROW,
ord(b'-'): curses.ACS_UARROW,
ord(b'h'): curses.ACS_BOARD,
ord(b'i'): curses.ACS_LANTERN,
ord(b'p'): curses.ACS_S3,
ord(b'r'): curses.ACS_S7,
ord(b'y'): curses.ACS_LEQUAL,
ord(b'z'): curses.ACS_GEQUAL,
ord(b'{'): curses.ACS_PI,
ord(b'|'): curses.ACS_NEQUAL,
ord(b'}'): curses.ACS_STERLING,
}
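# Usage sketch (added for illustration): the ACS_* constants only exist after
# curses.initscr() has run, so build the mapping inside a curses session, e.g.
# via curses.wrapper. The call is commented out because it needs a real terminal.
def _acs_map_demo(stdscr):
    return acs_map()
# mapping = curses.wrapper(_acs_map_demo)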
|
2121ab5dd650019ae7462d2ab34cf436966cffe9
| 3,652,301
|
def get_polymorphic_ancestors_models(ChildModel):
"""
ENG: Inheritance chain that inherited from the PolymorphicModel include self model.
RUS: Наследуется от PolymorphicModel, включая self.
"""
ancestors = []
for Model in ChildModel.mro():
if isinstance(Model, PolymorphicModelBase):
if not Model._meta.abstract:
ancestors.append(Model)
return reversed(ancestors)
|
aa7129dd81009e72d9fe86787a2d4516dcca3b29
| 3,652,302
|
import matplotlib.pyplot as plt
def plot_load_vs_fractional_freq_shift(all_data, ax=None):
"""
Plot fractional frequency shift as a function of load temperature for all resonators
"""
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 8))
    else:
        fig = ax.figure
for name, group in all_data.groupby('resonator_index'):
ax.plot(group.sweep_primary_load_temperature,group.fractional_delta_f_0,'.')
ax.grid()
ax.set_ylim(-2e-4,1e-5)
ax.set_ylabel('Fractional Frequency Shift')
ax.set_xlabel('Load Temperature (K)')
return fig
|
24f5bdbee5a904fb72d27986634244f380202a34
| 3,652,303
|
def encode_dist_anchor_free_np(gt_ctr, gt_offset, anchor_ctr, anchor_offset=None):
"""
3DSSD anchor-free encoder
:param:
gt_ctr: [bs, points_num, 3]
gt_offset: [bs, points_num, 3]
anchor_ctr: [bs, points_num, 3]
anchor_offset: [bs, points_num, 3]
:return:
encoded_ctr: [bs, points_num, 3]
encoded_offset: [bs, points_num, 3]
"""
target_ctr_half = gt_offset / 2.
# translate to center
padding_half_height = target_ctr_half[:, :, 1]
padding_zeros = np.zeros_like(padding_half_height)
padding_translate = np.stack([padding_zeros, padding_half_height, padding_zeros], axis=-1) # [bs, points_num, 3]
encoded_ctr = gt_ctr - padding_translate # to object center
encoded_ctr = encoded_ctr - anchor_ctr
return encoded_ctr, target_ctr_half
|
96f3fa686a4b89dac35a4a4725831f550a62107b
| 3,652,304
|
import numpy as np
def BCrand(h, hu, t, side, mean_h, amplitude, period, phase):
    """Boundary conditions for the direct model, with additional parameters."""
if side == 'L':
h[0] = mean_h + amplitude * np.sin((t * (2 * np.pi) / period) + phase)
hu[0] = 0.0
elif side == 'R':
h[-1] = h[-2]
hu[-1] = hu[-2] * 0.0
return [h] + [hu]
|
7113fd81a375bbabc2ac8e1bf4e5c6706d3198c6
| 3,652,305
|
import io
import typing
from bs4 import BeautifulSoup
from matplotlib import pyplot as mpl_pyplot
def pyplot(
figure=None,
scale: float = 0.8,
clear: bool = True,
aspect_ratio: typing.Union[list, tuple] = None
) -> str:
"""
:param figure:
:param scale:
:param clear:
:param aspect_ratio:
:return:
"""
environ.abort_thread()
    try:
        from matplotlib import pyplot as mpl_pyplot
    except Exception:
        mpl_pyplot = None
if not figure:
figure = mpl_pyplot.gcf()
if aspect_ratio:
figure.set_size_inches(
aspect_ratio[0],
aspect_ratio[1]
)
else:
figure.set_size_inches(12, 8)
buffer = io.StringIO()
figure.savefig(
buffer,
format='svg',
dpi=300
)
buffer.seek(0)
svg_data = buffer.read()
if clear:
figure.clear()
soup = BeautifulSoup(svg_data, 'html.parser')
svg_tag = soup.find_all('svg')[0]
svg_tag['width'] = '100%'
svg_tag['height'] = '100%'
classes = svg_tag.get('class', '').strip().split(' ')
classes.append('cd-pylab-svg')
svg_tag['class'] = '\n'.join(classes)
styles = [
s for s in svg_tag.get('style', '').split(';')
if len(s.strip()) > 1
]
styles.append('max-height:{}vh;'.format(int(100.0 * scale)))
svg_tag['style'] = ';'.join(styles)
return '<div class="cd-pylab-plot">{}</div>'.format(soup.prettify())
|
04375ad36cefc83a332c3d183b3a97b5f149518a
| 3,652,306
|
import itertools
def simplify(graph):
""" helper that simplifies the xy to mere node ids."""
d = {}
cnt = itertools.count(1)
c2 = []
for s, e, dst in graph.edges():
if s not in d:
d[s] = next(cnt)
if e not in d:
d[e] = next(cnt)
c2.append((d[s], d[e], dst))
g = Graph(from_list=c2)
return g
|
3ec7950922c5e49bad68f19666eb0381247d8f8f
| 3,652,307
|
from typing import Collection
def _get_class_for(type):
"""Returns a :type:`class` corresponding to :param:`type`.
Used for getting a class from object type in JSON response. Usually, to
instantiate the Python object from response, this function is called in
the form of ``_get_class_for(data['object']).from_data(data)``.
:type type: str
:rtype: class
"""
return {
'account': Account,
'balance': Balance,
'bank_account': BankAccount,
'capability': Capability,
'card': Card,
'chain': Chain,
'charge': Charge,
'customer': Customer,
'dispute': Dispute,
'document': Document,
'event': Event,
'forex': Forex,
'link': Link,
'list': Collection,
'occurrence': Occurrence,
'receipt': Receipt,
'recipient': Recipient,
'refund': Refund,
'schedule': Schedule,
'search': Search,
'source': Source,
'token': Token,
'transfer': Transfer,
'transaction': Transaction,
}.get(type)
|
d122fe4dabd8a0486b0ed441b064f5c3d08c6b9b
| 3,652,308
|
import requests
def _query_jupyterhub_api(method, api_path, post_data=None):
"""Query Jupyterhub api
Detects Jupyterhub environment variables and makes a call to the Hub API
Parameters
----------
method : string
HTTP method, e.g. GET or POST
api_path : string
relative path, for example /users/
post_data : dict
JSON arguments for the API call
Returns
-------
response : dict
JSON response converted to dictionary
"""
hub_api_url = get_jupyterhub_api_url()
user = get_jupyterhub_user()
auth_header = get_jupyterhub_authorization()
api_path = api_path.format(authenticated_user=user)
req = requests.request(
url=hub_api_url + api_path,
method=method,
headers=auth_header,
json=post_data,
)
if not req.ok:
raise JupyterhubApiError("JupyterhubAPI returned a status code of: " + str(req.status_code) + " for api_path: " + api_path)
return req.json()
|
22ba33ae53908aabae44bf591f9f3f1efcd97219
| 3,652,309
|
def PoolingOutputShape(input_shape, pool_size=(2, 2),
strides=None, padding='VALID'):
"""Helper: compute the output shape for the pooling layer."""
dims = (1,) + pool_size + (1,) # NHWC
spatial_strides = strides or (1,) * len(pool_size)
strides = (1,) + spatial_strides + (1,)
pads = convolution.PadtypeToPads(input_shape, dims, strides, padding)
operand_padded = onp.add(input_shape, onp.add(*zip(*pads)))
t = onp.floor_divide(onp.subtract(operand_padded, dims), strides) + 1
return tuple(t)
|
185b97b308b8c15aa7fab2701ad40e1ee8241248
| 3,652,310
|
from pathlib import Path
from typing import List
import json
import this
import jsonschema
def parse_config_file(config_file_path: Path) -> List[TabEntry]:
""" Parse the json config file, validate and convert to object structure """
app_config = None
Logger().info(f"Loading file '{config_file_path}'...")
if not config_file_path.is_file():
Logger().error(f"Config file '{config_file_path}' does not exist.")
return []
with open(str(config_file_path)) as fp:
try:
app_config = json.load(fp)
with open(this.base_path / "assets" / "config_schema.json") as schema_file:
json_schema = json.load(schema_file)
jsonschema.validate(instance=app_config, schema=json_schema)
except BaseException as error:
Logger().error(f"Config file:\n{str(error)}")
return []
# build the object model and update
tabs = []
for tab in app_config.get("tabs"):
tab_entry = TabEntry(tab.get("name"))
for app in tab.get("apps"):
# TODO: not very robust, but enough for small changes
update_app_info(app)
app_entry = AppEntry(app, config_file_path)
tab_entry.add_app_entry(app_entry)
tabs.append(tab_entry)
# auto Update version to next version:
app_config["version"] = json_schema.get("properties").get("version").get("enum")[-1]
# write it back with updates
with open(str(config_file_path), "w") as config_file:
json.dump(app_config, config_file, indent=4)
return tabs
|
8ce41d517eed17539b19f6caddb6dc8b2ce50cee
| 3,652,311
|
from flask import Flask
def create_app():
    """Application factory function."""
app = Flask(__name__)
register_blueprint(app)
# register_plugin(app)
register_filter(app)
register_logger()
return app
|
06ad3e8c07fd823b0f8ec5f4a18d49a0a430fa93
| 3,652,312
|
def passwordbox(**kwargs):
"""
This wrapper is for making a dialog for changing your password.
It will return the old password, the new password, and a confirmation.
The remaining keywords are passed on to the autobox class.
"""
    additional_fields = kwargs.pop("additional_fields", None) or []
    title = kwargs.pop("title_string", "Change your password")
    header = kwargs.pop("header_string", None) or "Change your password"
default_fields = [
{"type" : "label", "label" : "First type your old password"},
{"name" : "old_password", "type" : "hidden_text", "label" : "Old Password: "},
{"type" : "label", "label": "Now enter your new password twice"},
{"name" : "new_password", "type" : "hidden_text", "label" : "New Password: "},
{"name" : "confirm_password", "type" : "hidden_text", "label" : "Confirm Password: "}
]
fields = default_fields + additional_fields
return autobox(fields = fields, title_string = title, header_string = header, **kwargs)
|
ff38d854a8d7303bbf58654e220c0b24b3ede105
| 3,652,313
|
import numpy as np
def unravel_hpx_index(idx, npix):
"""Convert flattened global map index to an index tuple.
Parameters
----------
idx : `~numpy.ndarray`
Flat index.
npix : `~numpy.ndarray`
Number of pixels in each band.
Returns
-------
idx : tuple of `~numpy.ndarray`
Index array for each dimension of the map.
"""
if npix.size == 1:
return tuple([idx])
dpix = np.zeros(npix.size, dtype="i")
dpix[1:] = np.cumsum(npix.flat[:-1])
bidx = np.searchsorted(np.cumsum(npix.flat), idx + 1)
pix = idx - dpix[bidx]
return tuple([pix] + list(np.unravel_index(bidx, npix.shape)))
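# Usage sketch (added; values are hypothetical). With two bands of 4 and 6 pixels,
# flat indices are split into (pixel-within-band, band-index) arrays.
pix_demo, band_demo = unravel_hpx_index(np.array([0, 3, 4, 9]), np.array([4, 6]))
# pix_demo  -> array([0, 3, 0, 5])
# band_demo -> array([0, 0, 1, 1])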
|
c7fa097ffeae3219d59526ed76d62383277d317b
| 3,652,314
|
import os
import sqlite3
import csv
def map2sqldb(map_path, column_names, sep='\t'):
"""Determine the mean and 2std of the length distribution of a group
"""
table_name = os.path.basename(map_path).rsplit('.', 1)[0]
sqldb_name = table_name + '.sqlite3db'
sqldb_path = os.path.join(os.path.dirname(map_path), sqldb_name)
conn = sqlite3.connect(sqldb_path) # @UndefinedVariable
c = conn.cursor()
# If table already exist, return the connector and the table_name
SQL = '''
SELECT count(*) FROM sqlite_master WHERE name == \"{}\"
'''.format(table_name)
c.execute(SQL)
exists_flag = False
if c.fetchone()[0] == 1:
c.fetchall() #get rid of the remainder
exists_flag=True
if exists_flag:
return c, table_name
# Create table
SQL = '''
create table if not exists {0} ({1});
'''.format(table_name, '\"' + '\" text,\"'.join([str(n).lower() for n in column_names]) + '\" text')
c.execute(SQL)
c.close()
# Fill table
SQL = '''
insert into {0} values ({1})
'''.format(table_name, ' ,'.join(['?']*len(column_names)))
with open(map_path, 'r') as map_file:
csv.field_size_limit(2147483647)
csv_reader = csv.reader(map_file, delimiter=sep, quoting=csv.QUOTE_NONE)
with sqlite3.connect(sqldb_path) as conn: # @UndefinedVariable
c = conn.cursor()
c.executemany(SQL, csv_reader)
return c, table_name
|
656db3aa8797231ba09f6ddd08d00c0308be9285
| 3,652,315
|
def parse_revdep(value):
"""Value should be an atom, packages with deps intersecting that match."""
try:
targetatom = atom.atom(value)
except atom.MalformedAtom as e:
raise argparser.error(e)
val_restrict = values.FlatteningRestriction(
atom.atom,
values.AnyMatch(values.FunctionRestriction(targetatom.intersects)))
return packages.OrRestriction(*list(
packages.PackageRestriction(dep, val_restrict)
for dep in ('bdepend', 'depend', 'rdepend', 'pdepend')))
|
eb2118af7644fac15fa4ebedba6684d20ab18d47
| 3,652,316
|
def is_context_word(model, word_a, word_b):
"""Calculates probability that both words appear in context with each
other by executing forward pass of model.
Args:
        model (Model): keras model
word_a (int): index of first word
word_b (int): index of second word
"""
# define inputs
input_a = np.zeros((1,))
input_b = np.zeros((1,))
input_a[0,] = word_a
input_b[0,] = word_b
# compute forward pass of model
prediction = model.predict_on_batch([input_a, input_b])
# retrieve value from tf tensor
prediction = prediction.numpy()[0][0]
return prediction
|
a7b0642cfc97b21e53f8b42eaddbed69689a0f1d
| 3,652,317
|
import pathlib
def map_and_save_gene_ids(hit_genes_location, all_detectable_genes_location=''):
"""
Maps gene names/identifiers into internal database identifiers (neo4j ids) and saves them
:param hit_genes_location: genes in the set we would like to analyse
:param all_detectable_genes_location: genes in the set that can be detected (background)
:return: list of internal db ids for hits, list of internal db ids for background
"""
standardized_hits = [] # [primary_set]
standardized_secondary_hits = [] # [secondary_set=None]
if type(hit_genes_location) == str or isinstance(hit_genes_location, pathlib.PurePath):
# log.info('codepath 1')
standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location)]
standardized_secondary_hits = [None]
if type(hit_genes_location) == tuple:
# log.info('codepath 2')
standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location[0])]
standardized_secondary_hits = [cast_external_refs_to_internal_ids(hit_genes_location[1])]
if type(hit_genes_location) == list:
# log.info('codepath 3')
for sub_hit_genes_location in hit_genes_location:
# log.info('codepath 3.0')
if type(sub_hit_genes_location) == str or isinstance(sub_hit_genes_location, pathlib.PurePath):
# log.info('codepath 3.1')
standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location)]
standardized_secondary_hits += [None]
if type(sub_hit_genes_location) == tuple:
# log.info('codepath 3.2')
standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[0])]
standardized_secondary_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[1])]
log.debug('standardized primary hits:\n\t%s' % standardized_hits)
log.debug('standardized secondary_hits:\n\t%s' % standardized_secondary_hits)
dump_object(Dumps.analysis_set_bulbs_ids, (standardized_hits, standardized_secondary_hits))
if all_detectable_genes_location:
background_set = cast_external_refs_to_internal_ids(all_detectable_genes_location)
# print(background_set)
primary_set = [y for x in standardized_hits for y in x] # flattens the mapped ids list
# print(primary_set)
formatted_secondary_hits = [_l
if _l is not None
else []
for _l in standardized_secondary_hits]
sec_set = [y for x in formatted_secondary_hits for y in x]
re_primary_set = set()
for _id in primary_set:
if type(_id) == str or type(_id) == int:
re_primary_set.add(_id)
else:
re_primary_set.add(_id[0])
primary_set = re_primary_set
re_secondary_set = set()
for _id in sec_set:
if type(_id) == str or type(_id) == int:
re_secondary_set.add(_id)
else:
re_secondary_set.add(_id[0])
        sec_set = re_secondary_set
if type(background_set[0]) == str or type(background_set[0]) == int: # unweighted
background_set = list(set(background_set).union(primary_set).union(sec_set))
else:
            bck_set = {_id[0] for _id in background_set}
if not primary_set.issubset(bck_set):
log.info('Nodes ids %s are missing in background set and are added with weight 0' %
(primary_set - bck_set))
background_set += [(_id, 0) for _id in (primary_set - bck_set)]
if not sec_set.issubset(bck_set):
log.info('Secondary set nodes ids %s are missing in background set and are added '
'with weight 0' % (sec_set - bck_set))
background_set += [(_id, 0) for _id in (sec_set - bck_set)]
else:
background_set = []
dump_object(Dumps.background_set_bulbs_ids, background_set)
return standardized_hits, standardized_secondary_hits, background_set
|
db29322d61b12cf7a3d266b831b553305327bed3
| 3,652,318
|
def next_method():
"""next, for: Get one item of an iterators."""
class _Iterator:
def __init__(self):
self._stop = False
def __next__(self):
if self._stop:
raise StopIteration()
self._stop = True
return "drums"
return next(_Iterator())
|
85cdd08a65ae66c2869ba2067db81ff37f40d0b8
| 3,652,319
|
import json
def get_ingress_deployment(
serve_dag_root_node: DAGNode, pipeline_input_node: PipelineInputNode
) -> Deployment:
"""Return an Ingress deployment to handle user HTTP inputs.
Args:
serve_dag_root_node (DAGNode): Transformed as serve DAG's root. User
inputs are translated to serve_dag_root_node.execute().
pipeline_input_node (DAGNode): Singleton PipelineInputNode instance that
contains input preprocessor info.
Returns:
ingress (Deployment): Generated pipeline ingress deployment to serve
user HTTP requests.
"""
serve_dag_root_json = json.dumps(serve_dag_root_node, cls=DAGNodeEncoder)
preprocessor_import_path = pipeline_input_node.get_preprocessor_import_path()
serve_dag_root_deployment = serve.deployment(Ingress).options(
name=DEFAULT_INGRESS_DEPLOYMENT_NAME,
init_args=(
serve_dag_root_json,
preprocessor_import_path,
),
)
return serve_dag_root_deployment
|
33f7ca9218e59af168fccdd8e0d0392964febaf2
| 3,652,320
|
def get_project_settings(project):
"""Gets project's settings.
Return value example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/settings', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project settings " + response.text
)
res = response.json()
for val in res:
if val['attribute'] == 'ImageQuality':
if val['value'] == 60:
val['value'] = 'compressed'
elif val['value'] == 100:
val['value'] = 'original'
else:
raise SABaseException(0, "NA ImageQuality value")
return res
|
298d00eedff7c70ae8745e47f2eff48642988c7b
| 3,652,321
|
def guard(M, test):
"""Monadic guard.
What it does::
return M.pure(Unit) if test else M.empty()
https://en.wikibooks.org/wiki/Haskell/Alternative_and_MonadPlus#guard
"""
return M.pure(Unit) if test else M.empty()
|
9184310fcebec10ca1cc7cdb25e36831b327cbb0
| 3,652,322
|
def get_git_hash() -> str:
"""Get the git hash."""
rv = _run("git", "rev-parse", "HEAD")
if rv is None:
return "UNHASHED"
return rv
|
978eca015aeb534e500dbbc5e9ab7aad5b487865
| 3,652,323
|
def primary_style():
""" a blue green style """
return color_mapping(
'bg:#449adf #ffffff',
'bg:#002685 #ffffff',
'#cd1e10',
'#007e3a',
'#fe79d1',
'#4cde77',
'#763931',
'#64d13e',
'#7e77d2',
'bg:#000000 #ffffff',
)
|
aecbe4cccb18763cf961ba08d6d9c04188080989
| 3,652,324
|
def decrypt_files(rsa_key):
"""
Decrypt all encrypted files on host machine
`Required`
:param str rsa_key: RSA private key in PEM format
"""
try:
if not isinstance(rsa_key, Crypto.PublicKey.RSA.RsaKey):
rsa_key = Crypto.PublicKey.RSA.importKey(rsa_key)
if not rsa_key.has_private():
return "Error: RSA key cannot decrypt"
globals()['threads']['iter_files'] = _iter_files(rsa_key)
globals()['threads']['decrypt_files'] = _threader()
return "Decrypting files"
except Exception as e:
util.log("{} error: {}".format(decrypt_files.__name__, str(e)))
|
ccc5d253b5ab7a7851195751a798ba4e18fef983
| 3,652,325
|
def _bivariate_uc_uc(
lhs,rhs,
z,
dz_dl, # (dz_re_dl_re, dz_re_dl_im, dz_im_dl_re, dz_im_dl_im)
dz_dr # (dz_re_dr_re, dz_re_dr_im, dz_im_dr_re, dz_im_dr_im)
):
"""
Create an uncertain complex number as a bivariate function
This is a utility method for implementing mathematical
functions of uncertain complex numbers.
The parameters 'lhs' and 'rhs' are the UncertainComplex
arguments to the function, 'z' is the complex value of the
function and 'dz_dl' and 'dz_dr' are the Jacobian matrices
of the function value z with respect to the real and imaginary
components of the function's left and right arguments.
Parameters
----------
lhs, rhs : :class:`UncertainComplex`
z : complex
dz_dl, dz_dr : 4-element sequence of float
Returns
-------
:class:`UncertainComplex`
"""
lhs_r = lhs.real
lhs_i = lhs.imag
rhs_r = rhs.real
rhs_i = rhs.imag
u_lhs_real, u_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._u_components,(dz_dl[0],dz_dl[2]),
lhs_i._u_components,(dz_dl[1],dz_dl[3])
)
u_rhs_real, u_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._u_components,(dz_dr[0],dz_dr[2]),
rhs_i._u_components,(dz_dr[1],dz_dr[3])
)
d_lhs_real, d_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._d_components,(dz_dl[0],dz_dl[2]),
lhs_i._d_components,(dz_dl[1],dz_dl[3])
)
d_rhs_real, d_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._d_components,(dz_dr[0],dz_dr[2]),
rhs_i._d_components,(dz_dr[1],dz_dr[3])
)
i_lhs_real, i_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._i_components,(dz_dl[0],dz_dl[2]),
lhs_i._i_components,(dz_dl[1],dz_dl[3])
)
i_rhs_real, i_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._i_components,(dz_dr[0],dz_dr[2]),
rhs_i._i_components,(dz_dr[1],dz_dr[3])
)
return UncertainComplex(
UncertainReal(
z.real,
vector.merge_vectors(
u_lhs_real, u_rhs_real
),
vector.merge_vectors(
d_lhs_real, d_rhs_real
),
vector.merge_vectors(
i_lhs_real, i_rhs_real
)
),
UncertainReal(
z.imag,
vector.merge_vectors(
u_lhs_imag,u_rhs_imag
),
vector.merge_vectors(
d_lhs_imag,d_rhs_imag
),
vector.merge_vectors(
i_lhs_imag, i_rhs_imag
)
)
)
|
f3b2c778cd1152910c951e893861f0c900978a4e
| 3,652,326
|
def smoothing_filter(time_in, val_in, time_out=None, relabel=None, params=None):
"""
@brief Smoothing filter with relabeling and resampling features.
@details It supports evenly sampled multidimensional input signal.
Relabeling can be used to infer the value of samples at
time steps before and after the explicitly provided samples.
As a reminder, relabeling is a generalization of periodicity.
@param[in] time_in Time steps of the input signal (1D numpy array)
@param[in] val_in Sampled values of the input signal
(2D numpy array: row = sample, column = time)
@param[in] time_out Time steps of the output signal (1D numpy array)
@param[in] relabel Relabeling matrix (identity for periodic signals)
Optional: Disable if omitted
@param[in] params Parameters of the filter. Dictionary with keys:
                      'mixing_ratio_1': Relative time at the beginning of the signal
during the output signal corresponds to a
linear mixing over time of the filtered and
original signal. (only used if relabel is omitted)
'mixing_ratio_2': Relative time at the end of the signal
during the output signal corresponds to a
linear mixing over time of the filtered and
original signal. (only used if relabel is omitted)
                      'smoothness'[0]: Smoothing factor to filter the beginning of the signal
(only used if relabel is omitted)
'smoothness'[1]: Smoothing factor to filter the end of the signal
(only used if relabel is omitted)
'smoothness'[2]: Smoothing factor to filter the middle part of the signal
@return Filtered signal (2D numpy array: row = sample, column = time)
"""
if time_out is None:
time_out = time_in
if params is None:
params = dict()
params['mixing_ratio_1'] = 0.12
params['mixing_ratio_2'] = 0.04
params['smoothness'] = [0.0,0.0,0.0]
params['smoothness'][0] = 5e-3
params['smoothness'][1] = 5e-3
params['smoothness'][2] = 3e-3
if relabel is None:
mix_fit = [None,None,None]
mix_fit[0] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_1']*((t-time_in[0])/(time_in[-1]-time_in[0]))*np.pi-np.pi/2))
mix_fit[1] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_2']*((t-(1-params['mixing_ratio_2'])*time_in[-1])/(time_in[-1]-time_in[0]))*np.pi+np.pi/2))
mix_fit[2] = lambda t: 1
val_fit = []
for jj in range(val_in.shape[0]):
val_fit_jj = []
for kk in range(len(params['smoothness'])):
val_fit_jj.append(UnivariateSpline(time_in, val_in[jj], s=params['smoothness'][kk]))
val_fit.append(val_fit_jj)
time_out_mixing = [None, None, None]
time_out_mixing_ind = [None, None, None]
time_out_mixing_ind[0] = time_out < time_out[-1]*params['mixing_ratio_1']
time_out_mixing[0] = time_out[time_out_mixing_ind[0]]
time_out_mixing_ind[1] = time_out > time_out[-1]*(1-params['mixing_ratio_2'])
time_out_mixing[1] = time_out[time_out_mixing_ind[1]]
time_out_mixing_ind[2] = np.logical_and(np.logical_not(time_out_mixing_ind[0]), np.logical_not(time_out_mixing_ind[1]))
time_out_mixing[2] = time_out[time_out_mixing_ind[2]]
val_out = np.zeros((val_in.shape[0],len(time_out)))
for jj in range(val_in.shape[0]):
for kk in range(len(time_out_mixing)):
val_out[jj,time_out_mixing_ind[kk]] = \
(1 - mix_fit[kk](time_out_mixing[kk])) * val_fit[jj][kk](time_out_mixing[kk]) + \
mix_fit[kk](time_out_mixing[kk]) * val_fit[jj][-1](time_out_mixing[kk])
else:
time_tmp = np.concatenate([time_in[:-1]-time_in[-1],time_in,time_in[1:]+time_in[-1]])
val_in_tmp = np.concatenate([relabel.dot(val_in[:,:-1]),val_in,relabel.dot(val_in[:,1:])], axis=1)
val_out = np.zeros((val_in.shape[0],len(time_out)))
for jj in range(val_in_tmp.shape[0]):
f = UnivariateSpline(time_tmp, val_in_tmp[jj], s=params['smoothness'][-1])
val_out[jj] = f(time_out)
return val_out
|
7af0f6925d255c0445c7b5dfdfb330f4058f8afc
| 3,652,327
|
def get_selector_qty(*args):
"""get_selector_qty() -> int"""
return _idaapi.get_selector_qty(*args)
|
82ea62d3220893456358c42b0ec931e5c2cf9053
| 3,652,328
|
from typing import Optional
from typing import Dict
from typing import Any
import requests
def get(
host: str,
path: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
stream: bool = False,
) -> requests.Response:
"""
Send a GET request to the remote API.
"""
return do_request(
"GET",
host,
path,
params=params,
headers=headers,
authenticated=authenticated,
stream=stream,
)
|
7f0188ad2d678c0edef5d4fce623a5faee5c13db
| 3,652,329
|
def inner_xml(xml_text):
"""
Get the inner xml of an element.
>>> inner_xml('<div>This is some <i><b>really</b> silly</i> text!</div>')
u'This is some <i><b>really</b> silly</i> text!'
"""
return unicode(INNER_XML_RE.match(xml_text).groupdict()['body'])
|
dcba13de5a75d4b9956c2a27f02a289212d9789e
| 3,652,330
|
def store_tags():
"""Routing: Stores the (updated) tag data for the image."""
data = {
"id": request.form.get("id"),
"tag": request.form.get('tags'),
"SHOWN": 0
}
loader.store(data)
next_image = loader.next_data()
if next_image is None:
return redirect("/finished")
target = "/"
if next_image:
target = f"/?image_id={next_image['id']}"
return redirect(location=target)
|
ec433586e7ad60d2b85ac8ff2ccc209f4c00a110
| 3,652,331
|
def getAssets(public_key: str) -> list:
"""
Get all the balances an account has.
"""
balances = server.accounts().account_id(public_key).call()['balances']
balances_to_return = [ {"asset_code": elem.get("asset_code"), "issuer": elem.get("asset_issuer"), "balance": elem.get("balance")} for elem in balances ]
balances_to_return[-1]["asset_code"] = "XLM"
return balances_to_return
|
71c1b89edd79f0dc4092b909c2d7f505b35d5391
| 3,652,332
|
from string import Formatter
def parse_pattern(format_string, env, wrapper=lambda x, y: y):
""" Parse the format_string and return prepared data according to the env.
Pick each field found in the format_string from the env(ironment), apply
the wrapper on each data and return a mapping between field-to-replace and
values for each.
"""
formatter = Formatter()
fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None]
prepared_env = {}
# Create a prepared environment with only used fields, all as list:
for field in fields:
# Search for a movie attribute for each alternative field separated
# by a pipe sign:
for field_alt in (x.strip() for x in field.split('|')):
# Handle default values (enclosed by quotes):
if field_alt[0] in '\'"' and field_alt[-1] in '\'"':
field_values = field_alt[1:-1]
else:
field_values = env.get(field_alt)
if field_values is not None:
break
else:
field_values = []
if not isinstance(field_values, list):
field_values = [field_values]
prepared_env[field] = wrapper(field_alt, field_values)
return prepared_env
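# Usage sketch (added; hypothetical format string and environment). Alternative
# fields are separated by '|' and quoted literals act as fallback defaults.
demo_env = {"year": 2004}
parse_pattern("{title|'unknown'} ({year})", demo_env)
# -> {"title|'unknown'": ['unknown'], 'year': [2004]}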
|
fdd5057929ed06f5ee984019e967df45d683fb75
| 3,652,333
|
def u1_series_summation(xarg, a, kmax):
"""
5.3.2 ROUTINE - U1 Series Summation
PLATE 5-10 (p32)
:param xarg:
:param a:
:param kmax:
:return: u1
"""
du1 = 0.25*xarg
u1 = du1
f7 = -a*du1**2
k = 3
while k < kmax:
du1 = f7*du1 / (k*(k-1))
u1old = u1
u1 = u1+du1
if u1 == u1old:
break
k = k+2
return u1
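# Sanity check (added for illustration): with a = 0 the series terminates after
# the first term, so the routine returns xarg / 4.
u1_series_summation(2.0, 0.0, 25)  # -> 0.5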
|
e54cb5f68dd5ecba5dd7f540ac645ff8d70ae0e3
| 3,652,334
|
def mask_iou(masks_a, masks_b, iscrowd=False):
"""
    Computes the pairwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].
The output is of size [a, b].
Wait I thought this was "box_utils", why am I putting this in here?
"""
masks_a = masks_a.view(masks_a.size(0), -1)
masks_b = masks_b.view(masks_b.size(0), -1)
matmul = nn.MatMul()
intersection = matmul(masks_a, masks_b.T)
mask_iou_sum = P.ReduceSum()
expand_dims = P.ExpandDims()
area_a = expand_dims(mask_iou_sum(masks_a, 1), 1)
area_b = expand_dims(mask_iou_sum(masks_b, 1), 0)
return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a
|
585bb48b3b8460660739acd102d8a0f5e1716078
| 3,652,335
|
import torch
def normalized_grid_coords(height, width, aspect=True, device="cuda"):
"""Return the normalized [-1, 1] grid coordinates given height and width.
Args:
height (int) : height of the grid.
width (int) : width of the grid.
aspect (bool) : if True, use the aspect ratio to scale the coordinates, in which case the
            coords will not be normalized to [-1, 1]. (Default: True)
device : the device the tensors will be created on.
"""
aspect_ratio = width/height if aspect else 1.0
window_x = torch.linspace(-1, 1, steps=width, device=device) * aspect_ratio
window_y = torch.linspace(1, -1, steps=height, device=device)
coord = torch.stack(torch.meshgrid(window_x, window_y, indexing='ij')).permute(2,1,0)
return coord
|
7ddd1c5eda2e28116e40fa99f6cd794d9dfd48cc
| 3,652,336
|
from typing import Optional
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Any
import ray
from ray.exceptions import RayTaskError
import traceback
def ray_map(task: Task, *item_lists: Iterable[List[Any]], log_dir: Optional[Path] = None) -> List[Any]:
"""
Initialize ray, align item lists and map each item of a list of arguments to a callable and executes in parallel.
:param task: callable to be run
:param item_lists: items to be parallelized
:param log_dir: directory to store worker logs
:return: list of outputs
"""
try:
results = _ray_map_items(task, *item_lists, log_dir=log_dir)
return results
except (RayTaskError, Exception) as exc:
ray.shutdown()
traceback.print_exc()
raise RuntimeError(exc)
|
a033bb1f2d84b7a37bffd4db4643ed5c2291b3ba
| 3,652,337
|
import numpy as np
def consensus_kmeans(data=None,
k=0,
linkage='average',
nensemble=100,
kmin=None,
kmax=None):
"""Perform clustering based on an ensemble of k-means partitions.
Parameters
----------
data : array
An m by n array of m data samples in an n-dimensional space.
k : int, optional
Number of clusters to extract; if 0 uses the life-time criterion.
linkage : str, optional
Linkage criterion for final partition extraction; one of 'average',
'centroid', 'complete', 'median', 'single', 'ward', or 'weighted'.
nensemble : int, optional
Number of partitions in the ensemble.
kmin : int, optional
Minimum k for the k-means partitions; defaults to :math:`\\sqrt{m}/2`.
kmax : int, optional
Maximum k for the k-means partitions; defaults to :math:`\\sqrt{m}`.
Returns
-------
clusters : dict
Dictionary with the sample indices (rows from 'data') for each found
cluster; outliers have key -1; clusters are assigned integer keys
starting at 0.
"""
# check inputs
if data is None:
raise TypeError("Please specify input data.")
N = len(data)
if kmin is None:
kmin = int(round(np.sqrt(N) / 2.))
if kmax is None:
kmax = int(round(np.sqrt(N)))
# initialization grid
    grid = {
        'k': np.random.randint(low=kmin, high=kmax + 1, size=nensemble)
    }
# run consensus
clusters, = consensus(data=data,
k=k,
linkage=linkage,
fcn=kmeans,
grid=grid)
return utils.ReturnTuple((clusters,), ('clusters',))
|
25ee74ac24883a4981db98c730c9010d13866840
| 3,652,338
|
import datetime as dt
import cftime as cfdt
def to_cftime(date, calendar="gregorian"):
"""Convert datetime object to cftime object.
Parameters
----------
date : datetime object
Datetime object.
calendar : str
Calendar of the cftime object.
Returns
-------
cftime : cftime object
        Cftime object.
"""
if type(date) == dt.date:
date = dt.datetime.combine(date, dt.time())
elif isinstance(date, cfdt.datetime):
# do nothing
return date
return cfdt.datetime(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
date.microsecond,
calendar=calendar,
)
|
cfd968e1fd74f105ef7b44ce6700d646d4470910
| 3,652,339
|
from skimage.draw import polygon2mask
def poly_to_mask(mask_shape, vertices):
"""Converts a polygon to a boolean mask with `True` for points
lying inside the shape. Uses the bounding box of the vertices to reduce
computation time.
Parameters
----------
mask_shape : np.ndarray | tuple
1x2 array of shape of mask to be generated.
vertices : np.ndarray
Nx2 array of the vertices of the polygon.
Returns
-------
mask : np.ndarray
Boolean array with `True` for points inside the polygon
"""
return polygon2mask(mask_shape, vertices)
|
13dec3d1057cff4823fa989e268f5103756bc263
| 3,652,340
|
import numpy as np
def get_nn_edges(
basis_vectors,
extent,
site_offsets,
pbc,
distance_atol,
order,
):
"""For :code:`order == k`, generates all edges between up to :math:`k`-nearest
neighbor sites (measured by their Euclidean distance). Edges are colored by length
with colors between 0 and `order - 1` in order of increasing length."""
positions, ids = create_padded_sites(
basis_vectors, extent, site_offsets, pbc, order
)
naive_edges_by_order = get_naive_edges(
positions,
order * np.linalg.norm(basis_vectors, axis=1).max() + distance_atol,
order,
)
colored_edges = []
for k, naive_edges in enumerate(naive_edges_by_order):
true_edges = set()
for node1, node2 in naive_edges:
# switch to real node indices
node1 = ids[node1]
node2 = ids[node2]
if node1 == node2:
raise RuntimeError(
f"Lattice contains self-referential edge {(node1, node2)} of order {k}"
)
elif node1 > node2:
node1, node2 = node2, node1
true_edges.add((node1, node2))
for edge in true_edges:
colored_edges.append((*edge, k))
return colored_edges
|
dfc55a3696c18769bbe3d4b15f068afbc763b6bf
| 3,652,341
|
from datetime import date, datetime, timedelta
import pytz
import dateutil.rrule
def expand(vevent, default_tz, href=''):
"""
:param vevent: vevent to be expanded
:type vevent: icalendar.cal.Event
:param default_tz: the default timezone used when we (icalendar)
don't understand the embedded timezone
:type default_tz: pytz.timezone
:param href: the href of the vevent, used for more informative logging
:type href: str
:returns: list of start and end (date)times of the expanded event
    :rtype: list(tuple(datetime, datetime))
"""
    # we do this now and then never care about the "real" end time again
if 'DURATION' in vevent:
duration = vevent['DURATION'].dt
else:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
# dateutil.rrule converts everything to datetime
allday = not isinstance(vevent['DTSTART'].dt, datetime)
# icalendar did not understand the defined timezone
if (not allday and 'TZID' in vevent['DTSTART'].params and
vevent['DTSTART'].dt.tzinfo is None):
vevent['DTSTART'].dt = default_tz.localize(vevent['DTSTART'].dt)
if 'RRULE' not in vevent.keys():
return [(vevent['DTSTART'].dt, vevent['DTSTART'].dt + duration)]
events_tz = None
if getattr(vevent['DTSTART'].dt, 'tzinfo', False):
events_tz = vevent['DTSTART'].dt.tzinfo
vevent['DTSTART'].dt = vevent['DTSTART'].dt.astimezone(pytz.UTC)
rrulestr = vevent['RRULE'].to_ical()
rrule = dateutil.rrule.rrulestr(rrulestr, dtstart=vevent['DTSTART'].dt)
if not set(['UNTIL', 'COUNT']).intersection(vevent['RRULE'].keys()):
# rrule really doesn't like to calculate all recurrences until
# eternity, so we only do it 15years into the future
dtstart = vevent['DTSTART'].dt
if isinstance(dtstart, date):
dtstart = datetime(*list(dtstart.timetuple())[:-3])
rrule._until = dtstart + timedelta(days=15 * 365)
if ((not getattr(rrule._until, 'tzinfo', True)) and
(getattr(vevent['DTSTART'].dt, 'tzinfo', False))):
rrule._until = vevent['DTSTART'].dt.tzinfo \
.localize(rrule._until)
logger.debug('calculating recurrence dates for {0}, '
'this might take some time.'.format(href))
dtstartl = list(rrule)
if len(dtstartl) == 0:
raise UnsupportedRecursion
if events_tz is not None:
dtstartl = [start.astimezone(events_tz) for start in dtstartl]
elif allday:
dtstartl = [start.date() for start in dtstartl]
dtstartend = [(start, start + duration) for start in dtstartl]
return dtstartend
|
4d158051b95befed575f1243fd905d545c0cdabb
| 3,652,342
|
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate
========== ============= ======= ====
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 1.0
2 (1-0) 1 (0-1) right motion 1.0
3 (1-1) (none) - -
4 (0/0) (none) - -
5 (0/1) 6 (1/0) down motion 1.1
6 (1/0) 5 (0/1) up motion 0.9
7 (1/1) (none) - -
"""
xn_list = []
xn_list.append( Transition((0,1,0), (1,0,0), 1., 'left motion') )
xn_list.append( Transition((1,0,0), (0,1,0), 1., 'right motion') )
xn_list.append( Transition((0,1,1), (1,0,1), 1.1, 'down motion') )
xn_list.append( Transition((1,0,1), (0,1,1), 0.9, 'up motion') )
if _DEBUG:
print()
print('setup_transition_list(): list has',len(xn_list),'transitions:')
for t in xn_list:
print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)
return xn_list
|
d820502beefc6065f1d5624dc0c0749fc65a0ae9
| 3,652,343
|
import math
import numpy as np
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1.0]))
[1.0]
see: https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py
"""
if out is None:
data = np.array(data, dtype=np.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(np.dot(data, data))
return data
else:
if out is not data:
out[:] = np.array(data, copy=False)
data = out
length = np.atleast_1d(np.sum(data*data, axis))
np.sqrt(length, length)
if axis is not None:
length = np.expand_dims(length, axis)
data /= length
if out is None:
return data
|
eb29e86d33ff576f290ea11aa6e2e6180a04d56e
| 3,652,344
|
import numpy as np
import sklearn.metrics as skl
import torch
def negative_f1_score(probs, labels):
"""
Computes the f1 score between output and labels for k classes.
args:
probs (tensor) (size, k)
labels (tensor) (size, 1)
"""
probs = torch.nn.functional.softmax(probs, dim=1)
probs = probs.numpy()
labels = labels.numpy()
pred = np.argmax(probs, axis=1)
return skl.f1_score(labels, pred, pos_label=0)
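# Small sanity check (added): both samples are classified correctly, so the F1
# score of the negative class (label 0) is 1.0. Relies on the imports above.
probs_demo = torch.tensor([[2.0, 1.0], [0.5, 3.0]])  # raw logits for 2 classes
labels_demo = torch.tensor([0, 1])
negative_f1_score(probs_demo, labels_demo)  # -> 1.0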
|
bd308d70934ed5ada0868f454b07c0f554384f32
| 3,652,345
|
import requests
def search_usb_devices_facets():
"""Facet USB Devices"""
data = {"terms": {"fields": ["status"]}}
usb_url = USB_DEVICES_FACETS.format(HOSTNAME, ORG_KEY)
return requests.post(usb_url, json=data, headers=HEADERS)
|
d4f09b8374fe2461ac5e7c121822287bf8e80494
| 3,652,346
|
import struct
def pack4(v):
"""
Takes a 32 bit integer and returns a 4 byte string representing the
number in little endian.
"""
assert 0 <= v <= 0xffffffff
# The < is for little endian, the I is for a 4 byte unsigned int.
# See https://docs.python.org/2/library/struct.html for more info.
return struct.pack('<I', v)
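# Quick checks (added for illustration): little-endian packing of 32-bit unsigned integers.
assert pack4(1) == b'\x01\x00\x00\x00'
assert pack4(0xdeadbeef) == b'\xef\xbe\xad\xde'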
|
bbaeb0026624a7ec30ec379466ef11398f93d573
| 3,652,347
|
def index():
"""
"""
category = Category.get_categories()
pitch = Pitch.get_all_pitches()
title = "Welcome to Pitch Hub"
return render_template('index.html', title = title, category = category, pitch =pitch)
|
6758964bf9a304d62d9048e9b9248cee39d04742
| 3,652,348
|
def maximum_sum_increasing_subsequence(numbers, size):
"""
Given an array of n positive integers. Write a program to find the sum of
maximum sum subsequence of the given array such that the integers in the
subsequence are sorted in increasing order.
"""
results = [numbers[i] for i in range(size)]
for i in range(1, size):
for j in range(i):
if numbers[i] > numbers[j] and results[i] < results[j] + numbers[i]:
results[i] = results[j] + numbers[i]
return max(results)
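# Worked example (added): the best increasing subsequence of [1, 101, 2, 3, 100]
# by sum is 1 + 2 + 3 + 100 = 106 (beating 1 + 101 = 102).
demo_numbers = [1, 101, 2, 3, 100]
assert maximum_sum_increasing_subsequence(demo_numbers, len(demo_numbers)) == 106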
|
a684ead4dcd9acbf8c796f5d24a3bf826fb5ad9d
| 3,652,349
|
import numpy.linalg as la
def lstsqb(a, b):
"""
Return least-squares solution to a = bx.
Similar to MATLAB / operator for rectangular matrices.
If b is invertible then the solution is la.solve(a, b).T
"""
return la.lstsq(b.T, a.T, rcond=None)[0].T
|
4b046896ce29b79e9edcb434b1a01c652654867c
| 3,652,350
|
import numpy as np
def multivariateGaussian(X, mu, sigma2):
    """
    Multivariate Gaussian distribution.
    :param X:
    :param mu:
    :param sigma2:
    :return:
    """
k = len(mu)
if sigma2.shape[0] > 1:
sigma2 = np.diag(sigma2)
X = X - mu
argu = (2 * np.pi) ** (-k / 2) * np.linalg.det(sigma2) ** (-0.5)
p = argu * np.exp(-0.5 * np.sum(np.dot(X, np.linalg.inv(sigma2)) * X, axis=1))
return p
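# Worked example (added): at the mean of a 2-D standard Gaussian the density is
# 1 / (2 * pi) ~= 0.1592. Uses the numpy import added above.
X_demo = np.zeros((1, 2))
multivariateGaussian(X_demo, np.zeros(2), np.ones(2))  # -> array([0.15915494])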
|
67a466318c473eef2749bf23e26d45de1149c5dc
| 3,652,351
|
import datetime
def get_day(input):
"""
Convert input to a datetime object and extract the Day part
"""
if isinstance(input, str):
input = parse_iso(input)
if isinstance(input, (datetime.date, datetime.datetime)):
return input.day
return None
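# Example (added): a datetime object is accepted directly and its day is returned;
# string input goes through parse_iso, which is defined elsewhere in this codebase.
get_day(datetime.datetime(2021, 3, 5))  # -> 5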
|
8a18e1832b85faf0612667ce3431176301502523
| 3,652,352
|
def read_ds(tier, pos_source=None):
"""
Like read_pt above, given a DS tier, return the DepTree object
:param tier:
:type tier: RGTier
"""
# First, assert that the type we're looking at is correct.
assert tier.type == DS_TIER_TYPE
# --1) Root the tree.
root = DepTree.root()
# --2) We will build up a list of edges, then attach the edges to the tree.
edges = []
# --2b) Retrieve the POS tier, if it exists, in advance.
pos_tier = tier.igt.get_pos_tags(tier.attributes.get(DS_DEP_ATTRIBUTE), tag_method=pos_source)
for item in tier:
dep = item.attributes.get(DS_DEP_ATTRIBUTE)
head = item.attributes.get(DS_HEAD_ATTRIBUTE)
# Get the POS tag if it exists
pos = None
if pos_tier:
pos_item = pos_tier.find(alignment=dep)
if pos_item:
pos = pos_item.value()
# Get the word value...
dep_w = tier.igt.find(id=dep)
dep_t = Terminal(dep_w.value(), dep_w.index)
if head is not None:
head_w = tier.igt.find(id=head)
head_t = Terminal(head_w.value(), head_w.index)
else:
head_t = Terminal('ROOT', 0)
e = DepEdge(head=head_t, dep=dep_t, type=item.value(), pos=pos)
edges.append(e)
dt = build_dep_edges(edges)
return dt
|
797503380a3ff697440da8cd5d409b5c89384f4f
| 3,652,353
|
def get_local_ontology_from_file(ontology_file):
""" return ontology class from a local OWL file """
return ow.get_ontology("file://" + ontology_file).load()
|
c022aac464c4afdbc088455a5edf8a4d91bc5586
| 3,652,354
|
import urllib
import urllib2
import xml.etree.ElementTree as ET
def get_wolframalpha_imagetag(searchterm):
""" Used to get the first image tag from the Wolfram Alpha API. The return value is a dictionary
with keys that can go directly into html.
Takes in:
searchterm: the term to search with in the Wolfram Alpha API
"""
base_url = 'http://api.wolframalpha.com/v2/query?'
app_id = credentials['wolframkey'] # api key
url_params = {'input': searchterm, 'appid': app_id}
headers = {'User-Agent': None}
data = urllib.urlencode(url_params)
req = urllib2.Request(base_url, data, headers)
xml = urllib2.urlopen(req).read()
tree = ET.fromstring(xml)
for e in tree.findall('pod'):
for item in [ef for ef in list(e) if ef.tag == 'subpod']:
for it in [i for i in list(item) if i.tag == 'img']:
if it.tag == 'img':
if float(it.attrib['width']) > 50 and float(it.attrib['height']) > 50:
return it.attrib['src']
|
958e09d6498b1f1d98de72fe9089e45e48988f20
| 3,652,355
|
def get_synset_definitions(word):
"""Return all possible definitions for synsets in a word synset ring.
:param word (str): The word to lookup.
:rtype definitions (list): The synset definitions list.
"""
definitions = []
synsets = get_word_synsets(word)
for _synset in synsets:
definitions.append(_synset.definition().split())
return definitions
|
70d522777cd413902157df6c0d96bdf378d7cf69
| 3,652,356
|
import json
def getResourceDefUsingSession(url, session, resourceName, sensitiveOptions=False):
"""
get the resource definition - given a resource name (and catalog url)
catalog url should stop at port (e.g. not have ldmadmin, ldmcatalog etc...
or have v2 anywhere
since we are using v1 api's
returns rc=200 (valid) & other rc's from the get
resourceDef (json)
"""
print(
"getting resource for catalog:-"
+ url
+ " resource="
+ resourceName
)
apiURL = url + "/access/1/catalog/resources/" + resourceName
if sensitiveOptions:
apiURL += "?sensitiveOptions=true"
# print("\turl=" + apiURL)
header = {"Accept": "application/json"}
tResp = session.get(apiURL, params={}, headers=header, )
print("\tresponse=" + str(tResp.status_code))
if tResp.status_code == 200:
        # valid - return the json
return tResp.status_code, json.loads(tResp.text)
else:
# not valid
return tResp.status_code, None
|
883a393018b068b8f15a8c0ea5ac6969c1a386b6
| 3,652,357
|
def _merge_sse(sum1, sum2):
"""Merge the partial SSE."""
sum_count = sum1 + sum2
return sum_count
|
0aae96262cfb56c6052fdbe5bbd92437d37b1f76
| 3,652,358
|
def earliest_deadline_first(evs, iface):
""" Sort EVs by departure time in increasing order.
Args:
evs (List[EV]): List of EVs to be sorted.
iface (Interface): Interface object. (not used in this case)
Returns:
List[EV]: List of EVs sorted by departure time in increasing order.
"""
return sorted(evs, key=lambda x: x.departure)
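# Usage sketch (added): any object with a `departure` attribute works; a hypothetical
# namedtuple stands in for the EV class here.
from collections import namedtuple
_DemoEV = namedtuple("_DemoEV", "departure")
earliest_deadline_first([_DemoEV(5), _DemoEV(2), _DemoEV(9)], iface=None)
# -> [_DemoEV(departure=2), _DemoEV(departure=5), _DemoEV(departure=9)]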
|
f1a57586b9993d890ddda6c309dafbea4ae16554
| 3,652,359
|
import re
def auto_load(filename):
"""Load any supported raw battery cycler file to the correct Datapath automatically.
Matches raw file patterns to the correct datapath and returns the datapath object.
Example:
auto_load("2017-05-09_test-TC-contact_CH33.csv")
>>> <ArbinDatapath object>
auto_load("PreDiag_000287_000128short.092")
>>> <MaccorDatapath object>
Args:
filename (str, Pathlike): string corresponding to battery cycler file filename.
Returns:
(beep.structure.base.BEEPDatapath): The datapath child class corresponding to this file.
"""
if re.match(ARBIN_CONFIG["file_pattern"], filename) or re.match(FastCharge_CONFIG["file_pattern"], filename):
return ArbinDatapath.from_file(filename)
elif re.match(MACCOR_CONFIG["file_pattern"], filename) or re.match(xTesladiag_CONFIG["file_pattern"], filename):
return MaccorDatapath.from_file(filename)
elif re.match(INDIGO_CONFIG["file_pattern"], filename):
return IndigoDatapath.from_file(filename)
elif re.match(BIOLOGIC_CONFIG["file_pattern"], filename):
return BiologicDatapath.from_file(filename)
elif re.match(NEWARE_CONFIG["file_pattern"], filename):
return NewareDatapath.from_file(filename)
else:
raise ValueError("{} does not match any known file pattern".format(filename))
|
6b3ccf40296f62c15ea005cfe5e87e397d8e9f88
| 3,652,360
|
def print_param_list(param_list, result, decimal_place=2, unit=''):
"""
Return a result string with parameter data appended. The input `param_list` is a list of a tuple
(param_value, param_name), where `param_value` is a float and `param_name` is a string. If `param_value`
is None, it writes 'N/A'.
"""
for param_value, param_name in param_list:
result += '<tr>'
result += r' <td class = "key"><span>{0}</span></td>'.format(param_name)
result += r' <td class="equals">=</td>'
if param_value is None:
result += r' <td class="value">N/A</td>'
else:
param_value = '%.*f' % (decimal_place, param_value)
result += r' <td class="value"><script type="math/tex">{0} \ \mathrm{{ {1!s} }}</script></td>'.format(
param_value, unit)
result += '</tr>\n'
return result
|
f92fd926eaf312e625058c394c42e9909cac7a43
| 3,652,361
|
def get_veh_id(gb_data):
"""
Mapping function for vehicle id
"""
veh_ref = gb_data['Vehicle_Reference']
acc_id = get_acc_id_from_data(gb_data)
veh_id = common.get_gb_veh_id(acc_id, int(veh_ref))
return veh_id
|
de3a8f99a099737cedb00534ad21bc7dd1a900c5
| 3,652,362
|
def linreg_qr_gramschmidt_unencrypted(clientMap, coordinator, encryLv=3, colTrunc=False):
"""
Compute vertical federated linear regression using QR.
QR decomposition is computed by means of Numpy/Scipy builtin algorithm and Gram-Schmidt method.
Parameters
----------
clientMap : List
The list of qrClient objects.
    coordinator : Coordinator
        The coordinator object holding the machine information of the corresponding qrClient objects.
encryLv : int
The least number of columns the feature matrix of a single client should have to protect its privacy.
colTrunc : bool
Do the column pivoting and truncation or not.
Returns
-------
numpy.array
The computed weights of all the clients. The weights corresponding to the constant term is at the last position.
"""
preprocessing_wo_constaint(clientMap, coordinator.machine_info_client, encryLv, colTrunc)
compute_qr_gramschmidt_unencrypted(clientMap, coordinator.machine_info_client)
apply_q_unencrypted(clientMap, coordinator.machine_info_client)
weights = apply_back_solve_wo_constraint(clientMap, coordinator.machine_info_client)
return weights
|
59fee17cff911a22c4e6cfc6daf13ce7559d32a7
| 3,652,363
|
def has_soa_perm(user_level, obj, ctnr, action):
"""
Permissions for SOAs
SOAs are global, related to domains and reverse domains
"""
return {
'cyder_admin': True, #?
'ctnr_admin': action == 'view',
'user': action == 'view',
'guest': action == 'view',
}.get(user_level, False)
|
6b32c9f3411d9341d9692c46e84a7506d649f36d
| 3,652,364
|
import os
from collections import defaultdict
def parse_test(project, path):
"""Compares the dynamic graph to the parsed one."""
inputs, outputs, built_by, graph = parse_graph(project.graph)
fuzzed = sorted([f for f in inputs - outputs if project.filter_in(f)])
count = len(fuzzed)
root = project.buildPath
G = defaultdict(list)
with open(path, 'r') as f:
for line in f.readlines():
src, deps = line.strip().split(':')
src = os.path.normpath(os.path.join(root, src))
for dep in (w.strip() for w in deps.split(', ')):
G[os.path.normpath(os.path.join(root, dep))].append(src)
def traverse_graph(node, viz):
if node in viz:
return viz
for next in G[node]:
viz.add(node)
traverse_graph(next, viz)
return viz
for idx, input in zip(range(count), fuzzed):
print('[{0}/{1}] {2}:'.format(idx + 1, count, input))
expected = graph.find_deps(input) & outputs
actual = traverse_graph(input, set())
if actual != expected:
for f in sorted(actual):
if f not in expected:
print(' +', f)
for f in sorted(expected):
if f not in actual:
print(' -', f)
|
0f55d8123c1984faccef9b41e9807cc82d17492b
| 3,652,365
|
from typing import Any
from typing import Dict
from urllib.parse import urljoin
import os
import base64
import magic
def upload_artifact(args: Any, file_path: str, org_id: Any = None) -> Dict[str, Any]:
"""
Upload artifact using Pyxis API
Args:
args (Any): CLI arguments
file_path (str): Path to a artifact file
org_id (Any): organization ID - optional
Returns:
Dict[str, Any]: Pyxis response
"""
upload_url = urljoin(
args.pyxis_url, f"v1/projects/certification/id/{args.cert_project_id}/artifacts"
)
file_name = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
with open(file_path, "rb") as artifact:
content = artifact.read()
base64_content = base64.b64encode(content).decode("utf8")
mime = magic.from_file(file_path, mime=True)
artifact_payload = {
"content": base64_content,
"certification_hash": args.certification_hash,
"content_type": mime,
"filename": file_name,
"file_size": file_size,
"operator_package_name": args.operator_package_name,
"version": args.operator_version,
}
if org_id:
artifact_payload["org_id"] = org_id
return pyxis.post(upload_url, artifact_payload)
|
c6f2dfc94581028ccff0d2d3a86008a18b3816aa
| 3,652,366
|
def check_skyscrapers(input_path: str) -> bool:
"""
Main function to check the status of skyscraper game board.
Return True if the board status is compliant with the rules,
False otherwise.
"""
board = read_input(input_path)
return check_not_finished_board(board) and check_uniqueness_in_rows(board) and \
check_horizontal_visibility(board) and check_columns(board)
|
a4a2c77049bad429e548c749ef3e34ef27081de4
| 3,652,367
|
from typing import Optional
async def get_station(station: avwx.Station, token: Optional[Token]) -> dict:
"""Log and returns station data as dict"""
await app.station.add(station.lookup_code, "station")
return await station_data_for(station, token=token) or {}
|
659bf56ff274ccd460dfdf240d6f4776fb7586a6
| 3,652,368
|
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Note: This API is not compatible with the use of `tf.cond` or
`tf.while_loop`, and will raise a `ValueError` if you attempt to call it
in such a graph.
Returns:
A `group` op depending on all `check_numerics` ops added.
Raises:
ValueError: If the graph contains any numeric operations in a control flow
structure.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To check for `Inf`s and `NaN`s under
eager execution, call tfe.seterr(inf_or_nan='raise') once before executing
the checked operations.
  @end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"add_check_numerics_ops() is not compatible with eager execution. "
"To check for Inf's and NaN's under eager execution, call "
"tfe.seterr(inf_or_nan='raise') once before executing the "
"checked operations.")
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
if op._get_control_flow_context() is not None: # pylint: disable=protected-access
raise ValueError("`tf.add_check_numerics_ops() is not compatible "
"with TensorFlow control flow operations such as "
"`tf.cond()` or `tf.while_loop()`.")
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
|
8a5026ff07a0cfce7f0acac58641996cef76fb2e
| 3,652,369
|
def get_text(part):
"""Gmailの本文をdecode"""
if not part['filename'] and \
part['body']['size'] > 0 and \
'data' in part['body'].keys():
content_type = header(part['headers'], 'Content-Type')
encode_type = header(part['headers'], 'Content-Transfer-Encoding')
data = decode_data(content_type, encode_type, part['filename'], part['body']['data'])
if data["data_type"]=="text":
return data['data']
return ''
|
2d32b30539c39dc89cb3680e2d21e14eb9ce24c4
| 3,652,370
|
import dataclasses
def run(ex: "interactivity.Execution"):
"""Specify the target function(s) and/or layer(s) to target."""
selection: "definitions.Selection" = ex.shell.selection
is_exact = ex.args.get("exact", False)
functions = ex.args.get("functions", False)
layers = ex.args.get("layers", False)
both = not functions and not layers
names = _get_names(ex)
if both and names == ["*"]:
status = "ALL"
message = "Selection has been cleared. All items are now selected."
ex.shell.selection = dataclasses.replace(
selection,
function_needles=["*"],
layer_needles=["*"],
bundle_all=True,
)
elif is_exact:
status = "EXACT"
message = "Exact selection has been applied."
ex.shell.selection = _update_exact_selection(
names=names,
functions=functions,
layers=layers,
selection=selection,
)
else:
status = "MATCH"
message = "Matching items have been selected."
ex.shell.selection = _update_fuzzy_selection(
names=names,
functions=functions,
layers=layers,
selection=selection,
)
targets = ex.shell.context.get_selected_targets(ex.shell.selection)
return ex.finalize(
status=status,
message=message,
echo=True,
info={
"functions": _to_names(targets.function_targets),
"layers": _to_names(targets.layer_targets),
},
)
|
9389ada1c657b1f2794650e9b2b2d9a40039b64f
| 3,652,371
|
def get_mixture_mse_accuracy(output_dim, num_mixes):
"""Construct an MSE accuracy function for the MDN layer
that takes one sample and compares to the true value."""
    # Construct an MSE metric with the right number of mixtures and outputs
    def mse_func(y_true, y_pred):
        # Reshape inputs in case this is used in a TimeDistributed layer
y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')
out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
num_mixes * output_dim,
num_mixes],
axis=1, name='mdn_coef_split')
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
in zip(mus, sigs)]
mixture = tfd.Mixture(cat=cat, components=coll)
samp = mixture.sample()
mse = tf.reduce_mean(tf.square(samp - y_true), axis=-1)
        # TODO: temperature adjustment for the sampling function.
        return mse
    # Actually return the metric function
with tf.name_scope('MDNLayer'):
return mse_func
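# Hedged sketch: attach the metric to a Keras model whose final MDN layer emits
# (2 * num_mixes * output_dim) + num_mixes values per sample. `model` and the
# companion MDN loss function are assumed to exist and are not shown here.
OUTPUT_DIM, N_MIXES = 2, 5
# model.compile(optimizer='adam',
#               loss=get_mixture_loss_func(OUTPUT_DIM, N_MIXES),          # assumed companion loss
#               metrics=[get_mixture_mse_accuracy(OUTPUT_DIM, N_MIXES)])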
|
ac37233f14afb7aa13b8afc74e04d3d8adc89ff5
| 3,652,372
|
def ByName(breakdown_metric_name):
"""Return a BreakdownMetric class by name."""
breakdown_mapping = {
'distance': ByDistance,
'num_points': ByNumPoints,
'rotation': ByRotation,
'difficulty': ByDifficulty
}
if breakdown_metric_name not in breakdown_mapping:
raise ValueError('Invalid breakdown name: %s, valid names are %s' %
(breakdown_metric_name, list(breakdown_mapping.keys())))
return breakdown_mapping[breakdown_metric_name]
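# Minimal usage sketch; ByDistance/ByNumPoints/ByRotation/ByDifficulty are the
# breakdown classes defined elsewhere in this module.
metric_cls = ByName('distance')      # -> the ByDistance class
# metric = metric_cls(metadata)      # hypothetical instantiation, arguments not shown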
|
06a1a44f8453375cfc83729339062948829d950c
| 3,652,373
|
def deserialize_structure(serialized_structure, dtype=np.int32):
"""Converts a string to a structure.
Args:
serialized_structure: A structure produced by `serialize_structure`.
dtype: The data type of the output numpy array.
Returns:
A numpy array with `dtype`.
"""
    return np.asarray(
        serialized_structure.split(domains.SEP_TOKEN),
        dtype=dtype)
|
ec8f3d096f3eedea4343576f7b204da15ae73ca6
| 3,652,374
|
from typing import List
def get_all_text_elements(dataset_name: str) -> List[TextElement]:
"""
get all the text elements of the given dataset
:param dataset_name:
"""
return data_access.get_all_text_elements(dataset_name=dataset_name)
|
fa4c2e0bff9818f1026095b5b6b774b09652b989
| 3,652,375
|
def form_x(form_file,*args):
"""
    Same as the `form` helper above, except it assumes all tags in the form are numbers,
    and uses the additional arguments in *args to fill in those tag values.
    :param form_file: file which we use for replacements
    :param *args: optional arguments containing the form entries for the file in question, by number.
"""
form_dict = {}
count = 0
for arg in args:
count += 1
form_dict[str(count)] = str(arg)
return form(form_file,form_dict)
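# Hedged example: the call below would build form_dict = {'1': 'Alice', '2': '42',
# '3': '3.14'} and delegate to the companion `form()` helper used above;
# 'template.txt' is a hypothetical file.
# filled = form_x('template.txt', 'Alice', 42, 3.14)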
|
e2d45e71ff18ce626a89d9a097389fc27b34fa82
| 3,652,376
|
import click
def init():
"""Manage IAM users."""
formatter = cli.make_formatter('aws_user')
@click.group()
def user():
"""Manage IAM users."""
pass
@user.command()
@click.option('--create',
is_flag=True,
default=False,
help='Create if it does not exist')
@click.option('--path',
default='/',
help='Path for user name.')
@click.option('--inline-policy',
type=cli.LIST,
required=False,
help='Inline user policy name:file')
@click.option('--attached-policy',
type=cli.LIST,
required=False,
help='global:PolicyName or local:PolicyName')
@click.argument('user-name',
required=True,
callback=aws_cli.sanitize_user_name)
@cli.admin.ON_EXCEPTIONS
def configure(create,
path,
inline_policy,
attached_policy,
user_name):
"""Create/configure/get IAM user."""
iam_conn = awscontext.GLOBAL.iam
try:
user = iamclient.get_user(iam_conn, user_name)
except exc.NotFoundError:
if not create:
raise
user = None
if not user:
user = iamclient.create_user(iam_conn, user_name, path)
if inline_policy:
_set_user_policy(iam_conn, user_name, inline_policy)
if attached_policy:
_set_attached_policy(iam_conn, user_name, attached_policy)
user['UserPolicies'] = iamclient.list_user_policies(iam_conn,
user_name)
user['AttachedPolicies'] = iamclient.list_attached_user_policies(
iam_conn,
user_name)
cli.out(formatter(user))
@user.command(name='list')
@cli.admin.ON_EXCEPTIONS
@click.option('--path',
default='/',
help='Path for user name.')
def list_users(path):
"""List IAM users.
"""
iam_conn = awscontext.GLOBAL.iam
users = iamclient.list_users(iam_conn, path)
cli.out(formatter(users))
@user.command()
@click.option('--force',
is_flag=True,
default=False,
help='Delete user, even is user has policies attached.')
@click.argument('user-name')
@cli.admin.ON_EXCEPTIONS
def delete(force, user_name):
"""Delete IAM user."""
iam_conn = awscontext.GLOBAL.iam
if force:
user_policies = iamclient.list_user_policies(iam_conn,
user_name)
for policy in user_policies:
_LOGGER.info('deleting inline policy: %s', policy)
iamclient.delete_user_policy(iam_conn, user_name, policy)
attached_pols = iamclient.list_attached_user_policies(iam_conn,
user_name)
for policy in attached_pols:
_LOGGER.info('detaching policy: %s', policy['PolicyArn'])
iamclient.detach_user_policy(iam_conn,
user_name,
policy['PolicyArn'])
groups = iamclient.list_groups_for_user(iam_conn,
user_name)
for group in groups:
_LOGGER.info('removing user from group: %s', group)
iamclient.remove_user_from_group(iam_conn,
user_name,
group)
try:
iamclient.delete_user(iam_conn=iam_conn, user_name=user_name)
except iam_conn.exceptions.DeleteConflictException:
raise click.UsageError('User [%s] has inline or attached '
'policies, or is a member of one or '
                                   'more groups, use --force to force '
'delete.' % user_name)
del configure
del list_users
del delete
return user
|
b237e6ba7c10aafa1a499944f1553eaceed0fb2a
| 3,652,377
|
def fix_units(dims):
"""Fill in missing units."""
default = [d.get("units") for d in dims][-1]
for dim in dims:
dim["units"] = dim.get("units", default)
return dims
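# Quick self-contained example: dimensions missing "units" inherit the units of
# the last dimension in the list.
dims = [{"name": "time"}, {"name": "depth", "units": "m"}]
print(fix_units(dims))
# -> [{'name': 'time', 'units': 'm'}, {'name': 'depth', 'units': 'm'}]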
|
d3a47ad84e1b4e44bedebb1e5739778df975a6fe
| 3,652,378
|
def annotate_movement(raw, pos, rotation_velocity_limit=None,
translation_velocity_limit=None,
mean_distance_limit=None, use_dev_head_trans='average'):
"""Detect segments with movement.
    Detects segments in which the head movement exceeds rotation_velocity_limit
    or translation_velocity_limit, or drifts farther than mean_distance_limit
    from the mean head position. It returns an annotation marking the bad segments.
Parameters
----------
raw : instance of Raw
Data to compute head position.
pos : array, shape (N, 10)
The position and quaternion parameters from cHPI fitting. Obtained
with `mne.chpi` functions.
    rotation_velocity_limit : float
        Head rotation velocity limit in degrees per second.
    translation_velocity_limit : float
        Head translation velocity limit in meters per second.
mean_distance_limit : float
Head position limit from mean recording in meters.
use_dev_head_trans : 'average' (default) | 'info'
Identify the device to head transform used to define the
fixed HPI locations for computing moving distances.
If ``average`` the average device to head transform is
computed using ``compute_average_dev_head_t``.
If ``info``, ``raw.info['dev_head_t']`` is used.
Returns
-------
annot : mne.Annotations
Periods with head motion.
hpi_disp : array
Head position over time with respect to the mean head pos.
See Also
--------
compute_average_dev_head_t
"""
sfreq = raw.info['sfreq']
hp_ts = pos[:, 0].copy() - raw.first_time
dt = np.diff(hp_ts)
hp_ts = np.concatenate([hp_ts, [hp_ts[-1] + 1. / sfreq]])
orig_time = raw.info['meas_date']
annot = Annotations([], [], [], orig_time=orig_time)
# Annotate based on rotational velocity
t_tot = raw.times[-1]
if rotation_velocity_limit is not None:
assert rotation_velocity_limit > 0
# Rotational velocity (radians / sec)
r = _angle_between_quats(pos[:-1, 1:4], pos[1:, 1:4])
r /= dt
bad_mask = (r >= np.deg2rad(rotation_velocity_limit))
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'ω >= %5.1f°/s (max: %0.1f°/s)'
% (bad_pct, len(onsets), rotation_velocity_limit,
np.rad2deg(r.max())))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_rotat_vel', orig_time=orig_time)
# Annotate based on translational velocity limit
if translation_velocity_limit is not None:
assert translation_velocity_limit > 0
v = np.linalg.norm(np.diff(pos[:, 4:7], axis=0), axis=-1)
v /= dt
bad_mask = (v >= translation_velocity_limit)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'v >= %5.4fm/s (max: %5.4fm/s)'
% (bad_pct, len(onsets), translation_velocity_limit,
v.max()))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_trans_vel', orig_time=orig_time)
# Annotate based on displacement from mean head position
disp = []
if mean_distance_limit is not None:
assert mean_distance_limit > 0
# compute dev to head transform for fixed points
use_dev_head_trans = use_dev_head_trans.lower()
if use_dev_head_trans not in ['average', 'info']:
raise ValueError('use_dev_head_trans must be either' +
' \'average\' or \'info\': got \'%s\''
% (use_dev_head_trans,))
if use_dev_head_trans == 'average':
fixed_dev_head_t = compute_average_dev_head_t(raw, pos)
elif use_dev_head_trans == 'info':
fixed_dev_head_t = raw.info['dev_head_t']
# Get static head pos from file, used to convert quat to cartesian
chpi_pos = sorted([d for d in raw.info['hpi_results'][-1]
['dig_points']], key=lambda x: x['ident'])
chpi_pos = np.array([d['r'] for d in chpi_pos])
# Get head pos changes during recording
chpi_pos_mov = np.array([apply_trans(_quat_to_affine(quat), chpi_pos)
for quat in pos[:, 1:7]])
# get fixed position
chpi_pos_fix = apply_trans(fixed_dev_head_t, chpi_pos)
# get movement displacement from mean pos
hpi_disp = chpi_pos_mov - np.tile(chpi_pos_fix, (pos.shape[0], 1, 1))
# get positions above threshold distance
disp = np.sqrt((hpi_disp ** 2).sum(axis=2))
bad_mask = np.any(disp > mean_distance_limit, axis=1)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'disp >= %5.4fm (max: %5.4fm)'
% (bad_pct, len(onsets), mean_distance_limit, disp.max()))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_dist', orig_time=orig_time)
_adjust_onset_meas_date(annot, raw)
return annot, disp
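# Hedged usage sketch with MNE-Python; file names are placeholders and the head
# position array is assumed to have been computed beforehand (e.g. with
# mne.chpi.compute_head_pos):
#
#     raw = mne.io.read_raw_fif('run01_raw.fif', allow_maxshield=True)
#     pos = mne.chpi.read_head_pos('run01_raw.pos')
#     annot, disp = annotate_movement(raw, pos,
#                                     rotation_velocity_limit=30.,      # deg/s
#                                     translation_velocity_limit=0.02,  # m/s
#                                     mean_distance_limit=0.005)        # m
#     raw.set_annotations(raw.annotations + annot)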
|
f89e48281cb70da6aa27b7dde737a8a587024f08
| 3,652,379
|
from typing import Any
def run_in_executor(
func: F,
executor: ThreadPoolExecutor = None,
args: Any = (),
kwargs: Any = MappingProxyType({}),
) -> Future:
"""将耗时函数加入到线程池 ."""
loop = get_event_loop()
# noinspection PyTypeChecker
return loop.run_in_executor( # type: ignore
executor, context_partial(func, *args, **kwargs),
)
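# Hedged usage sketch: offload a blocking call from a coroutine. Assumes the
# surrounding module provides get_event_loop/context_partial (as used above);
# time.sleep stands in for any blocking function.
import asyncio
import time

async def _demo():
    await run_in_executor(time.sleep, args=(0.1,))

asyncio.run(_demo())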
|
dfa40f30e359d785e3582f48910d3936659bd2fa
| 3,652,380
|
def find_entry_with_minimal_scale_at_prime(self, p):
"""
Finds the entry of the quadratic form with minimal scale at the
prime p, preferring diagonal entries in case of a tie. (I.e. If
we write the quadratic form as a symmetric matrix M, then this
entry M[i,j] has the minimal valuation at the prime p.)
Note: This answer is independent of the kind of matrix (Gram or
Hessian) associated to the form.
INPUT:
`p` -- a prime number > 0
OUTPUT:
a pair of integers >= 0
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [6, 2, 20]); Q
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 6 2 ]
[ * 20 ]
sage: Q.find_entry_with_minimal_scale_at_prime(2)
(0, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(3)
(1, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(5)
(0, 0)
"""
n = self.dim()
min_val = Infinity
ij_index = None
val_2 = valuation(2, p)
for d in range(n): ## d = difference j-i
for e in range(n - d): ## e is the length of the diagonal with value d.
## Compute the valuation of the entry
if d == 0:
tmp_val = valuation(self[e, e+d], p)
else:
tmp_val = valuation(self[e, e+d], p) - val_2
## Check if it's any smaller than what we have
if tmp_val < min_val:
ij_index = (e,e+d)
min_val = tmp_val
## Return the result
return ij_index
|
737a6dd1c3a1f416f4e22b79440b7731a5048fe0
| 3,652,381
|
import awkward._v2._connect.pyarrow
def from_arrow(array, highlevel=True, behavior=None):
"""
Args:
array (`pyarrow.Array`, `pyarrow.ChunkedArray`, `pyarrow.RecordBatch`,
or `pyarrow.Table`): Apache Arrow array to convert into an
Awkward Array.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
"""
out = awkward._v2._connect.pyarrow.handle_arrow(array, pass_empty_field=True)
return ak._v2._util.wrap(out, behavior, highlevel)
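# Hedged sketch (pyarrow is assumed to be installed alongside awkward):
import pyarrow as pa

arr = pa.array([[1.1, 2.2], [], [3.3]])
ak_arr = from_arrow(arr)        # Awkward Array of variable-length lists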
|
a3c0cea2f2f3763f8997978e2963654cc08ed4e1
| 3,652,382
|
def _basis_search(equiv_lib, source_basis, target_basis, heuristic):
"""Search for a set of transformations from source_basis to target_basis.
Args:
equiv_lib (EquivalenceLibrary): Source of valid translations
source_basis (Set[Tuple[gate_name: str, gate_num_qubits: int]]): Starting basis.
target_basis (Set[gate_name: str]): Target basis.
heuristic (Callable[[source_basis, target_basis], int]): distance heuristic.
Returns:
Optional[List[Tuple[gate, equiv_params, equiv_circuit]]]: List of (gate,
          equiv_params, equiv_circuit) tuples which, if applied in order
will map from source_basis to target_basis. Returns None if no path
was found.
"""
source_basis = frozenset(source_basis)
target_basis = frozenset(target_basis)
open_set = set() # Bases found but not yet inspected.
closed_set = set() # Bases found and inspected.
# Priority queue for inspection order of open_set. Contains Tuple[priority, count, basis]
open_heap = []
# Map from bases in closed_set to predecessor with lowest cost_from_source.
# Values are Tuple[prev_basis, gate_name, params, circuit].
came_from = {}
basis_count = iter_count() # Used to break ties in priority.
open_set.add(source_basis)
heappush(open_heap, (0, next(basis_count), source_basis))
# Map from basis to lowest found cost from source.
cost_from_source = defaultdict(lambda: np.inf)
cost_from_source[source_basis] = 0
# Map from basis to cost_from_source + heuristic.
est_total_cost = defaultdict(lambda: np.inf)
est_total_cost[source_basis] = heuristic(source_basis, target_basis)
    logger.debug('Beginning basis search from %s to %s.',
source_basis, target_basis)
while open_set:
_, _, current_basis = heappop(open_heap)
if current_basis in closed_set:
# When we close a node, we don't remove it from the heap,
# so skip here.
continue
if {gate_name for gate_name, gate_num_qubits in current_basis}.issubset(target_basis):
# Found target basis. Construct transform path.
rtn = []
last_basis = current_basis
while last_basis != source_basis:
prev_basis, gate_name, gate_num_qubits, params, equiv = came_from[last_basis]
rtn.append((gate_name, gate_num_qubits, params, equiv))
last_basis = prev_basis
rtn.reverse()
logger.debug('Transformation path:')
for gate_name, gate_num_qubits, params, equiv in rtn:
logger.debug('%s/%s => %s\n%s', gate_name, gate_num_qubits, params, equiv)
return rtn
logger.debug('Inspecting basis %s.', current_basis)
open_set.remove(current_basis)
closed_set.add(current_basis)
for gate_name, gate_num_qubits in current_basis:
equivs = equiv_lib._get_equivalences((gate_name, gate_num_qubits))
basis_remain = current_basis - {(gate_name, gate_num_qubits)}
neighbors = [
(frozenset(basis_remain | {(inst.name, inst.num_qubits)
for inst, qargs, cargs in equiv.data}),
params,
equiv)
for params, equiv in equivs]
# Weight total path length of transformation weakly.
tentative_cost_from_source = cost_from_source[current_basis] + 1e-3
for neighbor, params, equiv in neighbors:
if neighbor in closed_set:
continue
if tentative_cost_from_source >= cost_from_source[neighbor]:
continue
open_set.add(neighbor)
came_from[neighbor] = (current_basis, gate_name, gate_num_qubits, params, equiv)
cost_from_source[neighbor] = tentative_cost_from_source
est_total_cost[neighbor] = tentative_cost_from_source \
+ heuristic(neighbor, target_basis)
heappush(open_heap, (est_total_cost[neighbor],
next(basis_count),
neighbor))
return None
|
2911b93f4ea36875c6d5e675028aedcd8caf3929
| 3,652,383
|
def Get_EstimatedRedshifts( scenario={} ):
""" obtain estimated source redshifts written to npy file """
return np.genfromtxt( FilenameEstimatedRedshift( scenario ), dtype=None, delimiter=',', names=True, encoding='UTF-8')
|
0696cfee6783c093b8cf4b7c9703fec18e9799a4
| 3,652,384
|
def get_national_museums(db_connection, export_to_csv, export_path):
"""
Get national museum data from DB
"""
df = pd.read_sql('select * from optourism.state_national_museum_visits', con=db_connection)
if export_to_csv:
df.to_csv(f"{export_path}_nationalmuseums_raw.csv", index=False)
return df
|
d34b9ff8f7f95025f932078e8d6e8b179bcff27e
| 3,652,385
|
from re import A
def hrm_configure_pr_group_membership():
"""
Configures the labels and CRUD Strings of pr_group_membership
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
request = current.request
function = request.function
table = s3db.pr_group_membership
if settings.get_hrm_teams() == "Team":
table.group_id.label = T("Team Name")
table.group_head.label = T("Team Leader")
if function == "group":
current.response.s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Members"),
subtitle_create = T("Add New Team Member"),
label_list_button = T("List Members"),
label_create_button = T("Add Team Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
else:
table.group_head.label = T("Group Leader")
phone_label = settings.get_ui_label_mobile_phone()
site_label = settings.get_org_site_label()
if function == "group":
db = current.db
ptable = db.pr_person
controller = request.controller
def hrm_person_represent(id, row=None):
if row:
id = row.id
elif id:
row = db(ptable.id == id).select(ptable.first_name,
limitby=(0, 1)
).first()
else:
return current.messages["NONE"]
return A(row.first_name,
_href=URL(c=controller, f="person", args=id))
table.person_id.represent = hrm_person_represent
list_fields = ["id",
(T("First Name"), "person_id"),
"person_id$middle_name",
"person_id$last_name",
"group_head",
(T("Email"), "person_id$email.value"),
(phone_label, "person_id$phone.value"),
(current.messages.ORGANISATION,
"person_id$human_resource.organisation_id"),
(site_label, "person_id$human_resource.site_id"),
]
orderby = "pr_person.first_name"
else:
list_fields = ["id",
"group_id",
"group_head",
"group_id$description",
]
orderby = table.group_id
s3db.configure("pr_group_membership",
list_fields=list_fields,
orderby=orderby)
|
f5ec66e00063bf8101505de8b1b8a767227b6bbd
| 3,652,386
|
import torch
def inverse_sphere_distances(batch, dist, labels, anchor_label):
"""
    Function to utilise the distances of batch samples to compute their
    probability of occurrence, and use the inverse to sample actual negatives for the respective anchor.
Args:
batch: torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
dist: torch.Tensor(), computed distances between anchor to all batch samples.
labels: np.ndarray, labels for each sample for which distances were computed in dist.
anchor_label: float, anchor label
    Returns:
        numpy array of inverse-distance sampling probabilities, with positives (and the anchor) set to zero.
"""
bs,dim = len(dist),batch.shape[-1]
#negated log-distribution of distances of unit sphere in dimension <dim>
log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist) - (float(dim-3) / 2) * torch.log(1.0 - 0.25 * (dist.pow(2))))
#Set sampling probabilities of positives to zero
log_q_d_inv[np.where(labels==anchor_label)[0]] = 0
q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv)) # - max(log) for stability
#Set sampling probabilities of positives to zero
q_d_inv[np.where(labels==anchor_label)[0]] = 0
### NOTE: Cutting of values with high distances made the results slightly worse.
# q_d_inv[np.where(dist>upper_cutoff)[0]] = 0
#Normalize inverted distance for probability distr.
q_d_inv = q_d_inv/q_d_inv.sum()
return q_d_inv.detach().cpu().numpy()
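# Hedged toy example (not from the original repo): embeddings are L2-normalised
# so pairwise distances lie in [0, 2], which the inverse-sphere formula above assumes.
import torch.nn.functional as F

emb = F.normalize(torch.randn(6, 128), dim=1)          # 6 embeddings on the unit sphere
labels = np.array([0, 0, 1, 1, 2, 2])
anchor = 0
dist = torch.cdist(emb[anchor:anchor + 1], emb).squeeze(0).clamp(1e-4, 2 - 1e-4)
probs = inverse_sphere_distances(emb, dist, labels, labels[anchor])
# positives (indices 0 and 1) receive probability 0; e.g. pick the likeliest negative:
negative_idx = int(np.argmax(probs))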
|
9bcb7f56f08fd850f6a9fa70175e1f83df603705
| 3,652,387
|
def get_recording(sleeps=False):
"""Get list of recorded steps.
:param sleeps: set False to exclude recording sleeps
"""
# TODO. atm will always use CLICK
# TODO. Add examples
global recording # pylint: disable=W0602
output = []
top = None
action_name = "Click"
for item in recording:
if sleeps and item["type"] == "sleep":
output.append(f"Sleep {item['value']}s")
if (
item["type"] == "locator"
and not top
or "top" in item.keys()
and item["top"] != top
):
output.append(
f"Control Window {item['top']} # Handle: {item['top_handle']}"
)
top = item["top"]
if item["type"] == "locator":
output.append(f"{action_name} {item['locator']}")
result = "\n".join(output)
header = (
f"\n{'-'*80}"
"\nCOPY & PASTE BELOW CODE INTO *** Tasks *** or *** Keywords ***"
f"\n{'-'*80}\n\n"
)
footer = f"\n\n{'-'*80}"
return f"{header}{result}{footer}"
|
11ea9e01fc6af731d444b743ea7e56deefff02d4
| 3,652,388
|
from functools import reduce
def wrap_onspace(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line, ' \n'[(len(line[line.rfind('\n')+1:]) +
len(word.split('\n', 1)[0]) >= width)], word),
text.split(' '))
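# Self-contained example: wrap to 20 columns while keeping the existing newline.
text = "the quick brown fox jumps over\nthe lazy dog near the river bank"
print(wrap_onspace(text, 20))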
|
13387fa67dcff2b0329463dfe1ab7d6721255afc
| 3,652,389
|
def xsd_simple_type_factory(elem, schema, parent):
"""
Factory function for XSD simple types. Parses the xs:simpleType element and its
child component, that can be a restriction, a list or an union. Annotations are
linked to simple type instance, omitting the inner annotation if both are given.
"""
annotation = None
try:
child = elem[0]
except IndexError:
return schema.maps.types[XSD_ANY_SIMPLE_TYPE]
else:
if child.tag == XSD_ANNOTATION:
annotation = XsdAnnotation(elem[0], schema, child)
try:
child = elem[1]
except IndexError:
schema.parse_error("(restriction | list | union) expected", elem)
return schema.maps.types[XSD_ANY_SIMPLE_TYPE]
if child.tag == XSD_RESTRICTION:
xsd_type = schema.BUILDERS.restriction_class(child, schema, parent)
elif child.tag == XSD_LIST:
xsd_type = XsdList(child, schema, parent)
elif child.tag == XSD_UNION:
xsd_type = schema.BUILDERS.union_class(child, schema, parent)
else:
schema.parse_error("(restriction | list | union) expected", elem)
return schema.maps.types[XSD_ANY_SIMPLE_TYPE]
if annotation is not None:
xsd_type.annotation = annotation
try:
xsd_type.name = get_qname(schema.target_namespace, elem.attrib['name'])
except KeyError:
if parent is None:
schema.parse_error("missing attribute 'name' in a global simpleType", elem)
xsd_type.name = 'nameless_%s' % str(id(xsd_type))
else:
if parent is not None:
schema.parse_error("attribute 'name' not allowed for a local simpleType", elem)
xsd_type.name = None
if 'final' in elem.attrib:
try:
xsd_type._final = get_xsd_derivation_attribute(elem, 'final')
except ValueError as err:
xsd_type.parse_error(err, elem)
return xsd_type
|
27ab47787923fadef6364828e2cc7604b006d76d
| 3,652,390
|
def amen_solve(A, f, x0, eps, kickrank=4, nswp=20, local_prec='n',
local_iters=2, local_restart=40, trunc_norm=1, max_full_size=50, verb=1):
""" Approximate linear system solution in the tensor-train (TT) format
    using the Alternating Minimal Energy (AMEn) approach.
:References: Sergey Dolgov, Dmitry. Savostyanov
Paper 1: http://arxiv.org/abs/1301.6068
Paper 2: http://arxiv.org/abs/1304.1222
:param A: Matrix in the TT-format
:type A: matrix
:param f: Right-hand side in the TT-format
:type f: tensor
:param x0: TT-tensor of initial guess.
:type x0: tensor
:param eps: Accuracy.
:type eps: float
:Example:
>>> import tt
>>> import tt.amen #Needed, not imported automatically
>>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian
>>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones
>>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8)
amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5
amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9
amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13
amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17
amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21
amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25
amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29
amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33
amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37
amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41
amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45
amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49
amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53
amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57
amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61
amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65
amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69
amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73
>>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm()
5.5152374305127345e-09
"""
m = A.m.copy()
rx0 = x0.r.copy()
psx0 = x0.ps.copy()
if A.is_complex or f.is_complex:
amen_f90.amen_f90.ztt_amen_wrapper(f.d, A.n, m,
A.tt.r, A.tt.ps, A.tt.core,
f.r, f.ps, f.core,
rx0, psx0, x0.core,
eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec)
else:
if x0.is_complex:
x0 = x0.real()
rx0 = x0.r.copy()
psx0 = x0.ps.copy()
amen_f90.amen_f90.dtt_amen_wrapper(f.d, A.n, m,
A.tt.r, A.tt.ps, A.tt.core,
f.r, f.ps, f.core,
rx0, psx0, x0.core,
eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec)
x = tt.tensor()
x.d = f.d
x.n = m.copy()
x.r = rx0
if A.is_complex or f.is_complex:
x.core = amen_f90.amen_f90.zcore.copy()
else:
x.core = amen_f90.amen_f90.core.copy()
amen_f90.amen_f90.deallocate_result()
x.get_ps()
return x
|
15b35bedd6e07f867ae1bae54992f8988b1b56cb
| 3,652,391
|
def get_vss(ts, tau_p):
""" Compute candidates of VS for specified task tau_p """
    if tau_p is None:
return []
C, T, D = extract(ts)
R = rta(C, T)
_VS = _get_vs(C, T, R, task_name_to_index(ts, tau_p))
_VS.sort()
VS = []
vs = Server(0, 0, None)
# ignore duplicates
for s in _VS:
if vs.C == s[0] and vs.T == s[1]:
continue
vs = Server(s[0], s[1], tau_p)
VS.append(vs)
return VS
|
a6b0abc26d32d8e62e4026ee59a6491a02dd6a32
| 3,652,392
|
from typing import Iterable
from typing import Dict
from typing import Hashable
from typing import List
def groupby(
entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None
) -> Dict[Hashable, List["DXFEntity"]]:
"""
Groups a sequence of DXF entities by a DXF attribute like ``'layer'``,
returns a dict with `dxfattrib` values as key and a list of entities
matching this `dxfattrib`.
A `key` function can be used to combine some DXF attributes (e.g. layer and
color) and should return a hashable data type like a tuple of strings,
integers or floats, `key` function example::
def group_key(entity: DXFEntity):
return entity.dxf.layer, entity.dxf.color
For not suitable DXF entities return ``None`` to exclude this entity, in
this case it's not required, because :func:`groupby` catches
:class:`DXFAttributeError` exceptions to exclude entities, which do not
provide layer and/or color attributes, automatically.
Result dict for `dxfattrib` = ``'layer'`` may look like this::
{
'0': [ ... list of entities ],
'ExampleLayer1': [ ... ],
'ExampleLayer2': [ ... ],
...
}
Result dict for `key` = `group_key`, which returns a ``(layer, color)``
tuple, may look like this::
{
('0', 1): [ ... list of entities ],
('0', 3): [ ... ],
('0', 7): [ ... ],
('ExampleLayer1', 1): [ ... ],
('ExampleLayer1', 2): [ ... ],
('ExampleLayer1', 5): [ ... ],
('ExampleLayer2', 7): [ ... ],
...
}
All entity containers (modelspace, paperspace layouts and blocks) and the
:class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby`
method.
Args:
entities: sequence of DXF entities to group by a DXF attribute or a
`key` function
dxfattrib: grouping DXF attribute like ``'layer'``
key: key function, which accepts a :class:`DXFEntity` as argument and
returns a hashable grouping key or ``None`` to ignore this entity
"""
if all((dxfattrib, key)):
raise DXFValueError(
"Specify a dxfattrib or a key function, but not both."
)
if dxfattrib != "":
key = lambda entity: entity.dxf.get_default(dxfattrib)
if key is None:
raise DXFValueError(
"no valid argument found, specify a dxfattrib or a key function, "
"but not both."
)
result: Dict[Hashable, List["DXFEntity"]] = dict()
for dxf_entity in entities:
if not dxf_entity.is_alive:
continue
try:
group_key = key(dxf_entity)
except DXFAttributeError:
# ignore DXF entities, which do not support all query attributes
continue
if group_key is not None:
group = result.setdefault(group_key, [])
group.append(dxf_entity)
return result
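# Hedged usage sketch with ezdxf; "drawing.dxf" is a placeholder file name:
#
#     import ezdxf
#     doc = ezdxf.readfile("drawing.dxf")
#     msp = doc.modelspace()
#     for layer, entities in groupby(msp, dxfattrib="layer").items():
#         print(layer, len(entities))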
|
0eecfc2263c1f5716615cb4add6bc092edbb2b8b
| 3,652,393
|
def train_test_split(data_filepath, num_train=10, num_test=10):
"""Split a dataset into training and test sets."""
df = pd.read_csv(data_filepath, sep=',', header=None)
data = df.values
train = data[:2*num_train, :]
test = data[2*num_train:2*(num_train+num_test), :]
ind = np.argsort(train[:,-1])
X_train = train[ind][:,:-1]
y_train = train[ind][:,-1]
ind = np.argsort(test[:,-1])
X_test = test[ind][:,:-1]
y_test = test[ind][:,-1]
return X_train, y_train, X_test, y_test
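# Minimal example; "data.csv" is a placeholder for a comma-separated file with
# no header whose last column holds the labels:
#
#     X_train, y_train, X_test, y_test = train_test_split("data.csv", num_train=10, num_test=10)
#     print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)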
|
650979e62667ade3f88d89f2058bedf8675a5ae5
| 3,652,394
|
import requests
def get_filings(app: Flask = None):
"""Get a filing with filing_id."""
r = requests.get(f'{app.config["LEGAL_URL"]}/internal/filings')
if not r or r.status_code != 200:
app.logger.error(f'Failed to collect filings from legal-api. {r} {r.json()} {r.status_code}')
raise Exception
return r.json()
|
4b2ba1a3d15918fe5d7b706a20006d0c85818176
| 3,652,395
|
def _uno_struct__setattr__(self, name, value):
"""Sets attribute on UNO struct.
Referenced from the pyuno shared library.
"""
return setattr(self.__dict__["value"], name, value)
|
6b66213e33bb8b882407ff33bcca177701fb98cd
| 3,652,396
|
import io
import warnings
import os
def load_imgs_from_tree(data_dir, img_sub_folder=None, fovs=None, channels=None,
dtype="int16", variable_sizes=False):
"""Takes a set of imgs from a directory structure and loads them into an xarray.
Args:
data_dir (str):
directory containing folders of images
img_sub_folder (str):
optional name of image sub-folder within each fov
fovs (list):
optional list of folders to load imgs from. Default loads all folders
channels (list):
optional list of imgs to load, otherwise loads all imgs
dtype (str/type):
dtype of array which will be used to store values
variable_sizes (bool):
if true, will pad loaded images with zeros to fit into array
Returns:
xarray.DataArray:
xarray with shape [fovs, x_dim, y_dim, tifs]
"""
iou.validate_paths(data_dir, data_prefix=False)
if fovs is None:
# get all fovs
fovs = iou.list_folders(data_dir)
fovs.sort()
if len(fovs) == 0:
raise ValueError(f"No fovs found in directory, {data_dir}")
if img_sub_folder is None:
# no img_sub_folder, change to empty string to read directly from base folder
img_sub_folder = ""
# get imgs from first fov if no img names supplied
if channels is None:
channels = iou.list_files(
path_join(data_dir, fovs[0], img_sub_folder),
substrs=['.tif', '.jpg', '.png']
)
# if taking all channels from directory, sort them alphabetically
channels.sort()
# otherwise, fill channel names with correct file extension
elif not all([img.endswith(("tif", "tiff", "jpg", "png")) for img in channels]):
# need this to reorder channels back because list_files may mess up the ordering
channels_no_delim = [img.split('.')[0] for img in channels]
all_channels = iou.list_files(
path_join(data_dir, fovs[0], img_sub_folder), substrs=channels_no_delim,
exact_match=True
)
# get the corresponding indices found in channels_no_delim
channels_indices = [channels_no_delim.index(chan.split('.')[0]) for chan in all_channels]
# reorder back to original
channels = [chan for _, chan in sorted(zip(channels_indices, all_channels))]
if len(channels) == 0:
raise ValueError("No images found in designated folder")
test_img = io.imread(
path_join(data_dir, fovs[0], img_sub_folder, channels[0], get_filehandle=True)
)
# check to make sure that float dtype was supplied if image data is float
data_dtype = test_img.dtype
if np.issubdtype(data_dtype, np.floating):
if not np.issubdtype(dtype, np.floating):
warnings.warn(f"The supplied non-float dtype {dtype} was overwritten to {data_dtype}, "
f"because the loaded images are floats")
dtype = data_dtype
if variable_sizes:
img_data = np.zeros((len(fovs), 1024, 1024, len(channels)), dtype=dtype)
else:
img_data = np.zeros((len(fovs), test_img.shape[0], test_img.shape[1], len(channels)),
dtype=dtype)
for fov in range(len(fovs)):
for img in range(len(channels)):
if variable_sizes:
temp_img = io.imread(
path_join(data_dir, fovs[fov], img_sub_folder, channels[img],
get_filehandle=True)
)
img_data[fov, :temp_img.shape[0], :temp_img.shape[1], img] = temp_img
else:
img_data[fov, :, :, img] = io.imread(path_join(data_dir, fovs[fov],
img_sub_folder, channels[img],
get_filehandle=True))
# check to make sure that dtype wasn't too small for range of data
if np.min(img_data) < 0:
raise ValueError("Integer overflow from loading TIF image, try a larger dtype")
if variable_sizes:
row_coords, col_coords = range(1024), range(1024)
else:
row_coords, col_coords = range(test_img.shape[0]), range(test_img.shape[1])
# remove .tif or .tiff from image name
img_names = [os.path.splitext(img)[0] for img in channels]
img_xr = xr.DataArray(img_data, coords=[fovs, row_coords, col_coords, img_names],
dims=["fovs", "rows", "cols", "channels"])
return img_xr
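# Hedged usage sketch; the directory layout below is an assumption:
#   data_dir/fov0/TIFs/CD45.tiff, data_dir/fov0/TIFs/CD8.tiff, ...
#
#     img_xr = load_imgs_from_tree("data_dir", img_sub_folder="TIFs",
#                                  channels=["CD45", "CD8"], dtype="float32")
#     print(img_xr.shape)     # (num_fovs, rows, cols, num_channels)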
|
d92aad79e78ad2ce1c1fdd944d8e2c4049ddca4d
| 3,652,397
|
import datetime
def register():
"""Registers the user."""
if g.user:
return redirect(url_for('user_home'))
error = None
if request.method == 'POST':
if not request.form['username']:
error = 'You have to enter a username'
elif not request.form['email'] or '@' not in request.form['email']:
error = 'You have to enter a valid email address'
elif not request.form['password']:
error = 'You have to enter a password'
elif request.form['password'] != request.form['password2']:
error = 'The two passwords do not match'
elif get_uid(request.form['username']) is not None:
error = 'The username is already taken'
else:
db = get_db()
db.execute('''insert into user (
username, email, pw_hash, day, inc_log, dec_log, phase) values (?, ?, ?, 1, ?, ?, 1)''',
[request.form['username'], request.form['email'],
generate_password_hash(request.form['password']), datetime.datetime.utcnow(), datetime.datetime.utcnow()])
db.commit()
flash('You were successfully registered and can login now')
return redirect(url_for('login'))
return render_template('register.html', error=error)
|
572ee30c9f4981f6d526f115178ba8988e2b93c1
| 3,652,398
|
def test_single_while_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_while():
x = Tensor(7).astype("int32")
y = Tensor(0).astype("int32")
while x >= y:
y += x
return y
res = control_flow_while()
assert res == 14
|
8334819ee7d4ea24085e2a2f1ab3d18fb732c8cc
| 3,652,399
|