content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
import itertools
def config_path(*args, **kwargs):
    """
    Join multiple paths from configuration, always using config['path']['base']
    as the base path.

    Use as follows:

    ```python
    config_path('model.train')
    ```

    :param args: Configuration keys whose values are paths (or lists of paths).
    :param kwargs: Keyword arguments. Supported keys:
        ``config`` -- configuration mapping forwarded to ``config_key``;
        ``base_path`` -- overrides the base path read from ``path.base``.
    :return: A single path when exactly one results, otherwise a list of paths.
    """
    config = kwargs.get('config')
    # Read the base path from configuration exactly once (the original code
    # recomputed it for every call); the caller may override via `base_path`.
    base_path = kwargs.get('base_path', config_key('path.base', config=config))
    # When executing in the cloud, prefix every path with the bucket.
    cloud_path = config_key('cloud.bucket', config=config) \
        if kwargs.get('config', {}).get('execution') == 'cloud' \
        else './'

    def add_base_path(items):
        """Prepend the cloud and base paths to one path or to each of a list."""
        if not isinstance(items, list):
            # If the input wasn't a list, treat it as a single-item list.
            items = [items]
        return [os.path.join(cloud_path, base_path, item) for item in items]

    paths = [config_key(path, config=config) for path in args]
    path_bases = list(itertools.chain(*[add_base_path(path) for path in paths]))
    # Return a single value if there's only one value to return.
    if len(path_bases) == 1:
        return path_bases[0]
    return path_bases
def get_one_hot_predictions(tcdcn, x, dim):
    """
    Run ``x`` through the model ``tcdcn`` and return its predictions as 4D
    one-hot maps.

    Depending on whether ``tcdcn`` is an RCN or a structured model, ``x`` is
    either an image (the former case) or a one-hot map representation (the
    latter case).
    """
    # Shape (#batch, #kpts); every entry lies in the range [0, dim**2).
    preds_2d = tcdcn.model_prediction(x, dropout=0)
    # Expand the flat per-keypoint indices into 4D one-hot maps.
    return make_2D_predictions_into_one_hot_4D(preds_2d, dim)
def run_multi_lsp(x, y, err, fts, fmin=0.1, fmax=150, k_band=1, m_base=1, mode='fast', dt_cut=365, k_term_base=0):
    """Run all methods of multiband gatspy Lomb-Scargle Periodogram.

    Input
    ------
    x, y, err, fts: phase, magnitudes/flux, error, filter list
    fmin, fmax: minimum and maximum search period in the Lomb-Scargle
    k_band (int): Number of Fourier components per band
    m_base (int): Number of Fourier components of the base model ('general' mode)
    mode (str): LSP method. Currently supports 'fast' and "general"
    dt_cut (int): Maximum baseline time. Default is 1 year worth of photometry.

    Output
    ------
    best_period: Best period found from the highest peak in the LSP
        (np.nan on any failure; None for an unrecognized mode)
    TODO: False Alarm Probability, TOP N peaks?!
    """
    try:
        # Pre-processing to photometry
        dt = x - x[0]  # calculate baseline
        x, y, err, fts = x[dt <= dt_cut], y[dt <= dt_cut], err[dt <= dt_cut], fts[dt <= dt_cut]
        # TODO: PLASTICC light curves can be negative. For now normalize such
        # that they're at least positive
        y += -1 * min(y)
        dt = x - x[0]  # evaluate baseline again!
        # Check fmax limit
        if max(dt) < fmax:
            fmax = max(dt) - 3
    except Exception:
        return np.nan
    if mode == 'fast':
        try:
            # BUG FIX: this used `Nterms=k` with `k` undefined, so the 'fast'
            # branch always raised NameError and returned np.nan. Use the
            # per-band term count instead.
            model = periodic.LombScargleMultibandFast(fit_period=True, optimizer_kwds={"quiet": True},
                                                      Nterms=k_band)
            model.optimizer.set(period_range=(fmin, fmax))
            model = model.fit(x, y, dy=err, filts=fts)
            return model.best_period
        except Exception:
            return np.nan
    elif mode == 'general':
        try:
            model = periodic.LombScargleMultiband(fit_period=True, optimizer_kwds={"quiet": True},
                                                  Nterms_base=m_base, Nterms_band=k_band)
            model.optimizer.set(period_range=(fmin, fmax))
            model = model.fit(x, y, dy=err, filts=fts)
            return model.best_period
        except Exception:
            return np.nan
def render_tablet_screen():
    """
    Serve the static HTML page for the tablet backend.

    :return: The tablet html file served as a static asset.
    """
    static_page = 'tablet.html'
    return app.send_static_file(static_page)
def add_content(resp, param, value):
    """Adds content/body of the response.

    ecocnt_html: html body,
    ecocnt_css: css body,
    ecocnt_js: js body,
    ecocnt_img: img body,
    ecocnt_vid: video body,
    ecocnt_audio: audio body,
    ecocnt_pdf: pdf body

    :param resp: response object whose ``content`` is filled in.
    :param param: which kind of body to attach (one of the keys above).
    :param value: raw value; for structured kinds it is parsed with
        ``string_to_dict`` into options such as width/height/duration.
    :return: the same response object with its content set.
    """
    if param == "ecocnt_html":
        t = loader.get_template("echo/template.html")
        context = string_to_dict(value)
        resp.content = t.render(context)
    elif param == "ecocnt_css":
        resp.content = value
    elif param == "ecocnt_js":
        resp.content = value
    elif param == "ecocnt_img":
        context = string_to_dict(value)
        height = int(context.get("height", 100))
        width = int(context.get("width", 100))
        ct_type = context.get("type", "png")
        # BUG FIX: PIL's Image.new takes size as (width, height); the
        # arguments were swapped, producing transposed images.
        img = Image.new("RGB", (width, height))
        img.save(resp, ct_type)
    elif param == "ecocnt_vid":
        context = string_to_dict(value)
        width = context.get("width", "50")
        height = context.get("height", "50")
        duration = context.get("duration", "1")
        # Serve a pre-rendered movie matching the requested dimensions.
        movie_path = f"echo/static/echo/movie_({width}, {height})_{duration}.mp4"
        try:
            with open(movie_path, "rb") as f:
                resp.content = f.read()
        except FileNotFoundError as e:
            logger.warning(f"File not found {e}")
            resp.content = "Unsupported movie file"
    elif param == "ecocnt_audio":
        context = string_to_dict(value)
        duration = context.get("duration", "1")
        audio_path = f"echo/static/echo/audio_{duration}.wav"
        try:
            with open(audio_path, "rb") as f:
                resp.content = f.read()
        except FileNotFoundError as e:
            logger.warning(f"File not found {e}")
            resp.content = "Unsupported audio file"
    elif param == "ecocnt_pdf":
        # Minimal hand-written PDF, see https://stackoverflow.com/a/66905260
        SMALL_PDF = (
            b"%PDF-1.2 \n"
            b"9 0 obj\n<<\n>>\nstream\nBT/ 32 Tf( Leaky )' ET\nendstream\nendobj\n"
            b"4 0 obj\n<<\n/Type /Page\n/Parent 5 0 R\n/Contents 9 0 R\n>>\nendobj\n"
            b"5 0 obj\n<<\n/Kids [4 0 R ]\n/Count 1\n/Type /Pages\n/MediaBox [ 0 0 250 50 ]\n>>\nendobj\n"
            b"3 0 obj\n<<\n/Pages 5 0 R\n/Type /Catalog\n>>\nendobj\n"
            b"trailer\n<<\n/Root 3 0 R\n>>\n"
            b"%%EOF"
        )
        resp.content = SMALL_PDF
    else:
        logger.warning(f"Unsupported body content: {param}")
    # Possible to add additional content options, such flash, appcache, applet, webvtt?
    return resp
def intersection_over_union(box1, box2):
    """Returns the IoU critera for pct of overlap area
    box = (left, right, bot, top), same as matplotlib `extent` format
    >>> box1 = (0, 1, 0, 1)
    >>> box2 = (0, 2, 0, 2)
    >>> print(intersection_over_union(box1, box2))
    0.25
    >>> print(intersection_over_union(box1, box1))
    1.0
    """
    # Guard against a zero union to avoid division by zero.
    union = union_area(box1, box2)
    return intersect_area(box1, box2) / union if union != 0 else 0
def fpsol(nu, u):
    """
    Read the fault normal vector ``nu`` and slip vector ``u`` and return the
    strike, rake and dip angles of the plane, all in degrees.
    """
    dip = np.arccos(-1 * nu[2])
    # Strike is undefined for a horizontal normal; default it to zero.
    if nu[0] == 0. and nu[1] == 0.:
        strike = 0.
    else:
        strike = np.arctan2(-1 * nu[0], nu[1])
    sin_strike, cos_strike = np.sin(strike), np.cos(strike)
    sin_dip, cos_dip = np.sin(dip), np.cos(dip)
    if abs(sin_dip) > 0.:
        rake = np.arcsin(-1 * u[2] / np.sin(dip))
    else:
        # Degenerate (horizontal) plane: pick 0 or pi from the sign of u[2].
        rake = np.pi if np.sign(u[2]) < 0. else 0.
    sin_rake = np.sin(rake)
    cdsl = cos_dip * sin_rake
    # Recover cos(rake) from whichever slip component is better conditioned.
    if abs(sin_strike) > abs(cos_strike):
        cos_rake = (u[1] + cdsl * cos_strike) / sin_strike
    else:
        cos_rake = (u[0] - cdsl * sin_strike) / cos_strike
    if sin_rake == 0. and cos_rake == 0.:
        slip = 0.
    else:
        slip = np.arctan2(sin_rake, cos_rake)
    # Fold dip into [0, pi/2], adjusting strike and slip to compensate.
    if dip > np.pi / 2:
        dip = np.pi - dip
        strike = strike + np.pi
        slip = 2 * np.pi - slip
    if strike < 0.:
        strike = strike + 2 * np.pi
    if slip >= np.pi:
        slip = slip - 2 * np.pi
    # NOTE(review): as in the original, the returned "rake" is the slip angle
    # converted to degrees (the arcsin-derived rake only seeds the solve).
    return strike * 180 / np.pi, slip * 180 / np.pi, dip * 180 / np.pi
def create_short_ticket(access_token, expire_seconds=2592000, scene_id=0):
    """
    Create a temporary (expiring) WeChat QR code ticket.

    :param access_token: WeChat access_token.
    :param expire_seconds: QR code expiry time in seconds (default 30 days).
    :param scene_id: Scene value ID embedded in the QR code.
    :return: Response data from the WeChat QR-code API.
    """
    target_url = 'https://api.weixin.qq.com/cgi-bin/qrcode/create?access_token=%s' % access_token
    payload = {
        "expire_seconds": expire_seconds,
        "action_info": {"scene": {"scene_id": scene_id}},
        "action_name": 'QR_SCENE',
    }
    return __get_data_use_api(target_url, False, payload)
import os
def gen_compound(name, N=50, nP=10):
    """
    Generate and pickle synthetic compound graph datasets.

    (1) Alkane vs Alcohol
    N: the #carbon atom in compound, ex: N=50 generates compounds of different length from 1 to 50 carbons
    P: # of permutation to relabeling the vertex order for each generated compound
    ----------------------------------------------
    (2) Asymmetric Isomer vs Symmetric Isomer
    N: the #carbon atom in compound, ex: N=50 generates compounds of different length from 1 to 50 carbons
    P: # of permutation to relabeling the vertex order for each generated compound

    :param name: dataset name, 'Asym-Sym' or 'Alk-Alc'
    :param N: size parameter (see above)
    :param nP: permutations per compound (original + nP-1 relabelings)
    """
    def gen_alcohol(nC):  # C_n H_2n+1 OH
        G, nlabel = nx.Graph(), {}
        for i in range(nC):
            c = i*3+1
            G.add_edge(c, c+1, weight=1)
            G.add_edge(c, c+2, weight=1)
            nlabel[c] = 'C'
            nlabel[c+1] = 'H'
            nlabel[c+2] = 'H'
            if i == 0:
                G.add_edge(c, c-1, weight=1)
                nlabel[c-1] = 'H'
            else:
                G.add_edge(c, c-3, weight=1)
            if i == nC-1:
                # Terminate the chain with a hydroxyl (O-H) group.
                G.add_edge(c, c+3, weight=1)
                G.add_edge(c+3, c+4, weight=1)
                nlabel[c+3] = 'O'
                nlabel[c+4] = 'H'
        return G, nlabel

    def gen_alkane(nC):  # C_n H_2n+2
        G, nlabel = nx.Graph(), {}
        for i in range(nC):
            c = i*3+1
            G.add_edge(c, c+1, weight=1)
            G.add_edge(c, c+2, weight=1)
            nlabel[c] = 'C'
            nlabel[c+1] = 'H'
            nlabel[c+2] = 'H'
            if i == 0:
                G.add_edge(c, c-1, weight=1)
                nlabel[c-1] = 'H'
            else:
                G.add_edge(c, c-3, weight=1)
            if i == nC-1:
                # Terminate the chain with a hydrogen.
                G.add_edge(c, c+3, weight=1)
                nlabel[c+3] = 'H'
        return G, nlabel

    def gen_asym(nC):
        # Carbon chain with one branch carbon attached at a random position.
        G, nlabel = nx.Graph(), {}
        cc = np.random.randint(nC)
        nlabel[nC*2+1] = 'C'
        for i in range(nC*2+1):
            nlabel[i] = 'C'
            if i > 0:
                G.add_edge(i, i-1, weight=1)
            if i == cc:
                G.add_edge(i, nC*2+1, weight=1)
        return G, nlabel

    def gen_sym(nC):
        # Carbon chain with the branch carbon attached exactly at the middle.
        G, nlabel = nx.Graph(), {}
        nlabel[nC*2+1] = 'C'
        for i in range(nC*2+1):
            nlabel[i] = 'C'
            if i > 0:
                G.add_edge(i, i-1, weight=1)
            if i == nC:
                G.add_edge(i, nC*2+1, weight=1)
        return G, nlabel

    def permute(G, nlabel):
        # Relabel the vertices of G with a random permutation, returning the
        # permuted graph and the matching node-label dictionary.
        A = nx.adjacency_matrix(G).todense()
        N = A.shape[0]
        nids = list(G.nodes())
        order = np.random.permutation(nids)
        mp = {nid: i for i, nid in enumerate(order)}
        mm = {nid: nids[i] for i, nid in enumerate(order)}
        rA = np.zeros_like(A)
        for i in range(N):
            for j in range(N):
                rA[i, j] = A[mp[nids[i]], mp[nids[j]]]
        rnlabel = {mm[nid]: nlabel[mm[nid]] for nid in nids}
        rG = nx.from_numpy_matrix(rA)
        return rG, rnlabel

    Gs, Ys, nlabels = [], [], []
    if name == 'Asym-Sym':
        for i in range(N):
            G, nlabel = gen_asym(5+i)
            Gs.append(G)
            Ys.append(0)
            nlabels.append(nlabel)
            for p in range(nP-1):
                pG, pL = permute(G, nlabel)
                # BUG FIX: the permuted graph pG was computed but the original
                # G was appended, discarding the relabeling.
                Gs.append(pG)
                Ys.append(0)
                nlabels.append(pL)
            G, nlabel = gen_sym(5+i)
            Gs.append(G)
            Ys.append(1)
            nlabels.append(nlabel)
            for p in range(nP-1):
                pG, pL = permute(G, nlabel)
                Gs.append(pG)  # BUG FIX: append the permuted graph
                Ys.append(1)
                nlabels.append(pL)
    elif name == 'Alk-Alc':
        for n in range(1, N):
            for icl, fn in enumerate([gen_alkane, gen_alcohol]):
                G, nlabel = fn(2*n)
                Gs.append(G)
                Ys.append(icl)
                nlabels.append(nlabel)
                for p in range(nP-1):
                    pG, pL = permute(G, nlabel)
                    Gs.append(pG)  # BUG FIX: append the permuted graph
                    Ys.append(icl)
                    nlabels.append(pL)
    if not os.path.exists('{}/{}'.format(DATASET_DIR, name)):
        os.makedirs('{}/{}'.format(DATASET_DIR, name))
    cPickle.dump(Gs, open('{}/{}/N{}-P{}-Gs.pkl'.format(DATASET_DIR, name, N, nP), 'wb'))
    cPickle.dump(nlabels, open('{}/{}/N{}-P{}-nlabels.pkl'.format(DATASET_DIR, name, N, nP), 'wb'))
    cPickle.dump(Ys, open('{}/{}/N{}-P{}-labels.pkl'.format(DATASET_DIR, name, N, nP), 'wb'))
import os
from datetime import datetime, timedelta
def build_youtube_search_request():
    """
    Build a query dict for the YouTube search API -
    https://developers.google.com/youtube/v3/docs/search/list

    Required environment: VIDEO_SEARCH_INGESTION_WINDOW (days to look back).
    Optional environment: QUERY, CHANNEL_ID, CHANNEL_TYPE, EVENT_TYPE,
    LOCATION, LOCATION_RADIUS, REGION_CODE, RELEVANCE_LANGUAGE.

    :return: dict of search parameters suitable for the API call.
    """
    # NOTE: `timedelta` was used here without being imported; it is now
    # imported alongside `datetime` at the top of the file.
    video_search_params = {
        "part": "id,snippet",
        "type": "video",
        "maxResults": 50,
        "publishedAfter": (datetime.now() - timedelta(days=int(os.environ["VIDEO_SEARCH_INGESTION_WINDOW"]))).strftime(
            "%Y-%m-%dT%H:%M:%SZ"
        ),  # format required 1970-01-01T00:00:00Z
    }
    if os.environ.get("QUERY", None):
        q = os.environ["QUERY"].replace("|", "%7C")  # any use of | has to be url encoded
        video_search_params["q"] = q
    if os.environ.get("CHANNEL_ID", None):
        video_search_params["channelId"] = os.environ["CHANNEL_ID"]
        video_search_params["channelType"] = os.environ.get("CHANNEL_TYPE", "any")
    if os.environ.get("EVENT_TYPE", None):
        video_search_params["eventType"] = os.environ["EVENT_TYPE"]
    if os.environ.get("LOCATION", None):
        video_search_params["location"] = os.environ["LOCATION"]
    if os.environ.get("LOCATION_RADIUS", None):
        video_search_params["locationRadius"] = os.environ["LOCATION_RADIUS"]
    if os.environ.get("REGION_CODE", None):
        video_search_params["regionCode"] = os.environ["REGION_CODE"]
    if os.environ.get("RELEVANCE_LANGUAGE", None):
        video_search_params["relevanceLanguage"] = os.environ["RELEVANCE_LANGUAGE"]
    return video_search_params
from qtpy.QtWidgets import QDesktopWidget # noqa
def get_screen_size():
    """Get **available** screen size/resolution as ``(x0, y0, w0, h0)``."""
    backend = mpl.get_backend()
    if backend.startswith('Qt'):
        # Inspired by spyder/widgets/shortcutssummary.py
        desktop = QDesktopWidget()
        geometry = desktop.availableGeometry(desktop.primaryScreen())
        return geometry.x(), geometry.y(), geometry.width(), geometry.height()
    if backend == "TkAgg":
        # https://stackoverflow.com/a/42951711/38281
        window = plt.get_current_fig_manager().window
        max_w, max_h = window.wm_maxsize()
        # h = window.winfo_screenheight()
        # w = window.winfo_screenwidth()
        return 0, 0, max_w, max_h
    # Fallback: hard-coded geometry of a Mac Retina Early 2013 display.
    return 0, 23, 1280, 773
def potential_bond_keys(mgrph):
    """ neighboring radical sites of a molecular graph
    """
    rad_idxs = radical_sites(mgrph)
    site_pairs = combinations(rad_idxs, 2)
    # Keep only pairs of radical sites that are actually adjacent.
    return tuple(frozenset([idx1, idx2]) for idx1, idx2 in site_pairs
                 if idx2 in atom_neighborhood_indices(mgrph, idx1))
def convolve(signal, kernel):
    """
    Apply ``kernel`` to ``signal`` through convolution and return the result.

    Some magic is done at the edges so the result doesn't approach zero:
      1. extend the signal's edges with len(kernel)//2 duplicated values
      2. perform the convolution ('same' mode)
      3. slice-off the ends we added
      4. return the same number of points as the original
    """
    # BUG FIX: len(kernel)/2 is a float under Python 3, which np.ones rejects;
    # use integer division.
    pad = np.ones(len(kernel) // 2)
    signal = np.concatenate((pad * signal[0], signal, pad * signal[-1]))
    signal = np.convolve(signal, kernel, mode='same')
    # Only strip the padding when there is some; with a length-1 kernel the
    # old slice [0:-0] would have returned an empty array.
    if len(pad):
        signal = signal[len(pad):-len(pad)]
    return signal
def makemebv(gmat, meff):
    """Set up family-specific marker effects (GEBV).

    Mapping per genotype entry: 2 -> -meff[j], 1 -> +meff[j], otherwise 0.

    :param gmat: (n_individuals, n_markers) genotype matrix coded {0, 1, 2}
    :param meff: length n_markers vector of marker effects
    :return: float array of shape ``gmat.shape`` with signed marker effects
    """
    # Vectorized replacement of the original per-element double loop;
    # broadcasting meff across rows reproduces the meff[j] column indexing.
    effects = np.broadcast_to(np.asarray(meff), gmat.shape)
    qqq = np.zeros(gmat.shape)
    het = gmat == 1
    hom = gmat == 2
    qqq[het] = effects[het]
    qqq[hom] = -effects[hom]
    return qqq
from typing import Tuple
from typing import Optional
import io
import textwrap
from re import I
def _define_property_shape(
    prop: intermediate.Property,
    cls: intermediate.ClassUnion,
    url_prefix: Stripped,
    class_to_rdfs_range: rdf_shacl_common.ClassToRdfsRange,
    constraints_by_property: infer_for_schema.ConstraintsByProperty,
) -> Tuple[Optional[Stripped], Optional[Error]]:
    """
    Generate the SHACL shape of a property ``prop`` of the intermediate ``cls``.

    :param prop: property to generate the ``sh:property`` shape for
    :param cls: class owning the property; its name forms the ``sh:path`` URL
    :param url_prefix: URL prefix for the generated ``sh:path``
    :param class_to_rdfs_range: mapping from classes to their ``rdfs:range``
    :param constraints_by_property: inferred length/pattern constraints
    :return: (generated shape, None) on success, or (None, error) on failure
    """
    stmts = [Stripped("a sh:PropertyShape ;")]  # type: List[Stripped]
    # Resolve the type annotation to the actual value, regardless if the property is
    # mandatory or optional
    type_anno = intermediate.beneath_optional(prop.type_annotation)
    prop_name = rdf_shacl_naming.property_name(prop.name)
    rdfs_range = rdf_shacl_common.rdfs_range_for_type_annotation(
        type_annotation=type_anno, class_to_rdfs_range=class_to_rdfs_range
    )
    cls_name = rdf_shacl_naming.class_name(cls.name)
    stmts.append(Stripped(f"sh:path <{url_prefix}/{cls_name}/{prop_name}> ;"))
    # Primitive RDF/XSD types are constrained via sh:datatype; model classes
    # (aas: namespace) via sh:class.
    if rdfs_range.startswith("rdf:") or rdfs_range.startswith("xsd:"):
        stmts.append(Stripped(f"sh:datatype {rdfs_range} ;"))
    elif rdfs_range.startswith("aas:"):
        stmts.append(Stripped(f"sh:class {rdfs_range} ;"))
    else:
        raise NotImplementedError(f"Unhandled namespace of the {rdfs_range=}")
    # region Define cardinality
    # Derive sh:minCount/sh:maxCount from optionality and list-ness;
    # a max_count of None means "unbounded".
    # noinspection PyUnusedLocal
    min_count = None  # type: Optional[int]
    max_count = None  # type: Optional[int]
    if isinstance(prop.type_annotation, intermediate.OptionalTypeAnnotation):
        if isinstance(type_anno, intermediate.ListTypeAnnotation):
            min_count = 0
            max_count = None
        elif isinstance(
            type_anno,
            (intermediate.OurTypeAnnotation, intermediate.PrimitiveTypeAnnotation),
        ):
            min_count = 0
            max_count = 1
        else:
            assert_never(type_anno)
    elif isinstance(prop.type_annotation, intermediate.ListTypeAnnotation):
        min_count = 0
        max_count = None
    elif isinstance(
        prop.type_annotation,
        (intermediate.OurTypeAnnotation, intermediate.PrimitiveTypeAnnotation),
    ):
        min_count = 1
        max_count = 1
    else:
        return None, Error(
            prop.parsed.node,
            f"(mristin, 2021-11-13): "
            f"We did not implement how to determine the cardinality based on the type "
            f"{prop.type_annotation}. If you see this message, it is time to implement "
            f"this logic.",
        )
    # Length constraints become sh:minLength/sh:maxLength for strings and
    # tighten sh:minCount/sh:maxCount for lists.
    min_length = None  # type: Optional[int]
    max_length = None  # type: Optional[int]
    len_constraint = constraints_by_property.len_constraints_by_property.get(prop, None)
    if len_constraint is not None:
        if isinstance(type_anno, intermediate.ListTypeAnnotation):
            if len_constraint.min_value is not None:
                # A positive minimum on an *optional* list is contradictory:
                # the property may be absent but must also be non-empty.
                if len_constraint.min_value > 0 and isinstance(
                    prop.type_annotation, intermediate.OptionalTypeAnnotation
                ):
                    return None, Error(
                        prop.parsed.node,
                        f"(mristin, 2022-02-09): "
                        f"The property {prop.name} is optional, but the minCount "
                        f"is given. If you see this message, it is time to consider "
                        f"how to implement this logic; please contact the developers.",
                    )
                # Take the tighter (larger) of the two minimums.
                min_count = (
                    max(min_count, len_constraint.min_value)
                    if min_count is not None
                    else len_constraint.min_value
                )
            if len_constraint.max_value is not None:
                # Take the tighter (smaller) of the two maximums.
                max_count = (
                    min(max_count, len_constraint.max_value)
                    if max_count is not None
                    else len_constraint.max_value
                )
        elif (
            isinstance(type_anno, intermediate.PrimitiveTypeAnnotation)
            and type_anno.a_type is intermediate.PrimitiveType.STR
        ):
            min_length = len_constraint.min_value
            max_length = len_constraint.max_value
        elif (
            isinstance(type_anno, intermediate.OurTypeAnnotation)
            and isinstance(type_anno.symbol, intermediate.ConstrainedPrimitive)
            and (
                type_anno.symbol.constrainee is intermediate.PrimitiveType.STR
                or type_anno.symbol.constrainee is intermediate.PrimitiveType.BYTEARRAY
            )
        ):
            min_length = len_constraint.min_value
            max_length = len_constraint.max_value
        else:
            return None, Error(
                prop.parsed.node,
                f"(mristin, 2022-02-09): "
                f"We did not implement how to specify the length constraint on the type "
                f"{type_anno}. If you see this message, it is time to implement "
                f"this logic.",
            )
    if min_count is not None:
        stmts.append(Stripped(f"sh:minCount {min_count} ;"))
    if max_count is not None:
        stmts.append(Stripped(f"sh:maxCount {max_count} ;"))
    if min_length is not None:
        stmts.append(Stripped(f"sh:minLength {min_length} ;"))
    if max_length is not None:
        stmts.append(Stripped(f"sh:maxLength {max_length} ;"))
    # endregion
    # region Define patterns
    pattern_constraints = constraints_by_property.patterns_by_property.get(prop, [])
    for pattern_constraint in pattern_constraints:
        pattern_literal = rdf_shacl_common.string_literal(pattern_constraint.pattern)
        stmts.append(Stripped(f"sh:pattern {pattern_literal} ;"))
    # endregion
    # Assemble all collected statements into a single sh:property block.
    writer = io.StringIO()
    writer.write("sh:property [")
    for stmt in stmts:
        writer.write("\n")
        writer.write(textwrap.indent(stmt, I))
    writer.write("\n] ;")
    return Stripped(writer.getvalue()), None
def row2string(row, sep=', '):
    """Converts a one-dimensional numpy.ndarray, list or tuple to string

    Args:
        row: one-dimensional list, tuple, numpy.ndarray or similar
        sep: string separator between elements

    Returns:
        string representation of a row
    """
    formatted = (f"{element}" for element in row)
    return sep.join(formatted)
from sys import path
import time
import requests
import json
def download_profile_picture(user_id, discriminator, avatar_hash=None, cache_dir="cache", default_dir="default",
                             cert_file=None, game_name=None, game_version=None, game_url=None):
    """
    Download a discord user's profile picture.

    :param user_id: The discord user's ID
    :param discriminator: The discord user's discriminator; required and used for when avatar_hash is None
    :param avatar_hash: (optional) The discord user's avatar hash. NOTE: if None, defaults to a default avatar image
    :param cache_dir: (optional) Path to store the profile picture
    :param default_dir: (optional) The path within the cache_dir to use for default avatars
    :param cert_file: (optional) The path to the cacert file to use
    :param game_name: (optional) The name of the game that is running
    :param game_version: (optional) The game's version number
    :param game_url: (optional) The game's website
    :return: Path to profile picture, or None
    """
    # BUG FIX: `path` and `makedirs` must come from `os`; the module-level
    # `from sys import path` shadows os.path with sys.path (a plain list).
    from os import path, makedirs
    global _http_rate_limit
    if avatar_hash is None:
        url = "https://cdn.discordapp.com/embed/avatars/{}.png".format(int(discriminator) % 5)
        # NOTE: we default to "./cache/default/" if no path specified
        # NOTE 2: we use a "default" directory to save disk space and download calls in the long run
        download_folder = path.join(cache_dir, default_dir)
    else:
        url = "https://cdn.discordapp.com/avatars/{}/{}.jpg?size=2048".format(user_id, avatar_hash)
        # NOTE: we default to "./cache/user_id/" if no path specified
        download_folder = path.join(cache_dir, user_id)
    if not path.exists(download_folder):
        makedirs(download_folder, 0o755)
    if avatar_hash is not None:
        avatar_file = path.join(download_folder, avatar_hash) + '.jpg'
    else:
        avatar_file = path.join(download_folder, str(int(discriminator) % 5)) + '.png'
    if path.exists(avatar_file):
        # technically, we downloaded it, so no need to worry about downloading
        return avatar_file
    # we check this after just in case we already have a cached image
    # BUG FIX: the comparison was inverted (`not _http_rate_limit > now`),
    # which allowed requests while still rate limited and blocked them forever
    # once the limit had expired.
    if _http_rate_limit is not None:
        if _http_rate_limit > time.time():
            return None
        _http_rate_limit = None  # limit has expired; clear it
    # we're required to have a ua string
    ua_str = "discord-rpc.py ({url}, {version})".format(url=PROJECT_URL, version=VERSION)
    if game_name is not None and isinstance(game_name, (bytes, unicode)) and game_name.strip() != '':
        # if we have a game name, append that
        ua_str += ' {}'.format(game_name)
    if all((x is not None and isinstance(x, (bytes, unicode)) and x.strip() != '') for x in (game_version,
                                                                                            game_url)):
        # if we have both a url and version number, append those too
        ua_str += " ({url}, {version}".format(url=game_url, version=game_version)
    headers = {'User-Agent': ua_str}
    if is_python3():
        # urllib path for Python 3.
        if cert_file is not None:
            r = Request(
                url,
                data=None,
                headers=headers,
                cafile=cert_file
            )
        else:
            r = Request(
                url,
                data=None,
                headers=headers
            )
        req = urlopen(r)
        status_code = req.getcode()
    else:
        # requests path for Python 2.
        if cert_file is not None:
            req = requests.get(url, headers=headers, verify=cert_file)
        else:
            req = requests.get(url, headers=headers)
        status_code = req.status_code
    if status_code != 200:
        if status_code == 404:
            # nonexistent avatar/hash; return None
            return None
        # Record the rate-limit reset time from the header or the JSON body.
        if 'X-RateLimit-Reset' in req.headers:
            _http_rate_limit = int(req.headers['X-RateLimit-Reset'])
        else:
            try:
                if is_python3():
                    data = req.read()
                    json_data = json.loads(data.decode(req.info().get_content_charset('utf-8')))
                else:
                    json_data = req.json()
                if 'retry_after' in json_data:
                    _http_rate_limit = time.time() + (int(json_data['retry_after']) / 1000.0)
            except Exception:
                pass
        if _http_rate_limit is None:
            # try again in 15 min (Discord shouldn't kill us for waiting 15 min anyways...)
            _http_rate_limit = time.time() + (15 * 60)
        return None
    with open(avatar_file, 'wb') as f:
        if is_python3():
            f.write(req.read())
        else:
            f.write(req.content)
    return avatar_file
def apply_kNNO(Xs, Xt, ys=None, yt=None, scaling=True, k=10, contamination=0.1):
    """ Apply kNNO (k-nearest-neighbour outlier detection).

    The k-distance of an instance is the distance to its k-th nearest
    neighbour in the dataset; kNNO ranks all instances by their k-distance,
    with higher distances signifying more anomalous instances.

    Parameters
    ----------
    Xs : np.array of shape (n_samples, n_features), optional (default=None)
        The source instances.
    Xt : np.array of shape (n_samples, n_features), optional (default=None)
        The target instances.
    ys : np.array of shape (n_samples,), optional (default=None)
        The ground truth of the source instances.
    yt : np.array of shape (n_samples,), optional (default=None)
        The ground truth of the target instances.
    k : int (default=10)
        Number of nearest neighbors.
    contamination : float (default=0.1)
        The expected contamination in the data.

    Returns
    -------
    yt_scores : np.array of shape (n_samples,)
        Anomaly scores for the target instances.
    """
    # Validate the inputs; the source domain is only validated, never used
    # beyond that (no transfer happens in kNNO).
    if Xs is not None:
        ys = np.zeros(Xs.shape[0]) if ys is None else ys
        Xs, ys = check_X_y(Xs, ys)
    yt = np.zeros(Xt.shape[0]) if yt is None else yt
    Xt, yt = check_X_y(Xt, yt)
    if scaling:
        Xt = StandardScaler().fit_transform(Xt)
    # Query k+1 neighbours because each sample is its own nearest neighbour.
    tree = BallTree(Xt, leaf_size=16, metric='euclidean')
    dists, _ = tree.query(Xt, k=k + 1)
    # k-distance of each target sample: distance to its k-th real neighbour.
    outlier_scores = dists[:, -1].flatten()
    # gamma: the (1 - contamination) percentile of the k-distances,
    # nudged to avoid a zero threshold.
    gamma = np.percentile(
        outlier_scores, int(100 * (1.0 - contamination))) + 1e-10
    # Squash into (0, 1); higher values mean more anomalous.
    yt_scores = _squashing_function(outlier_scores, gamma)
    return yt_scores
import logging
def _tenant_update(name, new_name=None, description=None, default_datastore=None):
    """ API to update a tenant.

    :param name: existing tenant name
    :param new_name: (optional) new name to rename the tenant to
    :param description: (optional) new tenant description
    :param default_datastore: (optional) new default datastore
    :return: None on success, or an error_info describing the failure
    """
    logging.debug("_tenant_update: name=%s, new_name=%s, descrption=%s, default_datastore=%s",
                  name, new_name, description, default_datastore)
    error_info, tenant = get_tenant_from_db(name)
    if error_info:
        return error_info
    if not tenant:
        error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
        return error_info
    error_info, auth_mgr = get_auth_mgr_object()
    if error_info:
        return error_info
    if default_datastore:
        error_info = check_default_datastore(default_datastore)
        if error_info:
            return error_info
        error_info = set_default_ds(tenant=tenant,
                                    default_datastore=default_datastore,
                                    check_existing=True)
        if error_info:
            return error_info
    if new_name:
        # The default tenant must never be renamed.
        if name == auth_data_const.DEFAULT_TENANT:
            error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, name, VALID_TENANT_NAMES)
            return error_info
        # check whether tenant with new_name already exist or not
        error_info = check_tenant_exist(new_name)
        if error_info:
            return error_info
        # BUG FIX: the *new* name must be validated; the old code validated
        # `name`, which already exists and is therefore always valid.
        if not is_tenant_name_valid(new_name):
            error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, new_name, VALID_TENANT_NAME_REGEXP)
            return error_info
        error_msg = tenant.set_name(auth_mgr.conn, name, new_name)
        if error_msg:
            error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
            return error_info
    if description:
        error_msg = tenant.set_description(auth_mgr.conn, description)
        if error_msg:
            error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
            return error_info
    return None
from typing import Iterable
import os
def _clone_all(urls: Iterable[str], cwd: str):
    """Attempts to clone all urls, sequentially. If a repo is already present,
    it is skipped. If any one clone fails, the error is logged and re-raised;
    pass a temporary directory as cwd so partially cloned repos are cleaned up
    automatically.

    Args:
        urls: HTTPS urls to git repositories.
        cwd: Working directory. Use temporary directory for automatic cleanup.
    Returns:
        local paths to the cloned repos.
    """
    # Materialize the iterable: it is consumed more than once and len() is
    # required, which a generic Iterable does not guarantee.
    urls = list(urls)
    if len(set(urls)) != len(urls):
        raise ValueError("master_repo_urls contains duplicates")
    try:
        for url in urls:
            LOGGER.info("Cloning into {}".format(url))
            git.clone_single(url, cwd=cwd)
    except exception.CloneFailedError:
        LOGGER.error("Error cloning into {}, aborting ...".format(url))
        raise
    paths = [os.path.join(cwd, util.repo_name(url)) for url in urls]
    assert all(map(util.is_git_repo, paths)), "all repos must be git repos"
    return paths
def build_aggregation(facet_name, facet_options, min_doc_count=0):
    """Specify an elasticsearch aggregation from schema facet configuration.
    """
    # Map the facet name onto the document field it aggregates over.
    if facet_name == 'type':
        field = 'embedded.@type'
        exclude = ['Item']
    else:
        exclude = []
        field = facet_name if facet_name.startswith('audit') else 'embedded.' + facet_name
    agg_name = facet_name.replace('.', '-')
    facet_type = facet_options.get('type', 'terms')
    facet_length = 3000 if facet_options.get('length') == 'long' else 200
    if facet_type in ('terms', 'typeahead'):
        terms = {
            'field': field,
            'min_doc_count': min_doc_count,
            'size': facet_length,
        }
        if exclude:
            terms['exclude'] = exclude
        agg = {'terms': terms}
    elif facet_type == 'exists':
        # Bucket documents by whether the field exists at all.
        agg = {
            'filters': {
                'filters': {
                    'yes': {'bool': {'must': {'exists': {'field': field}}}},
                    'no': {'bool': {'must_not': {'exists': {'field': field}}}},
                },
            },
        }
    else:
        raise ValueError('Unrecognized facet type {} for {} facet'.format(
            facet_type, field))
    return agg_name, agg
def recover_marker_rbt(itf, tgt_mkr_name, cl_mkr_names, log=False):
    """
    Recover the trajectory of a marker by rbt(rigid body transformation) using a group (cluster) markers.
    The number of cluster marker names is fixed as 3.
    This function extrapolates the target marker coordinates for the frames where the cluster markers are available.
    The order of the cluster markers will be sorted according to their relative distances from the target marker.
    Parameters
    ----------
    itf : win32com.client.CDispatch
        COM object of the C3Dserver.
    tgt_mkr_name : str
        Target marker name.
    cl_mkr_names : list or tuple
        Cluster (group) marker names.
    log : bool, optional
        Whether to write logs or not. The default is False.
    Returns
    -------
    bool
        True or False.
    int
        Number of valid frames in the target marker after this function.
    Notes
    -----
    This function is adapted from 'recover_marker_rbt()' function in the GapFill module, see [1] in the References.
    References
    ----------
    .. [1] https://github.com/mkjung99/gapfill
    """
    try:
        if log: logger.debug(f'Start recovery of "{tgt_mkr_name}" ...')
        n_total_frs = get_num_frames(itf, log=log)
        tgt_mkr_data = get_marker_data(itf, tgt_mkr_name, blocked_nan=False, log=log)
        if tgt_mkr_data is None:
            err_msg = f'Unable to get the information of "{tgt_mkr_name}"'
            raise ValueError(err_msg)
        tgt_mkr_coords = tgt_mkr_data[:,0:3]
        tgt_mkr_resid = tgt_mkr_data[:,3]
        # Residual of -1 marks an invalid (blocked) frame.
        tgt_mkr_valid_mask = np.where(np.isclose(tgt_mkr_resid, -1), False, True)
        n_tgt_mkr_valid_frs = np.count_nonzero(tgt_mkr_valid_mask)
        if n_tgt_mkr_valid_frs == 0:
            if log: logger.info(f'Recovery of "{tgt_mkr_name}" skipped: no valid target marker frame')
            return False, n_tgt_mkr_valid_frs
        if n_tgt_mkr_valid_frs == n_total_frs:
            # Bug fix: these three messages were missing the f-string prefix,
            # so "{tgt_mkr_name}" was logged literally instead of interpolated.
            if log: logger.info(f'Recovery of "{tgt_mkr_name}" skipped: all target marker frames valid')
            return False, n_tgt_mkr_valid_frs
        dict_cl_mkr_coords = {}
        dict_cl_mkr_valid = {}
        cl_mkr_valid_mask = np.ones((n_total_frs), dtype=bool)
        for mkr in cl_mkr_names:
            mkr_data = get_marker_data(itf, mkr, blocked_nan=False, log=log)
            if mkr_data is None:
                err_msg = f'Unable to get the information of "{mkr}"'
                raise ValueError(err_msg)
            dict_cl_mkr_coords[mkr] = mkr_data[:,0:3]
            dict_cl_mkr_valid[mkr] = np.where(np.isclose(mkr_data[:,3], -1), False, True)
            cl_mkr_valid_mask = np.logical_and(cl_mkr_valid_mask, dict_cl_mkr_valid[mkr])
        all_mkr_valid_mask = np.logical_and(cl_mkr_valid_mask, tgt_mkr_valid_mask)
        if not np.any(all_mkr_valid_mask):
            if log: logger.info(f'Recovery of "{tgt_mkr_name}" skipped: no common valid frame among markers')
            return False, n_tgt_mkr_valid_frs
        cl_mkr_only_valid_mask = np.logical_and(cl_mkr_valid_mask, np.logical_not(tgt_mkr_valid_mask))
        if not np.any(cl_mkr_only_valid_mask):
            if log: logger.info(f'Recovery of "{tgt_mkr_name}" skipped: cluster markers not helpful')
            return False, n_tgt_mkr_valid_frs
        all_mkr_valid_frs = np.where(all_mkr_valid_mask)[0]
        cl_mkr_only_valid_frs = np.where(cl_mkr_only_valid_mask)[0]
        # Sort cluster markers by mean distance to the target marker.
        dict_cl_mkr_dist = {}
        for mkr_name in cl_mkr_names:
            vec_diff = dict_cl_mkr_coords[mkr_name]-tgt_mkr_coords
            dict_cl_mkr_dist.update({mkr_name: np.nanmean(np.linalg.norm(vec_diff, axis=1))})
        cl_mkr_dist_sorted = sorted(dict_cl_mkr_dist.items(), key=lambda kv: kv[1])
        p0 = dict_cl_mkr_coords[cl_mkr_dist_sorted[0][0]]
        p1 = dict_cl_mkr_coords[cl_mkr_dist_sorted[1][0]]
        p2 = dict_cl_mkr_coords[cl_mkr_dist_sorted[2][0]]
        p3 = tgt_mkr_coords
        # Build an orthonormal frame from the cluster markers for each sample.
        vec0 = p1-p0
        vec1 = p2-p0
        vec0_norm = np.linalg.norm(vec0, axis=1, keepdims=True)
        vec1_norm = np.linalg.norm(vec1, axis=1, keepdims=True)
        vec0_unit = np.divide(vec0, vec0_norm, where=(vec0_norm!=0))
        vec1_unit = np.divide(vec1, vec1_norm, where=(vec1_norm!=0))
        vec2 = np.cross(vec0_unit, vec1_unit)
        vec2_norm = np.linalg.norm(vec2, axis=1, keepdims=True)
        vec2_unit = np.divide(vec2, vec2_norm, where=(vec2_norm!=0))
        vec3 = p3-p0
        vec_z = vec2_unit
        vec_x = vec0_unit
        vec_y = np.cross(vec_z, vec_x)
        mat_rot = np.asarray([vec_x.T, vec_y.T, vec_z.T]).T
        for idx, fr in np.ndenumerate(cl_mkr_only_valid_frs):
            search_idx = np.searchsorted(all_mkr_valid_frs, fr)
            if search_idx == 0:
                # Before the first common valid frame: extrapolate from it.
                fr0 = all_mkr_valid_frs[0]
                rot_fr0_to_fr = np.dot(mat_rot[fr], mat_rot[fr0].T)
                vt_fr0 = np.dot(rot_fr0_to_fr, vec3[fr0])
                vc = vt_fr0
            elif search_idx >= all_mkr_valid_frs.shape[0]:
                # After the last common valid frame: extrapolate from it.
                fr1 = all_mkr_valid_frs[all_mkr_valid_frs.shape[0]-1]
                rot_fr1_to_fr = np.dot(mat_rot[fr], mat_rot[fr1].T)
                vt_fr1 = np.dot(rot_fr1_to_fr, vec3[fr1])
                vc = vt_fr1
            else:
                # In a gap: blend the transforms of the two bracketing frames,
                # weighted by temporal distance.
                fr0 = all_mkr_valid_frs[search_idx-1]
                fr1 = all_mkr_valid_frs[search_idx]
                rot_fr0_to_fr = np.dot(mat_rot[fr], mat_rot[fr0].T)
                rot_fr1_to_fr = np.dot(mat_rot[fr], mat_rot[fr1].T)
                vt_fr0 = np.dot(rot_fr0_to_fr, vec3[fr0])
                vt_fr1 = np.dot(rot_fr1_to_fr, vec3[fr1])
                a = np.float32(fr-fr0)
                b = np.float32(fr1-fr)
                vc = (b*vt_fr0+a*vt_fr1)/(a+b)
            tgt_mkr_coords[fr] = p0[fr]+vc
            tgt_mkr_resid[fr] = 0.0
        set_marker_pos(itf, tgt_mkr_name, tgt_mkr_coords, None, None, log=log)
        set_marker_resid(itf, tgt_mkr_name, tgt_mkr_resid, None, None, log=log)
        n_tgt_mkr_valid_frs_updated = np.count_nonzero(np.where(np.isclose(tgt_mkr_resid, -1), False, True))
        if log: logger.info(f'Recovery of "{tgt_mkr_name}" finished')
        return True, n_tgt_mkr_valid_frs_updated
    except pythoncom.com_error as err:
        if log: logger.error(err.excepinfo[2])
        raise
    except ValueError as err:
        if log: logger.error(err)
        raise
def route_wrap_01_version(request_mapping: str):
    """
    Flask route-mapping wrapper.
    Prepends the module-level API_VERSION to the given route mapping.
    :param request_mapping: route path to wrap
    :return: versioned route path, "<API_VERSION>/<request_mapping>"
    """
    versioned_route = '{}/{}'.format(API_VERSION, request_mapping)
    return versioned_route
def spawn(pool):
    """Return the pool's spawn callable for starting a greenlet.
    Greenlets started through it are killed automatically after the test run.
    """
    spawner = pool.spawn
    return spawner
from typing import Any
import json
def from_json_util(json_str: str) -> Any:
    """Load an arbitrary datatype from its JSON representation.
    The Out-of-proc SDK has a special JSON encoding strategy
    to enable arbitrary datatypes to be serialized. This utility
    loads a JSON with the assumption that it follows that encoding
    method.
    Parameters
    ----------
    json_str: str
        A JSON-formatted string, from durable-extension
    Returns
    -------
    Any:
        The original datatype that was serialized
    """
    # The object hook re-inflates custom types emitted by the SDK encoder.
    decode_hook = _deserialize_custom_object
    return json.loads(json_str, object_hook=decode_hook)
def min_row_dist_sum_idx(dists):
    """Find the index of the row with the minimum row distance sum
    This should return the index of the row index with the least distance overall
    to all other rows.
    Args:
        dists (np.array): must be square distance matrix
    Returns:
        int: index of row with min dist row sum
    """
    # Summing over axis 0 matches the original apply_along_axis(np.sum, axis=0)
    # behavior; for a symmetric distance matrix row and column sums coincide.
    totals = dists.sum(axis=0)
    return totals.argmin()
def inline(text):
    """
    Convert all newline characters to spaces.
    This can be used to prevent Hypertag from indenting lines of `text` when rendering parent nodes,
    and to safely insert `text` inside <pre>, <textarea>, or similar elements.
    """
    return ' '.join(text.split('\n'))
def align_pos(xyz, test_crd, ref_crd, ind=None):
    """Translates a set of atoms such that two positions are coincident.
    Parameters
    ----------
    xyz : (N, 3) array_like
        The atomic cartesian coordinates.
    test_crd : (3,) array_like
        Cartesian coordinates of the original position.
    ref_crd : (3,) array_like
        Cartesian coordinates of the final position.
    ind : array_like, optional
        List of atomic indices to specify which atoms are displaced. If
        `ind == None` (default) then all atoms are displaced.
    Returns
    -------
    (N, 3) ndarray
        The atomic cartesian coordinates of the displaced molecule.
    """
    # Displacement vector from the test position to the reference position.
    transax = ref_crd - test_crd
    dist = np.linalg.norm(transax)
    # `translate` is defined elsewhere in this module; presumably it moves the
    # selected atoms by `dist` along the (unnormalized) axis -- confirm there.
    return translate(xyz, dist, transax, ind=ind)
from typing import Optional
from typing import Sequence
def get_orderable_db_instance(availability_zone_group: Optional[str] = None,
                              engine: Optional[str] = None,
                              engine_version: Optional[str] = None,
                              instance_class: Optional[str] = None,
                              license_model: Optional[str] = None,
                              preferred_engine_versions: Optional[Sequence[str]] = None,
                              preferred_instance_classes: Optional[Sequence[str]] = None,
                              storage_type: Optional[str] = None,
                              supports_enhanced_monitoring: Optional[bool] = None,
                              supports_global_databases: Optional[bool] = None,
                              supports_iam_database_authentication: Optional[bool] = None,
                              supports_iops: Optional[bool] = None,
                              supports_kerberos_authentication: Optional[bool] = None,
                              supports_performance_insights: Optional[bool] = None,
                              supports_storage_autoscaling: Optional[bool] = None,
                              supports_storage_encryption: Optional[bool] = None,
                              vpc: Optional[bool] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrderableDbInstanceResult:
    """
    Information about RDS orderable DB instances and valid parameter combinations.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    test = aws.rds.get_orderable_db_instance(engine="mysql",
        engine_version="5.7.22",
        license_model="general-public-license",
        preferred_instance_classes=[
            "db.r6.xlarge",
            "db.m4.large",
            "db.t3.small",
        ],
        storage_type="standard")
    ```
    Valid parameter combinations can also be found with `preferred_engine_versions` and/or `preferred_instance_classes`.
    ```python
    import pulumi
    import pulumi_aws as aws
    test = aws.rds.get_orderable_db_instance(engine="mysql",
        license_model="general-public-license",
        preferred_engine_versions=[
            "5.6.35",
            "5.6.41",
            "5.6.44",
        ],
        preferred_instance_classes=[
            "db.t2.small",
            "db.t3.medium",
            "db.t3.large",
        ])
    ```
    :param str availability_zone_group: Availability zone group.
    :param str engine: DB engine. Engine values include `aurora`, `aurora-mysql`, `aurora-postgresql`, `docdb`, `mariadb`, `mysql`, `neptune`, `oracle-ee`, `oracle-se`, `oracle-se1`, `oracle-se2`, `postgres`, `sqlserver-ee`, `sqlserver-ex`, `sqlserver-se`, and `sqlserver-web`.
    :param str engine_version: Version of the DB engine. If none is provided, the AWS-defined default version will be used.
    :param str instance_class: DB instance class. Examples of classes are `db.m3.2xlarge`, `db.t2.small`, and `db.m3.medium`.
    :param str license_model: License model. Examples of license models are `general-public-license`, `bring-your-own-license`, and `amazon-license`.
    :param Sequence[str] preferred_engine_versions: Ordered list of preferred RDS DB instance engine versions. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned.
    :param Sequence[str] preferred_instance_classes: Ordered list of preferred RDS DB instance classes. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned.
    :param str storage_type: Storage types. Examples of storage types are `standard`, `io1`, `gp2`, and `aurora`.
    :param bool supports_enhanced_monitoring: Enable this to ensure a DB instance supports Enhanced Monitoring at intervals from 1 to 60 seconds.
    :param bool supports_global_databases: Enable this to ensure a DB instance supports Aurora global databases with a specific combination of other DB engine attributes.
    :param bool supports_iam_database_authentication: Enable this to ensure a DB instance supports IAM database authentication.
    :param bool supports_iops: Enable this to ensure a DB instance supports provisioned IOPS.
    :param bool supports_kerberos_authentication: Enable this to ensure a DB instance supports Kerberos Authentication.
    :param bool supports_performance_insights: Enable this to ensure a DB instance supports Performance Insights.
    :param bool supports_storage_autoscaling: Enable this to ensure Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.
    :param bool supports_storage_encryption: Enable this to ensure a DB instance supports encrypted storage.
    :param bool vpc: Boolean that indicates whether to show only VPC or non-VPC offerings.
    """
    # Marshal the snake_case Python arguments into the camelCase keys the
    # provider invoke expects.
    __args__ = dict()
    __args__['availabilityZoneGroup'] = availability_zone_group
    __args__['engine'] = engine
    __args__['engineVersion'] = engine_version
    __args__['instanceClass'] = instance_class
    __args__['licenseModel'] = license_model
    __args__['preferredEngineVersions'] = preferred_engine_versions
    __args__['preferredInstanceClasses'] = preferred_instance_classes
    __args__['storageType'] = storage_type
    __args__['supportsEnhancedMonitoring'] = supports_enhanced_monitoring
    __args__['supportsGlobalDatabases'] = supports_global_databases
    __args__['supportsIamDatabaseAuthentication'] = supports_iam_database_authentication
    __args__['supportsIops'] = supports_iops
    __args__['supportsKerberosAuthentication'] = supports_kerberos_authentication
    __args__['supportsPerformanceInsights'] = supports_performance_insights
    __args__['supportsStorageAutoscaling'] = supports_storage_autoscaling
    __args__['supportsStorageEncryption'] = supports_storage_encryption
    __args__['vpc'] = vpc
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the result is re-wrapped in the awaitable
    # result type below so it can also be used in async contexts.
    __ret__ = pulumi.runtime.invoke('aws:rds/getOrderableDbInstance:getOrderableDbInstance', __args__, opts=opts, typ=GetOrderableDbInstanceResult).value
    return AwaitableGetOrderableDbInstanceResult(
        availability_zone_group=__ret__.availability_zone_group,
        availability_zones=__ret__.availability_zones,
        engine=__ret__.engine,
        engine_version=__ret__.engine_version,
        id=__ret__.id,
        instance_class=__ret__.instance_class,
        license_model=__ret__.license_model,
        max_iops_per_db_instance=__ret__.max_iops_per_db_instance,
        max_iops_per_gib=__ret__.max_iops_per_gib,
        max_storage_size=__ret__.max_storage_size,
        min_iops_per_db_instance=__ret__.min_iops_per_db_instance,
        min_iops_per_gib=__ret__.min_iops_per_gib,
        min_storage_size=__ret__.min_storage_size,
        multi_az_capable=__ret__.multi_az_capable,
        outpost_capable=__ret__.outpost_capable,
        preferred_engine_versions=__ret__.preferred_engine_versions,
        preferred_instance_classes=__ret__.preferred_instance_classes,
        read_replica_capable=__ret__.read_replica_capable,
        storage_type=__ret__.storage_type,
        supported_engine_modes=__ret__.supported_engine_modes,
        supports_enhanced_monitoring=__ret__.supports_enhanced_monitoring,
        supports_global_databases=__ret__.supports_global_databases,
        supports_iam_database_authentication=__ret__.supports_iam_database_authentication,
        supports_iops=__ret__.supports_iops,
        supports_kerberos_authentication=__ret__.supports_kerberos_authentication,
        supports_performance_insights=__ret__.supports_performance_insights,
        supports_storage_autoscaling=__ret__.supports_storage_autoscaling,
        supports_storage_encryption=__ret__.supports_storage_encryption,
        vpc=__ret__.vpc)
def publish_channel_url():
    """publish_channel_url: returns url to publish channel
    Args: None
    Returns: string url to publish channel
    """
    # Fills the module-level URL template with the configured DOMAIN.
    return PUBLISH_CHANNEL_URL.format(domain=DOMAIN)
def hist_counts(df_acts=None, lst_acts=None, df_ac=None, y_scale="linear", idle=False,
                figsize=None, color=None, file_path=None):
    """
    Plot a bar chart displaying how often activities are occurring.
    Parameters
    ----------
    df_acts : pd.DataFrame, optional
        recorded activities from a dataset. Fore more information refer to the
        :ref:`user guide<activity_dataframe>`.
    lst_acts : lst of str, optional
        A list of activities that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_ac : pd.DataFrame, optional
        Precomputed activity-count statistic; when given, df_acts is ignored.
    idle : bool, default: False
        Determines whether gaps between activities should be assigned
        the activity *idle* or be ignored.
    y_scale : {"log", "linear"}, default: linear
        The axis scale type to apply.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    color : str, optional
        sets the color of the plot. When not set, the primary theming color is used.
        Learn more about theming in the :ref:`user guide <theming>`
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plot import plot_activity_bar_count
    >>> plot_activity_bar_count(data.df_activities, idle=True)
    .. image:: ../_static/images/plots/act_bar_cnt.png
       :height: 300px
       :width: 500 px
       :scale: 90 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing
    """
    assert not (df_acts is None and df_ac is None)
    assert y_scale in ['linear', 'log']
    title ='Activity occurrences'
    col_label = 'occurrence'
    x_label = 'counts'
    color = (get_primary_color() if color is None else color)
    # create statistics if the don't exists
    if df_ac is None:
        df_acts = df_acts.copy()
        if idle:
            df_acts = add_idle(df_acts)
        df = activities_count(df_acts, lst_acts=lst_acts)
    else:
        # Bug fix: work on a copy -- the in-place reset_index below would
        # otherwise mutate the caller's dataframe.
        df = df_ac.copy()
    # prepare dataframe for plotting
    df.reset_index(level=0, inplace=True)
    df = df.sort_values(by=[col_label], axis=0)
    # define plot modalities
    num_act = len(df)
    figsize = (_num_bars_2_figsize(num_act) if figsize is None else figsize)
    # create plot
    fig, ax = plt.subplots(figsize=figsize)
    plt.title(title)
    plt.xlabel(x_label)
    ax.barh(df['activity'], df[col_label], color=color)
    if y_scale == 'log':
        ax.set_xscale('log')
    # save or return fig
    if file_path is not None:
        savefig(fig, file_path)
        return None
    else:
        return fig
def random_dense(shape, fortran):
    """Generate a random qutip Dense matrix of the given shape.

    `fortran` is forwarded to random_numpy_dense -- presumably it selects
    column-major (Fortran-order) backing memory; confirm in that helper.
    """
    return qutip.core.data.Dense(random_numpy_dense(shape, fortran))
import torch
def bo_step(X, y, objective, bounds, GP=None, acquisition=None, q=1, state_dict=None, *GP_args,
            **GP_kwargs):
    """
    One iteration of Bayesian optimization:
    1. Fit GP model using (X, y)
    2. Create acquisition function
    3. Optimize acquisition function to obtain candidate point
    4. Evaluate objective at candidate point
    5. Add new point to the data set
    Parameters
    ----------
    X : torch.tensor, shape=(n_samples, dim)
        Input values
    y : torch.tensor, shape=(n_samples,)
        Objective values
    objective : callable, argument=torch.tensor
        Objective black-box function, accepting as an argument torch.tensor
    bounds : torch.tensor, shape=(2, dim)
        Box-constraints
    GP : callable
        GP model class constructor. It is a function that takes as input
        2 tensors - X, y - and returns an instance of botorch.models.Model.
    acquisition : callable
        Acquisition function construction. It is a function that receives
        one argument - GP model - and returns an instance of
        botorch.acquisition.AcquisitionFunction
    q : int
        Number of candidate points to find
    state_dict : dict
        GP model state dict
    Returns
    -------
    X : torch.tensor
        Tensor of input values with new point
    y : torch.tensor
        Tensor of output values with new point
    gp : botorch.models.Model
        Constructed GP model
    yvar : torch.tensor, only returned when 'train_Yvar' was passed
        Observation-noise variances including the new point
    Example
    -------
    from botorch.models import FixedNoiseGP
    noise_var = 1e-2 * torch.ones_like(y)
    GP = lambda X, y: FixedNoiseGP(X, y, noise_var)
    acq_func = lambda gp: ExpectedImprovement(gp, y.min(), maximize=False)
    X, y, gp = bo_step(X, y, objective, bounds, GP=GP, acquisition=acq_func)
    """
    # Presence of 'train_Yvar' switches the return signature (4-tuple) below.
    yvar = GP_kwargs.get('train_Yvar')
    # Create GP model
    mll, gp = initialize_model(X, y, GP=GP, state_dict=state_dict, *GP_args, **GP_kwargs)
    fit_gpytorch_model(mll)
    # Create acquisition function
    acquisition = acquisition(gp)
    # Optimize acquisition function
    candidate, _ = optimize_acqf(
        acquisition, bounds=bounds, q=q, num_restarts=1, raw_samples=1000,
    )
    # Update data set: mean over the objective's sample dimension is appended.
    X = torch.cat([X, candidate])
    y = torch.cat([y, objective(candidate).mean(dim=1).reshape((-1, 1))])
    if yvar is not None:
        # NOTE(review): the objective is evaluated a second time here -- for a
        # stochastic objective these variances come from a different draw than
        # the appended mean above; confirm that is intended.
        yvar = torch.cat([GP_kwargs['train_Yvar'], objective(candidate).var(dim=1).reshape((-1, 1))])
    if yvar is not None:
        return X, y, gp, yvar
    else:
        return X, y, gp
def validate_inputs(input_data):
    """Check prediction inputs against schema.

    Returns a tuple of (validated_input, errors); errors is None when
    every row passes validation.
    """
    # many=True allows passing in a list of records
    schema = InsuranceDataRequestSchema(strict=True, many=True)
    errors = None
    try:
        schema.load(input_data)
    except ValidationError as exc:
        errors = exc.messages
    if not errors:
        return input_data, errors
    # Drop only the rows that failed validation.
    validated = _filter_error_rows(errors=errors, validated_input=input_data)
    return validated, errors
def published_stats_list(request):
    """
    List cumulative stats about projects published.
    The request may specify the desired resource type
    """
    resource_type = None
    # Get the desired resource type if specified
    if 'resource_type' in request.GET and request.GET['resource_type'] in ['0', '1']:
        resource_type = int(request.GET['resource_type'])
    if resource_type is None:
        projects = PublishedProject.objects.all().order_by('publish_datetime')
    else:
        projects = PublishedProject.objects.filter(
            resource_type=resource_type).order_by('publish_datetime')
    data = []
    # Bug fix: guard the empty queryset -- projects[0] below would raise
    # IndexError when no matching project has been published yet.
    if not projects.exists():
        return JsonResponse(data, safe=False)
    # One entry per calendar year from the first publication until now.
    for year in range(projects[0].publish_datetime.year, timezone.now().year+1):
        y_projects = projects.filter(publish_datetime__year=year)
        data.append({"year":year, "num_projects":y_projects.count(),
                     "storage_size":sum(p.main_storage_size for p in y_projects)})
    return JsonResponse(data, safe=False)
def get_entity(name):
    """Get an entity by the given name.
    Args:
        name (str): namespace.entity_name
    Returns:
        OntologyEntity: The entity with the given name.
    """
    namespace_name, entity_name = name.split(".")
    namespace = _namespace_registry._get(namespace_name)
    return namespace._get(entity_name)
def tst():
    """Render the users/testing2 template.

    NOTE(review): original docstring said "members page" -- the template name
    suggests a testing page instead; confirm intended route/purpose.
    """
    return render_template('users/testing2.html')
def get_bq_col_type(col_type):
    """
    Return correct SQL column type representation.
    :param col_type: The type of column as defined in json schema files.
    :return: A SQL column type compatible with BigQuery; 'UNSET' for
        unrecognized types.
    """
    # Lookup table keyed by the lowercased schema type name.
    type_map = {
        'integer': 'INT64',
        'string': 'STRING',
        'float': 'FLOAT64',
        'numeric': 'DECIMAL',
        'time': 'TIME',
        'timestamp': 'TIMESTAMP',
        'date': 'DATE',
        'datetime': 'DATETIME',
        'bool': 'BOOL',
    }
    return type_map.get(col_type.lower(), 'UNSET')
def _do_boundary_search(search_term):
    """
    Execute full text search against all searchable boundary layers.
    """
    result = []
    query = _get_boundary_search_query(search_term)
    with connection.cursor() as cursor:
        # Substring match; the query presumably uses ILIKE/LIKE with %(term)s.
        wildcard_term = '%{}%'.format(search_term)
        cursor.execute(query, {'term': wildcard_term})
        wkb_r = WKBReader()
        # Expected row layout: (id, layer_code, name, rank, point_wkb)
        for row in cursor.fetchall():
            id = row[0]
            code = row[1]
            name = row[2]
            rank = row[3]
            # Decode the WKB geometry into a point for x/y extraction.
            point = wkb_r.read(row[4])
            layer = _get_boundary_layer_by_code(code)
            result.append({
                'id': id,
                'code': code,
                'text': name,
                'label': layer['short_display'],
                'rank': rank,
                'y': point.y,
                'x': point.x,
            })
    return result
def prices(identifier, start_date=None, end_date=None, frequency='daily',
           sort_order='desc'):
    """
    Get historical stock market prices or indices.
    Args:
        identifier: Stock market symbol or index
        start_date: Start date of prices (default no filter)
        end_date: Last date (default today)
        frequency: Frequency of prices: daily (default) | weekly | monthly |
                   quarterly | yearly
        sort_order: Order of prices: asc | desc (default)
    Returns:
        Dataset as a Pandas DataFrame indexed by date
    """
    frame = get('prices',
                identifier=identifier.upper(),
                start_date=start_date,
                end_date=end_date,
                frequency=frequency.lower(),
                sort_order=sort_order.lower())
    # Promote the 'date' column to a DatetimeIndex and drop the column itself.
    frame.index = pd.DatetimeIndex(frame.date)
    return frame.drop(columns='date')
import requests
import json
def verify_captcha(secret, response, remoteip=None):
    """
    From https://developers.google.com/recaptcha/docs/verify:
    secret: The shared key between your site and ReCAPTCHA.
    response: The user response token provided by the reCAPTCHA to the user
              and provided to your site on.
    remoteip (optional): The user's IP address.
    Returns True when Google confirms the token, False otherwise.
    """
    post_data = {
        'secret': secret,
        'response': response,
    }
    if remoteip:
        # Bug fix: the siteverify API parameter is named 'remoteip';
        # the previous key 'remote' was silently ignored by Google.
        post_data['remoteip'] = remoteip
    try:
        r = requests.post(
            GOOGLE_CAPTCHA_VERIFY_URL,
            data=post_data
        )
        if r.status_code != 200:
            logger.error('Google reCAPTCHA -> %d', r.status_code)
            return False
        # r.content (bytes) vs. r.text (str):
        # JSON parsing requires str,
        # logging of bytes is better because it will all be on one line
        # (e.g., as b'{\n  "success": true\n}')
        logger.debug('Google reCAPTCHA -> %s', r.content)
        result = json.loads(r.text)
        if not result['success']:
            logger.error('Google reCAPTCHA -> %s', r.content)
            return False
    except Exception:
        logger.exception('Error fetching or parsing Google reCAPTCHA verification')
        return False
    return True
def x1y1x2y2_to_xywh(x1y1x2y2):
    """Convert [x1 y1 x2 y2] box format to [ct_x ct_y w h] format.

    Widths and heights are inclusive (+1), matching the ndarray branch.
    """
    if isinstance(x1y1x2y2, (list, tuple)):
        # Single box given as a list of coordinates.
        assert len(x1y1x2y2) == 4
        # Bug fix: this branch previously computed ct_x from the y-coordinates
        # (indices 1/3) and w from the y-extent, disagreeing with both the
        # function name and the ndarray branch below.
        ct_x = (x1y1x2y2[0] + x1y1x2y2[2]) / 2
        ct_y = (x1y1x2y2[1] + x1y1x2y2[3]) / 2
        w = x1y1x2y2[2] - x1y1x2y2[0] + 1
        h = x1y1x2y2[3] - x1y1x2y2[1] + 1
        return ct_x, ct_y, w, h
    elif isinstance(x1y1x2y2, np.ndarray):
        # Multiple boxes given as a 2D ndarray: columns are (x1, y1, x2, y2).
        return np.hstack(
            ((x1y1x2y2[:, 0:2] + x1y1x2y2[:, 2:4]) / 2, x1y1x2y2[:, 2:4] - x1y1x2y2[:, 0:2] + 1)
        )
    else:
        raise TypeError('Argument x1y1x2y2 must be a list, tuple, or numpy array.')
def col(loc, strg):
    """
    Return the current column within a string, counting newlines as line
    separators.  The first column is number 1.
    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    :class:`ParserElement.parseString` for more
    information on parsing strings containing ``<TAB>`` s, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.
    """
    s = strg
    # A location sitting just after a newline is column 1 of the next line.
    if 0 < loc < len(s) and s[loc - 1] == "\n":
        return 1
    # Otherwise count characters since the most recent newline before loc
    # (rfind returns -1 when there is none, yielding a 1-based column).
    return loc - s.rfind("\n", 0, loc)
def create_model_fn(model_class, hparams, use_tpu=False):
    """Wraps model_class as an Estimator or TPUEstimator model_fn.
    Args:
      model_class: AstroModel or a subclass.
      hparams: ConfigDict of configuration parameters for building the model.
      use_tpu: If True, a TPUEstimator model_fn is returned. Otherwise an
        Estimator model_fn is returned.
    Returns:
      model_fn: A callable that constructs the model and returns a
        TPUEstimatorSpec if use_tpu is True, otherwise an EstimatorSpec.
    """
    # _ModelFn (defined elsewhere in this module) is a callable class whose
    # __call__ implements the Estimator model_fn signature.
    return _ModelFn(model_class, hparams, use_tpu)
from typing import Any
def boolean(value: Any) -> bool:
    """Validate and coerce a boolean value.

    Strings are matched case-insensitively against common truthy/falsy
    spellings; anything else falls back to Python truthiness.
    """
    truthy = {"1", "true", "yes", "on", "enable"}
    falsy = {"0", "false", "no", "off", "disable"}
    if isinstance(value, str):
        lowered = value.lower()
        if lowered in truthy:
            return True
        if lowered in falsy:
            return False
        # Note: the error message reports the lowered value, as before.
        raise vol.Invalid("invalid boolean value {}".format(lowered))
    return bool(value)
def unmap(data, count, inds, fill=0):
    """
    Unmap a subset of item (data) back to the original set of items (of size count)
    :param data: input data
    :param count: the total count of data
    :param inds: the selected indices of input data
    :param fill: filled value
    :return: unmaped data
    """
    # Nothing to scatter when the subset already covers the full set.
    if count == len(inds):
        return data
    # Works for both 1-D and N-D data: keep all trailing dimensions.
    out_shape = (count,) + data.shape[1:]
    ret = np.full(out_shape, fill, dtype=data.dtype)
    ret[inds] = data
    return ret
def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
    """This function process a bundle, apply effect to/from a repo
    It iterates over each part then searches for and uses the proper handling
    code to process the part. Parts are processed in order.
    Unknown Mandatory part will abort the process.
    It is temporarily possible to provide a prebuilt bundleoperation to the
    function. This is used to ensure output is properly propagated in case of
    an error during the unbundling. This output capturing part will likely be
    reworked and this ability will probably go away in the process.
    """
    if op is None:
        if transactiongetter is None:
            # Default: run without a real transaction.
            transactiongetter = _notransaction
        op = bundleoperation(repo, transactiongetter, source=source)
    # todo:
    # - replace this is a init function soon.
    # - exception catching
    # Accessing .params forces the bundle header/parameters to be read.
    unbundler.params
    if repo.ui.debugflag:
        msg = [b'bundle2-input-bundle:']
        if unbundler.params:
            msg.append(b' %i params' % len(unbundler.params))
        if op._gettransaction is None or op._gettransaction is _notransaction:
            msg.append(b' no-transaction')
        else:
            msg.append(b' with-transaction')
        msg.append(b'\n')
        repo.ui.debug(b''.join(msg))
    # Dispatch each part to its registered handler, in order.
    processparts(repo, op, unbundler)
    return op
def recursive_feature(G, f, n):
    """
    G: iGraph graph with annotations
    f: feature function to compute per vertex
    n: int, recursion level
    Computes the given function recursively on each vertex
    Current precondition: already have run the computation for G, f, n-1.

    Returns the per-vertex values wrapped as a 2-D np.matrix.
    NOTE(review): np.matrix is a legacy numpy type; callers appear to rely on
    its matrix semantics, so it is kept -- confirm before migrating to ndarray.
    """
    return np.matrix(recursive_feature_array(G,f,n))
import unicodedata
def remove_accents(string):
    """
    Removes unicode accents from a string, downgrading to the base character
    """
    # NFKD decomposition splits accented characters into base + combining
    # marks; dropping the combining marks leaves the plain base characters.
    decomposed = unicodedata.normalize('NFKD', string)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return u"".join(kept)
from typing import Optional
def __get_a0(  # pylint: disable=invalid-name
    n: int, a0: Optional[np.ndarray] = None
) -> np.ndarray:
    """
    Returns initial parameters for fitting algorithm.
    :param n: Number of parameters
    :param a0: Initial parameters value. Optional
    :return: nd.array
    """
    # Default to a vector of ones when no starting point was supplied.
    if a0 is None:
        return np.full(shape=n, fill_value=1.0)
    return a0
def convert_yaw_to_old_viewpoint(yaw):
    """ we initially had viewpoint coordinates inverted
    Converts a yaw angle (radians) back to the legacy viewpoint angle
    (degrees): theta = rad_to_deg((TAU/2 - yaw) mod TAU).
    Returns None for a None input.
    """
    if yaw is None:
        return None
    half_turn = TAU / 2
    theta_rad = (half_turn - yaw) % TAU
    return ut.rad_to_deg(theta_rad)
import os
def uploadimages(path_list,client,csv_list):
    """Uploads images to imgur.
    Arguments:
        path_list {list} -- [List of files at directory.]
        client {ImgurClient} -- [ImgurClient we get from authentication.]
        csv_list {list} -- [List of previously uploaded pictures; images whose
            title already appears here are skipped.]
    Returns:
        list -- [title, link] pairs for the images uploaded in this run.
    """
    #we create a dataframe to keep our imgur links
    url_list = []
    # for every image in path list
    iterator = 1
    try:
        for image in path_list:
            # Title is the file name without its 3-character extension.
            title = str(os.path.basename(image))[:-4]
            # Skip images already recorded in the csv (dataframe lookup).
            if csv_list[csv_list["title"]== title].empty == True:
                config = {
                    'title': title
                }
                img = client.upload_from_path(image, config = config)
                #upload as title and upload returns us link of url
                #print("title: " + title + " link: " + img['link'] + "\t\t\t" + str(iterator) + "/50")
                # NOTE(review): the "/50" suffix looks like a hard-coded batch
                # size -- confirm against the caller.
                print("title:{title}, link:{link} \t\t\t {iterator}/50".format(title=title, link=img['link'], iterator=str(iterator)))
                #we return list to main so we can save it
                url_list.append([title, img['link']])
                iterator = iterator + 1
            else:
                continue
    except imgurpython.helpers.error.ImgurClientRateLimitError:
        # Rate limited: return what was uploaded so far.
        # NOTE(review): "err1" is an unhelpful message -- consider improving.
        print("err1")
        return url_list
    except Exception as e:
        # Best-effort: report the error and return the partial result.
        print(e)
        return url_list
    return url_list
def extract_lexical_features_test(nlp, tweet_list):
    """Tokenise and POS-tag every tweet with the supplied spaCy pipeline.

    Args:
        nlp (spaCy model): Language processing pipeline
        tweet_list: iterable of raw tweet strings
    Returns:
        list of (token_text, pos_tag) tuples over all tweets, in order
    """
    tagged_tokens = []
    stream = (tweet for tweet in tweet_list)
    for doc in nlp.pipe(stream, batch_size=10000, n_threads=3):
        settings.logger.info(doc)
        tagged_tokens.extend((token.orth_, token.tag_) for token in doc)
    return tagged_tokens
def get_hostname():
    """Fetch the configured workbox hostname from the TurboGears config."""
    hostname = tg.config.get('workbox.hostname')
    return hostname
import http
def geocode_location(api_key, loc):
    """Resolve a location string via Google's geocoding API.

    :param api_key: Google API key passed as the ``key`` query parameter
    :param loc: free-form address string to geocode
    :return: parsed JSON response, or None when the request fails with IOError
    """
    try:
        return http.get_json(GEOCODING_URL, address=loc, key=api_key)
    except IOError:
        return None
def degree(f, *gens, **args):
    """
    Return the degree of ``f`` in the given variable.
    The degree of 0 is negative infinity.
    Examples
    ========
    >>> degree(x**2 + y*x + 1, gen=x)
    2
    >>> degree(x**2 + y*x + 1, gen=y)
    1
    >>> degree(0, x)
    -inf
    """
    # Reject any keyword option other than 'gen' up front.
    allowed_flags(args, ['gen'])
    try:
        # Convert the expression to an internal Poly; fails for input that
        # cannot be interpreted as a polynomial.
        (F,), opt = parallel_poly_from_expr((f,), *gens, **args)
    except PolificationFailed as exc:
        # Re-raise as the public error type, chaining the original cause.
        raise ComputationFailed('degree', 1, exc) from exc
    # sympify so callers get a SymPy object (e.g. -oo for the zero polynomial).
    return sympify(F.degree(opt.gen))
from typing import Dict
def fit_model_sector(sector_corpus: Dict[DocId, Token]) -> sbmtm:
    """Train an sbmtm topic model on a sector corpus.

    Args:
        sector_corpus: mapping from document id to tokenised description
    Returns:
        The fitted model
    """
    topic_model = sbmtm()
    doc_ids = list(sector_corpus.keys())
    token_lists = list(sector_corpus.values())
    topic_model.make_graph(token_lists, documents=doc_ids)
    topic_model.fit()
    return topic_model
def get_simulated_matches(path, met, sample_to_match, pop_var):
    """Draw cosmic initial conditions matched to a sample of stars.

    Parameters
    ----------
    path : `str`
        path to cosmic data
    met : `float`
        metallicity of cosmic data file
    sample_to_match : `DataFrame`
        population of stars with metallicities, ages, and positions
    pop_var : `int or str`
        population-variant tag (e.g. different qmin); 0 when there is no variant

    Returns
    -------
    matched : `DataFrame`
        cosmic initial conditions with assigned ages, positions, and metallicities
    """
    # Load the simulated binaries whose metallicity matches the sample;
    # only the initial conditions table is needed here.
    _, init_conditions = utils.sim_data_read(path=path, metallicity=met, var=pop_var)
    init_conditions['acc_lim'] = -1
    init_conditions['don_lim'] = -1
    # Resample (with replacement) to the size of the target sample, then
    # glue the sampled initial conditions to the sample columns row-by-row.
    matched = init_conditions.sample(len(sample_to_match), replace=True).reset_index(drop=True)
    matched = pd.concat([matched, sample_to_match.reset_index(drop=True)], axis=1)
    # Ages arrive in Gyr; store them in Myr.
    matched['assigned_age'] = np.array(sample_to_match['AGE'].values) * 1000
    return matched
def _get_url_ext(url: str):
    """Return the extension-like final segment of a URL's path.

    >>> _get_url_ext('http://example.com/blog/feed')
    'feed'
    >>> _get_url_ext('http://example.com/blog/feed.xml')
    'xml'
    >>> no_error = _get_url_ext('http://example.com')
    """
    try:
        path = urlparse(url).path.strip('/')
    except ValueError:
        # Malformed URL: no extension to report.
        return ''
    # Split the reversed path on the first separator, then un-reverse the piece.
    pieces = RE_URL_EXT_SEP.split(path[::-1], 1)
    return pieces[0][::-1] if pieces else ''
def connect_server():
    """Create a Gremlin client connected to Azure Cosmos DB.

    The account ID, database name, table (collection) name, and key
    placeholders below must be filled in before use.

    Args:
        None
    Returns:
        gremlin client instance for talking to the Cosmos DB server
    """
    gremlin_client = client.Client(
        'wss://<YOURID>.gremlin.cosmos.azure.com:443/', 'g',
        username="/dbs/<DB_NAME>/colls/<TABLE_NAME>/",
        password="<KEY_VALUE>",
        message_serializer=serializer.GraphSONSerializersV2d0()
    )
    return gremlin_client
def checkCpuTime(sleeptime=0.2):
    """Check if cpu time works correctly

    Verifies that ``process_time`` measures CPU time rather than wall time:
    sleeping should register (almost) no CPU time, while a busy-wait of the
    same duration should. A positive result is cached on the function
    attribute ``checkCpuTime.passed``.

    :param sleeptime: duration in seconds of each probe
    :return: True when CPU timing looks reliable; otherwise ``skip`` is
        called and the function implicitly returns None
    """
    # NOTE(review): `checkCpuTime.passed` is presumably initialised to False
    # elsewhere in this module -- confirm, otherwise the first call would
    # raise AttributeError.
    if checkCpuTime.passed:
        return True
    # First test that sleeping does not consume cputime
    start1 = process_time()
    sleep(sleeptime)
    t1 = process_time() - start1
    # secondly check by comparing to cpusleep (where we actually do something)
    start2 = process_time()
    cpu_sleep(sleeptime)
    t2 = process_time() - start2
    # Trust the clock only if the sleep probe shows essentially zero CPU time
    # and is at least 10x smaller than the busy-wait probe.
    if abs(t1) < 0.0001 and t1 < t2 / 10:
        checkCpuTime.passed = True
        return True
    skip("cpu time not reliable on this machine")
import logging
import os
import csv
def xmind_to_iwork_csv_file(xmind_file):
    """Convert an XMind file to an iWork-compatible CSV file.

    :param xmind_file: path to the source ``.xmind`` file (the output CSV is
        derived from it by replacing the ``.xmind`` suffix with ``_iwork.csv``)
    :return: path to the generated (or pre-existing) CSV file
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to iwork file...', xmind_file)
    testcases = get_xmind_testcase_list(xmind_file)
    fileheader = ["用例概要*", "用例描述", "测试步骤", "测试数据", "预期结果"]
    iwork_testcase_rows = [fileheader]
    for testcase in testcases:
        # Each testcase expands to one or more CSV rows (one per test step).
        iwork_testcase_rows.extend(gen_a_testcase_row_list(testcase))
    # NOTE: assumes the input path ends with '.xmind' (6 characters).
    iwork_file = xmind_file[:-6] + '_iwork' + '.csv'
    if os.path.exists(iwork_file):
        # Fixed log typo ("eiwork" -> "iwork").
        logging.info('The iwork csv file already exists, return it directly: %s', iwork_file)
        return iwork_file
    # gb18030 encoding so the Chinese headers open correctly in spreadsheets.
    with open(iwork_file, 'w', encoding='gb18030', newline="") as f:
        csv.writer(f).writerows(iwork_testcase_rows)
    logging.info('Convert XMind file(%s) to a iwork csv file(%s) successfully!', xmind_file, iwork_file)
    return iwork_file
def load_ply(path):
    """
    Loads a 3D mesh model from a PLY file (ASCII or binary encoding).

    :param path: A path to a PLY file.
    :return: The loaded model given by a dictionary with items:
    'pts' (nx3 ndarray), 'normals' (nx3 ndarray), 'colors' (nx3 ndarray),
    'faces' (mx3 ndarray) - the latter three are optional.
    """
    # Open in binary mode: the original text-mode 'r' broke binary PLY
    # payloads on Python 3 (struct.unpack requires bytes). Header and ASCII
    # data lines are decoded explicitly below.
    f = open(path, 'rb')

    def readline_text():
        # Decode one line and strip the trailing newline character(s).
        return f.readline().decode('ascii').rstrip('\n').rstrip('\r')

    n_pts = 0
    n_faces = 0
    face_n_corners = 3  # Only triangular faces are supported
    pt_props = []
    face_props = []
    is_binary = False
    header_vertex_section = False
    header_face_section = False

    # Read header
    while True:
        line = readline_text()
        if line.startswith('element vertex'):
            n_pts = int(line.split(' ')[-1])
            header_vertex_section = True
            header_face_section = False
        elif line.startswith('element face'):
            n_faces = int(line.split(' ')[-1])
            header_vertex_section = False
            header_face_section = True
        elif line.startswith('element'):  # Some other element
            header_vertex_section = False
            header_face_section = False
        elif line.startswith('property') and header_vertex_section:
            # (name of the property, data type)
            pt_props.append((line.split(' ')[-1], line.split(' ')[-2]))
        elif line.startswith('property list') and header_face_section:
            elems = line.split(' ')
            # (name of the property, data type)
            face_props.append(('n_corners', elems[2]))
            for i in range(face_n_corners):
                face_props.append(('ind_' + str(i), elems[3]))
        elif line.startswith('format'):
            if 'binary' in line:
                is_binary = True
        elif line.startswith('end_header'):
            break

    # Prepare data structures. (np.float alias was removed from NumPy;
    # plain float keeps the arrays float64 as before.)
    model = {}
    model['pts'] = np.zeros((n_pts, 3), float)
    if n_faces > 0:
        model['faces'] = np.zeros((n_faces, face_n_corners), float)

    pt_props_names = [p[0] for p in pt_props]

    is_normal = False
    if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
        is_normal = True
        model['normals'] = np.zeros((n_pts, 3), float)

    is_color = False
    if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
        is_color = True
        model['colors'] = np.zeros((n_pts, 3), float)

    formats = {  # For binary format
        'float': ('f', 4),
        'double': ('d', 8),
        'int': ('i', 4),
        'uchar': ('B', 1)
    }

    # Load vertices
    for pt_id in range(n_pts):
        prop_vals = {}
        load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz', 'red', 'green', 'blue']
        if is_binary:
            for prop in pt_props:
                format = formats[prop[1]]
                val = struct.unpack(format[0], f.read(format[1]))[0]
                if prop[0] in load_props:
                    prop_vals[prop[0]] = val
        else:
            elems = readline_text().split(' ')
            for prop_id, prop in enumerate(pt_props):
                if prop[0] in load_props:
                    prop_vals[prop[0]] = elems[prop_id]

        model['pts'][pt_id, 0] = float(prop_vals['x'])
        model['pts'][pt_id, 1] = float(prop_vals['y'])
        model['pts'][pt_id, 2] = float(prop_vals['z'])

        if is_normal:
            model['normals'][pt_id, 0] = float(prop_vals['nx'])
            model['normals'][pt_id, 1] = float(prop_vals['ny'])
            model['normals'][pt_id, 2] = float(prop_vals['nz'])

        if is_color:
            model['colors'][pt_id, 0] = float(prop_vals['red'])
            model['colors'][pt_id, 1] = float(prop_vals['green'])
            model['colors'][pt_id, 2] = float(prop_vals['blue'])

    # Load faces
    for face_id in range(n_faces):
        prop_vals = {}
        if is_binary:
            for prop in face_props:
                format = formats[prop[1]]
                val = struct.unpack(format[0], f.read(format[1]))[0]
                if prop[0] == 'n_corners':
                    if val != face_n_corners:
                        # Python 3 fix: print statements -> print() calls.
                        print('Error: Only triangular faces are supported.')
                        print('Number of face corners:', val)
                        exit(-1)
                else:
                    prop_vals[prop[0]] = val
        else:
            elems = readline_text().split(' ')
            for prop_id, prop in enumerate(face_props):
                if prop[0] == 'n_corners':
                    if int(elems[prop_id]) != face_n_corners:
                        print('Error: Only triangular faces are supported.')
                        print('Number of face corners:', int(elems[prop_id]))
                        exit(-1)
                else:
                    prop_vals[prop[0]] = elems[prop_id]

        model['faces'][face_id, 0] = int(prop_vals['ind_0'])
        model['faces'][face_id, 1] = int(prop_vals['ind_1'])
        model['faces'][face_id, 2] = int(prop_vals['ind_2'])

    f.close()
    return model
def parse_range(rng, dictvars=None):
    """Parse a string with an integer range and return the numbers, replacing special variables in dictvars.

    :param rng: range string such as ``"3-7"``, ``"5"``, or ``"a-b"`` where
        names are resolved through ``dictvars``
    :param dictvars: optional mapping from variable name to integer value
    :return: inclusive ``range`` covering the parsed bounds (order-agnostic)
    :raises ValueError: if the string does not split into one or two parts
    """
    # Avoid the shared-mutable-default pitfall: bind a fresh mapping per call.
    if dictvars is None:
        dictvars = {}
    parts = rng.split('-')
    if len(parts) not in [1, 2]:
        raise ValueError("Bad range: '%s'" % (rng,))
    # Named variables are substituted as-is; everything else must parse as int.
    parts = [dictvars[p] if p in dictvars else int(p) for p in parts]
    start = parts[0]
    end = parts[1] if len(parts) == 2 else start
    if start > end:
        start, end = end, start
    return range(start, end + 1)
from typing import Union
from typing import Iterable
from typing import List
from pathlib import Path
def relpaths(basepath: _path_t, pattern: Union[str, Iterable[_path_t]]) -> List[str]:
"""Convert a list of paths to relative paths
Parameters
----------
basepath : Union[str, Path]
Path to use as the reference when calculating relative paths
pattern : Union[str, Iterable[Union[str, Path]]]
Either a pattern relative to ``basepath`` to generate a list of paths,
or a list of paths to convert.
Returns
-------
List[str]
List of relative paths (as ``str``-s)
"""
if isinstance(pattern, str):
basepath = Path(basepath)
return [str(p.relative_to(basepath)) for p in basepath.glob(pattern)]
else: # iterable of "paths"
return [str(p.relative_to(basepath)) for p in map(Path, pattern)] | 80c9febd541d8fd1ab190b2ef36e8929ee387b08 | 3,632,364 |
def binary_backtests_returns(
    backtests: pd.DataFrame,
) -> pd.DataFrame:
    """
    Reduce a Horizon backtest frame to the sign of each period-over-period move.
    """
    deltas = backtests.diff()
    return deltas.apply(np.sign).dropna()
def calculate_students_features_csv(entry_id, xmodule_instance_args):
    """
    Compute student profile information for a course and upload the
    CSV to an S3 bucket for download.
    """
    # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
    action_name = ugettext_noop('generated')
    upload_task = partial(upload_students_csv, xmodule_instance_args)
    return run_main_task(entry_id, upload_task, action_name)
def colorMap(value, name="jet", vmin=None, vmax=None):
    """Map a real value in range [vmin, vmax] to a (r,g,b) color scale.
    :param value: scalar value to transform into a color
    :type value: float, list
    :param name: color map name
    :type name: str, matplotlib.colors.LinearSegmentedColormap
    :param vmin: lower bound of the value range; for sequence input it
        defaults to min(value). NOTE(review): for scalar input vmin/vmax
        must be provided, otherwise the arithmetic below raises TypeError.
    :param vmax: upper bound of the value range (see vmin note)
    :return: (r,g,b) color, or a list of (r,g,b) colors.
    .. note:: Most frequently used color maps:
        |colormaps|
        Matplotlib full list:
        .. image:: https://matplotlib.org/1.2.1/_images/show_colormaps.png
    .. tip:: Can also use directly a matplotlib color map:
        :Example:
            .. code-block:: python
                from vtkplotter import colorMap
                import matplotlib.cm as cm
                print( colorMap(0.2, cm.flag, 0, 1) )
                (1.0, 0.809016994374948, 0.6173258487801733)
    """
    # Fall back to a flat gray when matplotlib's colormaps are unavailable
    # (_mapscales is populated at module import -- not visible here).
    if not _mapscales:
        print("-------------------------------------------------------------------")
        print("WARNING : cannot import matplotlib.cm (colormaps will show up gray).")
        print("Try e.g.: sudo apt-get install python3-matplotlib")
        print(" or : pip install matplotlib")
        print(" or : build your own map (see example in basic/mesh_custom.py).")
        return (0.5, 0.5, 0.5)
    # Accept either a colormap object directly or a name to look up.
    if isinstance(name, matplotlib.colors.LinearSegmentedColormap):
        mp = name
    else:
        mp = cm_mpl.get_cmap(name=name)
    if _isSequence(value):
        # Vector input: normalise all values into [0, 1] and map each one.
        values = np.array(value)
        if vmin is None:
            vmin = np.min(values)
        if vmax is None:
            vmax = np.max(values)
        values = np.clip(values, vmin, vmax)
        values -= vmin
        values = values / (vmax - vmin)
        cols = []
        # NOTE(review): this get_cmap call re-resolves `name` and overrides a
        # colormap object passed via `name` -- likely redundant/buggy.
        mp = cm_mpl.get_cmap(name=name)
        for v in values:
            cols.append(mp(v)[0:3])
        return np.array(cols)
    else:
        # Scalar input: normalise and clamp into [0, 0.999].
        value -= vmin
        value /= vmax - vmin
        if value > 0.999:
            value = 0.999
        elif value < 0:
            value = 0
        return mp(value)[0:3]
def save_genre(row: dict):
    """Genre's control and save in data base.

    Looks up a Genres row by ``row['genre_name']`` and creates (and commits)
    a new one when absent. Relies on a module-level SQLAlchemy ``session``.

    :param row: mapping providing the genre name under 'genre_name'
    :return: the existing or newly created Genres instance; returns None
        implicitly when multiple rows already match (only a message is
        printed in that case -- callers must tolerate a None result)
    """
    try:
        result: Genres = session.query(Genres) \
            .filter(Genres.name == row.get('genre_name')) \
            .one()
        return result
    except MultipleResultsFound:
        # Duplicate genres already exist; nothing is returned on this path.
        print('Такой жанр уже есть в базе')
    except NoResultFound:
        # Genre not present yet: insert and persist it.
        result: Genres = Genres(name=row.get('genre_name'))
        session.add(result)
        session.commit()
        return result
def specializations(examples_so_far, h):
    """Specialize the hypothesis by adding AND operations to the disjunctions.

    For every disjunct in ``h`` and every attribute/value pair seen in the
    examples (skipping attributes already constrained and the GOAL label),
    propose a copy of ``h`` with the negated value added, keeping only the
    candidates consistent with all examples. The result is shuffled.
    """
    candidates = []
    for idx, disjunct in enumerate(h):
        for example in examples_so_far:
            for attr, val in example.items():
                if attr in disjunct or attr == 'GOAL':
                    continue
                extended = h[idx].copy()
                extended[attr] = '!' + val
                candidate = h.copy()
                candidate[idx] = extended
                if check_all_consistency(examples_so_far, candidate):
                    candidates.append(candidate)
    shuffle(candidates)
    return candidates
def formulate_contingency(problem: LpProblem, numerical_circuit: OpfTimeCircuit, flow_f, ratings, LODF, monitor,
                          lodf_tolerance):
    """
    Add N-1 contingency overload constraints to the LP problem.

    :param problem: PuLP problem that receives the constraints
    :param numerical_circuit: circuit snapshot providing branch data
    :param flow_f: branch-flow LP variables indexed [branch, time]
    :param ratings: branch rating matrix, shape (n_branches, n_time)
    :param LODF: line outage distribution factors, shape (n_branches, n_branches)
    :param monitor: per-branch flags; constraints are built only for monitored branches
    :param lodf_tolerance: LODF magnitudes below this threshold are skipped
    :return: (contingency flow expressions, from-to overload slack variables,
        to-from overload slack variables, list of (t, m, c) index triples)
    """
    nbr, nt = ratings.shape
    # get the indices of the branches marked for contingency
    con_br_idx = numerical_circuit.branch_data.get_contingency_enabled_indices()
    # formulate contingency flows
    # this is done in a separated loop because all te flow variables must exist beforehand
    flow_lst = list()
    indices = list()  # (t, m, contingency_m)
    overload1_lst = list()
    overload2_lst = list()
    for t, m in product(range(nt), range(nbr)):  # for every branch
        if monitor[m]:  # the monitor variable is pre-computed in the previous loop
            _f = numerical_circuit.branch_data.F[m]
            _t = numerical_circuit.branch_data.T[m]
            for ic, c in enumerate(con_br_idx):  # for every contingency
                # skip self-contingency and negligible outage influence
                if m != c and abs(LODF[m, c]) >= lodf_tolerance:
                    # compute the N-1 flow
                    contingency_flow = flow_f[m, t] + LODF[m, c] * flow_f[c, t]
                    # rating restriction in the sense from-to
                    overload1 = LpVariable("n-1_overload1_{0}_{1}_{2}".format(t, m, c), 0, 99999)
                    problem.add(contingency_flow <= (ratings[m, t] + overload1),
                                "n-1_ft_up_rating_{0}_{1}_{2}".format(t, m, c))
                    # rating restriction in the sense to-from
                    overload2 = LpVariable("n-1_overload2_{0}_{1}_{2}".format(t, m, c), 0, 99999)
                    problem.add((-ratings[m, t] - overload2) <= contingency_flow,
                                "n-1_tf_down_rating_{0}_{1}_{2}".format(t, m, c))
                    # store the variables
                    flow_lst.append(contingency_flow)
                    overload1_lst.append(overload1)
                    overload2_lst.append(overload2)
                    indices.append((t, m, c))
    return flow_lst, overload1_lst, overload2_lst, indices
def max_pooling3d(inputs,
                  pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
    """Functional wrapper that applies 3D max pooling to ``inputs``.

    Arguments:
        inputs: The tensor over which to pool. Must have rank 5.
        pool_size: int or tuple/list of 3 ints
            (pool_depth, pool_height, pool_width); a single int is used for
            all spatial dimensions.
        strides: int or tuple/list of 3 ints giving the pooling strides;
            a single int is used for all spatial dimensions.
        padding: 'valid' or 'same' (case-insensitive).
        data_format: 'channels_last' for (batch, depth, height, width,
            channels) inputs, or 'channels_first' for
            (batch, channels, depth, height, width).
        name: optional layer name.
    Returns:
        Output tensor.
    Raises:
        ValueError: if eager execution is enabled.
    """
    pooling_layer = MaxPooling3D(pool_size=pool_size,
                                 strides=strides,
                                 padding=padding,
                                 data_format=data_format,
                                 name=name)
    return pooling_layer.apply(inputs)
def parareal_engine(x_0, U, num_iter, coarse_model="learned"):
    """Rolls out a trajectory using Parareal
    Args:
        x_0: Initial state
        U: Control sequence
        num_iter: Number of Parareal iterations to use for prediction
        coarse_model: Learned/Analytical coarse model
    Returns:
        X: The corresponding state sequence

    Notes:
        Writes the full iteration history into the module-level global ``Q``
        (shape: (num_iter+1, num_time_slices+1, state_dim)) and uses a
        module-level process ``pool`` for the fine integrations.
    """
    global Q
    num_time_slices = U.shape[0]
    Q = np.zeros((num_iter+1, num_time_slices+1, N_QPOS+N_QVEL))
    # Every iteration starts from the same initial state.
    for k in range(num_iter+1):
        Q[k,0] = x_0.copy()
    # Find the initial coarse predictions across time slices
    for p in range(num_time_slices):
        if coarse_model != "learned":
            Q[0,p+1] = coarse_int_analytical(Q[0,p], U[p])
        else:
            Q[0,p+1] = coarse_int_learned(Q[0,p], U[p])
    # Parareal iterations
    for k in range(1,num_iter+1):
        # Run the expensive fine integrator on all slices in parallel,
        # starting from the previous iteration's states.
        pool_input = []
        for p in range (num_time_slices):
            pool_input.append([Q[k-1,p], U[p]])
        pool_input = np.array(pool_input)
        fine_predictions = pool.map(parallel_fine, pool_input)
        # Parareal correction: coarse(new) - coarse(old) + fine(old).
        for p in range(num_time_slices):
            if coarse_model != "learned":
                Q[k, p+1] = coarse_int_analytical(Q[k,p], U[p])- coarse_int_analytical(Q[k-1,p], U[p]) + fine_predictions[p]
            else:
                # Learned coarse model
                Q[k, p+1] = coarse_int_learned(Q[k,p], U[p])- coarse_int_learned(Q[k-1,p], U[p]) + fine_predictions[p]
    X = Q[num_iter].copy()
    return X
def plot_posterior_op(trace_values, ax, kde_plot, point_estimate, round_to,
                      alpha_level, ref_val, rope, text_size=16, **kwargs):
    """Artist to draw posterior.

    Draws a KDE or histogram of ``trace_values`` on ``ax`` and annotates it
    with the HPD interval, an optional point estimate, and optional
    reference-value / ROPE markers. All helpers below close over ``ax`` and
    ``plot_height``, so the draw order (plot first, annotate after) matters.
    """
    def format_as_percent(x, round_to=0):
        # e.g. 0.953 -> '95.3%' at round_to=1
        return '{0:.{1:d}f}%'.format(100 * x, round_to)
    def display_ref_val(ref_val):
        # Probability mass on either side of the reference value.
        less_than_ref_probability = (trace_values < ref_val).mean()
        greater_than_ref_probability = (trace_values >= ref_val).mean()
        ref_in_posterior = "{} <{:g}< {}".format(
            format_as_percent(less_than_ref_probability, 1),
            ref_val,
            format_as_percent(greater_than_ref_probability, 1))
        ax.axvline(ref_val, ymin=0.02, ymax=.75, color='g',
                   linewidth=4, alpha=0.65)
        ax.text(trace_values.mean(), plot_height * 0.6, ref_in_posterior,
                size=text_size, horizontalalignment='center')
    def display_rope(rope):
        # Region Of Practical Equivalence: thick red bar with its bounds.
        ax.plot(rope, (plot_height * 0.02, plot_height * 0.02),
                linewidth=20, color='r', alpha=0.75)
        text_props = dict(size=text_size, horizontalalignment='center', color='r')
        ax.text(rope[0], plot_height * 0.14, rope[0], **text_props)
        ax.text(rope[1], plot_height * 0.14, rope[1], **text_props)
    def display_point_estimate():
        if not point_estimate:
            return
        if point_estimate not in ('mode', 'mean', 'median'):
            raise ValueError(
                "Point Estimate should be in ('mode','mean','median')")
        if point_estimate == 'mean':
            point_value = trace_values.mean()
        elif point_estimate == 'mode':
            # Continuous traces: take the argmax of a KDE; discrete traces:
            # use the statistical mode of rounded values.
            if isinstance(trace_values[0], float):
                density, l, u = fast_kde(trace_values)
                x = np.linspace(l, u, len(density))
                point_value = x[np.argmax(density)]
            else:
                point_value = mode(trace_values.round(round_to))[0][0]
        elif point_estimate == 'median':
            point_value = np.median(trace_values)
        point_text = '{point_estimate}={point_value:.{round_to}f}'.format(point_estimate=point_estimate,
                                                                          point_value=point_value, round_to=round_to)
        ax.text(point_value, plot_height * 0.8, point_text,
                size=text_size, horizontalalignment='center')
    def display_hpd():
        # Highest posterior density interval: black bar + bound labels.
        hpd_intervals = hpd(trace_values, alpha=alpha_level)
        ax.plot(hpd_intervals, (plot_height * 0.02,
                                plot_height * 0.02), linewidth=4, color='k')
        ax.text(hpd_intervals[0], plot_height * 0.07,
                hpd_intervals[0].round(round_to),
                size=text_size, horizontalalignment='right')
        ax.text(hpd_intervals[1], plot_height * 0.07,
                hpd_intervals[1].round(round_to),
                size=text_size, horizontalalignment='left')
        ax.text((hpd_intervals[0] + hpd_intervals[1]) / 2, plot_height * 0.2,
                format_as_percent(1 - alpha_level) + ' HPD',
                size=text_size, horizontalalignment='center')
    def format_axes():
        # Strip the y axis and most spines for a clean posterior plot.
        ax.yaxis.set_ticklabels([])
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(True)
        ax.yaxis.set_ticks_position('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.tick_params(axis='x', direction='out', width=1, length=3,
                       color='0.5', labelsize=text_size)
        ax.spines['bottom'].set_color('0.5')
    def set_key_if_doesnt_exist(d, key, value):
        # Like dict.setdefault, but without returning the value.
        if key not in d:
            d[key] = value
    # Draw the distribution: KDE for continuous traces, histogram otherwise.
    if kde_plot and isinstance(trace_values[0], float):
        kdeplot(trace_values, alpha=kwargs.pop('alpha', 0.35), ax=ax, **kwargs)
    else:
        set_key_if_doesnt_exist(kwargs, 'bins', 30)
        set_key_if_doesnt_exist(kwargs, 'edgecolor', 'w')
        set_key_if_doesnt_exist(kwargs, 'align', 'right')
        ax.hist(trace_values, **kwargs)
    # Annotation y-positions are expressed as fractions of the plot height.
    plot_height = ax.get_ylim()[1]
    format_axes()
    display_hpd()
    display_point_estimate()
    if ref_val is not None:
        display_ref_val(ref_val)
    if rope is not None:
        display_rope(rope)
from re import M
from re import escape
from re import sub
def change_theme(file: str, theme_name: str, logfile: str) -> bool:
    """
    Change Oh My ZSH Theme.

    Rewrites the ``ZSH_THEME="..."`` line in *file* to use *theme_name*.

    :param file: path to the .zshrc (or similar) config file
    :param theme_name: theme name to activate
    :param logfile: log file passed through to the file helpers
    :return: True when the theme line was found and rewritten, else False
    """
    # BUG FIX: `sub` was imported from `operator` (binary subtraction) but is
    # used as a regex substitution with a `flags` keyword, which raised
    # TypeError at runtime. Import re.sub (and re.escape) instead.
    theme_info = get_zsh_theme(file, logfile)
    if theme_info:
        current_file = read_file_log(file, logfile)
        current_theme = theme_info[1]
        new_theme = f'ZSH_THEME="{theme_name}"'
        # Escape the current line so any regex metacharacters match literally.
        new_file = sub(escape(current_theme), new_theme, current_file, flags=M)
        create_file(new_file, file, force=True)
        return True
    return False
def check_tensor(data):
    """Ensure that data is a numpy 4D array, promoting 2D/3D inputs."""
    assert isinstance(data, np.ndarray)
    if data.ndim not in (2, 3, 4):
        raise RuntimeError('data must be a numpy 4D array')
    # Prepend singleton axes until the array is 4D.
    while data.ndim < 4:
        data = data[np.newaxis, ...]
    assert data.ndim == 4
    return data
from typing import Type
from typing import Optional
def _find_first_ref(ref: Ref, message_type: Type[B]) -> Optional[B]:
    """ Finds and returns (if exists) the first instance of the specified message type
    within the specified Ref.

    :param ref: reference whose body chain is walked
    :param message_type: message class to search for
    :return: the first matching body, or None when absent or when the chain
        terminates at a Concat/HTTP message
    """
    # Get the body message, if it exists
    body = _get_ref_body(ref)
    if isinstance(body, message_type):
        return body
    if not body or isinstance(body, (Concat, HTTP)):
        # If the body message is a Concat or HTTP message stop recursion.
        return None
    # Descend into the nested reference.
    # NOTE(review): assumes every non-terminal body exposes an `.inner` Ref --
    # confirm against the message-type definitions.
    return _find_first_ref(body.inner, message_type)
def verify_activation_token(*, uidb64, token):
    """
    Verify an account-activation token and activate the user on success.

    :param uidb64: (str) Base 64 of user PK.
    :param token: (str) Hash.
    :return: (bool) True if user verified, False otherwise.
    """
    # Decode the base64-encoded primary key back to its original form.
    user_id = force_text(urlsafe_base64_decode(uidb64))
    try:
        user = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        # Unknown user id: treat as verification failure.
        return False
    else:
        # activate user if token is alright
        if default_token_generator.check_token(user=user, token=token):
            # Idempotent: only write when the user is not yet active.
            if not user.is_active:
                user.is_active = True
                user.save()
            return True
        else:
            return False
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)
    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.
    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength
    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    loss = 0.0
    dW = np.zeros_like(W)
    (D, C) = W.shape
    N = X.shape[0]
    # Class scores for the whole minibatch: N x C matrix.
    scores = X.dot(W)
    for i in range(N):
        scores_i = scores[i, :]
        # Shift by the row max for numerical stability (softmax is
        # shift-invariant, so the loss is unchanged).
        scores_i -= np.max(scores_i, axis=-1)
        P_i = np.exp(scores_i)
        P_i /= np.sum(P_i, axis=-1)
        # Cross-entropy loss of the true class.
        loss += -np.log(P_i[y[i]])
        # Gradient w.r.t. the scores is P - one_hot(y); accumulate x_i outer d.
        dscores_i = P_i
        dscores_i[y[i]] -= 1
        dW += X[i, :].reshape((D, 1)).dot(dscores_i.reshape((1, C)))
    # Average over the minibatch instead of summing.
    loss /= N
    dW /= N
    # L2 regularization term and its gradient.
    loss += 0.5 * reg * np.sum(W * W)
    dW += reg * W
    return loss, dW
def read_protein_from_file(file_pointer):
    """Parse one protein record from an open ProteinNet-style text file.

    Sections are introduced by bracketed tags ([ID], [PRIMARY], ...); a blank
    line terminates the record and an empty read signals end of file.

    The algorithm Defining Secondary Structure of Proteins (DSSP) uses information on e.g. the
    position of atoms and the hydrogen bonds of the molecule to determine the secondary structure
    (helices, sheets...).

    :param file_pointer: open text file positioned at the start of a record
    :return: dict with keys among 'id', 'primary', 'seq', 'evolutionary',
        'secondary', 'tertiary', 'mask'; None at end of file
    """
    dict_ = {}
    # Encodings for DSSP secondary-structure labels and mask characters.
    _dssp_dict = {'L': 0, 'H': 1, 'B': 2, 'E': 3, 'G': 4, 'I': 5, 'T': 6, 'S': 7}
    _mask_dict = {'-': 0, '+': 1}
    while True:
        next_line = file_pointer.readline()
        if next_line == '[ID]\n':
            # [:-1] strips the trailing newline on all single-line payloads.
            id_ = file_pointer.readline()[:-1]
            dict_.update({'id': id_})
        elif next_line == '[PRIMARY]\n':
            primary_str = file_pointer.readline()[:-1]
            primary = encode_primary_string(primary_str)
            dict_.update({'primary': primary})
            dict_.update({'seq': primary_str})
        elif next_line == '[EVOLUTIONARY]\n':
            # One line of floats per amino-acid type (21 rows: 20 AAs + gap).
            evolutionary = []
            for _residue in range(21):
                evolutionary.append([float(step) for step in file_pointer.readline().split()])
            dict_.update({'evolutionary': evolutionary})
        elif next_line == '[SECONDARY]\n':
            secondary = list([_dssp_dict[dssp] for dssp in file_pointer.readline()[:-1]])
            dict_.update({'secondary': secondary})
        elif next_line == '[TERTIARY]\n':
            tertiary = []
            # 3 dimension
            for _axis in range(3):
                tertiary.append([float(coord) for coord in file_pointer.readline().split()])
            dict_.update({'tertiary': tertiary})
        elif next_line == '[MASK]\n':
            mask = list([_mask_dict[aa] for aa in file_pointer.readline()[:-1]])
            dict_.update({'mask': mask})
        elif next_line == '\n':
            # Blank separator line: record complete.
            return dict_
        elif next_line == '':
            # EOF: no more records.
            return None
def match(string, rule='IRI_reference'):
    """Check whether ``string`` fully matches the grammar rule ``rule``.

    Returns a match object or None::
        >>> assert match('%C7X', 'pct_encoded') is None
        >>> assert match('%C7', 'pct_encoded')
        >>> assert match('%c7', 'pct_encoded')
    """
    # Anchor the rule's pattern so the whole string must match.
    anchored = get_compiled_pattern('^%%(%s)s$' % rule)
    return anchored.match(string)
from typing import List
def verify_td3(mrz: List[str]) -> bool:
    """Verify a TD3 (passport-format) MRZ.

    :param mrz: two 44-character MRZ lines
    :return: True when the document code and every check digit are valid
    """
    if mrz[0][0] != "P":
        return False
    # if mrz[0][1]:  # At the discretion of the issuing State or organization or "<"
    # if mrz[0][2:5]:  # ISSUING STATE OR ORGANIZATION
    # if mrz[0][5:44]:  # NAME
    if calculate_check_digit(mrz[1][0:9]) != mrz[1][9]:  # Passport number
        return False
    # if mrz[1][10:13]:  # NATIONALITY
    if calculate_check_digit(mrz[1][13:19]) != mrz[1][19]:  # Date of birth
        return False
    # if mrz[1][20]:  # SEX
    if calculate_check_digit(mrz[1][21:27]) != mrz[1][27]:  # Date of expiry
        return False
    # Personal number or other optional data elements.
    # BUG FIX: the original condition `(cd != "<" or cd != "0")` was always
    # True, so any document with an empty optional field failed. Per ICAO
    # Doc 9303, an empty optional field may carry '<' or '0' as check digit.
    optional_data = mrz[1][28:42]
    optional_check = mrz[1][42]
    if optional_data == "<" * 14:
        if optional_check not in ("<", "0"):
            return False
    elif calculate_check_digit(optional_data) != optional_check:
        return False
    # Composite check digit over document number, DOB, expiry and optional data.
    composite_check_line = mrz[1][0:10] + mrz[1][13:20] + mrz[1][21:43]
    if calculate_check_digit(composite_check_line) != mrz[1][43]:
        return False
    return True
def compute_pwcca(acts1, acts2, epsilon=0.):
    """ Computes projection weighting for weighting CCA coefficients
    Args:
        acts1: 2d numpy array, shaped (neurons, num_datapoints)
        acts2: 2d numpy array, shaped (neurons, num_datapoints)
        epsilon: small constant forwarded to the CCA computation
    Returns:
        (weighted mean of the CCA coefficients, the projection weights,
         the raw CCA coefficients)
    """
    sresults = cca_core.get_cca_similarity(acts1, acts2, epsilon=epsilon,
                                           compute_dirns=False, compute_coefs=True, verbose=False)
    # Project onto whichever side kept fewer directions.
    if np.sum(sresults["x_idxs"]) <= np.sum(sresults["y_idxs"]):
        dirns = np.dot(sresults["coef_x"],
                       (acts1[sresults["x_idxs"]] - \
                        sresults["neuron_means1"][sresults["x_idxs"]])) + sresults["neuron_means1"][sresults["x_idxs"]]
        coefs = sresults["cca_coef1"]
        acts = acts1
        idxs = sresults["x_idxs"]
    else:
        # BUG FIX: this branch projects the acts2 side, so it must read acts2
        # (the original erroneously indexed acts1 with y-side indices/means).
        dirns = np.dot(sresults["coef_y"],
                       (acts2[sresults["y_idxs"]] - \
                        sresults["neuron_means2"][sresults["y_idxs"]])) + sresults["neuron_means2"][sresults["y_idxs"]]
        coefs = sresults["cca_coef2"]
        acts = acts2
        idxs = sresults["y_idxs"]
    # Weight each CCA direction by how much of the activations it explains.
    P, _ = np.linalg.qr(dirns.T)
    weights = np.sum(np.abs(np.dot(P.T, acts[idxs].T)), axis=1)
    weights = weights/np.sum(weights)
    return np.sum(weights*coefs), weights, coefs
def read_table(srm_file):
    """
    Reads SRM compositional data from file.
    For file format information, see:
    http://latools.readthedocs.io/en/latest/users/configuration/srm-file.html
    Parameters
    ----------
    srm_file : str
        Path to SRM file.
    Returns
    -------
    SRM compositions : pandas.DataFrame
        Indexed by SRM name, with all-NaN rows dropped.
    """
    srm_frame = pd.read_csv(srm_file)
    srm_frame = srm_frame.set_index('SRM')
    return srm_frame.dropna(how='all')
def walker_method(for_class=object, methods_list=None):
    """Decorator factory registering a walker for instances of ``for_class``.

    The registered entry is a guard function that invokes the walker only
    when the walked thing is an instance of ``for_class`` (None otherwise).
    The walker itself is returned unchanged by the decorator.
    """
    if methods_list is None:  # handle early binding of defaults
        methods_list = fallback_walker_method_list
    def wrap(walker):
        def guarded(thing):
            # Dispatch on type: only matching instances reach the walker.
            if isinstance(thing, for_class):
                return walker(thing)
            return None
        methods_list.append(guarded)
        return walker
    return wrap
import pandas as pd
from IPython.display import display
def get_data(name_dataset='index',
             verbose=True,
             address="../datasets/",):
    """Load a CSV dataset from the git repository.

    :param name_dataset: dataset name without the ``.csv`` extension (str)
    :param verbose: when True, display the first rows of the loaded frame
    :param address: path or URL prefix of the dataset directory
        (expected to end with a separator)
    :return frame: the loaded dataset (pandas.DataFrame)
    """
    full_address = "{}{}.csv".format(address, name_dataset)
    frame = pd.read_csv(full_address)
    if verbose:
        display(frame.head())
    return frame
import torch
def copy_valid_indices(
    acts,  # type: torch.Tensor
    target,  # type: List[List[int]]
    act_lens,  # type: List[int]
    valid_indices,  # type: List[int]
):
    # type: (...) -> (torch.Tensor, List[List[int]], List[int])
    """Copy the CTC inputs without the erroneous samples.

    Selects only the batch entries listed in ``valid_indices`` from the
    activations, targets, and activation lengths.

    Bug fix: the empty ``valid_indices`` branch used to return a 4-tuple
    ``(None, [], [], [])`` while the normal path (and the type comment)
    produce a 3-tuple; it now consistently returns ``(None, [], [])``.
    """
    if len(valid_indices) == 0:
        return None, [], []
    valid_indices = torch.tensor(valid_indices, device=acts.device)
    return (
        # Note: The batch size must be in the second dimension
        torch.index_select(acts, 1, valid_indices),
        [target[i] for i in valid_indices],
        [act_lens[i] for i in valid_indices],
    )
def is_odd(num: int) -> bool:
    """Is num odd?

    :param num: number to check.
    :type num: int
    :returns: True if num is odd.
    :rtype: bool
    :raises: ``TypeError`` if num is not an int.
    """
    if isinstance(num, int):
        # Python's % always yields a non-negative remainder for a
        # positive modulus, so this is correct for negative inputs too.
        return num % 2 == 1
    raise TypeError("{} is not an int".format(num))
def cross_entropy_loss(y_hat, y):
    """
    Cross entropy loss.

    y_hat: predicted y after softmax, shape (M, d), M is the # of samples
    y: one-hot labels, shape (M, d)

    Returns the mean loss over samples and the gradient w.r.t. y_hat.
    """
    per_sample = np.sum(-y * np.log(y_hat), axis=-1)
    loss = np.mean(per_sample)
    # Gradient of softmax + cross-entropy w.r.t. the pre-softmax output.
    grad = y_hat - y
    return loss, grad
import string
def string_list(resource_name, encoding='utf-8'):
    """Package resource wrapper returning resource contents as a list of
    text strings.

    Delegates to the ``string`` helper and splits the decoded contents
    on line boundaries.

    Params:
        resource_name: Relative path to the resource in the package (from
            the package root).
        encoding: Name of the encoding for interpreting the binary
            string; None refers to the default (typically UTF-8).

    Returns: contents of the resource as a list of text strings.
    """
    # NOTE(review): the module-level ``import string`` shadows the
    # ``string`` helper called here — verify the real module imports the
    # helper, not the stdlib module.
    contents = string(resource_name, encoding)
    return contents.splitlines()
import os
def get_plugin_names():
    """Return the list of names of registered plugins.

    Reads the registry file (module-level ``regfile``) when it exists and
    returns its non-empty, whitespace-stripped lines.
    """
    names = []
    # Registry is stored as one plugin name per line.
    if os.path.isfile(regfile):
        with open(regfile, 'rb') as fh:
            names = fh.read().decode().splitlines()
    # Strip whitespace and drop blank entries in one pass.
    stripped = (entry.strip() for entry in names)
    return [entry for entry in stripped if entry]
from typing import Optional
def rpc_get_name() -> Optional[str]:
    """Retrieve the JsonRpc id name.

    Reads the module-level ``_RpcName``; no ``global`` declaration is
    required for read-only access.
    """
    return _RpcName
def _epd_platform_from_raw_spec(raw_spec):
    """ Create an EPDPlatform instance from the metadata info returned by
    parse_rawspec.

    Returns None when no platform is defined (both 'platform' and
    'osdist' are None).
    """
    platform = raw_spec[_TAG_PLATFORM]
    osdist = raw_spec[_TAG_OSDIST]
    if platform is None and osdist is None:
        # Platform-agnostic spec: nothing to build.
        return None
    # Older specs may lack the ABI/PEP 425 tags, hence the 'None' defaults.
    return EPDPlatform._from_spec_depend_data(
        platform=platform,
        osdist=osdist,
        arch_name=raw_spec[_TAG_ARCH],
        platform_abi=raw_spec.get(_TAG_PLATFORM_ABI, 'None'),
        platform_tag=raw_spec.get(_TAG_PLATFORM_PEP425_TAG, 'None'),
        python_version=raw_spec[_TAG_PYTHON])
def index(request):
    """The home page for Distance Tracker."""
    if not request.user.is_authenticated:
        # Anonymous visitors get a placeholder link instead of stats.
        context = {'link': 'https://www.youtube.com/watch?v=tENiCpaIk9A'}
        return render(request, 'distances/index.html', context)
    today = date.today()
    week_no = today.isocalendar()[1]
    month_no = today.month
    year_no = today.year
    week_exercises = Exercise.objects.filter(
        owner=request.user, date__week=week_no,
        date__year=year_no).all().order_by('-date')
    month_exercises = Exercise.objects.filter(
        owner=request.user, date__month=month_no,
        date__year=year_no).all().order_by('-date')
    latest_ten = Exercise.objects.filter(
        owner=request.user).all().order_by('-date')[:10]
    # Aggregate weekly/monthly distance and time totals.
    weekly_distance = Stats.totals(week_exercises)
    weekly_time = Stats.totaltime(week_exercises)
    monthly_distance = Stats.totals(month_exercises)
    monthly_time = Stats.totaltime(month_exercises)
    ret_url = 'distances:index'
    if request.method == 'POST':
        # A new-exercise submission came in via the modal form.
        modal = new_exercise_modal(request, ret_url)
        if modal[1]:
            return HttpResponseRedirect(reverse(modal[2]))
        form = modal[0]
    else:
        form = ExerciseForm()
    context = {'dist': weekly_distance, 'time': weekly_time,
               'distm': monthly_distance, 'timem': monthly_time,
               'exercises': latest_ten, 'form': form,
               'subsports': spo.get_sports_json()}
    return render(request, 'distances/index.html', context)
import runpy
import imp
def mod_from_file(mod_name, path):
    """Runs the Python code at path, returns a new module with the resulting globals.

    :param mod_name: name to give the new module (also used as __name__
        while the file executes).
    :param path: filesystem path of the Python source to execute.
    :return: a fresh module object whose namespace holds the globals
        produced by running the file.
    """
    # Local import: the 'imp' module this used to rely on is deprecated
    # since Python 3.4 and removed in 3.12; types.ModuleType is the
    # supported replacement for imp.new_module.
    import types
    attrs = runpy.run_path(path, run_name=mod_name)
    mod = types.ModuleType(mod_name)
    mod.__dict__.update(attrs)
    return mod
def interpolation(x0: float, y0: float, x1: float, y1: float, x: float) -> float:
    """
    Performs linear interpolation between (x0, y0) and (x1, y1).

    Parameters
    ----------
    x0 : float.
        The coordinate of the first point on the x axis.
    y0 : float.
        The coordinate of the first point on the y axis.
    x1 : float.
        The coordinate of the second point on the x axis.
    y1 : float.
        The coordinate of the second point on the y axis.
    x : float.
        A value in the interval (x0, x1); values outside the interval
        yield an extrapolation.

    Returns
    -------
    float.
        Is the interpolated or extrapolated value.

    Example
    -------
    >>> interpolation(2, 4, 3, 6, 3.5)
    7.0
    """
    # Fraction of the way from x0 to x1 (same operation order as the
    # textbook formula, so float results are bit-for-bit identical).
    fraction = (x - x0) / (x1 - x0)
    return y0 + (y1 - y0) * fraction
def do2ptblinding(unblindedfile, cosmfile, inifor2pt, outftag = 'bl', seed='blinded'):
    """
    Given unblinded data file, computes and applies blinding factors.

    Factors are computed doing [shift cosm 2pt fn]/[ref cosm 2pt fn]
    where cosm parameters are taken from pregenerated
    cosmfile, and the shifted cosmology is selected pseudorandomly
    using a string seed.

    :param unblindedfile: path to the unblinded 2pt FITS data file
    :param cosmfile: npz file with the pregenerated cosmology parameter sets
    :param inifor2pt: ini file used to generate the 2pt functions
    :param outftag: tag used when naming the blinded output file
    :param seed: string seed selecting the shifted cosmology
    :return: name of the blinded output file
    """
    # get relevant sets of cosm parameters in form of Cosmology objects
    cosmdict = read_npzfile(cosmfile)
    refcosm = get_cosm_forind(cosmdict,0)
    shiftcosm = get_cosm_forseedstr(cosmdict,seed)
    # run cosmosis to get factors
    factordict = gen_blindingfactors(refcosm,shiftcosm,inifor2pt,unblindedfile)
    #^TODO make sure this looks into proper file for dn/dz, etc
    # currenlty does not and just looks in the ini file for this
    # apply blinding factors to data, create output blinded file.
    # Bug fix: previously a hard-coded 'public_output/two_pt_cov.fits' path
    # was blinded instead of the `unblindedfile` argument documented above.
    blindedfile = apply2ptblinding_tofits(factordict, origfitsfile = unblindedfile, outftag = outftag, justfname = False)
    return blindedfile
import collections
def product_counter_v3(products):
    """Count how many times each product occurs.

    Returns a collections.Counter mapping product -> occurrence count
    (insertion-ordered, not sorted; use .most_common() for a
    descending-count view).
    """
    counts = collections.Counter()
    counts.update(products)
    return counts
def remove_noise(line, minsize=8):
    """Remove small connected components ("noise") from an image.

    Pixels above half the image maximum are labeled into connected
    components; components with fewer than `minsize` pixels are zeroed.
    With minsize == 0 the input is returned unchanged.
    """
    if minsize == 0:
        return line
    # Binarize at half of the peak intensity.
    mask = line > 0.5 * np.amax(line)
    labels, count = ndimage.label(mask)
    # Per-label pixel counts, broadcast back onto the pixel grid.
    sizes = ndimage.sum(mask, labels, range(count + 1))
    pixel_sizes = sizes[labels]
    # Keep a pixel unless it belongs to a labeled-but-too-small component.
    keep = 1 - (pixel_sizes > 0) * (pixel_sizes < minsize)
    return np.minimum(mask, keep)
import textwrap
def _ParseFormatDocString(printer):
  """Parses the doc string for printer.

  The first docstring line is skipped; the remaining lines are scanned as a
  small state machine that collects the main description, the optional
  'Printer attributes:' section, and the optional 'Example:' section.

  Args:
    printer: The doc string will be parsed from this resource format printer.

  Returns:
    A (description, attributes, example) tuple:
      description - The format description (joined into one string).
      attributes - A list of (name, description) tuples, one tuple for each
        format-specific attribute.
      example - A list of example section lines (empty if there is no
        Example: section).

  Example resource printer docstring parsed by this method:
    '''This line is skipped. Printer attributes and Example sections optional.

    These lines describe the format.
    Another description line.

    Printer attributes:
      attribute-1-name: The description for attribute-1-name.
      attribute-N-name: The description for attribute-N-name.

    Example:
      One or more example lines for the 'For example:' section.
    '''
  """
  descriptions = []
  attributes = []
  example = []
  # No docstring at all: return empty description/attributes/example.
  if not printer.__doc__:
    return '', '', ''
  # Drop the first line of the docstring; only the remainder is parsed.
  _, _, doc = printer.__doc__.partition('\n')
  collect = _DOC_MAIN
  attribute = None
  attribute_description = []
  for line in textwrap.dedent(doc).split('\n'):
    if not line.startswith(' ') and line.endswith(':'):
      # The start of a new section.
      if attribute:
        # The current attribute description is done.
        attributes.append((attribute, ' '.join(attribute_description)))
        attribute = None
      if line == 'Printer attributes:':
        # Now collecting Printer attributes: section lines.
        collect = _DOC_ATTRIBUTES
      elif line == 'Example:':
        # Now collecting Example: section lines.
        collect = _DOC_EXAMPLE
      else:
        # Any other section header: ignore its body.
        collect = _DOC_SKIP
      continue
    if not line or collect == _DOC_SKIP:
      # Only interested in the description body and the Printer args: section.
      continue
    elif collect == _DOC_MAIN:
      # The main description line.
      descriptions.append(line.strip())
    elif line.startswith(' '):
      # Indented continuation line within a collected section.
      if collect == _DOC_ATTRIBUTES:
        # An attribute description line.
        attribute_description.append(line.strip())
      elif collect == _DOC_EXAMPLE and line.startswith(' '):
        # An example section line.
        example.append(line.strip())
    else:
      # The current attribute description is done.
      if attribute:
        attributes.append((attribute, ' '.join(attribute_description)))
      # A new attribute description.
      attribute, _, text = line.partition(':')
      attribute = attribute.strip()
      attribute = attribute.lstrip('*')
      attribute_description = [text.strip()]
  # Flush the last attribute, if any, after the loop ends.
  if attribute:
    attributes.append((attribute, ' '.join(attribute_description)))
  return ' '.join(descriptions), attributes, example | f5aaffb91cbedbc1b6e0da64cdfb3ff4adfa684e | 3,632,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.