| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def get_requirements():
"""
    Get the list of all dependencies.
    :return: the list of all dependencies
"""
requirements = []
with open(REQUIREMENTS_TXT, encoding="utf-8") as frequirements:
for requirement_line in frequirements.readlines():
requirement_line = requirement_line.strip()
if not requirement_line.startswith("#"):
if "#" in requirement_line:
                    requirement_line = requirement_line.split("#")[0].strip()
if requirement_line:
requirements.append(requirement_line)
return requirements
| 5,337,300
|
def equ2gal(ra, dec):
"""Converts Equatorial J2000d coordinates to the Galactic frame.
Note: it is better to use AstroPy's SkyCoord API for this.
Parameters
----------
ra, dec : float, float [degrees]
Input J2000 coordinates (Right Ascension and Declination).
Returns
-------
glon, glat: float, float [degrees]
"""
import math as m
from math import sin, cos, atan, asin, floor
    OB = m.radians(23.4333334)  # obliquity of the ecliptic (not used below)
    dec = m.radians(dec)
    ra = m.radians(ra)
    a = 27.128251  # The declination of the North Galactic Pole
    d = 192.859481  # The RA of the North Galactic Pole
    l = 32.931918  # The ascending node of the Galactic plane on the equator
sdec = sin(dec)
cdec = cos(dec)
sa = sin(m.radians(a))
ca = cos(m.radians(a))
GT = asin(cdec * ca * cos(ra - m.radians(d)) + sdec * sa)
GL = m.degrees(atan((sdec - sin(GT) * sa) / (cdec * sin(ra - m.radians(d)) * ca)))
TP = sdec - sin(GT) * sa
BT = cdec * sin(ra - m.radians(d)) * ca
if (BT < 0):
GL += 180
else:
if (TP < 0):
GL += 360
GL += l
if (GL > 360):
GL -= 360
LG = floor(GL)
LM = floor((GL - floor(GL)) * 60)
LS = ((GL - floor(GL)) * 60 - LM) * 60
GT = m.degrees(GT)
D = abs(GT)
if (GT > 0):
BG = floor(D)
else:
BG = -1*floor(D)
BM = floor((D - floor(D)) * 60)
BS = ((D - floor(D)) * 60 - BM) * 60
if (GT < 0):
BM = -BM
BS = -BS
#if GL > 180:
# GL -= 360
return (GL, GT)
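# Usage sketch: Vega's J2000 coordinates should map to roughly the published
# galactic values; the docstring's advice to cross-check against AstroPy's
# SkyCoord applies here too.
# glon, glat = equ2gal(279.2347, 38.7837)   # approximately (67.45, 19.24)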
| 5,337,301
|
def require_pyoptsparse(optimizer=None):
"""
Decorate test to raise a skiptest if a required pyoptsparse optimizer cannot be imported.
Parameters
----------
optimizer : String
Pyoptsparse optimizer string. Default is None, which just checks for pyoptsparse.
Returns
-------
TestCase or TestCase.method
The decorated TestCase class or method.
"""
def decorator(obj):
import unittest
try:
from pyoptsparse import OPT
except Exception:
msg = "pyoptsparse is not installed."
if not isinstance(obj, type):
@functools.wraps(obj)
def skip_wrapper(*args, **kwargs):
raise unittest.SkipTest(msg)
obj = skip_wrapper
obj.__unittest_skip__ = True
obj.__unittest_skip_why__ = msg
return obj
try:
OPT(optimizer)
except Exception:
msg = "pyoptsparse is not providing %s" % optimizer
if not isinstance(obj, type):
@functools.wraps(obj)
def skip_wrapper(*args, **kwargs):
raise unittest.SkipTest(msg)
obj = skip_wrapper
obj.__unittest_skip__ = True
obj.__unittest_skip_why__ = msg
return obj
return decorator
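# Usage sketch (hypothetical test case; 'SNOPT' is just an example optimizer name):
# @require_pyoptsparse('SNOPT')
# class TestWithSNOPT(unittest.TestCase):
#     def test_driver(self):
#         ...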
| 5,337,302
|
def addition(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
"""
A simple addition function. Add `a` to `b`.
"""
calc = a + b
return calc
| 5,337,303
|
def print_version():
"""
Print the module version information
    :return: returns 1 for exit code purposes
:rtype: int
"""
print("""
    %s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
| 5,337,304
|
def splitpath(path):
""" Split a path """
drive, path = '', _os.path.normpath(path)
try:
splitunc = _os.path.splitunc
except AttributeError:
pass
else:
drive, path = splitunc(path)
if not drive:
drive, path = _os.path.splitdrive(path)
elems = []
try:
sep = _os.path.sep
except AttributeError:
sep = _os.path.join('1', '2')[1:-1]
while 1:
prefix, path = _os.path.split(path)
elems.append(path)
if prefix in ('', sep):
drive = _os.path.join(drive, prefix)
break
path = prefix
elems.reverse()
return drive, elems
| 5,337,305
|
def vmf1_zenith_wet_delay(dset):
"""Calculates zenith wet delay based on gridded zenith wet delays from VMF1
Uses gridded zenith wet delays from VMF1, which are rescaled from the gridded height to actual station height by
    using Equation (5) described in Kouba :cite:`kouba2007`.
Args:
dset (Dataset): Model data.
Returns:
numpy.ndarray: Zenith wet delay for each observation in [m]
"""
# Get gridded VMF1 data
vmf1 = apriori.get("vmf1", time=dset.time)
lat, lon, height = dset.site_pos.pos.llh.T
grid_zwd = vmf1["zw"](dset.time, lon, lat) # Interpolation in time and space in VMF1 grid
grid_height = vmf1["ell"](lon, lat, grid=False)
# Zenith Wet delay. Eq. (5) in Kouba :cite:`kouba2007`
zwd = grid_zwd * np.exp(-(height - grid_height) / 2000)
return zwd
| 5,337,306
|
def docs(session):
"""Build the docs."""
session.install("-r", os.path.join("docs", "requirements-docs.txt"))
session.install("-e", ".")
shutil.rmtree(os.path.join("docs", "source", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "source", "_build", "doctrees", ""),
os.path.join("docs", "source", ""),
os.path.join("docs", "source", "_build", "html", ""),
)
| 5,337,307
|
def get_hash(dictionary):
"""Takes a dictionary as input and provides a unique hash value based on the
values in the dictionary. All the values in the dictionary after
    conversion to string are concatenated and then the HEX hash is generated
:param dictionary: A python dictionary
:return: A HEX hash
Credit: https://gitlab.com/calledbymountains/cvdatasetmanagement/blob/master/utils/gen_utils.py
"""
if not isinstance(dictionary, dict):
        raise ValueError('The argument must be a Python dictionary.')
str_input = reduce(lambda x, y: str(x) + str(y), list(dictionary.values()))
str_input = ''.join(random.sample(str_input, len(str_input)))
hash_object = hashlib.shake_128(str_input.encode())
output = hash_object.hexdigest(12)
return output
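# Note: because the concatenated value string is shuffled with random.sample
# above, the digest is generally not reproducible across calls for the same
# dictionary.
# Usage sketch:
# get_hash({"name": "cat", "size": 12})   # -> 24-character hex string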
| 5,337,308
|
def convert_to_tensor(value, dtype=None, device = None):
"""
Converts the given value to a Tensor.
Parameters
----------
value : object
An object whose type has a registered Tensor conversion function.
dtype : optional
Optional element type for the returned tensor. If missing, the type is inferred from the type of value.
Returns
-------
A Tensor based on value.
"""
return pd.to_tensor(value, dtype=dtype)
| 5,337,309
|
def send_email(to_address, from_address, subject, message, cc=[], files=[], host="localhost", port=587, username='', password=''):
"""Sending an email with python.
Args:
to_address (str): the recipient of the email
from_address (str): the originator of the email
subject (str): message subject
message (str): message body
cc (list[str]): list emails to be copied on the email
files (list[str]): attachment files
host (str):the host name
port (int): the port
username (str): server auth username
password (str): server auth password
"""
today = date.today()
msg = MIMEMultipart("alternative")
msg['From'] = email.utils.formataddr(('From Name', from_address))
msg['To'] = to_address
msg['Cc'] = ",".join(cc)
msg['Subject'] = subject+"- {}".format(today.strftime("%b-%Y"))
to_address = [to_address] + cc
body = "Good day, \n Please find the attached report for {}".format(
today.strftime("%b-%Y"))
html = """\
<!doctype html>
<html lang="en">
<head>
<title>
</title>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style type="text/css">
#outlook a{padding: 0;}
.ReadMsgBody{width: 100%;}
.ExternalClass{width: 100%;}
.ExternalClass *{line-height: 100%;}
body{margin: 0; padding: 0; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;}
table, td{border-collapse: collapse; mso-table-lspace: 0pt; mso-table-rspace: 0pt;}
img{border: 0; height: auto; line-height: 100%; outline: none; text-decoration: none; -ms-interpolation-mode: bicubic;}
p{display: block; margin: 13px 0;}
</style>
<!--[if !mso]><!-->
<style type="text/css">
@media only screen and (max-width:480px) {
@-ms-viewport {width: 320px;}
@viewport { width: 320px; }
}
</style>
<!--<![endif]-->
<!--[if mso]>
<xml>
<o:OfficeDocumentSettings>
<o:AllowPNG/>
<o:PixelsPerInch>96</o:PixelsPerInch>
</o:OfficeDocumentSettings>
</xml>
<![endif]-->
<!--[if lte mso 11]>
<style type="text/css">
.outlook-group-fix{width:100% !important;}
</style>
<![endif]-->
<style type="text/css">
@media only screen and (min-width:480px) {
.dys-column-per-100 {
width: 100.000000% !important;
max-width: 100.000000%;
}
}
@media only screen and (min-width:480px) {
.dys-column-per-100 {
width: 100.000000% !important;
max-width: 100.000000%;
}
}
</style>
</head>
<body>
<div>
<table align='center' border='0' cellpadding='0' cellspacing='0' role='presentation' style='background:#f7f7f7;background-color:#f7f7f7;width:100%;'>
<tbody>
<tr>
<td>
<div style='margin:0px auto;max-width:600px;'>
<table align='center' border='0' cellpadding='0' cellspacing='0' role='presentation' style='width:100%;'>
<tbody>
<tr>
<td style='direction:ltr;font-size:0px;padding:20px 0;text-align:center;vertical-align:top;'>
<!--[if mso | IE]>
<table role="presentation" border="0" cellpadding="0" cellspacing="0"><tr><td style="vertical-align:top;width:600px;">
<![endif]-->
<div class='dys-column-per-100 outlook-group-fix' style='direction:ltr;display:inline-block;font-size:13px;text-align:left;vertical-align:top;width:100%;'>
<table border='0' cellpadding='0' cellspacing='0' role='presentation' style='vertical-align:top;' width='100%'>
<tr>
<td align='center' style='font-size:0px;padding:10px 25px;word-break:break-word;'>
<div style='color:#4d4d4d;font-family:Oxygen, Helvetica neue, sans-serif;font-size:18px;font-weight:700;line-height:37px;text-align:center;'>
Ronnie The Dev
</div>
</td>
</tr>
<tr>
<td align='center' style='font-size:0px;padding:10px 25px;word-break:break-word;'>
<div style='color:#777777;font-family:Oxygen, Helvetica neue, sans-serif;font-size:14px;line-height:21px;text-align:center;'>
"""+message+"""
</div>
</td>
</tr>
</table>
</div>
<!--[if mso | IE]>
</td></tr></table>
<![endif]-->
</td>
</tr>
</tbody>
</table>
</div>
</td>
</tr>
</tbody>
</table>
<table align='center' border='0' cellpadding='0' cellspacing='0' role='presentation' style='background:#f7f7f7;background-color:#f7f7f7;width:100%;'>
<tbody>
<tr>
<td>
<div style='margin:0px auto;max-width:600px;'>
<table align='center' border='0' cellpadding='0' cellspacing='0' role='presentation' style='width:100%;'>
<tbody>
<tr>
<td style='direction:ltr;font-size:0px;padding:20px 0;text-align:center;vertical-align:top;'>
<!--[if mso | IE]>
<table role="presentation" border="0" cellpadding="0" cellspacing="0"><tr><td style="vertical-align:top;width:600px;">
<![endif]-->
<div class='dys-column-per-100 outlook-group-fix' style='direction:ltr;display:inline-block;font-size:13px;text-align:left;vertical-align:top;width:100%;'>
<table border='0' cellpadding='0' cellspacing='0' role='presentation' style='vertical-align:top;' width='100%'>
<tr>
<td align='center' style='font-size:0px;padding:10px 25px;word-break:break-word;'>
<div style='color:#4d4d4d;font-family:Oxygen, Helvetica neue, sans-serif;font-size:32px;font-weight:700;line-height:37px;text-align:center;'>
</div>
</td>
</tr>
<tr>
<td align='center' style='font-size:0px;padding:10px 25px;word-break:break-word;'>
<div style='color:#777777;font-family:Oxygen, Helvetica neue, sans-serif;font-size:14px;line-height:21px;text-align:center;'>
<strong>
RonnieTheDev|Digital Transformation Developer|
<br />
Company | Information Technology Department
<br />
| +263 777 777 777| +263 444 444 444 |
</strong>
<strong>
</strong>
<br />
</div>
</td>
</tr>
</table>
</div>
<!--[if mso | IE]>
</td></tr></table>
<![endif]-->
</td>
</tr>
</tbody>
</table>
</div>
</td>
</tr>
</tbody>
</table>
</div>
</body>
</html>
"""
part1 = MIMEText(body, 'plain')
part2 = MIMEText(html, 'html')
# msg.attach(MIMEText(body, 'plain'))
msg.attach(part1)
msg.attach(part2)
for filepath in files:
ctype, encoding = mimetypes.guess_type(filepath)
if ctype is None or encoding is not None:
ctype = "application/octet-stream"
maintype, subtype = ctype.split("/", 1)
if maintype in ['image', 'audio']:
add_attachment(filepath)
else:
baseName = os.path.basename(filepath)
att = MIMEApplication(open(filepath, 'rb').read())
att.add_header('Content-Disposition', 'attachment', filename=baseName)
msg.attach(att)
print(filepath, 'added')
server = smtplib.SMTP(host, port)
server.ehlo()
# server.set_debuglevel(3)
server.starttls()
server.login(username, password)
server.sendmail(from_address, to_address, msg.as_string())
server.quit()
print("Email has been sent")
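# Usage sketch (hypothetical addresses, host and credentials):
# send_email(
#     to_address="ops@example.com",
#     from_address="reports@example.com",
#     subject="Monthly report",
#     message="Please find this month's report attached.",
#     files=["report.pdf"],
#     host="smtp.example.com", port=587,
#     username="reports@example.com", password="app-password",
# )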
| 5,337,310
|
def aggregate_extrema(features, Th, percentage = True) :
"""
Summary:
    Function that tries to remove false minima by aggregating nearby extrema
Arguments:
features - pandas series containing the extrema to be aggregated.
The series is of the form: Max, Min, Max, Max, Min, ...
Th - threshold used to remove 'false' minima
    percentage - tells whether the threshold is expressed as a percentage of the distance
between adjacent maxima and minima
Returns:
aggregatedFeat - pandas vector with aggregated features
"""
# Keep the first maximum and minimum
ind = [0]
# Factor used to scale the threshold depending on 'percentage'
d = 1
skipNext = False
# For each minima check if it can be merged with the right node
for ii in range(1, len(features), 3) :
if skipNext :
skipNext = False
continue
# check if are at the end of the feature vector
if ii + 2 >= len( features ) :
# Record the point which is the last in the list
ind.append(ii) # Current minima
ind.append(ii + 1) # Following maxima
break
aggregate = False
# check if the next two maxima coincide
if features[ii+1] == features[ii+2] :
# find what is lowest minimum
if features[ ii ] > features[ii + 3] :
# try to aggregate on the left
if percentage :
d = features[ii - 1] - features[ii + 3]
if (features[ii-1] > features[ii+1]) and (features[ii+1] - features[ii] < Th * d):
aggregate = True
# in this case, the point and the next 2 coincident maxima
# should not be included in the output list
else :
# try to aggregate on the right
if percentage :
d = features[ii + 4] - features[ii]
if (features[ii+4] > features[ii+2]) and (features[ii+2] - features[ii+3] < Th * d):
aggregate = True
# in this case, the point should be included but the next should not
ind.append(ii) # Current minima
ind.append(ii+4)
if ii + 5 < len(features) :
ind.append(ii+5)
skipNext = True # skip the next minima that has already been processed
if not aggregate:
# Record the point
ind.append(ii) # Current minima
ind.append(ii + 1) # Following maxima
ind.append(ii + 2) # Maxima of the next minima
# check if the last max was copied twice
if features[ind[-1]] == features[ind[-2]]:
ind.pop()
return features[ind].copy()
| 5,337,311
|
def load_embedded_frame_data(session_path, camera: str, raw=False):
"""
    :param session_path: Path of the session folder (its 'raw_video_data' subfolder is read)
:param camera: The specific camera to load, one of ('left', 'right', 'body')
:param raw: If True the raw data are returned without preprocessing (thresholding, etc.)
:return: The frame counter, the pin state
"""
if session_path is None:
return None, None
raw_path = Path(session_path).joinpath('raw_video_data')
# Load frame count
count_file = raw_path / f'_iblrig_{camera}Camera.frame_counter.bin'
count = np.fromfile(count_file, dtype=np.float64).astype(int) if count_file.exists() else None
if not (count is None or raw):
count -= count[0] # start from zero
# Load pin state
pin_file = raw_path / f'_iblrig_{camera}Camera.GPIO.bin'
pin_state = np.fromfile(pin_file, dtype=np.float64).astype(int) if pin_file.exists() else None
if not (pin_state is None or raw):
pin_state = pin_state > PIN_STATE_THRESHOLD
return count, pin_state
| 5,337,312
|
async def test_app_and_product(app, product):
"""Create a test app and product which can be modified in the test"""
await product.create_new_product()
await app.create_new_app()
await product.update_scopes(
[
"urn:nhsd:apim:app:level3:shared-flow-testing",
"urn:nhsd:apim:user-nhs-id:aal3:shared-flow-testing",
"urn:nhsd:apim:user-nhs-login:P5:shared-flow-testing",
"urn:nhsd:apim:user-nhs-login:P9:shared-flow-testing",
"urn:nhsd:apim:user-nhs-login:P0:shared-flow-testing",
]
)
await app.add_api_product([product.name])
await app.set_custom_attributes(
{
"jwks-resource-url": "https://raw.githubusercontent.com/NHSDigital/"
"identity-service-jwks/main/jwks/internal-dev/"
"9baed6f4-1361-4a8e-8531-1f8426e3aba8.json"
}
)
yield product, app
await app.destroy_app()
await product.destroy_product()
| 5,337,313
|
def reconstruct(edata, mwm=80.4, cme=1000):
"""
Reconstructs the momentum of the neutrino and anti-neutrino, given the
momentum of the muons and bottom quarks.
INPUT:
edata: A list containing the x, y, and z momentum in GeV of the charged leptons
and bottom quarks, in the following order:
edata := [amux, amuy, amuz, b1x, b1y, b1z, mux, muy, muz, b2x, b2y, b2z]
with notation,
amu := anti-muon
b1 := bottom quark 1*
mu := muon
b2 := bottom quark 2*
* The charge of the bottom quark is assumed to be unknown.
mwm(default=80.4): The constrained mass of the W boson in GeV.
cme(default=1000): The center of mass energy.
OUTPUT:
solutions: A list of the reconstructed neutrino and anti-neutrino
x, y, and z-momenta as a tuple, for each possible solution of p2z,
[(nux, nuy, nuz, anux, anuy, anuz), ...].
"""
assert len(edata) == 12, 'edata should have length 12.'
degree = 4 # The degree of the interpolating polynomial.
rbar_threshold = 0.95
mwm2 = mwm**2
domain_func, func1s, func2s = _getFuncs(edata, mwm2, cme)
p2z_func1 = func1s[2]
p2z_func2 = func2s[2]
solutions = []
# Find domain by finding the two roots of domain_func (quadratic).
domain = solve(domain_func, rational=False,
simplify=False, minimal=True, quadratics=True)
# Check for complex domain bounds.
if not any([d.is_real for d in domain]):
return []
domain = [float(d) for d in domain]
# Interpolate function 1 and calculate adjusted R-squared (rbar).
poly1, rbar1 = dividedDiffernce(p2z_func1, domain[0], domain[1],
deg=degree, var_name='p2z')
# Add solutions only if interpolation is a good fit.
if rbar1 > rbar_threshold:
solutions = _getSols(poly1, domain, func1s)
# Interpolate function 2 and calculate adjusted R-squared.
poly2, rbar2 = dividedDiffernce(p2z_func2, domain[0], domain[1],
deg=degree, var_name='p2z')
if rbar2 > rbar_threshold:
solutions += _getSols(poly2, domain, func2s)
# rbars = (rbar1, rbar2)
return solutions
| 5,337,314
|
def run_sql_command(query: str, database_file_path: str, unique_items=False) -> list:
"""
Returns the output of an SQL query performed on a specified SQLite database
Parameters:
query (str): An SQL query
database_file_path (str): absolute path of the SQLite database file
unique_items (bool): whether the function should return a list
of items instead of a list of tuples with one value
Returns:
records (list): The output of the SQLite database
"""
with contextlib.closing(sqlite3.connect(database_file_path)) as conn:
with conn:
with contextlib.closing(conn.cursor()) as cursor: # auto-closes
cursor.execute(query)
records = cursor.fetchall()
if unique_items:
return [x[0] for x in records]
return records
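# Usage sketch (hypothetical database and table):
# names = run_sql_command("SELECT name FROM users;", "/tmp/app.sqlite3",
#                         unique_items=True)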
| 5,337,315
|
def group_into_profiled_intervals(records: Iterable[Reading],
interval_m: int = 30,
profile: List[float] = PROFILE_DEFAULT
):
    """ Group load data into billing intervals, splitting any record that spans more than one interval first
:param records: Tuple in the form of (start_date, end_date, usage)
Records must be a day or less in duration
:param interval_m: The interval length in minutes
:param profile: The profile to use to scale results
:return: Yield the split up intervals
"""
if interval_m > 60.0:
raise ValueError('Interval must be 60m or less ')
records = split_into_daily_intervals(records)
records = split_into_profiled_intervals(records, interval_m, profile)
group_records = dict()
for record in records:
start_date = record[0]
end_date = record[1]
usage = record[2]
# Check interval
rec_interval = int((end_date - start_date).total_seconds()/60)
assert rec_interval <= interval_m
# Increment dictionary value
group_end = get_group_end(end_date, interval_m)
if group_end not in group_records:
group_records[group_end] = usage
else:
group_records[group_end] += usage
# Output grouped values as list
for key in sorted(group_records.keys()):
end = key
start = end - timedelta(minutes=interval_m)
yield Reading(start, end, group_records[key], None)
| 5,337,316
|
def plot_3d_pose(pose, elev=0, azim=0, figsize=(8, 8)):
"""
Visualize a 3D skeleton.
:param pose: numpy array (3 x 17) with x, y, z coordinates with COCO keypoint format.
:param elev: Elevation angle in the z plane.
:param azim: Azimuth angle in the x, y plane.
:param figsize: Figure size.
:return: None
"""
pose = pose.flatten(order='F')
vals = np.reshape(pose, (17, -1))
fig = plt.figure(figsize=figsize)
ax = Axes3D(fig)
ax.view_init(elev, azim)
limbs = [(0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6), (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12),
(12, 13), (8, 14), (14, 15), (15, 16)]
left_right_limb = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
for i, limb in enumerate(limbs):
x, y, z = [np.array([vals[limb[0], j], vals[limb[1], j]]) for j in range(3)]
if left_right_limb[i] == 0:
cc = 'blue'
elif left_right_limb[i] == 1:
cc = 'red'
else:
cc = 'black'
ax.plot(x, y, z, marker='o', markersize=2, lw=1, c=cc)
radius = 650
xroot, yroot, zroot = vals[0, 0], vals[0, 1], vals[0, 2]
ax.set_xlim3d([-radius + xroot, radius + xroot])
ax.set_zlim3d([-radius + zroot, radius + zroot])
ax.set_ylim3d([-radius + yroot, radius + yroot])
ax.set_xlabel('X')
ax.set_ylabel('Z')
ax.set_zlabel('Y')
white = (1.0, 1.0, 0.1, 0.0)
ax.w_xaxis.set_pane_color(white)
ax.w_yaxis.set_pane_color(white)
ax.w_zaxis.set_pane_color(white)
| 5,337,317
|
def zeta_nbi_nvi_ode(t, y, nb, C, nv, nb0, nbi_ss, f, g, c0, alpha, B, pv, e, R, eta, mu, nbi_norm = True):
"""
Solving the regular P0 equation using the ODE solver (changing s > 0)
t : time to solve at, in minutes
y : y[0] = nvi, y[1] = nbi, y[2] = zeta(t)
"""
F = f*g*c0
r = R*g*c0
beta = alpha*pv*nb
delta = F + alpha*nb*(1-pv)
L = 30 # protospacer length
P0 = np.exp(-mu*L)
dnvi = (-(F + alpha*nb)*y[0] + alpha*B*P0*pv*y[0]*(nb - e*y[1]))
dnbi = ((g*C - F - r)*y[1] - alpha*pv*y[1]*(nv - e*y[0]) + alpha*eta*nb0*y[0]*(1 - pv))
    if nbi_norm:
# nbi normalized by p_0
if y[1] / (1 - y[2]) > nbi_ss:
nbi_val = nbi_ss
else:
nbi_val = y[1] / (1-y[2])
    else:
# nbi not normalized by p_0, capped at nbi_ss
if y[1] > nbi_ss:
nbi_val = nbi_ss
else:
nbi_val = y[1]
# straight deterministic nbi prediction
#nbi_val = y[1]
s = (beta - delta - 2*alpha*pv*e*nbi_val) / (delta + alpha*pv*e*nbi_val)
dzeta = (beta + delta)*(1/(s + 2) + y[2]**B * (s + 1)/(s + 2) - y[2])
return dnvi, dnbi, dzeta
| 5,337,318
|
def intercalamento_listas(lista1, lista2):
    """ Using 'lista1' and 'lista2', both of the same length,
    create a new list made by interleaving the two."""
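# A minimal sketch of one possible implementation of the stub above, assuming
# both lists have equal length as the docstring states:
# def intercalamento_listas(lista1, lista2):
#     return [item for pair in zip(lista1, lista2) for item in pair]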
| 5,337,319
|
def get_irsa_catalog(ra=165.86, dec=34.829694, radius=3, catalog='allwise_p3as_psd', wise=False, twomass=False):
"""Query for objects in the `AllWISE <http://wise2.ipac.caltech.edu/docs/release/allwise/>`_ source catalog
Parameters
----------
ra, dec : float
Center of the query region, decimal degrees
    radius : float
        Radius of the query, in arcmin
    catalog : str
        Name of the IRSA catalog to query; defaults to the AllWISE source catalog
    wise, twomass : bool
        If set, override `catalog` with 'allwise_p3as_psd' (AllWISE) or
        'fp_psc' (2MASS point source catalog), respectively
Returns
-------
table : `~astropy.table.Table`
Result of the query
"""
from astroquery.irsa import Irsa
#all_wise = 'wise_allwise_p3as_psd'
#all_wise = 'allwise_p3as_psd'
if wise:
catalog = 'allwise_p3as_psd'
elif twomass:
catalog = 'fp_psc'
coo = coord.SkyCoord(ra*u.deg, dec*u.deg)
table = Irsa.query_region(coo, catalog=catalog, spatial="Cone",
radius=radius*u.arcmin, get_query_payload=False)
return table
| 5,337,320
|
def _create_supporting_representation(model,
support_root = None,
support_uuid = None,
root = None,
title = None,
                                     content_type = 'obj_IjkGridRepresentation'):
    """Create a supporting representation reference node referring to an IjkGrid and optionally add to root."""
assert support_root is not None or support_uuid is not None
# todo: check that support_root is for a RESQML class that can support properties, matching content_type
if support_root is not None:
uuid = rqet.uuid_for_part_root(support_root)
if uuid is not None:
support_uuid = uuid
if title is None:
title = rqet.citation_title_for_node(support_root)
assert support_uuid is not None
if not title:
title = model.title(uuid = support_uuid)
if not title:
title = 'supporting representation'
return _create_ref_node('SupportingRepresentation', title, support_uuid, content_type = content_type, root = root)
| 5,337,321
|
def os_kernel():
"""
Get the operating system's kernel version
"""
ker = "Unknown"
if LINUX:
ker = platform.release()
elif WIN32 or MACOS:
ker = platform.version()
return ker
| 5,337,322
|
def get_inputfuncs(session, *args, **kwargs):
"""
Get the keys of all available inputfuncs. Note that we don't get
it from this module alone since multiple modules could be added.
So we get it from the sessionhandler.
"""
inputfuncsdict = dict(
(key, func.__doc__) for key, func in session.sessionhandler.get_inputfuncs().items()
)
session.msg(get_inputfuncs=inputfuncsdict)
| 5,337,323
|
def call(args, env=None, cwd=None, outputHandler=None, outputEncoding=None, timeout=None, displayName=None, options=None):
"""
Call a process with the specified args, logging stderr and stdout to the specified
output handler which will throw an exception if the exit code or output
of the process indicates an error.
NB: Consider using the CustomCommand target instead of invoking this directly whenever possible.
@param args: The command and arguments to invoke (a list, the first element of which is the executable).
None items in this list will be ignored.
@param outputHandler: a ProcessOutputHandler instance, perhaps constructed using
the L{ProcessOutputHandler.create} method. If not specified, a default is created
based on the supplied options.
@param env: Override the environment the process is started in (defaults to the parent environment)
@param cwd: Change the working directory the process is started in (defaults to the parent cwd)
@param outputEncoding: name of the character encoding the process generates. Assumed to be
getStdoutEncoding (e.g. what the terminal is using, or else UTF-8) if not specified.
@param timeout: maximum time a process is allowed to run. If an options dictionary is not
present, this should ALWAYS be set to a value e.g. options['process.timeout'].
    @param displayName: human-friendly description of the process for use in error messages, including the target name if possible
@param options: where possible, always pass in a dictionary of resolved options, which may be used to customize
how this function operates.
"""
if options is None: options = {}
if not timeout: timeout = options.get('process.timeout', 600)
processName = os.path.basename(args[0])
#if not timeout: # too many things don't set it at present
# raise Exception('Invalid argument to %s call - timeout must always be set explicitly'%processName)
args = [x for x in args if x != None]
environs = os.environ.copy()
if env:
for k in env:
if None == env[k]:
del environs[k]
else:
environs[k] = env[k]
if not cwd: cwd = os.getcwd()
log.info('Executing %s process: %s', processName, ' '.join(['"%s"'%s if ' ' in s else s for s in args]))
if cwd != os.getcwd():
log.info('%s working directory: %s', processName, cwd)
if env:
log.info('%s environment overrides: %s', processName, ', '.join(sorted(['%s=%s'%(k, env[k]) for k in env])))
try:
if cwd:
process = subprocess.Popen(args, env=environs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
else:
process = subprocess.Popen(args, env=environs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception, e:
raise EnvironmentError('Cannot start process "%s": %s'%(args[0], e))
if not outputHandler: # use short processName not longer displayName for per-line prefixes, the extra context isn't necessary anyway
outputHandler = ProcessOutputHandler.create(processName, options=options)
# give the full arguments as the process display name (unless really long) since it's impossible to identify the target otherwise
if not displayName:
displayName = str(args)
if len(displayName)>200: displayName=displayName[:200]+'...]'
(out, err, timedout) = _wait_with_timeout(process, displayName, timeout, True)
outputEncoding = outputEncoding or getStdoutEncoding()
log.debug('%s outputEncoding assumed to be: %s', processName, outputEncoding)
# probably best to be tolerant about unexpected chars, given how hard it is to predict what subprocesses will write in
out = unicode(out, outputEncoding, errors='replace')
err = unicode(err, outputEncoding, errors='replace')
hasfailed = True
try:
for l in out.splitlines():
outputHandler.handleLine(l, False)
for l in err.splitlines():
outputHandler.handleLine(l, True)
if timedout: # only throw after we've written the stdout/err
            raise BuildException('Terminating process %s after hitting %d second timeout' % (processName, timeout))
outputHandler.handleEnd(process.returncode) # will throw on error
hasfailed = False
return outputHandler
finally:
# easy-read format
if hasfailed:
log.debug('Arguments of failed process are: %s' % '\n '.join(['"%s"'%s if ' ' in s else s for s in args]))
| 5,337,324
|
def prettyFloat(value, roundValue=False):
"""Return prettified string for a float value.
TODO
----
add flag for round to
add test
"""
## test-cases:
    # if changing things here, check that they are still good (mod-dc-2d)
if roundValue and abs(round(value)-value) < 1e-4 and abs(value) < 1e3 and 0:
string = str(int(round(value, 2)))
elif abs(value) < 1e-14:
string = "0"
elif abs(value) > 1e4 or abs(value) <= 1e-3:
string = str("%.1e" % value)
elif abs(value) < 1e-2:
string = str("%.4f" % round(value, 4))
elif abs(value) < 1e-1:
string = str("%.3f" % round(value, 3))
elif abs(value) < 1e0:
string = str("%.2f" % round(value, 2))
elif abs(value) < 1e1:
string = str("%.2f" % round(value, 2))
elif abs(value) < 1e2:
string = str("%.2f" % round(value, 2))
else:
string = str("%.0f" % round(value, 2))
if string.endswith(".0"):
return string.replace(".0", "")
elif '.' in string and string.endswith(".0"):
return string[0:len(string)-1]
else:
return string
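# Usage sketch (outputs follow the branches above):
# prettyFloat(0.0)        -> '0'
# prettyFloat(0.000034)   -> '3.4e-05'
# prettyFloat(3.14159)    -> '3.14'
# prettyFloat(123456.0)   -> '1.2e+05'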
| 5,337,325
|
def make_nested_pairs_from_seq(args):
"""
Given a list of arguments, creates a list in Scheme representation
(nested Pairs).
"""
cdr = Nil()
for arg in reversed(args):
cdr = Pair(arg, cdr)
return cdr
| 5,337,326
|
def add_tc_qdisc(device, qdisc_type, parent=None, handle=None, latency_ms=None,
max_kbps=None, burst_kb=None, kernel_hz=None,
namespace=None):
"""Add/replace a TC qdisc on a device
pyroute2 input parameters:
- rate (min bw): bytes/second
- burst: bytes
- latency: us
:param device: (string) device name
:param qdisc_type: (string) qdisc type (TC_QDISC_TYPES)
:param parent: (string) qdisc parent class ('root', '2:10')
:param handle: (string, int) (required for HTB) major handler identifier
(0xffff0000, '1', '1:', '1:0') [1]
:param latency_ms: (string, int) (required for TBF) latency time in ms
:param max_kbps: (string, int) (required for TBF) maximum bandwidth in
kbits per second.
    :param burst_kb: (string, int) (required for TBF) maximum burst size in
kbits.
:param kernel_hz: (string, int) (required for TBF) kernel HZ.
:param namespace: (string) (optional) namespace name
[1] https://lartc.org/howto/lartc.qdisc.classful.html
"""
if qdisc_type and qdisc_type not in TC_QDISC_TYPES:
raise qos_exc.TcLibQdiscTypeError(
qdisc_type=qdisc_type, supported_qdisc_types=TC_QDISC_TYPES)
args = {'kind': qdisc_type}
if qdisc_type in ['htb', 'ingress']:
if handle:
args['handle'] = str(handle).split(':')[0] + ':0'
elif qdisc_type == 'tbf':
if not latency_ms or not max_kbps or not kernel_hz:
raise qos_exc.TcLibQdiscNeededArguments(
qdisc_type=qdisc_type,
needed_arguments=['latency_ms', 'max_kbps', 'kernel_hz'])
args['burst'] = int(
_get_tbf_burst_value(max_kbps, burst_kb, kernel_hz) * 1024 / 8)
args['rate'] = int(max_kbps * 1024 / 8)
args['latency'] = latency_ms * 1000
if parent:
args['parent'] = rtnl.TC_H_ROOT if parent == 'root' else parent
priv_tc_lib.add_tc_qdisc(device, namespace=namespace, **args)
| 5,337,327
|
def print_warning(text: str) -> None:
""" Prints a warning. """
print(Style.DIM + Back.YELLOW + Fore.BLACK + "[/!\\] " + text)
print(Style.RESET_ALL, end="")
| 5,337,328
|
def get_aug_assign_symbols(code):
    """Given an AST or code string, return the symbols that are targets of
    augmented assignments.
Parameters
----------
code: A code string or the result of an ast.parse.
"""
if isinstance(code, str):
tree = ast.parse(code)
else:
tree = code
n = AugAssignLister()
n.visit(tree)
return n.names
| 5,337,329
|
def stack_dict(state: Dict[Any, tf.Tensor]) -> tf.Tensor:
"""Stack a dict of tensors along its last axis."""
return tf.stack(sorted_values(state), axis=-1)
| 5,337,330
|
def compact_float(n, max_decimals=None):
"""Reduce a float to a more compact value.
Args:
n: Floating point number.
max_decimals: Maximum decimals to keep; defaults to None.
Returns:
An integer if `n` is essentially an integer, or a string
representation of `n` reduced to `max_decimals` numbers after
the decimal point. Otherwise, simply returns `n`.
"""
compact = n
if float(n).is_integer():
compact = int(n)
elif max_decimals is not None:
compact = "{0:.{1}f}".format(n, max_decimals)
return compact
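# Usage sketch:
# compact_float(4.0)          -> 4
# compact_float(3.14159, 2)   -> '3.14'
# compact_float(3.14159)      -> 3.14159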
| 5,337,331
|
def resample_2d(X, resolution):
"""Resample input data for efficient plotting.
Parameters:
-----------
X : array_like
Input data for clustering.
resolution : int
Number of "pixels" for 2d histogram downscaling.
Default 'auto' downscales to 200x200 for >5000
samples, and no downscaling for <=5000 samples.
Returns:
--------
xx[mask] : array_like
Rescaled x meshgrid.
yy[mask] : array_like
Rescaled y meshgrid.
"""
x, y = X[:,0], X[:,1]
nbins = np.ptp(X, axis=0) / resolution
hh, locx, locy = np.histogram2d(x, y, bins=np.ceil(nbins).astype('int'))
xwidth, ywidth = np.diff(locx).mean(), np.diff(locy).mean()
mask = hh != 0
locx = locx[:-1] + xwidth
locy = locy[:-1] + ywidth
yy, xx = np.meshgrid(locy, locx)
np.random.seed(0)
yy += np.random.uniform(-xwidth/2, xwidth/2, size=hh.shape)
xx += np.random.uniform(-ywidth/2, ywidth/2, size=hh.shape)
return xx[mask], yy[mask]
| 5,337,332
|
def is_mocked(metric_resource: MetricResource) -> bool:
"""
    Is this metric a mocked metric or a real one?
"""
return metric_resource.spec.mock is not None
| 5,337,333
|
def negative_embedding_subtraction(
embedding: np.ndarray,
negative_embeddings: np.ndarray,
faiss_index: faiss.IndexFlatIP,
num_iter: int = 3,
k: int = 10,
beta: float = 0.35,
) -> np.ndarray:
"""
Post-process function to obtain more discriminative image descriptor.
Parameters
----------
embedding : np.ndarray of shape (n, d)
Embedding to be subtracted.
negative_embeddings : np.ndarray of shape (m, d)
Negative embeddings to be subtracted.
faiss_index : faiss.IndexFlatIP
Index to be used for nearest neighbor search.
num_iter : int, optional
Number of iterations. The default is 3.
k : int, optional
Number of nearest neighbors to be used for each iteration. The default is 10.
beta : float, optional
Parameter for the weighting of the negative embeddings. The default is 0.35.
Returns
-------
np.ndarray of shape (n, d)
Subtracted embedding.
"""
for _ in range(num_iter):
_, topk_indexes = faiss_index.search(embedding, k=k) # search for hard negatives
topk_negative_embeddings = negative_embeddings[topk_indexes]
embedding -= (topk_negative_embeddings.mean(axis=1) * beta) # subtract by hard negative embeddings
embedding /= np.linalg.norm(embedding, axis=1, keepdims=True) # L2-normalize
return embedding.astype('float32')
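# Usage sketch (hypothetical shapes and random data; assumes faiss and numpy
# are available as in the imports above):
# d = 256
# neg = np.random.rand(1000, d).astype('float32')
# neg /= np.linalg.norm(neg, axis=1, keepdims=True)
# index = faiss.IndexFlatIP(d)
# index.add(neg)
# emb = np.random.rand(8, d).astype('float32')
# emb /= np.linalg.norm(emb, axis=1, keepdims=True)
# emb = negative_embedding_subtraction(emb, neg, index, num_iter=3, k=10)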
| 5,337,334
|
def op_conv_map(operator):
"""Convert operator or return same operator"""
return OPERATOR_CONVERSION.get(operator, operator)
| 5,337,335
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Demo Calendar platform."""
calendar_data_future = DemoGoogleCalendarDataFuture()
calendar_data_current = DemoGoogleCalendarDataCurrent()
add_entities(
[
DemoGoogleCalendar(hass, calendar_data_future, "Calendar 1"),
DemoGoogleCalendar(hass, calendar_data_current, "Calendar 2"),
]
)
| 5,337,336
|
def getGMapKey():
"""Return value for <gmapKey/> configuration parameter."""
return sciflo.utils.ScifloConfigParser().getParameter('gmapKey')
| 5,337,337
|
def agent_action(tid: uuid.UUID, name: str, action: AgentAction) -> None:
"""
Execute an action on an agent
:param tid: The environment this agent is defined in.
:param name: The name of the agent.
:param action: The type of action that should be executed on an agent.
* pause: A paused agent cannot execute any deploy operations.
        * unpause: An unpaused agent will be able to execute deploy operations.
:raises Forbidden: The given environment has been halted.
"""
| 5,337,338
|
def macromodel_optimisation(
precursors, optimiser, temp_dir, collection_name, db_name, db_url
):
"""
Optimises cages and returns the modified cage.
Args:
precursors: SMILES of precursors to be optimised.
optimise: Optimiser callable to use.
temp_dir: Path of temporary directory to optimise files in.
collection_name: Name of the collection in the MongoDB database.
db_name: Name of the MongoDB database.
"""
aldehyde, amine = precursors
key = get_key(aldehyde, amine)
cage = stk.ConstructedMolecule(
(
stk.BuildingBlock(amine, functional_groups=["primary_amine"]),
stk.BuildingBlock(aldehyde, functional_groups=["aldehyde"]),
),
topology_graph=stk.cage.FourPlusSix(),
)
# Establishing connection to MongoDB must be done in each child process.
client = MongoClient(db_url)
db = client[db_name]
run_name = str(uuid4().int)
workdir = os.getcwd()
if temp_dir:
# Move to temp_dir before optimisation starts.
os.chdir(temp_dir)
# Checking for molecules in the cache.
try:
cached_mol = db[collection_name].find_one({"_id": key})
except ServerSelectionTimeoutError:
logger.error("Cannot connect to MongoDB.")
return
if cached_mol is not None:
# If cached molecule exists, do not need to optimise.
logger.info(f"{cage} in database.")
else:
try:
optimiser.optimize(cage)
# Catch exceptions from optimising.
except Exception as err:
logger.error(err)
os.chdir(workdir)
return
# Return to original directory.
os.chdir(workdir)
# Write the cage once optimised.
try:
# Try to store the molecule in the database.
store_mol(key=key, mol=cage, db=db, collection_name=collection_name)
logger.info("Finished optimisation. Appending to MongoDB.")
# Catch exceptions from appending to the database.
# Continue to dump if an error occurs here.
except Exception as err:
logger.error(err)
logger.error("An error occurred. Exiting the optimisations.")
return
# Dump the cages, even if they can't be
# added to the database.
logger.debug(f"Dumping {cage}.")
cage.dump(f"opt_{run_name}.json")
cage.write(f"opt_{run_name}.mol")
# If dumped, update the molecule identifier.
db[collection_name].update_one(
{"_id": key}, {"$set": {"identifier": run_name}}
)
logger.info(f"Finished optimisation for {cage}.")
| 5,337,339
|
def get_stored_credentials():
"""
Gets the credentials, username and password, that have been stored in
~/.shrew/config.ini and the secure keychain respectively without bothering
to prompt the user if either credential cannot be found.
:returns: username and password
:rtype: tuple of str
"""
with load_config(sections=AUTH_SECTIONS, defaults=AUTH_CONFIG_DEFAULTS) as config:
username = config.get(AUTH_SECTION, 'username')
if not username:
# if we don't have a username then we cannot lookup the password
return None, None
has_keychain = initialize_keychain()
        # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will throw an error.
unlock_keychain(username)
if has_keychain:
try:
password = keyring.get_password(AUTH_SECTION, username)
return username, password
except Exception as e:
log.warn("Unable to get password from keyring. Continuing..")
log.debug(e)
return username, None
| 5,337,340
|
async def main(
urlfile,
outfile: IO[str],
gateway_node: SSHNode,
client_node: Sequence[SSHNode],
n_clients: int,
checkpoint_dir: Optional[pathlib.Path],
gateway_endpoint: Optional[str],
**kwargs
):
    """Script entry point."""
logging.basicConfig(
format='[%(asctime)s] %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
_LOGGER.info("Running script.")
    # Either we have one repetition per node, or multiple client nodes
client_node = list(client_node) or [SSHNode(), ]
assert n_clients == 1 or len(client_node) == 1
if len(client_node) == 1 and n_clients > 1:
for _ in range(n_clients - 1):
client_node.append(SSHNode(client_node[0].node_address))
urls = [url for (url, ) in urlfile]
gateway_endpoint = gateway_endpoint or gateway_node.public_ip
experiment = TraceCollectionExperiment(
gateway_node, client_node, urls, checkpoint_dir=checkpoint_dir,
gateway_endpoint=gateway_endpoint, **kwargs,
)
checkpoint_filenames = await experiment.run()
sh.cat(checkpoint_filenames, _out=outfile)
experiment.clear_checkpoint()
| 5,337,341
|
def _ro_hmac(msg, h=None):
"""Implements random oracle H as HMAC-SHA256 with the all-zero key.
Input is message string and output is a 32-byte sequence containing the HMAC
value.
Args:
msg: Input message string.
h: An optional instance of HMAC to use. If None a new zeroed-out instance
will be used.
Returns:
bytes: Random Oracle output (32 bytes).
"""
if h is None:
h = ZERO_HMAC.copy()
h.update(msg)
return h.digest()
| 5,337,342
|
def test_person__DeletePersonForm__3(person_data, browser, loginname):
"""It cannot be accessed by some roles."""
browser.login(loginname)
browser.assert_forbidden(browser.PERSON_DELETE_URL)
| 5,337,343
|
def get_unique_slug(model_instance, sluggable_field_name, slug_field_name):
"""
Takes a model instance, sluggable field name (such as 'title') of that
model as string, slug field name (such as 'slug') of the model as string;
returns a unique slug as string.
"""
slug = slugify(getattr(model_instance, sluggable_field_name))
unique_slug = slug
extension = 1
ModelClass = model_instance.__class__
while ModelClass._default_manager.filter(
**{slug_field_name: unique_slug}
).exists():
unique_slug = '{}-{}'.format(slug, extension)
extension += 1
return unique_slug
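# Usage sketch (hypothetical Django model instance with 'title' and 'slug' fields):
# article.slug = get_unique_slug(article, 'title', 'slug')
# article.save()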
| 5,337,344
|
def test_SFA(noise_dataset):
"""Test that a SFA model can be fit with no errors.
"""
X = noise_dataset
model = SFA(n_components=3)
model.fit(X)
model.transform(X)
model.fit_transform(X)
| 5,337,345
|
def where(ctx):
"""Print path to data directory."""
log.debug('chemdataextractor.data.where')
click.echo(get_data_dir())
| 5,337,346
|
def get_precomputed_features(source_dataset, experts):
"""Get precomputed features from a set of experts and a dataset.
Arguments:
source_dataset: the source dataset as an instance of Base Dataset.
experts: a list of experts to use precomputed features from
Returns: A list of dicts, where each dict maps from video id to precomputed
features.
"""
precomputed_features = []
for expert in experts:
processed_expert_features = {}
expert_features = cache.get_cached_features_by_expert_and_dataset(
source_dataset, expert)
for video_id, expert_value in expert_features.items():
video_expert_features = None
missing_modalities = False
expert_value = expert.feature_transformation(expert_value)
if is_expert_value_missing(expert_value):
video_expert_features = np.zeros(
expert.embedding_shape, np.float32)
missing_modalities = True
else:
expert_value = expert_value.astype(np.float32)
if expert.constant_length:
video_expert_features = expert_value
else:
video_expert_features = zero_pad_expert_features(
expert, expert_value)
processed_expert_features[video_id] = (
video_expert_features,
missing_modalities
)
precomputed_features.append(processed_expert_features)
return precomputed_features
| 5,337,347
|
def _local_distribution():
"""
    Get the regional (province-level) distribution of positions
"""
data = {}
all_city = Position.objects.filter(status=1).values_list("district__province__name", flat=True)
value_count = pd.value_counts(list(all_city)).to_frame()
df_value_counts = pd.DataFrame(value_count).reset_index()
df_value_counts.columns = ['name', 'counts']
df_value_counts["name"] = df_value_counts['name'].str.slice(0, 2)
    data["count"] = len(all_city)
all_data = df_value_counts.values
range_max = (df_value_counts["counts"].values[0] // 100 + 2) * 100
render_data = [(item[0], item[1]) for item in all_data]
maps = Map(init_opts=PYECHARTS_INIT_OPTS)
maps.add(series_name="总览", data_pair=render_data, zoom=1.2, is_map_symbol_show=False,
itemstyle_opts=opts.series_options.ItemStyleOpts(area_color="#ddd", border_color="#eee",
border_width=.5), ).set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=int(range_max)))
return maps.render_embed()
| 5,337,348
|
def paradigm_filler(shared_datadir) -> ParadigmFiller:
"""
    These layout, paradigm, and hfstol files are **pinned** test data;
the real files in use are hosted under res/ folder, and should not
be used in tests!
"""
return ParadigmFiller(
shared_datadir / "layouts",
shared_datadir / "crk-normative-generator.hfstol",
)
| 5,337,349
|
def apply_scaling(data, dicom_headers):
"""
    Rescale the data based on the RescaleSlope and RescaleIntercept
Based on the scaling from pydicomseries
    :param dicom_headers: dicom headers to use to retrieve the scaling factors
:param data: the input data
"""
# Apply the rescaling if needed
private_scale_slope_tag = Tag(0x2005, 0x100E)
private_scale_intercept_tag = Tag(0x2005, 0x100D)
if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
rescale_slope = 1
rescale_intercept = 0
if 'RescaleSlope' in dicom_headers:
rescale_slope = dicom_headers.RescaleSlope
if 'RescaleIntercept' in dicom_headers:
rescale_intercept = dicom_headers.RescaleIntercept
# try:
# # this section can sometimes fail due to unknown private fields
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
# except:
# pass
return do_scaling(data, rescale_slope, rescale_intercept)
else:
return data
| 5,337,350
|
def fprint(*objects, color='', style='', **kwargs):
"""Print text with fancy colors and formatting
Parameters
----------
*objects
objects to print
color : str
one- or two-character string specifying color:
r (red)
dr (dark red)
hr (highlight red)
g (green)
dg (dark green)
hg (highlight green)
y (yellow)
dy (dark yellow)
hy (highlight yellow)
b (blue)
db (dark blue)
hb (highlight blue)
m (magenta)
dm (dark magenta)
hm (highlight magenta)
c (cyan)
dc (dark cyan)
hc (highlight cyan)
style : str
one- or two-character string specifying style
b (bold)
u (underline)
**kwargs
other arguments passed to print()
"""
print(
*(
'{}{}{}\033[0m'.format(
COLOR_DICT[color] if color else '',
''.join(STYLE_DICT[char] for char in style),
object
)
for
object
in
objects
),
**kwargs
)
| 5,337,351
|
def test_1_1_FConnectionResetError():
"""
Tests formatting a FConnectionResetError exception with adjusted traceback.
"""
with pytest.raises(Exception) as excinfo:
exc_args = {
'main_message': 'Problem with the construction project.',
'expected_result': 'A door',
'returned_result': 'A window',
'suggested_resolution': 'Call contractor',
}
raise FConnectionResetError(message_args=exc_args,
tb_limit=None,
tb_remove_name='test_1_1_FConnectionResetError')
assert 'Module: python' in str(excinfo.value)
assert 'Name: pytest_pyfunc_call' in str(excinfo.value)
assert 'Line: 183' in str(excinfo.value)
| 5,337,352
|
def allowed_couplings(coupling, flow, free_id, symmetries):
"""Iterator over all the allowed Irreps for free_id in coupling if the
other two couplings are fixed.
"""
from itertools import product
if len(coupling) != 3:
raise ValueError(f'len(coupling) [{len(coupling)}] != 3')
if len(flow) != 3:
raise ValueError(f'len(flow) [{len(flow)}] != 3')
other_ids = [0, 1, 2]
other_ids.remove(free_id)
other_c = [coupling[o] for o in other_ids]
other_f = [flow[o] for o in other_ids]
this_f = flow[free_id]
def fermionic_constraint(oirr, oflow, tflow):
yield sum(oirr) % 2
def U1_constraint(oirr, oflow, tflow):
sign = {True: 1, False: -1}
yield sign[not tflow] * sum(sign[f] * x for x, f in zip(oirr, oflow))
def pg_constraint(oirr, oflow, tflow):
yield oirr[0] ^ oirr[1]
def SU2_constraint(oirr, oflow, tflow):
return range(abs(oirr[0] - oirr[1]), oirr[0] + oirr[1] + 1, 2)
constraint = {
'fermionic': fermionic_constraint,
'U(1)': U1_constraint,
'SU(2)': SU2_constraint,
'seniority': U1_constraint,
'C1': pg_constraint,
'Ci': pg_constraint,
'C2': pg_constraint,
'Cs': pg_constraint,
'D2': pg_constraint,
'C2v': pg_constraint,
'C2h': pg_constraint,
'D2h': pg_constraint
}
for ncoupling in product(*[constraint[s](c, other_f, this_f)
for *c, s in zip(*other_c, symmetries)]):
yield ncoupling
| 5,337,353
|
def poly_iou(poly1, poly2, thresh=None):
"""Compute intersection-over-union for two GDAL/OGR geometries.
Parameters
----------
poly1:
First polygon used in IOU calc.
poly2:
Second polygon used in IOU calc.
thresh: float or None
If not provided (default), returns the float IOU for the two polygons.
If provided, return True if the IOU met this threshold. Otherwise,
False.
Returns
-------
IOU: float or bool
Return the IOU value if `thresh` is None, otherwise boolean if the
threshold value was met.
"""
poly1 = ogr.CreateGeometryFromWkb(poly1)
poly2 = ogr.CreateGeometryFromWkb(poly2)
if not poly1.Intersects(poly2):
return False
intersection_area = poly1.Intersection(poly2).Area()
#intersection_area = intersection.Area()
union_area = poly1.Union(poly2).Area()
#union_area = union.Area()
# If threshold was provided, return if IOU met the threshold
if thresh is not None:
return (intersection_area / union_area) >= thresh
return intersection_area / union_area
| 5,337,354
|
def plotCorrHeatmap(mat, fout):
"""
Correlation heatmap plot for two samples correlation.
"""
#fig, ax = pylab.subplots(
cmap = sns.diverging_palette(250, 15, s=75, l=40, n=11).as_hex()
cmap[int(len(cmap) / 2)] = "#FFFFFF"
cmap = ListedColormap(cmap)
g = sns.clustermap(
mat,
xticklabels=False,
yticklabels=True,
square=True,
center=0,
linewidths=0.0,
cmap=cmap,
figsize=(0.5 * mat.shape[1], 0.5 * mat.shape[1]),
annot=True,
fmt=".3f",
annot_kws={
"size": "3",
'label': "PCC",
},
)
pylab.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pylab.savefig(fout)
| 5,337,355
|
def score_file(filename):
"""Score each line in a file and return the scores."""
# Prepare model.
hparams = create_hparams()
encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
has_inputs = "inputs" in encoders
# Prepare features for feeding into the model.
if has_inputs:
inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.
targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.
if has_inputs:
features = {"inputs": batch_inputs, "targets": batch_targets}
else:
features = {"targets": batch_targets}
# Prepare the model and the graph when model runs on features.
model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
_, losses = model(features)
saver = tf.train.Saver()
with tf.Session() as sess:
# Load weights from checkpoint.
if FLAGS.checkpoint_path is None:
ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
ckpt = ckpts.model_checkpoint_path
else:
ckpt = FLAGS.checkpoint_path
saver.restore(sess, ckpt)
# Run on each line.
with tf.gfile.Open(filename) as f:
lines = f.readlines()
results = []
for line in lines:
tab_split = line.split("\t")
if len(tab_split) > 2:
raise ValueError("Each line must have at most one tab separator.")
if len(tab_split) == 1:
targets = tab_split[0].strip()
else:
targets = tab_split[1].strip()
inputs = tab_split[0].strip()
# Run encoders and append EOS symbol.
targets_numpy = encoders["targets"].encode(
targets) + [text_encoder.EOS_ID]
if has_inputs:
inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
# Prepare the feed.
if has_inputs:
feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
else:
feed = {targets_ph: targets_numpy}
# Get the score.
np_loss = sess.run(losses["training"], feed)
results.append(np_loss)
return results
| 5,337,356
|
def doi_responses():
"""Responses for doi.org requests."""
import responses
from renku.core.commands.providers.dataverse import DATAVERSE_API_PATH, DATAVERSE_VERSION_API
from renku.core.commands.providers.doi import DOI_BASE_URL
with responses.RequestsMock(assert_all_requests_are_fired=False) as response:
def doi_callback(request):
response_url = "https://dataverse.harvard.edu/citation" "?persistentId=doi:10.11588/data/yyxx1122"
if "zenodo" in request.url:
response_url = "https://zenodo.org/record/3363060"
return (
200,
{"Content-Type": "application/json"},
json.dumps(
{
"type": "dataset",
"id": request.url,
"author": [{"family": "Doe", "given": "John"}],
"contributor": [{"contributorType": "ContactPerson", "family": "Doe", "given": "John"}],
"issued": {"date-parts": [[2019]]},
"abstract": "Test Dataset",
"DOI": "10.11588/data/yyxx1122",
"publisher": "heiDATA",
"title": "dataset",
"URL": response_url,
}
),
)
response.add_callback(
method="GET", url=re.compile("{base_url}/.*".format(base_url=DOI_BASE_URL)), callback=doi_callback
)
def version_callback(request):
return (
200,
{"Content-Type": "application/json"},
json.dumps({"status": "OK", "data": {"version": "4.1.3", "build": "abcdefg"}}),
)
base_url = "https://dataverse.harvard.edu"
url_parts = list(urllib.parse.urlparse(base_url))
url_parts[2] = pathlib.posixpath.join(DATAVERSE_API_PATH, DATAVERSE_VERSION_API)
pattern = "{url}.*".format(url=urllib.parse.urlunparse(url_parts))
response.add_callback(method="GET", url=re.compile(pattern), callback=version_callback)
yield response
| 5,337,357
|
def order_result():
"""
Get a specific order
"""
    raise NotImplementedError
| 5,337,358
|
def lat_lng_to_tile_xy(latitude, longitude, level_of_detail):
"""gives you zxy tile coordinate for given latitude, longitude WGS-84 coordinates (in decimal degrees)
"""
x, y = lat_lng_to_pixel_xy(latitude, longitude, level_of_detail)
return pixel_xy_to_tile_xy(x, y)
| 5,337,359
|
def test_arithmetic_simplify_05():
""" test_arithmetic_simplify_05 """
x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
res = arithmetic_simplify_05(x)
expect = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32)
assert np.all(res.asnumpy() == expect)
| 5,337,360
|
def update_inverse_jacobian(previous_inv_jac, dx, df, threshold=0, modify_in_place=True):
"""
Use Broyden method (following Numerical Recipes in C, 9.7) to update inverse Jacobian
    previous_inv_jac is the previous inverse Jacobian (n x n)
dx is delta x for last step (n)
df is delta errors for last step (n)
"""
dot_dx_inv_j = np.dot(dx, previous_inv_jac)
denom = np.dot(dot_dx_inv_j, df)
if abs(threshold) <= 0:
threshold = MIN_DENOM
if abs(denom) < threshold:
return previous_inv_jac, False
if modify_in_place:
previous_inv_jac += np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
result = previous_inv_jac
else:
result = previous_inv_jac + np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
return result, True
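# Usage sketch (hypothetical 2x2 system; MIN_DENOM is a module-level constant
# in the original source):
# inv_j = np.eye(2)
# dx = np.array([0.1, -0.2])
# df = np.array([0.05, -0.1])
# inv_j, ok = update_inverse_jacobian(inv_j, dx, df, modify_in_place=False)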
| 5,337,361
|
def _subsets_recursive(items, size, begin_index=0):
    """Recursive subset enumeration.
Args:
items: the list of items. Must be indexable.
size: the current subset size.
begin_index: the index to begin at.
Yields:
all subsets of "items" of size == "size" starting from
index "begin_index." e.g. subsets([1,2,3], 2) = [[1,2],[1,3],[2,3]]
"""
if size == 0:
yield []
elif size == 1:
for x in items[begin_index:]:
yield [x]
else:
sets = []
        for i in range(begin_index, len(items)-1):
x = items[i]
for y in _subsets_recursive(items, size-1, begin_index=i+1):
y.append(x)
sets.append(y)
for s in sets:
yield s
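# Usage sketch: list(_subsets_recursive([1, 2, 3], 2)) yields the 2-element
# subsets as [[2, 1], [3, 1], [3, 2]] (element order inside each subset is not
# sorted, unlike the docstring's illustrative example).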
| 5,337,362
|
def _ConvertStack(postfix):
"""Convert postfix stack to infix string.
Arguments:
postfix: A stack in postfix notation. The postfix stack will be modified
as elements are being popped from the top.
Raises:
        bigquery_client.BigqueryInvalidQueryError: There are not enough
            arguments for functions/operators.
    Returns:
        A string of the infix representation of the stack.
"""
if not postfix:
raise bigquery_client.BigqueryInvalidQueryError(
'Not enough arguments.', None, None, None)
top = postfix.pop()
if isinstance(top, util.OperatorToken):
args = []
for unused_i in range(top.num_args):
args.append(_ConvertStack(postfix))
args.reverse()
if top.num_args == 1:
return '%s %s' % (str(top), args[0])
else:
return '(%s %s %s)' % (args[0], str(top), args[1])
elif isinstance(top, util.BuiltInFunctionToken):
func_name = str(top)
if func_name in _ZERO_ARGUMENT_FUNCTIONS:
return '%s()' % func_name
elif func_name in _ONE_ARGUMENT_FUNCTIONS:
op = _ConvertStack(postfix)
return '%s(%s)' % (func_name, op)
elif func_name in _TWO_ARGUMENT_FUNCTIONS:
op2 = _ConvertStack(postfix)
op1 = _ConvertStack(postfix)
return '%s(%s, %s)' % (top, op1, op2)
elif func_name in _THREE_ARGUMENT_FUNCTIONS:
op3 = _ConvertStack(postfix)
op2 = _ConvertStack(postfix)
op1 = _ConvertStack(postfix)
return '%s(%s, %s, %s)' % (top, op1, op2, op3)
else:
raise bigquery_client.BigqueryInvalidQueryError(
'Function %s does not exist.' % str(top), None, None, None)
elif isinstance(top, util.AggregationFunctionToken):
num_args = top.num_args
func_name = str(top)
ops = []
for unused_i in range(int(num_args)):
ops.append(_ConvertStack(postfix))
ops.reverse()
if func_name == 'DISTINCTCOUNT':
func_name = 'COUNT'
ops[0] = 'DISTINCT ' + ops[0]
ops = [str(op) for op in ops]
return func_name + '(' + ', '.join(ops) + ')'
elif not isinstance(top, basestring):
return str(top)
else:
return top
| 5,337,363
|
def count_repeats_for_motif(seq, motif, tally, intervals=None):
"""
seq --- plain sequence to search for the repeats (motifs)
motif --- plain sequence of repeat, ex: CGG, AGG
intervals --- 0-based start, 1-based end of Intervals to search motif in
"""
if intervals is None: # use the whole sequence
intervals = [Interval(0, len(seq))]
new_intl = []
for intl in intervals:
cur = seq[intl.start:intl.end]
prev_end = intl.start
found_flag = False
for m in re.finditer(motif, cur):
tally[motif].append(intl.start + m.start())
if m.start() > prev_end:
                # gap interval from prev_end (0-based) to intl.start + m.start() (exclusive end)
new_intl.append(Interval(prev_end, intl.start + m.start()))
prev_end = intl.start + m.end()
found_flag = True
if not found_flag:
new_intl.append(intl)
return new_intl
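
# Hedged usage sketch; Interval is assumed to be a simple (start, end) container
# such as the namedtuple below, and `tally` maps each motif to a list of hit positions.
import re
from collections import defaultdict, namedtuple

Interval = namedtuple("Interval", ["start", "end"])   # stand-in for the real class

tally = defaultdict(list)
gaps = count_repeats_for_motif("AACGGCGGTTAGG", "CGG", tally)
# tally["CGG"] -> [2, 5], the 0-based starts of each non-overlapping CGG hit;
# gaps -> [Interval(0, 2)], the stretch preceding the first hit (the trailing
# stretch after the last hit is not recorded by this helper).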
| 5,337,364
|
def get_oauth2_token(session: requests.Session, username: str, password: str):
"""Hackily get an oauth2 token until I can be bothered to do this correctly"""
params = {
'client_id': OAUTH2_CLIENT_ID,
'response_type': 'code',
'access_type': 'offline',
'redirect_uri': OAUTH2_REDIRECT_URI,
}
r1 = session.get(f'{LOGIN_URL}/oauth2/auth', params=params)
email_regex = (
r'^\s*(\w+(?:(?:-\w+)|(?:\.\w+)|(?:\+\w+))*\@'
r'[A-Za-z0-9]+(?:(?:\.|-)[A-Za-z0-9]+)*\.[A-Za-z0-9][A-Za-z0-9]+)\s*$'
)
clean_username = re.sub(email_regex, r'\1', username)
etr = etree.HTML(r1.text)
post_data = {
i.attrib['name']: i.attrib['value']
for i in etr.xpath("//form[@id = 'frmsignin']//input")
if 'value' in i.keys()
}
post_data['username'] = clean_username
post_data['password'] = password
r2 = session.post(f'{LOGIN_URL}/oauth2/g_authenticate', data=post_data, allow_redirects=False)
code = parse_qs(urlparse(r2.headers['Location']).query)['code'][0]
r3 = session.post(
f'{LOGIN_URL}/oauth2/token',
data={
'code': code,
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
'redirect_uri': OAUTH2_REDIRECT_URI,
'grant_type': 'authorization_code',
}, auth=(OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET)
)
oauth_token = r3.json()
try:
session.headers.update({'Authorization': 'Bearer ' + oauth_token['access_token']})
except KeyError:
# TODO: make this better
raise GeAuthError(f'Failed to get a token: {oauth_token}')
return oauth_token
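
# Hedged usage sketch; LOGIN_URL, OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET and
# OAUTH2_REDIRECT_URI are module-level constants not shown here, and GeAuthError
# is this module's auth exception.
import requests

session = requests.Session()
try:
    token = get_oauth2_token(session, "user@example.com", "correct horse battery staple")
except GeAuthError as exc:
    print(f"login failed: {exc}")
else:
    # the session now carries the Bearer header, so authenticated requests can reuse it
    print(token.get("expires_in"))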
| 5,337,365
|
def main():
""" call zopeedit as a lib
"""
args = sys.argv
input_file=''
if '--version' in args or '-v' in args:
credits = ('Zope External Editor %s\n'
'By atReal\n'
'http://www.atreal.net') % __version__
messageDialog(credits)
sys.exit(0)
if '--help' in args or '-h' in args:
# Open the VERSION file for reading.
try:
f=open(os.path.join(system_path,'docs','README.txt'), 'r')
except IOError:
# zopeedit is not properly installed : try uninstalled path
f=open(os.path.join(system_path,'..','..','README.txt'), 'r')
README = f.readlines()
f.close()
messageScrolledText(README)
sys.exit(0)
if len(sys.argv)>=2:
input_file = sys.argv[1]
try:
ExternalEditor(input_file).launch()
except (KeyboardInterrupt, SystemExit):
pass
except:
fatalError(sys.exc_info()[1])
else:
ExternalEditor().editConfig()
| 5,337,366
|
def press_level(pressure, heights, plevels, no_time=False):
"""
Calculates geopotential heights at a given pressure level
Parameters
----------
pressure : numpy.ndarray
The 3-D pressure field (assumes time dimension, turn off
with `no_time=True`)
heights : numpy.ndarray
The 3-D array of gridbox heights
plevels : list
List of pressure levels to interpolate to
    no_time : bool, optional
        Set to `True` to indicate lack of time dimension (default False).
Returns
-------
press_height : numpy.ndarray
The geopotential heights at the specified pressure levels
"""
    if no_time is False:
        try:
            tlen, zlen, ylen, xlen = pressure.shape
            press_height = np.zeros((tlen, ylen, xlen))
            for t in range(0, tlen):
                for x in range(0, xlen):
                    for y in range(0, ylen):
                        press_height[t, y, x] =\
                            log_interpolate_1d(plevels, pressure[t, :, y, x],
                                               heights[:, y, x])
        except ValueError:
            print("Error in dimensions, trying with no_time=True")
            no_time = True
    # Not an `elif`: if the 4-D unpacking above failed, fall through and retry
    # here without a time dimension.
    if no_time is True:
        try:
            zlen, ylen, xlen = pressure.shape
            press_height = np.zeros((ylen, xlen))
            for x in range(0, xlen):
                for y in range(0, ylen):
                    press_height[y, x] =\
                        log_interpolate_1d(plevels, pressure[:, y, x],
                                           heights[:, y, x])
        except ValueError:
            print("Error in dimensions")
    return press_height
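
# Hedged usage sketch, assuming log_interpolate_1d is MetPy's
# metpy.interpolate.log_interpolate_1d (or an equivalent log-pressure interpolator).
import numpy as np

nt, nz, ny, nx = 2, 10, 4, 5
pressure = np.broadcast_to(np.linspace(1000.0, 100.0, nz)[None, :, None, None],
                           (nt, nz, ny, nx)).copy()
heights = np.broadcast_to(np.linspace(0.0, 16000.0, nz)[:, None, None],
                          (nz, ny, nx)).copy()
z500 = press_level(pressure, heights, [500.0])   # geopotential height field, shape (nt, ny, nx)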
| 5,337,367
|
def setup_ks_indexer():
"""
KS_INDEXER
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cluster = api.get_cluster(cmx.cluster_name)
service_type = "KS_INDEXER"
if cdh.get_service_type(service_type) is None:
print "> %s" % service_type
service_name = "ks_indexer"
print "Create %s service" % service_name
cluster.create_service(service_name, service_type)
service = cluster.get_service(service_name)
hosts = management.get_hosts()
# Service-Wide
service.update_config(cdh.dependencies_for(service))
# Pick 1 host to deploy Lily HBase Indexer Default Group
cdh.create_service_role(service, "HBASE_INDEXER", random.choice(hosts))
# HBase Service-Wide configuration
hbase = cdh.get_service_type('HBASE')
hbase.stop()
hbase.update_config({"hbase_enable_indexing": True, "hbase_enable_replication": True})
hbase.start()
# This service is started later on
# check.status_for_command("Starting Lily HBase Indexer Service", service.start())
| 5,337,368
|
def test_get_result(mock_send_message):
"""Test Dmaap's class method."""
OranDmaap.get_result()
mock_send_message.assert_called_once_with('GET',
'Get result from previous request',
(f"{BASE_URL}/events/A1-POLICY-AGENT-WRITE/users/policy-agent?timeout=15000&limit=100"))
| 5,337,369
|
def valtoindex(thearray, thevalue, evenspacing=True):
"""
Parameters
----------
thearray: array-like
An ordered list of values (does not need to be equally spaced)
thevalue: float
The value to search for in the array
evenspacing: boolean, optional
If True (default), assume data is evenly spaced for faster calculation.
Returns
-------
closestidx: int
        The index of the sample in thearray that is closest to thevalue
"""
if evenspacing:
limval = np.max([thearray[0], np.min([thearray[-1], thevalue])])
return int(np.round((limval - thearray[0]) / (thearray[1] - thearray[0]), 0))
else:
return (np.abs(thearray - thevalue)).argmin()
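
# Hedged usage sketch: map a target value to the nearest sample index.
import numpy as np

t = np.linspace(0.0, 10.0, 101)                     # evenly spaced, step 0.1
print(valtoindex(t, 3.14))                          # 31, since t[31] == 3.1 is nearest
uneven = np.array([0.0, 1.0, 4.0, 9.0])
print(valtoindex(uneven, 3.0, evenspacing=False))   # 2, since 4.0 is nearest to 3.0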
| 5,337,370
|
def get_converter(result_format, converters=None):
"""
Gets an converter, returns the class and a content-type.
"""
converters = get_default_converters() if converters is None else converters
if result_format in converters:
return converters.get(result_format)
else:
raise ValueError('No converter found for type {}'.format(result_format))
| 5,337,371
|
def _deduce_ConstantArray(
self: ast.ConstantArray, ctx: DeduceCtx) -> ConcreteType: # pytype: disable=wrong-arg-types
"""Deduces the concrete type of a ConstantArray AST node."""
# We permit constant arrays to drop annotations for numbers as a convenience
# (before we have unifying type inference) by allowing constant arrays to have
# a leading type annotation. If they don't have a leading type annotation,
  # just fall back to normal array type inference; if we encounter a number
  # without a type annotation, we'll flag an error per usual.
if self.type_ is None:
return _deduce_Array(self, ctx)
# Determine the element type that corresponds to the annotation and go mark
# any un-typed numbers in the constant array as having that type.
concrete_type = deduce(self.type_, ctx)
if not isinstance(concrete_type, ArrayType):
raise TypeInferenceError(
self.type_.span, concrete_type,
f'Annotated type for array literal must be an array type; got {concrete_type.get_debug_type_name()} {self.type_}'
)
element_type = concrete_type.get_element_type()
for member in self.members:
assert ast.is_constant(member)
if isinstance(member, ast.Number) and not member.type_:
ctx.type_info[member] = element_type
_check_bitwidth(member, element_type)
# Use the base class to check all members are compatible.
_deduce_Array(self, ctx)
return concrete_type
| 5,337,372
|
def _to_bool(s):
"""Convert a value into a CSV bool."""
if s.lower() == 'true':
return True
elif s.lower() == 'false':
return False
else:
raise ValueError('String cannot be converted to bool')
| 5,337,373
|
def register(*args, cache_default=True):
"""
Registers function for further caching its calls and restoring source.
Example:
``` python
@register
def make_ohe_pclass(df):
...
```
"""
def __register(func):
# if source_utils.source_is_saved(func) and not source_utils.matches_cache(func):
if func.__name__ + '_fc' in cache.cached_objs() and source_utils.get_source(func) != cache.load_obj(func.__name__ + '_fc').source:
raise NameError("A function with the same name is already registered")
if func.__name__ + '_fc' in cache.cached_objs():
return cache.load_obj(func.__name__ + '_fc')
else:
functor = FeatureConstructor(func, cache_default)
cache.cache_obj(functor, functor.__name__ + '_fc')
return functor
if args:
function = args[0]
return __register(function)
else:
return __register
| 5,337,374
|
def make_absolute_paths(content):
"""Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs."""
overrides = [
{
'root': settings.MEDIA_ROOT,
'url': settings.MEDIA_URL,
},
{
'root': settings.STATIC_ROOT,
'url': settings.STATIC_URL,
}
]
has_scheme = re.compile(r'^[^:/]+://')
for x in overrides:
if not x['url'] or has_scheme.match(x['url']):
continue
if not x['root'].endswith('/'):
x['root'] += '/'
occur_pattern = '''(["|']{0}.*?["|'])'''
occurences = re.findall(occur_pattern.format(x['url']), content)
occurences = list(set(occurences)) # Remove dups
for occur in occurences:
content = content.replace(occur, '"%s"' % (
pathname2fileurl(x['root']) +
occur[1 + len(x['url']): -1]))
return content
| 5,337,375
|
def social_distancing_start_40():
"""
Real Name: b'social distancing start 40'
Original Eqn: b'31'
Units: b'Day'
Limits: (None, None)
Type: constant
b''
"""
return 31
| 5,337,376
|
def _initialize_project(stolos_url, project):
"""
Initialize a Stolos project with the needed files, using the response from
the server.
"""
config.update_project_config(
{
"project": {
"uuid": project["uuid"],
"stack": project["stack"]["slug"] if project["stack"] else None,
"public-url": project["routing_config"]["domain"],
"subdomains": project["routing_config"]["config"]["subdomains"],
},
"user": {"default-api-server": stolos_url},
"server": {"host": project["server"]["host"]},
}
)
with open(".stolos/ca.pem", "w+") as ca_pem:
ca_pem.write(project["server"]["docker_ca_pem"])
os.chmod(".stolos/ca.pem", 0o600)
if project["stack"]:
with open("docker-compose.yaml", "w+") as docker_compose:
docker_compose.write(project["stack"]["docker_compose_file"])
with open(".stolos/default.prf", "w+") as default_profile:
default_profile.write(
"""
# Default unison profile for UNIX systems
include common
"""
)
with open(".stolos/win.prf", "w+") as windows_profile:
windows_profile.write(
"""
# Unison profile for Windows systems
perms = 0
include common
"""
)
with open(".stolos/common", "w+") as common:
common.write(
string.Template(
"""
# Roots of the synchronization
root = .
root = ssh://stolos@${STOLOS_SERVER}//mnt/stolos/${STOLOS_PROJECT_ID}
ui = text
addversionno = true
prefer = newer
fastcheck = true
ignore = Path .stolos
silent = true
# Enable this option and set it to 'all' or 'verbose' for debugging
# debug = verbose
"""
).substitute(
STOLOS_PROJECT_ID=project["uuid"],
STOLOS_SERVER=project["server"]["host"],
)
)
| 5,337,377
|
def parse_argv(argv):
"""
parse argv
"""
psr = argparse.ArgumentParser(prog=argv[0])
psr.add_argument("--users-csv", default="users.csv",
help=("a csv file describing directories to monitor, which at minimum must have a column 'notebooks'."
" they are typically notebooks/ directories of students (default: users.csv)."))
psr.add_argument("--dest", default="sync_dest",
help=("the directory into which directories are copied (default: ./sync_dest)"))
psr.add_argument("--log-dir", default="sync_logs",
help=("the directory into which rsync logs are stored (default: ./sync_logs)"))
psr.add_argument("--db", default="sync.sqlite",
help=("sqlite3 database to which all file histories are stored (default: sync.sqlite)"))
psr.add_argument("--repeat", default=-1, type=int,
help=("the number of times directories are copied."
" after this number of times, the program quits."
" negative numbers indicate forever (default: -1)."))
psr.add_argument("--overhead", default=0.05, type=float,
help=("the maximum CPU usage of this program (default: 0.05)."
" if this value is 0.05, it makes sure the program uses 5%% worth of a single core."
" it adjusts the overhead by adjusting the interval between two consecutive copies."))
psr.add_argument("--min-sleep", default=300.0, type=float,
help=("the minimum interval between two consecutive copies."))
psr.add_argument("--no-sudo", action="store_true",
help=("if given, sudo is not used"))
psr.add_argument("--replay-log", action="store_true",
help=("mainly used for debugging. if given, it does not look"
" at the actual user files. it instead looks at the log directory (--log-dir) and"
" and reconstruct the database solely based on the log."))
opt = psr.parse_args(argv[1:])
return opt
| 5,337,378
|
def get_token_auth_header(params):
"""
Obtains the Access Token from the Authorization Header
"""
auth = get_token(params)
parts = auth.split()
if parts[0].lower() != "bearer":
raise AuthError({"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401)
if len(parts) == 1:
raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401)
if len(parts) > 2:
raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401)
token = parts[1]
return token
| 5,337,379
|
def create():
"""Create a backup based on SQLAlchemy mapped classes"""
# create backup files
alchemy = AlchemyDumpsDatabase()
data = alchemy.get_data()
backup = Backup()
for class_name in data.keys():
name = backup.get_name(class_name)
full_path = backup.target.create_file(name, data[class_name])
rows = len(alchemy.parse_data(data[class_name]))
if full_path:
print('==> {} rows from {} saved as {}'.format(rows,
class_name,
full_path))
else:
print('==> Error creating {} at {}'.format(name,
backup.target.path))
backup.close_ftp()
| 5,337,380
|
def bam(fastq_name, samfile, samtools, index_type): # type: (str, str, str, str) -> None
"""Use SAMtools to convert a SAM to BAM file"""
logging.info("FASTQ %s: Converting SAM to BAM", fastq_name)
bam_start = time.time() # type: float
# Make BAM name
bamfile = samfile.replace('sam', 'bam') # type: str
    view_cmd = (samtools, 'view -bhS', samfile, '>', bamfile) # type: Tuple[str, ...]
index_arg = 'c' if index_type == 'csi' else 'b' # type: str
index_cmd = '%(samtools)s index -%(arg)s %(bamfile)s' % {'samtools': samtools, 'arg': index_arg, 'bamfile': bamfile} # type: str
logging.info("FASTQ %s: Writing BAM to %s", fastq_name, bamfile)
subprocess.call(' '.join(view_cmd), shell=True)
gc.collect()
logging.info("FASTQ %s: Indexing BAM file", fastq_name)
logging.debug("FASTQ %s: Making %s indices", fastq_name, index_type)
subprocess.call(index_cmd, shell=True)
gc.collect()
logging.debug("FASTQ %s: Converting SAM to BAM took %s seconds", fastq_name, round(time.time() - bam_start, 3))
logging.debug("FASTQ %s: Removing SAM file, leaving only BAM file", fastq_name)
os.remove(samfile)
| 5,337,381
|
async def test_error_fetching_new_version_bad_json(opp, aioclient_mock):
"""Test we handle json error while fetching new version."""
aioclient_mock.get(updater.UPDATER_URL, text="not json")
with patch(
"openpeerpower.helpers.system_info.async_get_system_info",
return_value={"fake": "bla"},
), pytest.raises(UpdateFailed):
await updater.get_newest_version(opp)
| 5,337,382
|
def check_hu(base: str, add: Optional[str] = None) -> str:
"""Check country specific VAT-Id"""
weights = (9, 7, 3, 1, 9, 7, 3)
s = sum(int(c) * w for (c, w) in zip(base, weights))
r = s % 10
if r == 0:
return '0'
else:
return str(10 - r)
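
# Hedged worked example: for base digits '1234567' the weighted sum is
# 1*9 + 2*7 + 3*3 + 4*1 + 5*9 + 6*7 + 7*3 = 144, 144 % 10 = 4, so the check digit is 10 - 4 = 6.
assert check_hu('1234567') == '6'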
| 5,337,383
|
def store_data(file, rewards, arms, agent_list):
"""
Store the rewards, arms, and agents to a pickled file.
Parameters
----------
file : string
File name.
rewards : array
Evolution of cumulative rewards along training.
arms : array
Evolution of the number of arms along training.
agent_list : list
List with the trained agents.
Returns
-------
None.
"""
with open(file, 'wb') as f:
pickle.dump({'rewards': rewards, 'arms': arms, 'agents': agent_list}, f)
| 5,337,384
|
def _eval(squad_ds: SquadDataset):
"""Perform evaluation of a saved model."""
# Perform the evaluation.
sen, doc = _chunked_eval(squad_ds)
# Evaluation is finished. Compute the final 1@N statistic and record it.
print("index size=%s questions=%s" % (
len(squad_ds.master_index), len(squad_ds.queries)))
print("[sentence] mrr=%0.3f r@1=%0.3f r@5=%0.3f r@10=%0.3f" % (
sen.mrr, sen.recall(1), sen.recall(5), sen.recall(10)))
print("[document] mrr=%0.3f r@1=%0.3f r@5=%0.3f r@10=%0.3f" % (
doc.mrr, doc.recall(1), doc.recall(5), doc.recall(10)))
| 5,337,385
|
def scrape_headline(news_link):
"""
function to scrape the headlines from a simple news website
:return: a dictionary with key as html link of the source and
value as the text in the headline of the news in the html link
"""
#Headlines
#URL = 'https://lite.cnn.com/en'
page = requests.get(news_link)
soup = BeautifulSoup(page.content, 'html.parser')
daily_news_headline_dict = myDict()
for link in soup.find_all('a'):
key = 'https://lite.cnn.com'+link.get('href')
text = cleantext.create_cleanerDoc(link.get_text('href'))
daily_news_headline_dict.add(key, text)
#print(daily_news_headline_dict)
return daily_news_headline_dict
| 5,337,386
|
def read_xml(img_path):
"""Read bounding box from xml
Args:
img_path: path to image
Return list of bounding boxes
"""
anno_path = '.'.join(img_path.split('.')[:-1]) + '.xml'
tree = ET.ElementTree(file=anno_path)
root = tree.getroot()
ObjectSet = root.findall('object')
bboxes = []
    for obj in ObjectSet:
        box = obj.find('bndbox')
x1 = int(box.find('xmin').text)
y1 = int(box.find('ymin').text)
x2 = int(box.find('xmax').text)
y2 = int(box.find('ymax').text)
bb = [x1, y1, x2, y2]
bboxes.append(bb)
return bboxes
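
# Hedged usage sketch: a Pascal VOC-style annotation is expected next to the image,
# e.g. the hypothetical 'images/dog.jpg' pairs with 'images/dog.xml'.
for x1, y1, x2, y2 in read_xml('images/dog.jpg'):
    print('bbox:', x1, y1, x2, y2)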
| 5,337,387
|
def YumInstall(vm):
""" Installs SysBench 0.5 for Rhel/CentOS. We have to build from source!"""
vm.Install('build_tools')
vm.InstallPackages('bzr')
vm.InstallPackages('mysql mysql-server mysql-devel')
vm.RemoteCommand('cd ~ && bzr branch lp:sysbench')
vm.RemoteCommand(('cd ~/sysbench && ./autogen.sh &&'
' ./configure --prefix=%s --mandir=%s/share/man &&'
' make') % (INSTALL_DIR, INSTALL_DIR))
vm.RemoteCommand('cd ~/sysbench && sudo make install')
vm.RemoteCommand('sudo mkdir %s/share/doc/sysbench/tests/db -p' %
INSTALL_DIR)
vm.RemoteCommand('sudo cp ~/sysbench/sysbench/tests/db/*'
' %s/share/doc/sysbench/tests/db/' % INSTALL_DIR)
vm.RemoteCommand('echo "export PATH=$PATH:%s/bin" >> ~/.bashrc && '
'source ~/.bashrc' % INSTALL_DIR)
# Cleanup the source code enlisthment from bzr, we don't need it anymore.
vm.RemoteCommand('cd ~ && rm -fr ./sysbench')
| 5,337,388
|
def a2funcoff(*args):
"""a2funcoff(ea_t ea, char buf) -> char"""
return _idaapi.a2funcoff(*args)
| 5,337,389
|
def str(obj):
"""This function can be used as a default `__str__()` in user-defined classes.
Classes using this should provide an `__info__()` method, otherwise the `default_info()`
function defined in this module is used.
"""
info_func = getattr(type(obj), "__info__", default_info)
return "{}({})".format(type(obj).__name__, info_func(obj))
| 5,337,390
|
def isin_strategy(
pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
strategy: Optional[SearchStrategy] = None,
*,
allowed_values: Sequence[Any],
) -> SearchStrategy:
"""Strategy to generate values within a finite set.
:param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
:param strategy: an optional hypothesis strategy. If specified, the
pandas dtype strategy will be chained onto this strategy.
:param allowed_values: set of allowable values.
:returns: ``hypothesis`` strategy
"""
if strategy is None:
return pandas_dtype_strategy(
pandera_dtype, st.sampled_from(allowed_values)
)
return strategy.filter(lambda x: x in allowed_values)
| 5,337,391
|
def test_joboffer_edit_with_all_fields_empty(publisher_client, user_company_profile):
"""
Test the validation of empty fields
"""
client = publisher_client
company = user_company_profile.company
offer = JobOfferFactory.create(company=company)
target_url = reverse(EDIT_URL, kwargs={'slug': offer.slug})
assert JobOffer.objects.count() == 1
response = client.post(target_url, {'company': company.id})
assert response.status_code == 200
found_errors = response.context_data['form'].errors
MANDATORY_FIELD_ERROR = 'Este campo es obligatorio.'
expected_mandatory_fields = [
'title', 'experience', 'remoteness', 'hiring_type', 'salary', 'description'
]
for field_name in expected_mandatory_fields:
assert found_errors[field_name][0] == MANDATORY_FIELD_ERROR
| 5,337,392
|
def test_year_insert(session, year_data):
"""Year 001: Insert multiple years into Years table and verify data."""
years_list = range(*year_data['90_to_94'])
for yr in years_list:
record = Years(yr=yr)
session.add(record)
years = session.query(Years.yr).all()
years_from_db = [x[0] for x in years]
assert set(years_from_db) & set(years_list) == set(years_list)
| 5,337,393
|
def listen_to_related_object_post_save(sender, instance, created, **kwargs):
"""
Receiver function to create agenda items. It is connected to the signal
django.db.models.signals.post_save during app loading.
The agenda_item_update_information container may have fields like type,
parent_id, comment, duration, weight or skip_autoupdate.
Do not run caching and autoupdate if the instance has a key
skip_autoupdate in the agenda_item_update_information container.
"""
if hasattr(instance, "get_agenda_title_information"):
if created:
attrs = {}
for attr in ("type", "parent_id", "comment", "duration", "weight"):
if instance.agenda_item_update_information.get(attr):
attrs[attr] = instance.agenda_item_update_information.get(attr)
Item.objects.create(content_object=instance, **attrs)
# If the object is created, the related_object has to be sent again.
if not instance.agenda_item_update_information.get("skip_autoupdate"):
inform_changed_data(instance)
elif not instance.agenda_item_update_information.get("skip_autoupdate"):
# If the object has changed, then also the agenda item has to be sent.
inform_changed_data(instance.agenda_item)
| 5,337,394
|
def arrToDict(arr):
"""
Turn an array into a dictionary where each value maps to '1'
used for membership testing.
"""
return dict((x, 1) for x in arr)
| 5,337,395
|
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink sensor component."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]['ignore']
# test default state of sensor loaded from config
config_sensor = hass.states.get('sensor.test')
assert config_sensor
assert config_sensor.state == 'unknown'
assert config_sensor.attributes['unit_of_measurement'] == '°C'
# test event for config sensor
event_callback({
'id': 'test',
'sensor': 'temperature',
'value': 1,
'unit': '°C',
})
await hass.async_block_till_done()
assert hass.states.get('sensor.test').state == '1'
# test event for new unconfigured sensor
event_callback({
'id': 'test2',
'sensor': 'temperature',
'value': 0,
'unit': '°C',
})
await hass.async_block_till_done()
# test state of new sensor
new_sensor = hass.states.get('sensor.test2')
assert new_sensor
assert new_sensor.state == '0'
assert new_sensor.attributes['unit_of_measurement'] == '°C'
assert new_sensor.attributes['icon'] == 'mdi:thermometer'
| 5,337,396
|
def getPercentGC(img, nbpix) :
"""Determines if a page is in grayscale or colour mode."""
if img.mode != "RGB" :
img = img.convert("RGB")
gray = 0
for (r, g, b) in img.getdata() :
if not (r == g == b) :
            # optimize: if a single pixel is not gray, the whole page is colored.
return { "G" : 0.0, "C" : 100.0 }
return { "G" : 100.0, "C" : 0.0 }
| 5,337,397
|
def rotate(angle: float, iaxis: int) -> ndarray:
"""
Calculate the 3x3 rotation matrix generated by a rotation
of a specified angle about a specified axis. This rotation
is thought of as rotating the coordinate system.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/rotate_c.html
:param angle: Angle of rotation (radians).
:param iaxis: Axis of rotation X=1, Y=2, Z=3.
:return: Resulting rotation matrix
"""
angle = ctypes.c_double(angle)
iaxis = ctypes.c_int(iaxis)
mout = stypes.empty_double_matrix()
libspice.rotate_c(angle, iaxis, mout)
return stypes.c_matrix_to_numpy(mout)
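
# Hedged usage sketch: rotating the coordinate frame by +90 degrees about Z (axis 3)
# expresses the old +X direction as -Y in the new frame.
import numpy as np

m = rotate(np.pi / 2.0, 3)
print(np.round(m @ np.array([1.0, 0.0, 0.0]), 6))    # [ 0. -1.  0.]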
| 5,337,398
|
def build_categories(semanticGroups):
"""
Returns a list of ontobio categories or None
Parameters
----------
semanticGroups : string
a space delimited collection of semanticGroups
"""
if semanticGroups is None:
return None
categories = []
for semanticGroup in semanticGroups.split(' '):
try:
categories += UMLS_to_monarch(semanticGroup.upper())
        except Exception:
            pass
if len(categories) == 0:
return None
else:
return categories
| 5,337,399
|