| content (string) | id (int64) |
|---|---|
def calculate_EHF_severity(
T,
T_p95_file=None,
EHF_p85_file=None,
T_p95_period=None,
T_p95_dim=None,
EHF_p85_period=None,
EHF_p85_dim=None,
rolling_dim="time",
T_name="t_ref",
):
"""
Calculate the severity of the Excess Heat Factor index, defined as:
EHF_severity = EHF / EHF_p85
where "_p85" denotes the 85th percentile of all positive values using all days in the
year and the Excess Heat Factor (EHF) is defined as:
EHF = max(0, EHI_sig) * max(1, EHI_accl)
with
EHI_sig = (T_i + T_i+1 + T_i+2) / 3 - T_p95
EHI_accl = (T_i + T_i+1 + T_i+2) / 3 - (T_i-1 + ... + T_i-30) / 30
T is the daily mean temperature (commonly calculated as the mean of the daily minimum and
maximum temperatures, where the daily maximum typically precedes the daily minimum and both
observations relate to the same 9am-to-9am 24-h period) and T_p95 is the 95th percentile of T
using all days in the year.
Parameters
----------
T : xarray DataArray
Array of daily mean temperature
T_p95_file : str, optional
Path to a file with the 95th percentiles of T using all days in the year. This should be
relative to the project directory. If not provided, T_p95_period and T_p95_dim must be
provided
EHF_p85_file : str, optional
Path to a file with the 85th percentiles of positive EHF using all days in the year. This
should be relative to the project directory. If not provided, EHF_p85_period and
EHF_p85_dim must be provided
T_p95_period : list of str, optional
Size 2 iterable containing strings indicating the start and end dates of the period over
which to calculate T_p95. Only used if T_p95_file is None
T_p95_dim : str or list of str, optional
The dimension(s) over which to calculate T_p95. Only used if T_p95_file is None
EHF_p85_period : list of str, optional
Size 2 iterable containing strings indicating the start and end dates of the period over
which to calculate EHF_p85. Only used if EHF_p85_file is None
EHF_p85_dim : str or list of str, optional
The dimension(s) over which to calculate EHF_p85. Only used if EHF_p85_file is None
rolling_dim : str, optional
The dimension over which to compute the rolling averages in the definition of EHF
T_name : str, optional
The name of the temperature variable in T
References
----------
Nairn et al. 2015: https://doi.org/10.3390/ijerph120100227
"""
if EHF_p85_file is None:
if (EHF_p85_period is not None) and (EHF_p85_dim is not None):
calculate_EHF_p85 = True
else:
raise ValueError(
(
"Must provide either thresholds of the 85th percentile of EHF (E_p85) or details "
"of the climatological period and dimensions to use to calculate these thresholds "
"(EHF_p85_period and EHF_p85_dim)"
)
)
else:
EHF_p85_file = PROJECT_DIR / EHF_p85_file
EHF_p85 = xr.open_zarr(EHF_p85_file)
calculate_EHF_p85 = False
EHF = calculate_EHF(T, T_p95_file, T_p95_period, T_p95_dim, rolling_dim, T_name)
if calculate_EHF_p85:
EHF_p85 = calculate_percentile_thresholds(
EHF.where(EHF > 0), 0.85, EHF_p85_period, EHF_p85_dim, frequency=None
)
EHF_sev = EHF / EHF_p85
EHF_sev = EHF_sev.rename({"ehf": "ehf_severity"})
EHF_sev["ehf_severity"].attrs["long_name"] = "Severity of the Excess Heat Factor"
EHF_sev["ehf_severity"].attrs["standard_name"] = "excess_heat_factor_severity"
EHF_sev["ehf_severity"].attrs["units"] = "-"
return EHF_sev
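# Illustrative sketch (not part of the original module): the EHF formula from the docstring
# above, applied with plain numpy to a toy daily-mean temperature series. For brevity T_p95 is
# a single scalar percentile here, whereas the real implementation works on xarray objects and
# climatological threshold files.
import numpy as np

def ehf_sketch(T_daily):
    T_daily = np.asarray(T_daily, dtype=float)
    T_p95 = np.percentile(T_daily, 95)
    ehf = np.full(T_daily.size, np.nan)
    for i in range(30, T_daily.size - 2):
        T3 = T_daily[i:i + 3].mean()              # 3-day mean starting at day i
        ehi_sig = T3 - T_p95                      # significance index
        ehi_accl = T3 - T_daily[i - 30:i].mean()  # acclimatisation index over the previous 30 days
        ehf[i] = max(0.0, ehi_sig) * max(1.0, ehi_accl)
    return ehf

# e.g. ehf_sketch(20 + 5 * np.random.rand(120)) returns one EHF value per day
# (NaN where the 30-day history or the 3-day window is unavailable).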
| 24,100
|
def parse_args():
"""Parse input arguments
Return:
parsed arguments structure
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='type')
subparsers.required = True
parser_file = subparsers.add_parser('file')
parser_file.add_argument(
"-i", "--input", help="Input file name.", required=True)
parser_file.add_argument(
"-d", "--database", help="Token database.", required=True)
parser_file.add_argument(
"-o", "--output", help="Output file name.", required=True)
parser_file = subparsers.add_parser('serial')
parser_file.add_argument(
"-i", "--input", help="Input serial port name.", required=True)
parser_file.add_argument(
"-d", "--database", help="Token database.", required=True)
parser_file.add_argument(
"-o", "--output", help="Output file name. Write to stdout and to file.")
return parser.parse_args()
| 24,101
|
def arrange_train_data(keypoints: Dict, beg_end_times: List[Tuple], fps: float, MAX_PERSONS: int) -> Dict:
"""
Arrange the keypoint data into per-frame entries, mark each frame with whether a gesture is present based on the labelled time ranges, and pad with dummy keypoints so every clip has MAX_PERSONS persons.
"""
data = {}
for key in keypoints.keys():
persons = list(keypoints[key].keys())
persons.remove("start_frame")
persons.remove("end_frame")
count_persons = len(persons)
gestures_xy = []
start_frame, end_frame = keypoints[key]["start_frame"], keypoints[key]["end_frame"]
start_time_ms = start_frame/fps*1000
end_time_ms = end_frame/fps*1000
for per_ind in range(1, count_persons+1):
per_str = str(per_ind)
gestures_xy.append(keypoints[key][per_str]["person_keypoints"])
# dummy to always have MAX_PERSONS (training to be done in matrices (Required_keypoints x Max_persons x window))
dummy = generate_dummy_keypoints()
dummy_frames_list = []
for _ in range(start_frame, end_frame+1):
dummy_frames_list.append(dummy)
for i in range(MAX_PERSONS - count_persons):
gestures_xy.append(dummy_frames_list)
frame_division_gestures = list(zip(*gestures_xy))
frames_dict = {}
for i, frame in enumerate(frame_division_gestures):
frames_dict[str(start_frame + i)] = {
"frames": frame,
"gesture": False
}
data[key] = frames_dict
for be_time in beg_end_times:
if be_time[0] > end_time_ms or be_time[1] < start_time_ms:
continue
# Clamp the gesture interval to this clip's time range (covers every overlap case,
# including when the interval boundaries coincide with the clip boundaries)
bt = max(be_time[0], start_time_ms)
et = min(be_time[1], end_time_ms)
# Now using bt and et, find the frame indices with gesture
begin_at_frame_ind = int(bt*fps/1000+0.5)
no_of_frames = int((et-bt)*fps/1000+0.5)
end_at_frame_ind = begin_at_frame_ind + no_of_frames
if end_at_frame_ind > int((list(data[key].keys()))[-1]):
end_at_frame_ind = int((list(data[key].keys()))[-1])
for frame_no in range(begin_at_frame_ind, end_at_frame_ind+1):
data[key][str(frame_no)]["gesture"] = True
return data
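# Small self-contained illustration (made-up values) of the clamping and ms->frame rounding
# used above: a labelled gesture interval is intersected with the clip's time range and then
# converted to frame indices with round-half-up.
fps = 25.0
start_time_ms, end_time_ms = 1000.0, 5000.0      # clip covers 1 s .. 5 s
gesture_ms = (800.0, 2600.0)                     # labelled gesture starts before the clip

bt = max(gesture_ms[0], start_time_ms)           # 1000.0
et = min(gesture_ms[1], end_time_ms)             # 2600.0
begin_at_frame_ind = int(bt * fps / 1000 + 0.5)  # 25
no_of_frames = int((et - bt) * fps / 1000 + 0.5) # 40
end_at_frame_ind = begin_at_frame_ind + no_of_frames  # 65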
| 24,102
|
def length(list):
"""Return the number of items in the list."""
if list == ():
return 0
else:
_, tail = list
return 1 + length(tail)
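# Usage sketch: `length` expects a list encoded as nested (head, tail) pairs terminated by the
# empty tuple, e.g. [1, 2, 3] is represented as (1, (2, (3, ()))).
assert length(()) == 0
assert length((1, (2, (3, ())))) == 3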
| 24,103
|
def buzz(x):
"""
Takes an input `x` and checks to see if x is a
number, and if so, also a multiple of 5.
If it is both, return 'Buzz'.
Otherwise, return the input.
"""
return 'Buzz' if isinstance(x, Number) and x % 5 == 0 else x
| 24,104
|
def _tuple_of_big_endian_int(bit_groups: Iterable[Any]) -> Tuple[int, ...]:
"""Returns the big-endian integers specified by groups of bits.
Args:
bit_groups: Groups of descending bits, each specifying a big endian
integer with the 1s bit at the end.
Returns:
A tuple containing the integer for each group.
"""
return tuple(value.big_endian_bits_to_int(bits) for bits in bit_groups)
| 24,105
|
def gene_signature_wizard_main(loomfile=None, signaturefile=None):
"""
Parameters
----------
loomfile : str, optional
Path to the loom file to augment with a gene signature (Default value = None)
signaturefile : str, optional
Path to a headerless, single-column file listing the signature genes (Default value = None)
Returns
-------
"""
print(loomfile)
if loomfile is None:
loomfile = click.prompt(
"Loom file that you would like to augment with a gene signature: ")
while not (os.path.isfile(loomfile) and loomfile.endswith('.loom')):
loomfile = click.prompt(
"Not a loom file. Please select loom file that you would like to augment with cnv/segmentation data: "
)
if signaturefile is None:
signaturefile = click.prompt(
"Gene list that you would like to add as a gene signature (headerless file, single column): "
)
signature = np.genfromtxt(signaturefile, dtype=str)
with loompy.connect(loomfile, validate=False) as loom:
proceed = 'y'
if len(np.intersect1d(signature, loom.ra['gene'])) < len(signature):
proceed = click.prompt(
"The following genes ({} in total) in the given signature\n{}\nare not in the loom file. Would you like to proceed with those that are ({} genes in total)?"
.format(len(np.setdiff1d(signature, loom.ra['gene'])),
", ".join(np.setdiff1d(signature, loom.ra['gene'])),
len(np.intersect1d(signature, loom.ra['gene']))),
type=click.Choice(['n', 'y']),
default='y')
if proceed == 'y':
signature_name = click.prompt(
"What would you like to name this signature?",
default=signaturefile.split('/')[-1].split('.')[0::-1][0])
loom.ra[signature_name] = np.isin(loom.ra['gene'], signature)
| 24,106
|
def create_cert(
cert_store: CertificateStore,
key_store: KeyStore,
cert_minter: CertificateMinter,
cert_path: str,
issuer_key_name: str,
issuer_key_password: str,
issuer_key_no_password: bool = False,
signing_key_name: str = None,
signing_key_password: str = None,
signing_key_no_password: bool = False,
country: str = None,
state: str = None,
locality: str = None,
organization: str = None,
common_name: str = None,
duration_days: int = 365,
store: bool = True):
"""Creates a new certificate
This function handles the creation of a new certificate. The general notes
below are based on the cert_path:
``/server/myserver`` & ``/client/myclient``
A self-signed certificate in which the issuer and the subject are the same.
No signing_key_name is needed.
``/root/myroot``
A root CA in which the issuer and the subject are the same.
No signing_key_name is needed.
``/root/myroot/intermediate/myintermediate``
An intermediate CA with the issuer being ``/root/myroot``.
A signing_key_name is needed
``/root/myroot/intermediate/myintermediate/server/myserver``
A server certificate with the issuer being ``/root/myroot/intermediate/myintermediate``.
A signing_key_name is needed
``/root/myroot/server/myserver``
A server certificate with the issuer being ``/root/myroot``.
A signing_key_name is needed
:param cert_store: the certificate store in which to create the cert
as well as holds any issuer certs
:type cert_store: CertificateStore
:param key_store: the key store holding the various keys
:type key_store: KeyStore
:param cert_minter: the cert minter
:type cert_minter: CertificateMinter
:param cert_path: the path of the certificate
:type cert_path: str
:param issuer_key_name: the key used to sign the certificate
:type issuer_key_name: str
:param issuer_key_password: the issuer key's password
:type issuer_key_password: str
:param issuer_key_no_password: indicates that the issuer key doesn't have a password
, defaults to False
:type issuer_key_no_password: bool, optional
:param signing_key_name: the name of the signing key, defaults to None
:type signing_key_name: str, optional
:param signing_key_password: the password for the signing key
, defaults to None
:type signing_key_password: str, optional
:param signing_key_no_password: indicates that the signing key doesn't have a password
, defaults to False
:type signing_key_no_password: bool, optional
:param country: the 2-letter country code used in the cert subject, defaults to None
:type country: str, optional
:param state: used in the cert subject, defaults to None
:type state: str, optional
:param locality: used in the cert subject, defaults to None
:type locality: str, optional
:param organization: used in the cert subject, defaults to None
:type organization: str, optional
:param common_name: used in the cert subject, defaults to None
:type common_name: str, optional
:param duration_days: the lifespan of the cert in days, defaults to 365
:type duration_days: int, optional
:param store: if set to False, the certificate will just be sent to stdout and not stored
, defaults to True
:type store: bool, optional
"""
if not issuer_key_name:
exit("No issuer key name was provided")
if not key_store.exists(issuer_key_name):
exit("The requested issuer key ({}) does not exist".format(issuer_key_name))
if duration_days <= 0:
exit("Duration days must be positive")
if not cert_path:
exit("No value was provided for cert_path")
cert_details = get_certificate_details(cert_path)
if cert_store.exists(cert_details):
exit("The certificate already exists")
if not common_name:
common_name = cert_details.name
subject = x509CertificateNameAttributes(
common_name=common_name,
country_name=country,
state_name=state,
locality_name=locality,
organization_name=organization
)
signing_key: PrivateKey = None
csr = None
if isinstance(cert_details.certificate_type, x509RootCertificateType):
# Root certificate
issuer = subject
elif ((type(cert_details.certificate_type) in [x509ClientCertificateType, x509ServerCertificateType])
and (cert_details.issuer is None)):
# Self-signed server/client cert
issuer = subject
else:
# An intermediate cert or signed client/server cert
if not signing_key_name:
exit("No signing key name was provided")
if not key_store.exists(signing_key_name):
exit("The requested signing key ({}) does not exist".format(
signing_key_name))
# Check the issuer's certificate
if type(cert_details.issuer.certificate_type) not in [
x509RootCertificateType, x509IntermediateCertificateType]:
exit("The certificate's issuer must be a Root or Intermediate CA")
if not cert_store.exists(cert_details.issuer):
exit("The issuing certificate {} doesn't exist.".format(
cert_details.issuer.name))
issuer_cert = cert_store.get(CertificateStoreEntryImpl(
details=cert_details.issuer,
certificate=None
))
issuer = issuer_cert.certificate.subject
# Get the signing key (for use with a CSR)
if signing_key_no_password:
signing_password = None
elif signing_key_password:
signing_password = signing_key_password
else:
signing_password = prompt_for_password(
prompt="Enter password for key {}: ".format(signing_key_name), validate=False)
try:
signing_key = key_store.get(key_name=signing_key_name,
password=signing_password)
except ValueError as e:
exit("Failed to access the signing key ({}) - {}".format(issuer_key_name,
e))
csr = x509SigningRequest.generate(
private_key=signing_key,
subject=subject
)
# Load the Issuer Key
if issuer_key_no_password:
issuer_password = None
elif issuer_key_password:
issuer_password = issuer_key_password
else:
issuer_password = prompt_for_password(
prompt="Enter password for key {}: ".format(issuer_key_name), validate=False)
try:
issuer_key: PrivateKey = key_store.get(key_name=issuer_key_name,
password=issuer_password)
except ValueError as e:
exit("Failed to access the issuer key ({}) - {}".format(issuer_key_name,
e))
if not csr:
csr = x509SigningRequest.generate(
private_key=issuer_key,
subject=issuer
)
cert_args = x509CertificateMinter.prepare_mint_args(
certificate_type=cert_details.certificate_type,
issuer_key=issuer_key,
issuer=issuer,
csr=csr,
duration_days=duration_days
)
certificate: Certificate = cert_minter.mint(**cert_args)
if store:
entry = CertificateStoreEntryImpl(details=cert_details,
certificate=certificate)
cert_store.add(entry)
else:
print(str(certificate.public_bytes(), 'utf-8'))
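# Hypothetical usage sketch: the store, key store and minter objects below are assumed to be
# constructed elsewhere in this package (their constructors are not shown here), and the key
# name "myroot-key" is made up. The call itself only uses the documented parameters of
# create_cert.
# cert_store = ...   # a CertificateStore instance
# key_store = ...    # a KeyStore instance holding "myroot-key"
# cert_minter = ...  # an x509CertificateMinter instance
#
# create_cert(
#     cert_store=cert_store,
#     key_store=key_store,
#     cert_minter=cert_minter,
#     cert_path="/root/myroot",        # self-issued root CA, no signing key needed
#     issuer_key_name="myroot-key",
#     issuer_key_password=None,
#     issuer_key_no_password=True,
#     common_name="My Root CA",
#     duration_days=3650,
# )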
| 24,107
|
def read_ss(fn):
"""Read a sequence of serverStatus JSON documents, one per line"""
result = collections.defaultdict(list)
for i, line in enumerate(open(fn)):
j = json.loads(line)
_parse(j, result, ('serverStatus',))
if i>0 and i%100==0:
yield result
result.clear()
yield result
| 24,108
|
def make_simple_server(service, handler,
host="localhost",
port=9090):
"""Return a server of type TSimple Server.
Based on thriftpy's make_server(), but return TSimpleServer instead of
TThreadedServer.
Since TSimpleServer's constructor doesn't accept kwargs, some arguments of
make_server can't be used here. By default:
client_timeout: None
protocol: TBinaryProtocolFactory
transport: TBufferedTransportFactory
"""
processor = TProcessor(service, handler)
if host and port:
server_socket = TServerSocket(
host=host, port=port, client_timeout=None)
else:
raise ValueError("Either host/port or unix_socket must be provided.")
server = TSimpleServer(processor, server_socket)
return server
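# Hypothetical usage sketch (assumes thriftpy/thriftpy2 is available and that a
# "pingpong.thrift" definition with a PingPong service exists; both are assumptions, not part
# of the original module):
# import thriftpy2
# pingpong = thriftpy2.load("pingpong.thrift", module_name="pingpong_thrift")
#
# class Handler:
#     def ping(self):
#         return "pong"
#
# server = make_simple_server(pingpong.PingPong, Handler(), host="127.0.0.1", port=9090)
# server.serve()   # TSimpleServer blocks and handles one connection at a time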
| 24,109
|
def test_random_horizontal_valid_prob_c():
"""
Test RandomHorizontalFlip op with c_transforms: valid non-default input, expect to pass
"""
logger.info("test_random_horizontal_valid_prob_c")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_horizontal_op = c_vision.RandomHorizontalFlip(0.8)
data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_horizontal_op, input_columns=["image"])
filename = "random_horizontal_01_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
| 24,110
|
def morph(clm1, clm2, t, lmax):
"""Interpolate linearly the two sets of sph harm. coeeficients."""
clm = (1 - t) * clm1 + t * clm2
grid_reco = clm.expand(lmax=lmax) # cut "high frequency" components
agrid_reco = grid_reco.to_array()
pts = []
for i, longs in enumerate(agrid_reco):
ilat = grid_reco.lats()[i]
for j, value in enumerate(longs):
ilong = grid_reco.lons()[j]
th = np.deg2rad(90 - ilat)
ph = np.deg2rad(ilong)
r = value + rbias
p = np.array([sin(th) * cos(ph), sin(th) * sin(ph), cos(th)]) * r
pts.append(p)
return pts
| 24,111
|
def cal_tsne_embeds_src_tgt(Xs, ys, Xt, yt, n_components=2, text=None, save_path=None, n_samples=1000, names=None):
"""
Plot embedding for both source and target domain using tSNE
:param Xs: source-domain features
:param ys: source-domain labels
:param Xt: target-domain features
:param yt: target-domain labels
:param n_components: number of tSNE components
:param text: annotation text passed to the plotting function
:param save_path: path where the figure is saved
:return:
"""
Xs = Xs[: min(len(Xs), n_samples)]
ys = ys[: min(len(ys), n_samples)]
Xt = Xt[: min(len(Xt), n_samples)]
yt = yt[: min(len(yt), n_samples)]
X = np.concatenate((Xs, Xt), axis=0)
tsne = manifold.TSNE(n_components=n_components)
X = tsne.fit_transform(X)
Xs = X[: len(Xs)]
Xt = X[len(Xs):]
plot_embedding_src_tgt(Xs, ys, Xt, yt, text, save_path, names=names)
| 24,112
|
def read_avg_residuemap(infile):
""" Read sequence definition from PSN avg file, returning sequence Map
:param infile: File handle pointing to WORDOM avgpsn output file
:return: Returns an internal.map.Map object mapping the .pdb
residues to WORDOM id's from "Seq" section of the avgpsn-file
"""
m_start = re.compile(r"^\*\*\* Seq \*\*\*")
m_end = re.compile(r"^============")
m_entry = re.compile(r"^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$")
residuemap = OrderedDict()
reading = False
for line in infile:
if reading:
# Stop reading if end of interaction strength section
if m_end.search(line):
break
else:
if m_entry.search(line):
[num, resname, normfact] = line.split()
residuemap[resname] = int(num)
# Start reading when header found
elif m_start.search(line):
reading = True
return residuemap
| 24,113
|
def test_metrics_detailed_get_401(orderBy, app, client, session):
"""Tests API/metrics/<mapper_id> endpoint with invalid data."""
rv = client.get(f"/metrics/1?from=2021-10-10&to=2021-10-31&orderBy={orderBy}")
assert rv.status_code == 401
| 24,114
|
def get_arguments():
"""Run argparse and return arguments."""
try:
# Use argparse to handle devices as arguments
description = 'htop like application for PostgreSQL replication ' + \
'activity monitoring.'
parser = argparse.ArgumentParser(description=description)
# -c / --connectstring
parser.add_argument(
'-c',
'--connectstring',
dest='connstr',
default='',
help='Connectstring (default: "").',
metavar='CONNECTSTRING')
# -r / --role
parser.add_argument(
'-r',
'--role',
dest='role',
default=None,
help='Role (default: none).',
metavar='ROLE')
# -C / --no-color
parser.add_argument(
'-C',
'--no-color',
dest='nocolor',
action='store_true',
help="Disable color usage.",)
# --debug
parser.add_argument(
'-x',
'--debug',
dest='debug',
action='store_true',
help="Enable debug mode for traceback tracking.")
args = parser.parse_args()
except (argparse.ArgumentError, argparse.ArgumentTypeError) as err:
print('pg_activity: error: %s' % str(err))
print('Try "pg_activity --help" for more information.')
sys.exit(1)
return args
| 24,115
|
def test_post_and_get(good_dataset_uuid, sample_stats, good_post_data, \
headers, good_job_url, good_dataset_url):
"""
POST stats with a given job_uuid and check that GET retrieves those stats.
"""
response = requests.post(
url=good_dataset_url,
headers=headers,
json=good_post_data
)
response_dict = json.loads(response.text)
assert 'status' in response_dict and response_dict['status'] == 'success', \
"POST failed!"
response = requests.get(good_job_url)
response_dict = json.loads(response.text)
assert 'status' in response_dict and response_dict['status'] == 'success', \
"GET failed!"
job_dict = response_dict['job_dict']
assert good_dataset_uuid in job_dict, \
"Dataset with UUID {} not found!".format(good_dataset_uuid)
dataset_dict = job_dict[good_dataset_uuid]
assert 'dataset_stats' in dataset_dict and dataset_dict['dataset_stats'] == sample_stats, \
"Correct stats not received!"
assert 'round_num' in dataset_dict and dataset_dict['round_num'] == 1, \
"Correct round not received!"
| 24,116
|
def schedule_remove(retval=None):
"""
schedule(retval=stackless.current) -- switch to the next runnable tasklet.
The return value for this call is retval, with the current
tasklet as default.
schedule_remove(retval=stackless.current) -- ditto, and remove self.
"""
_scheduler_remove(getcurrent())
r = schedule(retval)
return r
| 24,117
|
def check_region(read, pair, region):
"""
Determine whether either read of the pair maps to a specific region of the scaffold
"""
if region is False:
return True
for mapping in read, pair:
if mapping is False:
continue
start, length = int(mapping[3]), len(mapping[9])
r = [start, start + length - 1]
if get_overlap(r, region) > 0:
return True
return False
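# `get_overlap` is defined elsewhere in this module; a minimal version consistent with how
# check_region uses it (the length of the intersection of two inclusive [start, end] intervals)
# might look like this. This is an assumption for illustration, not the original helper.
def get_overlap_sketch(a, b):
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

# e.g. get_overlap_sketch([100, 250], [200, 400]) == 51, so a read starting at position 100
# with a 151 bp alignment would count as mapping to region [200, 400].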
| 24,118
|
def test_modulo_formatting(
assert_errors,
parse_ast_tree,
code,
prefix,
default_options,
):
"""Testing that the strings violate the rules."""
tree = parse_ast_tree('x = {0}"{1}"'.format(prefix, code))
visitor = WrongStringVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ModuloStringFormatViolation], (
FormattedStringViolation,
))
| 24,119
|
def create_template_AL_AR(phi, diff_coef, adv_coef, bc_top_type, bc_bot_type,
dt, dx, N):
""" creates 2 matrices for transport equation AL and AR
Args:
phi (array): vector of porosity (phi) or 1 - phi
diff_coef (float): diffusion coefficient
adv_coef (float): advection coefficient
bc_top_type (string): type of boundary condition
bc_bot_type (string): type of boundary condition
dt (float): time step
dx (float): spatial step
N (int): size of mesh
Returns:
array: AL and AR matrices
"""
# TODO: error source somewhere in non constant
# porosity profile. Maybe we also need d phi/dx
s = phi * diff_coef * dt / dx / dx
q = phi * adv_coef * dt / dx
AL = spdiags(
[-s / 2 - q / 4, phi + s, -s / 2 + q / 4], [-1, 0, 1],
N,
N,
format='csr') # .toarray()
AR = spdiags(
[s / 2 + q / 4, phi - s, s / 2 - q / 4], [-1, 0, 1], N, N,
format='csr') # .toarray()
if bc_top_type in ['dirichlet', 'constant']:
AL[0, 0] = phi[0]
AL[0, 1] = 0
AR[0, 0] = phi[0]
AR[0, 1] = 0
elif bc_top_type in ['neumann', 'flux']:
AL[0,0] = phi[0] + s[0]  # + adv_coef * s[0] * dx / diff_coef - q[0] * adv_coef * dx / diff_coef / 2
AL[0, 1] = -s[0]
AR[0,0] = phi[0] - s[0]  # - adv_coef * s[0] * dx / diff_coef + q[0] * adv_coef * dx / diff_coef / 2
AR[0, 1] = s[0]
else:
print('\nABORT!!!: Incorrect top boundary condition type...')
sys.exit()
if bc_bot_type in ['dirichlet', 'constant']:
AL[-1, -1] = phi[-1]
AL[-1, -2] = 0
AR[-1, -1] = phi[-1]
AR[-1, -2] = 0
elif bc_bot_type in ['neumann', 'flux']:
AL[-1, -1] = phi[-1] + s[-1]
AL[-1, -2] = -s[-1] # / 2 - s[-1] / 2
AR[-1, -1] = phi[-1] - s[-1]
AR[-1, -2] = s[-1] # / 2 + s[-1] / 2
else:
print('\nABORT!!!: Incorrect bottom boundary condition type...')
sys.exit()
return AL, AR
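# Usage sketch (assumed parameter values): build the transport matrices for a uniform porosity
# profile and advance a concentration profile by one implicit time step. The right-hand side
# shown here omits the extra boundary-value terms a full solver would add, so it only
# illustrates how AL and AR are used together.
import numpy as np
from scipy.sparse.linalg import spsolve

N = 50
phi = np.full(N, 0.8)                       # uniform porosity profile
AL, AR = create_template_AL_AR(phi, diff_coef=1e-4, adv_coef=1e-6,
                               bc_top_type='dirichlet', bc_bot_type='flux',
                               dt=0.1, dx=0.5, N=N)
C = np.zeros(N)
C[0] = 1.0                                  # fixed concentration at the top boundary
C = spsolve(AL, AR.dot(C))                  # one Crank-Nicolson style time step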
| 24,120
|
def run():
"""
run_watchdog(timeout) -- run tasklets until they are all
done, or timeout instructions have passed. Tasklets must
provide cooperative schedule() calls.
If the timeout is met, the function returns.
The calling tasklet is put aside while the tasklets are running.
It is inserted back after the function stops, right before the
tasklet that caused a timeout, if any.
If an exception occurs, it will be passed to the main tasklet.
Please note that the 'timeout' feature is not yet implemented
"""
curr = getcurrent()
_run_calls.append(curr)
_scheduler_remove(curr)
try:
schedule()
assert not _squeue
finally:
_scheduler_append(curr)
| 24,121
|
def millisecond_to_clocktime(value):
"""Convert a millisecond time to internal GStreamer time."""
return value * Gst.MSECOND
| 24,122
|
def SE2_exp(v):
"""
SE2 matrix exponential
"""
theta, x, y = v
if np.abs(theta) < 1e-6:
A = 1 - theta**2/6 + theta**4/120
B = theta/2 - theta**3/24 + theta**5/720
else:
A = np.sin(theta)/theta
B = (1 - np.cos(theta))/theta
V = np.array([[A, -B], [B, A]])
R = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
u = np.array([[x, y]]).T
return np.block([
[R, V.dot(u)],
[0, 0, 1]])
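# Quick numpy-only sanity checks (illustrative, not part of the original module): a zero
# rotation reduces to a pure translation, and the zero twist maps to the identity.
import numpy as np

T = SE2_exp([0.0, 1.0, 2.0])
assert np.allclose(T, np.array([[1, 0, 1],
                                [0, 1, 2],
                                [0, 0, 1]]))
assert np.allclose(SE2_exp([0.0, 0.0, 0.0]), np.eye(3))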
| 24,123
|
def go_register_toolchains(go_version=DEFAULT_VERSION):
"""See /go/toolchains.rst#go-register-toolchains for full documentation."""
print("bblu go_register_toolchains 11111111111111111111111111111111111111111")
if "go_sdk" not in native.existing_rules():
if go_version in SDK_REPOSITORIES:
go_download_sdk(
name = "go_sdk",
sdks = SDK_REPOSITORIES[go_version],
)
elif go_version == "host":
go_host_sdk(
name = "go_sdk"
)
else:
fail("Unknown go version {}".format(go_version))
# Use the final dictionaries to register all the toolchains
for toolchain in _toolchains:
name = _label_prefix + toolchain["name"]
native.register_toolchains(name)
| 24,124
|
def _DotControlFlowGraphsFromBytecodeToQueue(
bytecode: str, queue: multiprocessing.Queue
) -> None:
"""Process a bytecode and submit the dot source or the exception."""
try:
queue.put(list(opt_util.DotControlFlowGraphsFromBytecode(bytecode)))
except Exception as e:
queue.put(DotControlFlowGraphsFromBytecodeError(bytecode, e))
| 24,125
|
def evaluate(scores, targets, queries, train_occ, k_list, ci=False,
pivotal=True, song_occ=None, metrics_file=None):
"""
Evaluate continuations induced by `scores` given target `targets`.
The arguments are lists, and each item in a list corresponds to one run
of the playlist continuation model, typically the run of one fold.
Note that the arrays are playlists x songs and not vice-versa.
Parameters
----------
scores: list of numpy arrays of shape (num_playlists, num_songs)
Probability or score for each (playlist, song) pair.
targets: list of sparse csr_matrix's of shape (num_playlists, num_songs)
Binary sparse array indicating target playlist continuations.
queries: list of sparse csr_matrix's of shape (num playlists, num_songs)
Binary sparse array indicating playlist queries.
train_occ: list of numpy arrays of shape (num_songs, )
Song occurrences when the model used to predict `scores` was trained.
k_list: list
Each item is a list length.
ci: bool
Compute basic Bootstrap confidence intervals if True.
pivotal: bool
Compute "pivotal" intervals if True, else "percentile" intervals.
song_occ: list
Test on songs observed `song_occ` times during model training.
metrics_file: str
File path to save ranks and summarized metrics.
"""
print('\nEvaluating playlist continuations...')
# mask `scores` corresponding to playlist queries
for i in range(len(scores)):
mask_array_items(scores[i], queries[i])
# evaluate predictions given target continuations
rank, rr, avgp, rec = [], [], [], {K: [] for K in k_list}
for i in range(len(scores)):
rank_i, rr_i, avgp_i, rec_i = compute_metrics(scores[i], targets[i], k_list)
rank += rank_i
rr += rr_i
avgp += avgp_i
for K in k_list:
rec[K] += rec_i[K]
metrics = summarize_metrics(rank, rr, avgp, rec, k_list, ci=ci, pivotal=pivotal)
if metrics_file is not None:
np.savetxt(metrics_file + '_all_songs.rank', rank)
np.savetxt(metrics_file + '_all_songs.rr', rr)
np.savetxt(metrics_file + '_all_songs.rec', metrics[3:])
# conduct cold-start analysis
if song_occ is not None:
# for all but last, keep songs observed exactly song_obs times
for occ in song_occ[:-1]:
rank, rr, avgp, rec = [], [], [], {K: [] for K in k_list}
for i in range(len(scores)):
print('\nKeep songs observed {} at training...'.format(occ))
target_i = copy.deepcopy(targets[i])
mask_array_cols(target_i, np.where(train_occ[i] != occ)[0])
rank_i, rr_i, avgp_i, rec_i = compute_metrics(scores[i], target_i, k_list)
rank += rank_i
rr += rr_i
avgp += avgp_i
for K in k_list:
rec[K] += rec_i[K]
summarize_metrics(rank, rr, avgp, rec, k_list, ci=ci, pivotal=pivotal)
# for the last, keep songs observed song_occ+ times, included
occ = song_occ[-1]
rank, rr, avgp, rec = [], [], [], {K: [] for K in k_list}
for i in range(len(scores)):
print('\nKeep songs observed {}+ (incl.) times at training...'.format(occ))
target_i = copy.deepcopy(targets[i])
mask_array_cols(target_i, np.where(train_occ[i] < occ)[0])
rank_i, rr_i, avgp_i, rec_i = compute_metrics(scores[i], target_i, k_list)
rank += rank_i
rr += rr_i
avgp += avgp_i
for K in k_list:
rec[K] += rec_i[K]
summarize_metrics(rank, rr, avgp, rec, k_list, ci=ci, pivotal=pivotal)
# for the last, keep songs observed song_occ- times, not included
occ = song_occ[-1]
rank, rr, avgp, rec = [], [], [], {K: [] for K in k_list}
for i in range(len(scores)):
print('\nKeep songs observed {}- (not incl.) times at training...'.format(occ))
target_i = copy.deepcopy(targets[i])
mask_array_cols(target_i, np.where(train_occ[i] >= occ)[0])
rank_i, rr_i, avgp_i, rec_i = compute_metrics(scores[i], target_i, k_list)
rank += rank_i
rr += rr_i
avgp += avgp_i
for K in k_list:
rec[K] += rec_i[K]
summarize_metrics(rank, rr, avgp, rec, k_list, ci=ci, pivotal=pivotal)
# for comparability between hybrid and pure collaborative systems,
# compare results for songs with 1+ occurrences
rank, rr, avgp, rec = [], [], [], {K: [] for K in k_list}
for i in range(len(scores)):
print('\nKeep songs observed 1+ (incl.) times at training...')
target_i = copy.deepcopy(targets[i])
mask_array_cols(target_i, np.where(train_occ[i] == 0)[0])
rank_i, rr_i, avgp_i, rec_i = compute_metrics(scores[i], target_i, k_list)
rank += rank_i
rr += rr_i
avgp += avgp_i
for K in k_list:
rec[K] += rec_i[K]
metrics = summarize_metrics(rank, rr, avgp, rec, k_list, ci=ci, pivotal=pivotal)
if metrics_file is not None:
np.savetxt(metrics_file + '_inset_songs.rank', rank)
np.savetxt(metrics_file + '_inset_songs.rr', rr)
np.savetxt(metrics_file + '_inset_songs.rec', metrics[3:])
| 24,126
|
def skip_on_hw(func):
"""Test decorator for skipping tests which should not be run on HW."""
def decorator(f):
def decorated(self, *args, **kwargs):
if has_ci_ipus():
self.skipTest("Skipping test on HW")
return f(self, *args, **kwargs)
return decorated
return decorator(func)
| 24,127
|
def rxns4tag(tag, rdict=None, ver='1.7', wd=None):
"""
Get a list of all reactions with a given p/l tag
Notes
-----
- This function is useful, but the update to GEOS-Chem flexchem (in >v11)
will make it redundant and therefore it is not being maintained.
"""
# --- get reaction dictionary
if isinstance(rdict, type(None)):
rdict = rxn_dict_from_smvlog(wd, ver=ver)
# --- Caveats -
# to adapt for long line errors in fortran written output
errs = ['LO3_36'] # + ['LO3_87']
cerrs = ['RD95'] # + ['LR48']
# To account for reaction where not all channels result in Ox loss
errs += ['RD48']
cerrs += ['LO3_87']
if any([(tag == i) for i in errs]):
tag = cerrs[errs.index(tag)]
# -- loop reactions, if tag in reaction return reaction
rxns = []
for n, rxn in enumerate(rdict.values()):
expanded_rxn_str = [i.split('+') for i in rxn]
expanded_rxn_str = [
item for sublist in expanded_rxn_str for item in sublist]
# ( Issue) Why endswith? Restore to use if contains any tag
# if any( [ (i.endswith(tag) ) for i in rxn]):
# This is because otherwise 'LR10' would be read as 'LR100'
# if any( [tag in i for i in rxn]): # <= This will lead to false +ve
# However, the Fortran print statement errs for ( LO3_87 )
if any([i.endswith(tag) for i in expanded_rxn_str]):
rxns.append([list(rdict.keys())[n]] + rxn)
return rxns
| 24,128
|
def update_daily_report(site_id, result_date, disease_id):
"""
Update daily testing activity report (without subtotals per demographic)
- called when a new individual test result is registered
@param site_id: the test station site ID
@param result_date: the result date of the test
@param disease_id: the disease ID
"""
db = current.db
s3db = current.s3db
table = s3db.disease_case_diagnostics
# Count records grouped by result
query = (table.site_id == site_id) & \
(table.disease_id == disease_id) & \
(table.result_date == result_date) & \
(table.deleted == False)
cnt = table.id.count()
rows = db(query).select(table.result,
cnt,
groupby = table.result,
)
total = positive = 0
for row in rows:
num = row[cnt]
total += num
if row.disease_case_diagnostics.result == "POS":
positive += num
# Look up the daily report
rtable = s3db.disease_testing_report
query = (rtable.site_id == site_id) & \
(rtable.disease_id == disease_id) & \
(rtable.date == result_date) & \
(rtable.deleted == False)
report = db(query).select(rtable.id,
rtable.tests_total,
rtable.tests_positive,
limitby = (0, 1),
).first()
if report:
# Update report if actual numbers are greater
if report.tests_total < total or report.tests_positive < positive:
report.update_record(tests_total = total,
tests_positive = positive,
)
else:
# Create report
report = {"site_id": site_id,
"disease_id": disease_id,
"date": result_date,
"tests_total": total,
"tests_positive": positive,
}
report_id = rtable.insert(**report)
if report_id:
current.auth.s3_set_record_owner(rtable, report_id)
report["id"] = report_id
s3db.onaccept(rtable, report, method="create")
| 24,129
|
def DateTime_GetCurrentYear(*args, **kwargs):
"""DateTime_GetCurrentYear(int cal=Gregorian) -> int"""
return _misc_.DateTime_GetCurrentYear(*args, **kwargs)
| 24,130
|
def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):
"""Decay learning rate by a f# model_out_path ="./model/W_epoch_{}.pth".format(epoch)
# torch.save(model_W, model_out_path) actor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.8**(epoch // lr_decay_epoch))
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
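# Usage sketch (assumes PyTorch; the function only touches optimizer.param_groups, so any
# optimizer exposing that attribute works):
# import torch
# model = torch.nn.Linear(10, 2)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# for epoch in range(30):
#     optimizer = exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10)
#     ...  # train one epoch; the LR drops to 0.008 at epoch 10, 0.0064 at epoch 20, etc.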
| 24,131
|
def wait(duration):
"""
Waits the duration, in seconds, you specify.
Args:
duration (:any:`DoubleValue`): time, in seconds, this function waits. You may specify fractions of seconds.
Returns:
float: actual seconds waited.
This wait is non-blocking, so other tasks will run while this wait executes.
"""
init_time = DoubleValue(0)
init_time.value = seqtime()
while seqtime() - init_time.value < duration.value:
nivs_yield()
init_time.value = seqtime()
return init_time.value
| 24,132
|
def test_cp_self_2(test_dir: str) -> None:
"""cp a_file -> . should fail (same file)"""
a_file = os.path.join(test_dir, "a_file")
sys.argv = ["pycp", a_file, test_dir]
with pytest.raises(SystemExit):
pycp_main()
| 24,133
|
def yesterday_handler(update: Update, context: CallbackContext):
"""
Diary content upload handler. Uploads incoming messages to db as a note for yesterday.
"""
# get user timezone
user_timezone = Dao.get_user_timezone(update.effective_user)
# calculate time at user's
user_datetime = update.effective_message.date.astimezone(user_timezone)
# get yesterday
user_yesterday = user_datetime - datetime.timedelta(days=1)
# save message content
save_message_content_by_date(update, context, user_yesterday)
return ConversationHandler.END
| 24,134
|
def get_lens_pos(sequence):
"""
Calculate positions of lenses.
Returns
-------
List of (index, position) tuples, one for each lens in the sequence.
"""
d = 0.0
d_ = []
for idx, ope in enumerate(sequence):
if ope.is_lens():
d_.append((idx, d))
else:
d += ope.get_travel_length()
return d_
| 24,135
|
def test_product_with_rvs1():
"""
Test product_distribution() with an rvs specification.
"""
d = dit.example_dists.Xor()
d_iid = dit.product_distribution(d, [[0,1], [2]])
d_truth = dit.uniform_distribution(3, ['01'])
d_truth = dit.modify_outcomes(d_truth, lambda x: ''.join(x))
assert d_truth.is_approx_equal(d_iid)
| 24,136
|
def disorientation(orientation_matrix, orientation_matrix1, crystal_structure=None):
"""Compute the disorientation another crystal orientation.
Considering all the possible crystal symmetries, the disorientation
is defined as the combination of the minimum misorientation angle
and the misorientation axis lying in the fundamental zone, which
can be used to bring the two lattices into coincidence.
.. note::
Both orientations are supposed to have the same symmetry. This is not
necessarily the case in multi-phase materials.
:param orientation_matrix: the orientation matrix of the first crystal.
:param orientation_matrix1: the orientation matrix of the other crystal
orientation from which to compute the misorientation angle.
:param crystal_structure: an instance of the `Symmetry` class
describing the crystal symmetry, triclinic (no symmetry) by
default.
:returns tuple: the misorientation angle in radians, the axis as a
numpy vector (crystal coordinates), the axis as a numpy vector
(sample coordinates).
"""
the_angle = np.pi
symmetries = crystal_structure.symmetry_operators()
(gA, gB) = (orientation_matrix, orientation_matrix1) # nicknames
for (g1, g2) in [(gA, gB), (gB, gA)]:
for j in range(symmetries.shape[0]):
sym_j = symmetries[j]
oj = np.dot(sym_j, g1) # the crystal symmetry operator is left applied
for i in range(symmetries.shape[0]):
sym_i = symmetries[i]
oi = np.dot(sym_i, g2)
delta = np.dot(oi, oj.T)
mis_angle = misorientation_angle_from_delta(delta)
if mis_angle < the_angle:
# now compute the misorientation axis, should check if it lies in the fundamental zone
mis_axis = misorientation_axis_from_delta(delta)
the_angle = mis_angle
the_axis = mis_axis
the_axis_xyz = np.dot(oi.T, the_axis)
return the_angle, the_axis, the_axis_xyz
| 24,137
|
def response_minify(response):
"""
minify html response to decrease site traffic
"""
if not DEBUG and response.content_type == u'text/html; charset=utf-8':
response.set_data(
minify(response.get_data(as_text=True))
)
return response
return response
| 24,138
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list,1)}
features = []
for (ex_index, example) in enumerate(examples): # example : InputExample obj
text_spc_tokens = example.text_a
aspect_tokens = example.text_b
sentence_label = example.sentence_label
aspect_label = example.aspect_label
polaritiylist = example.polarity # marks aspect vs. non-aspect tokens
tokens = []
labels = []
polarities = []
valid = []
label_mask = []
text_spc_tokens.extend(['[SEP]'])
text_spc_tokens.extend(aspect_tokens)
enum_tokens = text_spc_tokens # text_spc_tokens: sentence tokens + [SEP] + aspect tokens; note this is not yet the canonical BERT-SPC format
sentence_label.extend(['[SEP]'])
# sentence_label.extend(['O'])
sentence_label.extend(aspect_label)
label_lists = sentence_label
# if len(enum_tokens) != len(label_lists):
# print(enum_tokens)
# print(label_lists)
for i, word in enumerate(enum_tokens): # SPC tokens; enum_tokens is not standard BERT-SPC yet, special tokens are added below to make it conform
token = tokenizer.tokenize(word) # tokenize the word with the BERT tokenizer
tokens.extend(token)
label_1 = label_lists[i]
polarity_1 = polaritiylist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
polarities.append(polarity_1)
valid.append(1)
label_mask.append(1)
else: # reached when BERT splits the word into further sub-tokens
valid.append(0)
if len(tokens) >= max_seq_length - 1: # why trim off only the last 2 positions and not more?
tokens = tokens[0:(max_seq_length - 2)]
polarities = polarities[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0,1)
label_mask.insert(0,1)
label_ids.append(label_map["[CLS]"])
# label_ids.append(label_map["O"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i: # this check seems redundant
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]") # 得到标准的bert spc格式
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
# label_ids.append(label_map["O"])
input_ids_spc = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids_spc)
label_mask = [1] * len(label_ids)
# import numpy as np
while len(input_ids_spc) < max_seq_length: # pad
input_ids_spc.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
while len(polarities) < max_seq_length:
polarities.append(-1)
assert len(input_ids_spc) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# if ex_index < 5:
# print("*** Example ***")
# print("guid: %s" % (example.guid))
# print("tokens: %s" % " ".join(
# [str(x) for x in ntokens]))
# print("input_ids: %s" % " ".join([str(x) for x in input_ids_spc]))
# print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# # print("label: %s (id = %d)" % (example.label, label_ids))
#
# input_ids_spc = np.array(input_ids_spc)
# label_ids = np.array(label_ids)
# labels = np.array(labels)
# valid = np.array(valid)
features.append(
InputFeatures(input_ids_spc=input_ids_spc,
input_mask=input_mask, # attention mask over the non-padded SPC positions
segment_ids=segment_ids, # all zeros; BERT token_type_ids
label_id=label_ids, # labels for aspect extraction
polarities=polarities, # sentiment polarity of each aspect token; non-aspect tokens are marked -1
valid_ids=valid,
label_mask=label_mask)) # label_mask is effectively the same as input_mask
return features
| 24,139
|
def to_json_compatible_object(obj):
"""
This function returns a representation of a UAVCAN structure (message, request, or response), or
a DSDL entity (array or primitive), or a UAVCAN transfer, as a structure easily able to be
transformed into json or json-like serialization
Args:
obj: Object to convert.
Returns: structure which can easily be transformed into a json-like serialization
"""
if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'):
output = dict()
if hasattr(obj, 'message'):
payload = obj.message
output['transfer_type'] = 'Message'
elif hasattr(obj, 'request'):
payload = obj.request
output['transfer_type'] = 'Request'
elif hasattr(obj, 'response'):
payload = obj.response
output['transfer_type'] = 'Response'
else:
raise ValueError('Cannot generate JSON-compatible object representation for %r' % type(obj))
output['source_node_id'] = obj.transfer.source_node_id
output['dest_node_id'] = obj.transfer.dest_node_id
output['ts_monotonic'] = obj.transfer.ts_monotonic
output['ts_real'] = obj.transfer.ts_real
output['transfer_priority'] = obj.transfer.transfer_priority
output['datatype'] = '{}'.format(payload._type)
output['fields'] = _to_json_compatible_object_impl(payload)
return output
else:
return _to_json_compatible_object_impl(obj)
| 24,140
|
def rot_box_kalman_filter(initial_state, Q_std, R_std):
"""
Tracks a 2D rectangular object (e.g. a bounding box) whose state includes
position, centroid velocity, dimensions, and rotation angle.
Parameters
----------
initial_state : sequence of floats
[x, vx, y, vy, w, h, phi]
Q_std : float
Standard deviation to use for process noise covariance matrix
R_std : float
Standard deviation to use for measurement noise covariance matrix
Returns
-------
kf : filterpy.kalman.KalmanFilter instance
"""
kf = KalmanFilter(dim_x=7, dim_z=5)
dt = 1.0 # time step
# state mean and covariance
kf.x = np.array([initial_state]).T
kf.P = np.eye(kf.dim_x) * 500.
# no control inputs
kf.u = 0.
# state transition matrix
kf.F = np.eye(kf.dim_x)
kf.F[0, 1] = kf.F[2, 3] = dt
# measurement matrix - maps from state space to observation space, so
# shape is dim_z x dim_x.
kf.H = np.zeros([kf.dim_z, kf.dim_x])
# z = Hx. H has nonzero coefficients for the following components of kf.x:
# x y w h phi
kf.H[0, 0] = kf.H[1, 2] = kf.H[2, 4] = kf.H[3, 5] = kf.H[4, 6] = 1.0
# measurement noise covariance
kf.R = np.eye(kf.dim_z) * R_std**2
# process noise covariance for x-vx or y-vy pairs
q = Q_discrete_white_noise(dim=2, dt=dt, var=Q_std**2)
# diagonal process noise sub-matrix for width, height, and phi
qq = Q_std**2*np.eye(3)
# process noise covariance matrix for full state
kf.Q = block_diag(q, q, qq)
return kf
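# Usage sketch (assumes filterpy is installed, as the function itself already relies on
# filterpy.kalman.KalmanFilter): one predict/update cycle with a noisy box observation.
# import numpy as np
# kf = rot_box_kalman_filter([0., 1., 0., 1., 20., 10., 0.], Q_std=0.1, R_std=1.0)
# z = np.array([[1.2, 0.9, 19.5, 10.3, 0.05]]).T   # observed [x, y, w, h, phi]
# kf.predict()
# kf.update(z)
# print(kf.x.ravel())   # posterior state [x, vx, y, vy, w, h, phi]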
| 24,141
|
def validate_schema(path, schema_type):
"""Validate a single file against its schema"""
if schema_type not in _VALID_SCHEMA_TYPES.keys():
raise ValueError(f"No validation schema found for '{schema_type}'")
return globals()["validate_" + schema_type](path)
| 24,142
|
def setup_test_env():
"""Sets up App Engine test environment."""
sys.path.insert(0, APP_DIR)
from test_support import test_env
test_env.setup_test_env()
sys.path.insert(0, THIRD_PARTY)
from components import utils
utils.fix_protobuf_package()
| 24,143
|
def import_vote_internal(vote, principal, file, mimetype):
""" Tries to import the given csv, xls or xlsx file.
This is the format used by onegov.ballot.Vote.export().
This function is typically called automatically every few minutes during
an election day - we use bulk inserts to speed up the import.
:return:
A list containing errors.
"""
csv, error = load_csv(
file, mimetype, expected_headers=INTERNAL_VOTE_HEADERS, dialect='excel'
)
if error:
return [error]
ballot_results = {}
errors = []
added_entity_ids = {}
ballot_types = set()
status = 'unknown'
entities = principal.entities[vote.date.year]
for line in csv.lines:
line_errors = []
status = line.status or 'unknown'
if status not in STATI:
line_errors.append(_("Invalid status"))
ballot_type = line.type
if ballot_type not in BALLOT_TYPES:
line_errors.append(_("Invalid ballot type"))
ballot_types.add(ballot_type)
added_entity_ids.setdefault(ballot_type, set())
ballot_results.setdefault(ballot_type, [])
# the id of the entity
entity_id = None
try:
entity_id = validate_integer(line, 'entity_id')
except ValueError as e:
line_errors.append(e.args[0])
else:
if entity_id not in entities and entity_id in EXPATS:
entity_id = 0
if entity_id in added_entity_ids[ballot_type]:
line_errors.append(
_("${name} was found twice", mapping={
'name': entity_id
}))
if entity_id and entity_id not in entities:
line_errors.append(
_("${name} is unknown", mapping={
'name': entity_id
}))
else:
added_entity_ids[ballot_type].add(entity_id)
# Skip expats if not enabled
if entity_id == 0 and not vote.expats:
continue
# Counted
counted = line.counted.strip().lower() == 'true'
# the yeas
try:
yeas = validate_integer(line, 'yeas')
except ValueError as e:
line_errors.append(e.args[0])
# the nays
try:
nays = validate_integer(line, 'nays')
except ValueError as e:
line_errors.append(e.args[0])
# the eligible voters
try:
eligible_voters = validate_integer(line, 'eligible_voters')
except ValueError as e:
line_errors.append(e.args[0])
# the empty votes
try:
empty = validate_integer(line, 'empty')
except ValueError as e:
line_errors.append(e.args[0])
# the invalid votes
try:
invalid = validate_integer(line, 'invalid')
except ValueError as e:
line_errors.append(e.args[0])
# now let's do some sanity checks
try:
if not eligible_voters:
line_errors.append(_("No eligible voters"))
if (yeas + nays + empty + invalid) > eligible_voters:
line_errors.append(_("More cast votes than eligible voters"))
except UnboundLocalError:
pass
# pass the errors
if line_errors:
errors.extend(
FileImportError(error=err, line=line.rownumber)
for err in line_errors
)
continue
# all went well (only keep doing this as long as there are no errors)
if not errors:
entity = entities.get(entity_id, {})
ballot_results[ballot_type].append(
dict(
name=entity.get('name', ''),
district=entity.get('district', ''),
counted=counted,
yeas=yeas,
nays=nays,
eligible_voters=eligible_voters,
entity_id=entity_id,
empty=empty,
invalid=invalid
)
)
if errors:
return errors
if not any((len(results) for results in ballot_results.values())):
return [FileImportError(_("No data found"))]
# Add the missing entities
for ballot_type in ballot_types:
remaining = set(entities.keys())
if vote.expats:
remaining.add(0)
remaining -= added_entity_ids[ballot_type]
for entity_id in remaining:
entity = entities.get(entity_id, {})
ballot_results[ballot_type].append(
dict(
name=entity.get('name', ''),
district=entity.get('district', ''),
counted=False,
entity_id=entity_id
)
)
# Add the results to the DB
vote.clear_results()
vote.status = status
ballot_ids = {b: vote.ballot(b, create=True).id for b in ballot_types}
session = object_session(vote)
session.flush()
session.bulk_insert_mappings(
BallotResult,
(
dict(**result, ballot_id=ballot_ids[ballot_type])
for ballot_type in ballot_types
for result in ballot_results[ballot_type]
)
)
return []
| 24,144
|
def get_mail_count(imap, mailbox_list):
""" Gets the total number of emails on specified account.
Args:
imap <imaplib.IMAP4_SSL>: the account to check
mailbox_list [<str>]: a list of mailboxes
Must be surrounded by double quotes
Returns:
<int>: total emails
"""
total = 0
num_mailboxes = len(mailbox_list)
for idx, mailbox in enumerate(mailbox_list):
print("Counting mail: %d (Mailbox %d of %d) " \
% (total, idx+1, num_mailboxes), end='\r')
total += int(imap.select(mailbox)[1][0])
imap.close()
print("Counting mail: %d (Mailbox %d of %d) " \
% (total, idx+1, num_mailboxes))
return total
| 24,145
|
def get_bdb_path_by_shoulder_model(shoulder_model, root_path=None):
"""Get the path to a BerkeleyDB minter file in a minter directory hierarchy.
The path may or may not exist. The caller may be obtaining the path in which to
create a new minter, so the path is not checked.
Args:
shoulder_model (Shoulder): The Django ORM model for the shoulder to use for
the minting. The model may be a legacy record for N2T based minting, or
a record from a minter created in EZID.
root_path (str, optional):
Path to the root of the minter directory hierarchy. If not provided, the
default for EZID is used.
Returns:
pathlib2.Path
"""
m = shoulder_model
minter_uri = m.minter.strip()
if not minter_uri:
raise nog.exc.MinterNotSpecified(
'A minter has not been specified (minter field in the database is empty)'
)
return pathlib2.Path(
_get_bdb_root(root_path), '/'.join(minter_uri.split('/')[-2:]), 'nog.bdb',
).resolve()
| 24,146
|
def getDefuzzificationMethod(name):
"""Get an instance of a defuzzification method with given name.
Normally looks into the fuzzy.defuzzify package for a suitable class.
"""
m = __import__("fuzzy.defuzzify."+name, fromlist=[name])
c = m.__dict__[name]
return c()
| 24,147
|
def celestial(func):
"""
Transform a point x from Cartesian coordinates to celestial coordinates and return the wrapped function evaluated at the transformed point y
"""
def f_transf(ref, x, *args, **kwargs):
y = cartesian_to_celestial(x)
return func(ref, y, *args, **kwargs)
return f_transf
| 24,148
|
def _rescale_to_width(
img: Image,
target_width: int):
"""Helper function to rescale image to `target_width`.
Parameters
----------
img : PIL.Image
Input image object to be rescaled.
target_width : int
Target width (in pixels) for rescaling.
Returns
-------
PIL.Image
Rescaled image object
"""
w, h = img.size
rescaled_img = img.resize(_scale_wh_by_target_width(w, h, target_width))
return rescaled_img
| 24,149
|
def make_dpi_aware():
"""
https://github.com/PySimpleGUI/PySimpleGUI/issues/1179
"""
if int(platform.release()) >= 8:
ctypes.windll.shcore.SetProcessDpiAwareness(True)
| 24,150
|
def compute_exact_R_P(final_patterns, centones_tab):
"""
Function that computes Recall and Precision with exact matches
"""
true = 0
for tab in final_patterns:
for centon in centones_tab[tab]:
check = False
for p in final_patterns[tab]:
if centon == p:
check = True
if check:
true += 1
all_centones = len([x for y in centones_tab.values() for x in y])
all_ours = len([x for y in final_patterns.values() for x in y])
overall_recall = true / all_centones
overall_precision = true / all_ours
return overall_recall, overall_precision
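# Tiny worked example (made-up data): one of the two discovered patterns matches one of the
# two annotated centones, so recall = precision = 0.5.
final_patterns = {"tab1": ["abc", "def"]}
centones_tab = {"tab1": ["abc", "xyz"]}
recall, precision = compute_exact_R_P(final_patterns, centones_tab)
assert (recall, precision) == (0.5, 0.5)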
| 24,151
|
def build_embedding(embedding_matrix, max_len, name):
"""
Build an Embedding layer initialised with the given (e.g. LDA-derived) weight matrix
:param embedding_matrix: initial embedding weights of shape (vocab_size, embedding_dim)
:param max_len: input sequence length
:param name: layer name
:return: the Embedding layer
"""
# build embedding with initial weights
topic_emmd = Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=max_len,
trainable=True,
name=name)
return topic_emmd
| 24,152
|
def check_playlist_url(playlist_url):
"""Check if a playlist URL is well-formated.
Parameters
----------
playlist_url : str
URL to a YouTube playlist.
Returns
-------
str
If the URL is well-formed, return the playlist ID; otherwise a ValueError is raised.
"""
match = re.match(
r"https?://www\.youtube\.com/playlist\?list=(.+)",
playlist_url.strip()
)
if match is None:
raise ValueError("Incorrect URL: %s" % playlist_url)
return match.group(1)
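# Usage sketch: a well-formed URL yields the playlist ID, anything else raises ValueError
# (the docstring above reflects this behaviour). The playlist ID shown is made up.
assert check_playlist_url(
    "https://www.youtube.com/playlist?list=PL1234567890") == "PL1234567890"
try:
    check_playlist_url("https://example.com/not-a-playlist")
except ValueError:
    pass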
| 24,153
|
def test_PipeJsonRpcSendAsync_2(method, params, result, notification):
"""
Test of basic functionality. Here we don't test for timeout case (it raises an exception).
"""
value_nonlocal = None
def method_handler1():
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return 5
def method_handler2(value=2):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 10
def method_handler3(*, value=3):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
class SomeClass:
def method_handler4(self, *, value=4):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
some_class = SomeClass()
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1) # No name is specified, default name is "method_handler1"
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.add_method(method_handler3, "method3")
pc.add_method(some_class.method_handler4, "method4")
pc.start()
async def send_messages():
nonlocal value_nonlocal
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
for n in range(3):
value_nonlocal = None
response = await p_send.send_msg(method, params, notification=notification)
if not notification:
assert response == result, f"Result does not match the expected: {response}"
assert value_nonlocal == "function_was_called", "Non-local variable has incorrect value"
elif response is not None:
assert False, "Response was received for notification."
p_send.stop()
asyncio.run(send_messages())
pc.stop()
| 24,154
|
def browse_runs(driver: Driver = None, key: Dict[str, str] = {}):
""" Main function that governs the browsing of projects within a specified
Synergos network
"""
st.title("Orchestrator - Browse Existing Run(s)")
########################
# Step 0: Introduction #
########################
############################################################
# Step 1: Pull run information from specified orchestrator #
############################################################
st.header("Step 1: Select your run of interest")
selected_run_id, _ = render_runs(driver=driver, **key, form_type="display")
########################################################################
# Step 2: Pull associations & relationships of specified collaboration #
########################################################################
# st.header("Step 2: Explore Relationships & Associations")
| 24,155
|
def pynx(name, author, cwd):
"""
Function that holds the logic for the 'pynx' command.
:param name: Name of the project
:param author: Name of the author
:param cwd: Current working directory
"""
folder_name, folder_path = generate_folder_name_and_path(name, cwd)
check_and_create_directory(folder_path)
filebuilder.pynx.create_pynx_project(folder_path, name, author)
filebuilder.generic.modify_readme_file(folder_path, name, author)
click.echo("Successfully created the PyNX project!")
| 24,156
|
def _calculate_accuracy(actual: np.ndarray, predictions: np.ndarray) -> None:
"""Calculates the accuracy of predictions.
"""
accuracy = sklearn.metrics.accuracy_score(actual, predictions)
print(f' Predicted test labels: {predictions}')
print(f' Actual test labels: {actual}')
print(f' Accuracy: {accuracy * 100:.2f}%')
| 24,157
|
def f(t, T):
"""
Return 1 for 0 < t < T/2, 0 at t == T/2, and -1 for T/2 < t < T.
Raises IndexError if t is outside the open interval (0, T).
"""
if(t > 0 and t < float(T/2)):
return 1
elif(t == float(T/2)):
return 0
elif(t > float(T/2) and t < T):
return -1
raise IndexError("Out of function domain")
| 24,158
|
def auth0_token():
"""
Token for Auth0 API
"""
auth = settings["auth0"]
conn = http.client.HTTPSConnection(auth['domain'])
    payload = json.dumps({
        "client_id": auth['client'],
        "client_secret": auth['client-secret'],
        "audience": f"https://{auth['domain']}/api/v2/",
        "grant_type": "client_credentials",
    })
headers = {'content-type': "application/json"}
conn.request("POST", "/oauth/token", payload, headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))["access_token"]
| 24,159
|
def get_ami(region, instance_type):
"""Returns the appropriate AMI to use for a given region + instance type
HVM is always used except for instance types which cannot use it. Based
on matrix here:
http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
.. note::
:func:`populate_ami_ids` must be called first to populate the available
AMI's.
"""
if not _POPULATED:
raise KeyError('populate_ami_ids must be called first')
instances = AWS_AMI_IDS[region]
inst_type = "hvm"
if instance_type[:2] in ["m1", "m2", "c1", "t1"]:
inst_type = "paravirtual"
if inst_type not in instances:
msg = "Could not find instance type %r in %s for region %s"
raise KeyError(msg % (inst_type, list(instances.keys()), region))
return instances[inst_type].id
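# Hedged usage sketch: populate_ami_ids() is assumed here to take no required
# arguments, which may not match the real helper; the region and instance
# types are illustrative only.
populate_ami_ids()
get_ami("us-east-1", "m1.small")   # m1/m2/c1/t1 families resolve to the paravirtual AMI
get_ami("us-east-1", "c4.large")   # everything else resolves to the hvm AMI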
| 24,160
|
def convert_config(cfg):
""" Convert some configuration values to different values
Args:
cfg (dict): dict of sub-dicts, each sub-dict containing configuration
keys and values pertinent to a process or algorithm
Returns:
dict: configuration dict with some items converted to different objects
Raises:
KeyError: raise KeyError if configuration file is not specified
correctly
"""
# Parse dataset:
cfg = _parse_dataset_config(cfg)
# Parse YATSM:
cfg = _parse_YATSM_config(cfg)
return cfg
| 24,161
|
def _signals_exist(names):
""" Return true if all of the given signals exist in this version of flask.
"""
return all(getattr(signals, n, False) for n in names)
| 24,162
|
def login():
"""Route for logging the user in."""
try:
if request.method == 'POST':
return do_the_login()
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('login.html')
except Exception as e:
abort(500, {'message': str(e)})
| 24,163
|
def clean_database():
"""
If an instance is removed from the Orthanc server that contains de-identified DICOM images, it
    must also be removed from the database. This function compares the list of instance IDs in the
database with those in Orthanc.
"""
try:
logger.debug('Cleaning the database')
db = DB(env.pg_host, env.pg_port, env.pg_user, env.pg_pwd, env.pg_db)
db_dicom_json = DBDicomJson(db)
ids_in_db = db_dicom_json.list_instance_ids()
ids_in_orthanc = client.orthanc.list_instance_ids()
        ids_to_delete = [i for i in ids_in_db if i not in ids_in_orthanc]
for instance_id in ids_to_delete:
db_dicom_json.delete_instance(instance_id)
except Exception as e:
logger.error(f'Failed to clean database - {e}')
| 24,164
|
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
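# Usage sketch with an illustrative directory layout: given
# myapp/management/commands/ containing dump_data.py and _helpers.py,
# only the public command module names are returned.
find_commands("myapp/management")   # -> ['dump_data']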
| 24,165
|
def randomsplit_permuted_non_fixed_rotated_15():
"""
Launch training and evaluation with a pretrained base model from rxnfp.
"""
    launch_training_on_all_splits(
        experiment='repro_randomsplit',
        splits=RANDOM_SPLIT,
        base_model='pretrained',
        dropout=0.7987,
        learning_rate=0.00009659,
        n_permutations=15,
        random_type='rotated',
        fixed_perm=False,
        epochs=10,
    )
| 24,166
|
def __screen_info_to_dict():
    """
    Filter commits: keep only today's commits made by the tracked authors and
    group the commit messages by author name.
    :return: dict mapping author name to a list of commit messages
    """
    screen_dict = {}  # author name -> list of commit messages
    for info in git.get_all_commit_info():
        if info.name not in authors:
            continue
        if not info.check_today_time():
            continue
        screen_dict.setdefault(info.name, []).append(info.msg)
    return screen_dict
| 24,167
|
def parse_flextext(file_name, log=None):
"""Iterate over glossed examples contained in a flextext file."""
gloss_db = ET.parse(file_name)
for example in separate_examples(gloss_db.getroot(), log):
example['example'] = merge_glosses(
[extract_gloss(e, log) for e in example['example']])
yield example
| 24,168
|
def _collapse_edge_by_namespace(
graph: BELGraph,
victim_namespaces: Strings,
survivor_namespaces: str,
relations: Strings,
) -> None:
"""Collapse pairs of nodes with the given namespaces that have the given relationship.
:param graph: A BEL Graph
:param victim_namespaces: The namespace(s) of the node to collapse
:param survivor_namespaces: The namespace of the node to keep
:param relations: The relation(s) to search
"""
relation_filter = build_relation_predicate(relations)
source_namespace_filter = build_source_namespace_filter(victim_namespaces)
target_namespace_filter = build_target_namespace_filter(survivor_namespaces)
edge_predicates = [
relation_filter,
source_namespace_filter,
target_namespace_filter
]
_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)
| 24,169
|
def bind_prop_arr(
prop_name: str,
elem_type: Type[Variable],
doc: Optional[str] = None,
doc_add_type=True,
) -> property:
"""Convenience wrapper around bind_prop for array properties
:meta private:
"""
if doc is None:
doc = f"Wrapper around `variables['{prop_name}']` of type `VariableArray[{elem_type.__name__}]`."
if doc_add_type:
doc = f"MutableSequence[{_get_python_prop_type(elem_type)}]: {doc}"
return bind_prop(
prop_name,
VariableArray,
lambda: VariableArray(elem_type),
doc=doc,
doc_add_type=False,
objtype=True,
)
| 24,170
|
def generate_profile_yaml_file(destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the adequate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
profiles_config["credentials_json"] = json.dumps(profiles_config)
profiles_config["dataset_id"] = target_schema
else:
profiles_config["schema"] = target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml)
return profiles_config
| 24,171
|
def plot_altitude(target_list={}, observatory=None, utc_offset=0, obs_time=None, show_sun=True, show_moon=True):
"""plot the position of science target during observation
"""
# define the observation time
delta_hours = np.linspace(-12, 12, 100)*u.hour
obstimes = obs_time + delta_hours - utc_offset
    # observatory
altaz_frames = AltAz(location=observatory, obstime=obstimes)
target_altaz_list = {}
for name, skcoord in target_list.items():
target_altaz_list[name] = skcoord.transform_to(altaz_frames)
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(121)
for name, pos in target_altaz_list.items():
ax.scatter(delta_hours, pos.alt, label=name, s=8)
ax.set_title("Observed on {}".format(obs_time.fits))
# get position of sun and moon
sun = get_sun(obstimes).transform_to(altaz_frames)
if show_sun:
ax.plot(delta_hours, sun.alt, 'r', label='sun')
if show_moon:
moon = get_moon(obstimes).transform_to(altaz_frames)
ax.plot(delta_hours, moon.alt, 'k--', label='moon')
ax.fill_between(delta_hours.to('hr').value, -90, 90, sun.alt < -0*u.deg,
color='0.5', zorder=0, alpha=0.5)
ax.fill_between(delta_hours.to('hr').value, -90, 90, sun.alt < -18*u.deg,
color='k', zorder=0, alpha=0.5)
ax.set_xlabel('LST offset')
ax.set_ylabel('Altitude')
# ax.set_ylim(-10, 90)
ax.legend(loc='upper right')
# plt.tight_layout()
ax = fig.add_subplot(122, projection='polar')
for name, pos in target_altaz_list.items():
ax.plot(pos.az/180*np.pi, np.cos(pos.alt), label=name, marker='.', ms=8)
if show_sun:
ax.plot(sun.az/180*np.pi, np.cos(sun.alt), 'r.', label='sun')
if show_moon:
moon = get_moon(obstimes).transform_to(altaz_frames)
ax.plot(moon.az/180*np.pi, np.cos(moon.alt), 'k--', label='moon')
ax.set_ylim(0, 1)
plt.show()
| 24,172
|
def get_foreign_trips(db_connection):
"""
Gets the time series data for all Foreign visitors from the database
Args:
db_connection (Psycopg.connection): The database connection
Returns:
Pandas.DataFrame: The time series data for each unique Foreign visitor.
It has the columns cust_id, date, date_diff, calls,
calls_in_florence, calls_near_airport
"""
counts = get_daily_call_counts(db_connection,
'optourism.foreigners_timeseries_daily')
return get_trips(counts)
| 24,173
|
def get_diagonal_sums(square: Square) -> List[int]:
    """ Returns a list of the sum of each diagonal. """
    topleft = sum(row[i] for i, row in enumerate(square.rows))
    bottomleft = sum(col[i] for i, col in enumerate(square.columns))
    return [topleft, bottomleft]
| 24,174
|
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
| 24,175
|
def int_to_base64(i: int) -> str:
""" Returns a 12 char length representation of i in base64 """
    return base64.b64encode(i.to_bytes(8, 'big')).decode('ascii')
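# Quick sanity check: any non-negative integer that fits in 8 bytes encodes to
# a fixed 12-character base64 string.
assert int_to_base64(255) == 'AAAAAAAAAP8='
assert len(int_to_base64(0)) == 12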
| 24,176
|
def payment_provider(provider_base_config):
"""When it doesn't matter if request is contained within provider the fixture can still be used"""
return TurkuPaymentProviderV3(config=provider_base_config)
| 24,177
|
def gradle_extract_data(build_gradle):
"""
Extract the project name and dependencies from a build.gradle file.
:param Path build_gradle: The path of the build.gradle file
:rtype: dict
"""
# Content for dependencies
content_build_gradle = extract_content(build_gradle)
match = re.search(r'apply plugin: ("|\')org.ros2.tools.gradle\1', content_build_gradle)
if not match:
raise RuntimeError("Gradle plugin missing, please add the following to build.gradle: \"apply plugin: 'org.ros2.tools.gradle'\"")
return extract_data(build_gradle)
| 24,178
|
def start_services(server_argv, portal_argv, doexit=False):
"""
This calls a threaded loop that launches the Portal and Server
and then restarts them when they finish.
"""
global SERVER, PORTAL
processes = Queue.Queue()
def server_waiter(queue):
try:
rc = Popen(server_argv, env=getenv()).wait()
except Exception as e:
print(PROCESS_ERROR.format(component="Server", traceback=e))
return
# this signals the controller that the program finished
queue.put(("server_stopped", rc))
def portal_waiter(queue):
try:
rc = Popen(portal_argv, env=getenv()).wait()
except Exception as e:
print(PROCESS_ERROR.format(component="Portal", traceback=e))
return
# this signals the controller that the program finished
queue.put(("portal_stopped", rc))
if portal_argv:
try:
if not doexit and get_restart_mode(PORTAL_RESTART) == "True":
# start portal as interactive, reloadable thread
PORTAL = thread.start_new_thread(portal_waiter, (processes, ))
else:
# normal operation: start portal as a daemon;
# we don't care to monitor it for restart
PORTAL = Popen(portal_argv, env=getenv())
except IOError as e:
print(PROCESS_IOERROR.format(component="Portal", traceback=e))
return
try:
if server_argv:
if doexit:
SERVER = Popen(server_argv, env=getenv())
else:
# start server as a reloadable thread
SERVER = thread.start_new_thread(server_waiter, (processes, ))
except IOError as e:
print(PROCESS_IOERROR.format(component="Server", traceback=e))
return
if doexit:
# Exit immediately
return
# Reload loop
while True:
# this blocks until something is actually returned.
from twisted.internet.error import ReactorNotRunning
try:
try:
message, rc = processes.get()
except KeyboardInterrupt:
# this only matters in interactive mode
break
# restart only if process stopped cleanly
if (message == "server_stopped" and int(rc) == 0 and
get_restart_mode(SERVER_RESTART) in ("True", "reload", "reset")):
print(PROCESS_RESTART.format(component="Server"))
SERVER = thread.start_new_thread(server_waiter, (processes, ))
continue
# normally the portal is not reloaded since it's run as a daemon.
if (message == "portal_stopped" and int(rc) == 0 and
get_restart_mode(PORTAL_RESTART) == "True"):
print(PROCESS_RESTART.format(component="Portal"))
PORTAL = thread.start_new_thread(portal_waiter, (processes, ))
continue
break
except ReactorNotRunning:
break
| 24,179
|
def stack_batch_img(
img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0
) -> torch.Tensor:
"""
    Stack a sequence of image tensors into a single padded batch tensor.
    :param img_tensors: sequence of image tensors sharing all but the last two dimensions
    :param divisible: if > 0, pad height and width up to a multiple of this value
    :param pad_value: value used for padding
    :return: a stacked torch.Tensor of shape (N, ..., max_H, max_W)
"""
assert len(img_tensors) > 0
assert isinstance(img_tensors, (tuple, list))
assert divisible >= 0
img_heights = []
img_widths = []
for img in img_tensors:
assert img.shape[:-2] == img_tensors[0].shape[:-2]
img_heights.append(img.shape[-2])
img_widths.append(img.shape[-1])
max_h, max_w = max(img_heights), max(img_widths)
if divisible > 0:
max_h = (max_h + divisible - 1) // divisible * divisible
max_w = (max_w + divisible - 1) // divisible * divisible
batch_imgs = []
for img in img_tensors:
padding_size = [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]]
batch_imgs.append(F.pad(img, padding_size, value=pad_value))
return torch.stack(batch_imgs, dim=0).contiguous()
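# Minimal shape sketch (tensor sizes are illustrative): two images that share
# their leading dims but differ in H/W are padded up to a common size that is
# a multiple of `divisible` before being stacked.
import torch

imgs = [torch.rand(3, 200, 300), torch.rand(3, 220, 280)]
batch = stack_batch_img(imgs, divisible=32, pad_value=0.0)
assert batch.shape == (2, 3, 224, 320)   # 220 -> 224 and 300 -> 320, rounded up to /32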
| 24,180
|
def gen_accessor_declarations(out):
"""
Generate the declaration of each version independent accessor
@param out The file to which to write the decs
"""
out.write("""
/****************************************************************
*
* Unified, per-member accessor function declarations
*
****************************************************************/
""")
for cls in of_g.standard_class_order:
if type_maps.class_is_virtual(cls) and not loxi_utils.class_is_list(cls):
continue
out.write("\n/* Unified accessor functions for %s */\n" % cls)
for m_name in of_g.ordered_members[cls]:
if m_name in of_g.skip_members:
continue
m_type = loxi_utils.member_base_type(cls, m_name)
base_name = "%s_%s" % (cls, m_name)
gparams = ",\n ".join(param_list(cls, m_name, "get"))
get_ret_type = accessor_return_type("get", m_type)
sparams = ",\n ".join(param_list(cls, m_name, "set"))
set_ret_type = accessor_return_type("set", m_type)
bparams = ",\n ".join(param_list(cls, m_name, "bind"))
bind_ret_type = accessor_return_type("bind", m_type)
if loxi_utils.type_is_of_object(m_type):
# Generate bind accessors, but not get accessor
out.write("""
extern %(set_ret_type)s %(base_name)s_set(
%(sparams)s);
extern %(bind_ret_type)s %(base_name)s_bind(
%(bparams)s);
extern %(m_type)s *%(cls)s_%(m_name)s_get(
%(cls)s_t *obj);
""" % dict(base_name=base_name, sparams=sparams, bparams=bparams,
m_name=m_name, m_type=m_type, cls=cls,
set_ret_type=set_ret_type, bind_ret_type=bind_ret_type))
else:
out.write("""
extern %(set_ret_type)s %(base_name)s_set(
%(sparams)s);
extern %(get_ret_type)s %(base_name)s_get(
%(gparams)s);
""" % dict(base_name=base_name, gparams=gparams, sparams=sparams,
get_ret_type=get_ret_type, set_ret_type=set_ret_type))
if loxi_utils.class_is_list(cls):
e_type = loxi_utils.list_to_entry_type(cls)
out.write("""
extern int %(cls)s_first(
%(cls)s_t *list, of_object_t *iter);
extern int %(cls)s_next(
%(cls)s_t *list, of_object_t *iter);
extern int %(cls)s_append_bind(
%(cls)s_t *list, of_object_t *iter);
extern int %(cls)s_append(
%(cls)s_t *list, of_object_t *iter);
/**
* Iteration macro for list of type %(cls)s
* @param list Pointer to the list being iterated over of
* type %(cls)s
* @param elt Pointer to an element of type %(e_type)s
* @param rv On exiting the loop will have the value OF_ERROR_RANGE.
*/
#define %(u_cls)s_ITER(list, elt, rv) \\
for ((rv) = %(cls)s_first((list), (elt)); \\
(rv) == OF_ERROR_NONE; \\
(rv) = %(cls)s_next((list), (elt)))
""" % dict(u_cls=cls.upper(), cls=cls, e_type=e_type))
| 24,181
|
def convert_rel_traj_to_abs_traj(traj):
""" Converts a relative pose trajectory to an absolute-pose trajectory.
    The incoming trajectory is processed element-wise: the relative pose at each
    timestamp is composed with the absolute pose accumulated up to the previous timestamp.
Args:
traj: A PoseTrajectory3D object with timestamps as indices containing, at a minimum,
columns representing the xyz position and wxyz quaternion-rotation at each
timestamp, corresponding to the pose between previous and current timestamps.
Returns:
        A PoseTrajectory3D object with xyz position and wxyz quaternion fields for the
        absolute pose trajectory corresponding to the relative one given in `traj`.
"""
from evo.core import lie_algebra as lie
new_poses = [lie.se3()] # origin at identity
for i in range(0, len(traj.timestamps)):
abs_pose = np.dot(new_poses[-1], traj.poses_se3[i])
new_poses.append(abs_pose)
return trajectory.PoseTrajectory3D(timestamps=traj.timestamps[1:], poses_se3=new_poses)
| 24,182
|
def remove(handle):
"""The remove action allows users to remove a roommate."""
user_id = session['user']
roommate = model.roommate.get_roommate(user_id, handle)
# Check if roommate exists
if not roommate:
return abort(404)
if request.method == 'POST':
model.roommate.delete_roommate(roommate.id)
return redirect(url_for('roommate.overview'))
return render_template('/roommate/remove.jinja', roommate=roommate)
| 24,183
|
def delete_conversation(bot_id: str):
"""
Deletes conversation.
"""
count = get_db().delete(f'conversation.{bot_id}')
logger.info(f'{count} conversation(s) deleted for bot {bot_id}.')
| 24,184
|
def maxsubarray(list):
"""
Find a maximum subarray following this idea:
Knowing a maximum subarray of list[0..j]
find a maximum subarray of list[0..j+1] which is either
(I) the maximum subarray of list[0..j]
(II) or is a maximum subarray list[i..j+1] for some 0 <= i <= j
We can determine (II) in constant time by keeping a max
subarray ending at the current j.
This is done in the first if of the loop, where the max
subarray ending at j is max(previousSumUntilJ + array[j], array[j])
This works because if array[j] + sum so far is less than array[j]
then the sum of the subarray so far is negative (and less than array[j]
in case it is also negative) so it has a bad impact on the
subarray until J sum and we can safely discard it and start anew
from array[j]
Complexity (n = length of list)
Time complexity: O(n)
Space complexity: O(1)
"""
if len(list) == 0:
return (-1, -1, 0)
# keep the max sum of subarray ending in position j
maxSumJ = list[0]
# keep the starting index of the maxSumJ
maxSumJStart = 0
# keep the sum of the maximum subarray found so far
maxSum = list[0]
# keep the starting index of the current max subarray found
maxStart = 0
# keep the ending index of the current max subarray found
maxEnd = 0
for j in range(1, len(list)):
if maxSumJ + list[j] >= list[j]:
maxSumJ = maxSumJ + list[j]
else:
maxSumJ = list[j]
maxSumJStart = j
if maxSum < maxSumJ:
maxSum = maxSumJ
maxStart = maxSumJStart
maxEnd = j
return (maxSum, maxStart, maxEnd)
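# Worked example of the algorithm above (Kadane's): the best subarray of the
# list below is [4, -1, 2, 1] with sum 6, spanning indices 3 through 6.
assert maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == (6, 3, 6)
assert maxsubarray([]) == (-1, -1, 0)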
| 24,185
|
def set_packet_timeout (address, timeout):
"""
Adjusts the ACL flush timeout for the ACL connection to the specified
device. This means that all L2CAP and RFCOMM data being sent to that
device will be dropped if not acknowledged in timeout milliseconds (maximum
1280). A timeout of 0 means to never drop packets.
Since this affects all Bluetooth connections to that device, and not just
those initiated by this process or PyBluez, a call to this method requires
superuser privileges.
You must have an active connection to the specified device before invoking
this method
"""
n = round (timeout / 0.625)
write_flush_timeout (address, n)
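# Worked conversion sketch: the flush timeout is written in 0.625 ms slots, so
# the maximum documented timeout of 1280 ms maps to 2048 slots and 0 means
# "never drop packets". The address below is illustrative; the call itself
# needs superuser privileges and an active ACL connection, so it is left commented.
# set_packet_timeout('01:23:45:67:89:AB', 1280)   # -> write_flush_timeout(address, 2048)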
| 24,186
|
def get_merged_threadlocal(bound_logger: BindableLogger) -> Context:
"""
Return a copy of the current thread-local context merged with the context
from *bound_logger*.
.. versionadded:: 21.2.0
"""
ctx = _get_context().copy()
ctx.update(structlog.get_context(bound_logger))
return ctx
| 24,187
|
def sort_cipher_suites(cipher_suites, ordering):
"""Sorts the given list of CipherSuite instances in a specific order."""
if ordering == 'asc':
return cipher_suites.order_by('name')
elif ordering == 'desc':
return cipher_suites.order_by('-name')
else:
return cipher_suites
| 24,188
|
def make_1D_distributions(lims, n_points, all_shifts, all_errs, norm=None, max_shifts=None, seed=None):
"""
Generate 1D distributions of chemical shifts from arrays of shifts and errors of each distribution
Inputs: - lims Limits of the distributions
- n_points Number of points in the distributions
- all_shifts Array of shifts for each distribution
- all_errs Array of predicted error for each distribution
- norm Distribution normalization to apply
None: no normalization
"max": top of each distribution set to 1
- max_shifts Maximum number of shifts to consider when constructing the distribution
- seed Seed for the random selection of shifts
Outputs: - x Array of shielding values to plot the distributions against
- ys List of distributions
"""
# Construct the array of shielding values
x = np.linspace(lims[0], lims[1], n_points)
# Generate the distributions
ys = []
for i, (sh, er) in enumerate(zip(all_shifts, all_errs)):
print(" Constructing distribution {}/{}...".format(i+1, len(all_shifts)))
ys.append(make_1D_distribution(x, sh, er, norm=norm, max_shifts=max_shifts, seed=seed))
print(" Distribution constructed!\n")
return x, ys
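# Hedged usage sketch: the shift/error arrays below are illustrative, and the
# helper make_1D_distribution (singular) is assumed to be defined alongside
# this function, as the loop above suggests.
import numpy as np

all_shifts = [np.array([10.0, 12.0, 11.5]), np.array([20.0, 21.0])]
all_errs = [np.array([0.5, 0.5, 0.5]), np.array([1.0, 1.0])]
x, ys = make_1D_distributions((0.0, 30.0), 300, all_shifts, all_errs, norm="max")
# x holds 300 shielding values and ys holds one distribution per set of shifts.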
| 24,189
|
def fill_column_values(df, icol=0):
"""
Fills empty values in the targeted column with the value above it.
Parameters
----------
df: pandas.DataFrame
icol: int
Returns
-------
pandas.DataFrame
"""
v = df.iloc[:,icol].fillna('').values.tolist()
vnew = fill_gaps(v)
dfnew = df.copy() # type: pd.DataFrame
dfnew.iloc[:,icol] = vnew
return dfnew
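# Usage sketch; it relies on the fill_gaps helper referenced above, which is
# assumed to forward-fill the empty strings produced by fillna('').
import pandas as pd

df = pd.DataFrame({"group": ["a", "", "", "b"], "value": [1, 2, 3, 4]})
filled = fill_column_values(df, icol=0)
# filled["group"] is expected to read ["a", "a", "a", "b"] if fill_gaps forward-fills.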
| 24,190
|
def reward(static, tour_indices):
"""
    Total Euclidean tour length over the cities / nodes given by tour_indices, including the return to the depot
"""
# Convert the indices back into a tour
idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)
# Ensure we're always returning to the depot - note the extra concat
# won't add any extra loss, as the euclidean distance between consecutive
# points is 0
start = static.data[:, :, 0].unsqueeze(1)
y = torch.cat((start, tour, start), dim=1)
# Euclidean distance between each consecutive point
tour_len = torch.sqrt(torch.sum(torch.pow(y[:, :-1] - y[:, 1:], 2), dim=2))
return tour_len.sum(1)
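# Minimal shape sketch: `static` holds (x, y) coordinates for each city and
# tour_indices is a batch of visiting orders; the values are illustrative.
import torch

static = torch.rand(4, 2, 10)                                   # (batch, xy, num_cities)
tour_indices = torch.stack([torch.randperm(10) for _ in range(4)])
tour_len = reward(static, tour_indices)
assert tour_len.shape == (4,)                                   # one tour length per sample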
| 24,191
|
def test_sls_networks_update(cli_runner, rest_mock):
""" Test `cray sls networks update` with various params """
runner, cli, opts = cli_runner
network_name = 'foobar'
url_template = f'/apis/sls/v1/networks/{network_name}'
config = opts['default']
hostname = config['hostname']
payload = f"""{{
"Name": "{network_name}",
"FullName": "barfoobar",
"IPRanges": ["192.168.1.0/24"],
"Type": "ethernet"
}}"""
    fd, path = tempfile.mkstemp(prefix='test_sls_networks_update', suffix='.json')
try:
with os.fdopen(fd, 'w') as f:
f.write(payload)
result = runner.invoke(cli, ['sls', 'networks', 'update', path, network_name])
print(result.output)
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'].lower() == 'put'
body = data.get('body')
assert body
assert body['Name'] == network_name
uri = data['url'].split(hostname)[-1]
assert uri == url_template
finally:
os.remove(path)
| 24,192
|
def valid_review_queue_name(request):
"""
    Validates that the queue name taken from the request matches one of the known review queues.
    :param request: incoming request with the queue name in its matchdict
    :return: True if the queue is valid, False otherwise (an error is added to the request)
"""
queue = request.matchdict.get('queue')
if queue in all_queues:
request.validated['queue'] = queue
return True
else:
_tn = Translator(get_language_from_cookie(request))
add_error(request, 'Invalid queue', _tn.get(_.internalError))
return False
| 24,193
|
def is_valid_cluster_dir(path):
"""Checks whether a given path is a valid postgres cluster
    Args:
        path - str, path to the data directory to check
Returns:
bool, whether or not a directory is a valid postgres cluster
"""
pg_controldata_exe = which('pg_controldata')
cmd = '"{pg_controldata}" "{path}"'.format(
pg_controldata=pg_controldata_exe, path=path)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate()
if 'No such file or directory' in err.decode('utf-8'):
return False
else:
return True
| 24,194
|
def get_best_straight(possible_straights, hand):
""" get list of indices of hands that make the strongest straight
if no one makes a straight, return empty list
:param possible_straights: ({tuple(str): int})
map tuple of connecting cards --> best straight value they make
:param hand: (set(str)) set of strings
:return: (int) top value in the straight, or 0 if no straight
"""
highest_straight_value = 0 # e.g. 14 for broadway, 5 for the wheel
hand_values = set(
ranks_to_sorted_values(
ranks=[r for r, _ in hand], aces_high=True, aces_low=True
)
)
for connecting_values, max_value in possible_straights.items():
connecting_cards = set(connecting_values) & hand_values
if len(connecting_cards) == 2:
# we've made a straight!
if max_value > highest_straight_value:
highest_straight_value = max_value
return highest_straight_value
| 24,195
|
def stats(request):
"""
Display statistics for the web site
"""
from django.shortcuts import render_to_response, RequestContext
views = list(View.objects.all().only('internal_url', 'browser'))
urls = {}
mob_vs_desk = { 'desktop': 0, 'mobile': 0 }
for view in views:
if is_mobile(view.browser):
mob_vs_desk['mobile'] += 1
else:
mob_vs_desk['desktop'] += 1
        if view.internal_url not in urls:
urls[view.internal_url] = 0
urls[view.internal_url] += 1
stats = []
count = 0
for url in urls:
stats.append({'url': url, 'count': urls[url]})
count += urls[url]
stats = sorted(stats, key=lambda k: k['count'], reverse=True)
return render_to_response('admin/appview/view/display_stats.html',
RequestContext(request, { 'stats' : stats,
'total' : count,
'views': mob_vs_desk
}
)
)
| 24,196
|
def show_scatter_plot(
content_on_x, content_on_y, x_label, y_label, title, color='#1f77b4'):
"""Plots scatter plot"""
plt.figure(1)
plt.scatter(content_on_x, content_on_y, s=2, c=color)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
| 24,197
|
def build_argparser():
"""
Builds argument parser.
:return argparse.ArgumentParser
"""
banner = "%(prog)s - generate a static file representation of a PEP data repository."
additional_description = "\n..."
parser = _VersionInHelpParser(
description=banner,
epilog=additional_description)
parser.add_argument(
"-V", "--version",
action="version",
version="%(prog)s {v}".format(v=__version__))
parser.add_argument(
"-d", "--data", required=False,
default=PEPHUB_URL,
help="URL/Path to PEP storage tree.")
parser.add_argument(
"-o", "--out", required=False,
default="./out",
help="Outpath for generated PEP tree.")
parser.add_argument(
"-p", "--path", required=False,
help="Path to serve the file server at."
)
# parser for serve command
subparsers = parser.add_subparsers(
help="Functions",
dest="serve"
)
serve_parser = subparsers.add_parser("serve", help="Serve a directory using pythons built-in http library")
serve_parser.set_defaults(
func=serve_directory
)
serve_parser.add_argument(
"-f", "--files", required=False,
help="Files to serve.",
default="./out"
)
return parser
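# Usage sketch: parse a plain invocation and the `serve` sub-command; the flag
# names come directly from the parser built above, the values are illustrative.
parser = build_argparser()
args = parser.parse_args(["-d", "https://example.com/peps", "-o", "./out"])
serve_args = parser.parse_args(["serve", "-f", "./out"])   # serve_args.func is serve_directory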
| 24,198
|
def abc19():
"""Solution to exercise C-1.19.
Demonstrate how to use Python’s list comprehension syntax to produce
the list [ a , b , c , ..., z ], but without having to type all 26 such
characters literally.
"""
a_idx = 97
return [chr(a_idx + x) for x in range(26)]
| 24,199
|