content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def average_filter(values, n=3):
    """
    Calculate the sliding window average for the given time series.

    Mathematically, res[i] = sum_{j=i-t+1}^{i} values[j] / t, where
    t = min(n, i+1): windows near the start of the series shrink to the
    amount of data available.

    :param values: list.
        a list of float numbers
    :param n: int, default 3.
        window size; capped at len(values).
    :return res: numpy array of the averaged values.
    """
    window = min(n, len(values))
    res = np.cumsum(values, dtype=float)
    # Trailing-window sums via cumulative-sum differences, then normalise.
    res[window:] = res[window:] - res[:-window]
    res[window:] = res[window:] / window
    # The first window-1 entries average over the shorter prefix only.
    for idx in range(1, window):
        res[idx] /= idx + 1
    return res
def _get_int_val(val, parser):
"""Get a possibly `None` single element list as an `int` by using
the given parser on the element of the list.
"""
if val is None:
return 0
return parser.parse(val[0]) | d2e029657b3424027e83ee8e1e2be76e3abf8fda | 3,630,001 |
def read_csv_folder_into_tidy_df(csv_glob, drop_columns=None, sample_id_categories=None, regex_exp=r"[a-z]\dg\d\d?"):
    """
    Input
    -----
    csv_glob : str
        Glob matching the csv folder's files.
    drop_columns : list of str, optional
        Columns dropped after reading; defaults to [' '].  (The original
        mutable-list default argument is replaced by a None sentinel.)
    sample_id_categories : list, optional
        Explicit category ordering for the ``sample_id`` column; inferred
        from the data when None.
    regex_exp : str
        Raw-string regex extracting the sample/gut id from each file path
        (raw string avoids the invalid '\\d' escape warning).
    Function
    --------
    Combines all csv files into one tidy dataframe, deriving
    ``sample_gut_id``, categorical ``sample_id`` and ``gut_id`` from the
    file paths.
    Returns
    -------
    Returns tidy dataframe.
    """
    if drop_columns is None:
        drop_columns = [' ']
    df = (
        dd.read_csv(csv_glob, include_path_column="sample_gut_id")
        .compute()
        .drop(columns=drop_columns)
    )
    # pd.Categorical(..., categories=None) infers the categories from the
    # data, so the two near-identical branches collapse into one assign.
    return df.assign(
        sample_gut_id=lambda x: x["sample_gut_id"].str.findall(
            regex_exp).str[-1],
        sample_id=lambda x: pd.Categorical(
            x["sample_gut_id"].str.split("g", expand=True)[0],
            categories=sample_id_categories,
        ),
        gut_id=lambda x: x["sample_gut_id"].str.split("g", expand=True)[1],
    )
def read_gps(gps_filename):
    """
    Read NMEA gps data and output two lists of parsed sentences: RMC
    sentences (which carry speed) and GGA sentences (which carry altitude).

    :param gps_filename: the gps filename
    :return: 2 lists of sentences, one containing speed, the other altitude
    """
    speed_data = []     # RMC sentences (speed)
    altitude_data = []  # GGA sentences (altitude)
    with open(gps_filename, 'r') as gps_file:
        for raw_line in gps_file.readlines():
            try:
                sentence = pynmea2.parse(raw_line)
            except pynmea2.ParseError:
                # Skip unparseable lines silently (same as the original).
                continue
            if sentence.sentence_type == 'RMC':
                speed_data.append(sentence)
            elif sentence.sentence_type == 'GGA':
                altitude_data.append(sentence)
    return speed_data, altitude_data
def gram_schmidt(vs, normalised=True):
    """Gram-Schmidt Orthonormalisation / Orthogonalisation.

    Given a set of vectors, returns an orthogonal set of vectors spanning
    the same subspace: each vector has its projections onto all previously
    processed vectors subtracted from it.
    Set `normalised` to False to return a non-normalised set of vectors."""
    orthogonal = []
    for vec in np.array(vs):
        # np.sum over an empty list yields 0.0, so the first vector passes
        # through unchanged.
        overlap = np.sum([project(basis_vec, vec) for basis_vec in orthogonal], axis=0)
        orthogonal.append(vec - overlap)
    if not normalised:
        return np.array(orthogonal)
    return np.array([normalise(u) for u in orthogonal])
def gcd(a, b):
    """Compute greatest common divisor of a and b.

    Uses the iterative Euclidean algorithm.  Unlike the original, which
    raised ZeroDivisionError on ``a % b`` when ``b == 0``, this handles
    ``b == 0`` by returning ``a`` (the mathematical convention).

    This function is used in some of the functions in PyComb module.

    :param a: int
    :param b: int
    :return: int, the greatest common divisor of a and b.
    """
    while b != 0:
        a, b = b, a % b
    return a
import math
def circle_line_intersect(circle, a, b):
    """Intersect a circle with the line through points a and b.

    NOTE(review): although the original docstring calls a and b
    "endpoints", the result is NOT clamped to the segment [a, b] — confirm
    callers expect infinite-line semantics.

    :param circle: ``(c, r)`` pair — centre Point and radius.
    :param a: Point on the line.
    :param b: second Point on the line.
    :return: () when the line misses the circle, a 1-tuple for a tangent
        point, otherwise the two intersection points ``(p - h, p + h)``.
    """
    assert isinstance(a, Point) and isinstance(b, Point)
    c, r = circle
    ab = b - a
    # p: foot of the perpendicular from the centre c onto the line.
    p = a + ab * (c - a).dot(ab) / ab.dist2()
    # s: cross product = twice the signed area of triangle (a, b, c);
    # s^2 / |ab|^2 is the squared distance from c to the line.
    s = Point.cross(b-a, c-a)
    # h2: squared half-chord length; negative when the line misses.
    h2 = r*r - s * s / ab.dist2()
    if h2 < 0:
        return ()
    # NOTE(review): exact float equality — tangency is only detected when
    # the arithmetic happens to be exact.
    if h2 == 0:
        return (p,)
    h = ab.unit() * math.sqrt(h2)
    return (p - h, p + h) | 09a9fc06aac9bed7fbf9abc8167d1a96d9d57816 | 3,630,006 |
import json
def rekognition_json_to_df(path, filter_poseNAs=False):
    """Convert AWS Rekognition output json into Pandas DataFrame.

    Works for json responses written by AWS Rekognition GetFaceSearch function
    (or as run in VidFaceSearch.py)

    Arguments:
    path -- (string) path/file
    filter_poseNAs -- (bool) If True, includes only data in which a face pose was detected. If False,
                      return records for all unique persons detected and indexed in video.

    Returns a timestamp-sorted DataFrame with snake_case columns; the video
    name (the filename part before "_response_") is attached as ``df.name``.
    NOTE(review): pandas does not propagate ``DataFrame.name`` through most
    operations — confirm downstream code reads it immediately.
    """
    with open(path) as f:
        d = json.load(f)
    # Flatten the nested 'Persons' records into one row per detection.
    # NOTE(review): json_normalize's record_prefix parameter expects a
    # string; passing True here looks accidental — confirm against the
    # pandas json_normalize API.
    df = json_normalize(d['Persons'], meta='Face', record_prefix=True)
    if not face_detected_bool(d):
        # If no faces were detected, create blank (NaN) columns for missing keys in json source file.
        df['Person.Face.BoundingBox.Top'] = np.NaN
        df['Person.Face.BoundingBox.Left'] = np.NaN
        df['Person.Face.Pose.Pitch'] = np.NaN
        df['Person.Face.Pose.Yaw'] = np.NaN
    cols_raw = [
        "Timestamp",
        "Person.Index",
        "Person.Face.BoundingBox.Top",  # BoundingBox Top and Left for position of the face in the frame
        "Person.Face.BoundingBox.Left",
        "Person.Face.Pose.Pitch",
        "Person.Face.Pose.Yaw",
    ]
    source = path.split("/")[-1]
    video = source.split("_response_")[0]
    rows_filtered = df["Person.Face.Pose.Pitch"].map(lambda x: not np.isnan(x))  # filter out frames with no faces
    if filter_poseNAs:
        df = df.loc[rows_filtered, cols_raw]
    else:
        df = df[cols_raw]
    df["SourceFile"] = source
    # Reorder columns
    cols_reordered = [
        "SourceFile",
        "Timestamp",
        "Person.Index",
        "Person.Face.Pose.Yaw",
        "Person.Face.Pose.Pitch",
        "Person.Face.BoundingBox.Top",
        "Person.Face.BoundingBox.Left"
    ]
    df = df[cols_reordered]
    # Rename columns
    cols_renamed = [
        "source_file",
        "timestamp",
        "person_index",
        "face_yaw",
        "face_pitch",
        "face_box_top",  # face bounding box location
        "face_box_left"
    ]
    df.columns = cols_renamed
    # Order and index records by Timestamp
    df.sort_values("timestamp", inplace=True)
    df.index = np.arange(len(df))
    df.name = video.lower()
    return df | a8a608b3059862602ff251446c786ab014cfa109 | 3,630,007 |
def horizontal_flip(img, boxes, labels):
    """ Horizontally flip an image and its ground-truth boxes.

    The box layout is (xmin, ymin, xmax, ymax); mirroring maps x to 1 - x,
    which swaps min/max, while y-coordinates are unchanged (the ``1 - x``
    mirror assumes coordinates normalised to [0, 1]).

    Args:
        img: the original PIL Image
        boxes: gt boxes tensor (num_boxes, 4)
        labels: gt labels tensor (num_boxes,)
    Returns:
        img: the horizontally flipped PIL Image
        boxes: horizontally flipped gt boxes tensor (num_boxes, 4)
        labels: gt labels tensor (num_boxes,)
    """
    flipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
    xmin, ymin = boxes[:, 0], boxes[:, 1]
    xmax, ymax = boxes[:, 2], boxes[:, 3]
    flipped_boxes = tf.stack([1 - xmax, ymin, 1 - xmin, ymax], axis=1)
    return flipped_img, flipped_boxes, labels
def is_micropython_usb_device(port):
    """Checks a USB device to see if it looks like a MicroPython device.

    Accepts either a pyudev ``Device`` or an entry from
    ``serial.tools.list_ports.comports()`` and matches against the VID:PID
    pairs of known MicroPython boards.

    NOTE(review): detecting a Raspberry Pi Pico mutates the module-level
    USB_BUFFER_SIZE as a side effect — confirm callers expect that.
    """
    if type(port).__name__ == 'Device':
        # Assume its a pyudev.device.Device
        if ('ID_BUS' not in port or port['ID_BUS'] != 'usb' or
                'SUBSYSTEM' not in port or port['SUBSYSTEM'] != 'tty'):
            return False
        usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'], port['ID_MODEL_ID'])
    else:
        # Assume its a port from serial.tools.list_ports.comports()
        usb_id = port[2].lower()
    # We don't check the last digit of the PID since there are 3 possible
    # values.
    if usb_id.startswith('usb vid:pid=f055:980'):
        return True
    # Check Raspberry Pi Pico
    if usb_id.startswith('usb vid:pid=2e8a:0005'):
        global USB_BUFFER_SIZE
        USB_BUFFER_SIZE = RPI_PICO_USB_BUFFER_SIZE
        return True
    # Check for Teensy VID:PID
    if usb_id.startswith('usb vid:pid=16c0:0483'):
        return True
    # Check for LEGO Technic Large Hub
    if usb_id.startswith('usb vid:pid=0694:0010'):
        return True
    return False | 4b7e00abab0c927b982a5f326d4cb27f307a1eae | 3,630,009 |
def gauss_paramters():
    """
    Generate a random set of Gaussian parameters.

    NOTE(review): the misspelled name ('paramters') is kept for API
    compatibility with existing callers.

    Parameters
    ----------
    None
    Returns
    -------
    comps: int
        Number of components
    amp: array
        Amplitudes, decaying logarithmically from the core component
    x: array
        x positions of components
    y: array
        y positions of components
    sig_x: array
        standard deviation in x
    sig_y: array
        standard deviation in y
    rot: int
        rotation in degree
    sides: int
        0 for one-sided and 1 for two-sided jets
    """
    # random number of components between 4 and 6 inclusive
    # (np.random.randint's upper bound is exclusive)
    comps = np.random.randint(4, 7)  # decrease for smaller images
    # random start amplitude in [0, 9.9): randint(0, 100) * random() / 10
    amp_start = (np.random.randint(0, 100) * np.random.random()) / 10
    # if start amp is 0, draw a new number
    while amp_start == 0:
        amp_start = (np.random.randint(0, 100) * np.random.random()) / 10
    # logarithmic decrease to outer components
    amp = np.array([amp_start / np.exp(i) for i in range(comps)])
    # linear distance between the components
    x = np.arange(0, comps) * 5
    y = np.zeros(comps)
    # extension of components:
    # offsets off1/off2 in [0.125, 0.375) shift the linear index ramp,
    # factors fac1/fac2 in [0.25, 0.5) scale it; randomized per sigma
    off1 = (np.random.random() + 0.5) / 4
    off2 = (np.random.random() + 0.5) / 4
    fac1 = (np.random.random() + 1) / 4
    fac2 = (np.random.random() + 1) / 4
    sig_x = (np.arange(1, comps + 1) - off1) * fac1
    sig_y = (np.arange(1, comps + 1) - off2) * fac2
    # jet rotation
    rot = np.random.randint(0, 360)
    # jet one- or two-sided
    sides = np.random.randint(0, 2)
    return comps, amp, x, y, sig_x, sig_y, rot, sides | a921426f6b56303ff0f74b3a0e9c8111e390121c | 3,630,010 |
def ignore_previously_commented(reviews, username=None, email=None):
    """Ignore reviews where I'm the last commenter.

    A review is kept only when the most recent comment's reviewer name
    matches neither *username* nor *email*.
    """
    me = (username, email)
    return [review for review in reviews
            if _name(review['comments'][-1]['reviewer']) not in me]
def get_n_p(A_A, n_p_in='指定しない'):
    """Appendix C: virtual number of occupants (仮想居住人数).

    Args:
        A_A (float): floor area [m2]
        n_p_in (str): occupant-count input; one of '1人' (1 person),
            '2人' (2), '3人' (3), '4人以上' (4 or more), or
            '指定しない' (not specified, the default)

    Returns:
        float: virtual number of occupants.  When not specified, it is
        derived from the floor area: 1.0 below 30 m2, A_A / 30 up to
        120 m2, otherwise 4.0.

    Raises:
        KeyError: if n_p_in is none of the recognised labels.
    """
    if n_p_in is not None and n_p_in != '指定しない':
        return {
            '1人': 1.0,
            '2人': 2.0,
            '3人': 3.0,
            '4人以上': 4.0
        }[n_p_in]
    if A_A < 30:
        return 1.0
    elif A_A < 120:
        return A_A / 30
    else:
        return 4.0 | db257abdb76ee35f16b07e5baccec82211737971 | 3,630,012 |
def parse_ma_file(seq_obj, in_file):
    """
    read seqs.ma file and create dict with
    sequence object

    The first line is a header (seq id, sequence, then one column per
    sample).  Each data row updates the matching entry of ``seq_obj`` with
    its per-sample counts, accumulates per-sample totals, and collects the
    row's counts normalised by the row mean for median-of-ratios size
    factor estimation; finally ``_normalize_seqs`` applies the factors.

    :param seq_obj: dict mapping int sequence id -> sequence object with
        ``set_freq``/``set_seq`` methods.
    :param in_file: path to the seqs.ma file.
    :return: (normalised seq_obj, per-sample totals dict, row count + 1)
    """
    name = ""
    index = 1
    total = defaultdict(int)
    ratio = list()
    with open(in_file) as handle_in:
        # Header line: first two columns are id and sequence.
        line = handle_in.readline().strip()
        cols = line.split("\t")
        samples = cols[2:]
        for line in handle_in:
            line = line.strip()
            cols = line.split("\t")
            name = int(cols[0].replace("seq_", ""))
            seq = cols[1]
            exp = {}
            for i in range(len(samples)):
                exp[samples[i]] = int(cols[i+2])
                total[samples[i]] += int(cols[i+2])
            # NOTE(review): an all-zero row makes np.mean() zero and this
            # divides by it (inf/nan); zero-containing rows are dropped by
            # the (df.T != 0).all() filter below, but the runtime warning
            # remains — confirm inputs never contain all-zero rows.
            ratio.append(np.array(list(exp.values())) / np.mean(list(exp.values())))
            index = index+1
            if name in seq_obj:
                seq_obj[name].set_freq(exp)
                seq_obj[name].set_seq(seq)
            # new_s = sequence(seq, exp, index)
            # seq_l[name] = new_s
    # Median-of-ratios size factors over rows without any zeros.
    df = pd.DataFrame(ratio)
    df = df[(df.T != 0).all()]
    size_factor = dict(zip(samples, df.median(axis=0)))
    seq_obj = _normalize_seqs(seq_obj, size_factor)
    return seq_obj, total, index | ba17eba1a26cd913423fe685f10f1375bbbf04e1 | 3,630,013 |
def make_mmvt_boundary_definitions(cv, milestone):
    """
    Take a Collective_variable object and a particular milestone and
    return an OpenMM Force() object that the plugin can use to monitor
    crossings.

    Parameters
    ----------
    cv : Collective_variable()
        A Collective_variable object which contains all the information
        for the collective variable describing this variable: the groups
        of atoms involved with the CV and the expression whose contours
        define the boundaries.
    milestone : Milestone()
        A Milestone object which describes the boundary between two
        Voronoi cells; it provides the variable values entered into the
        Force() object.

    Returns
    -------
    myforce : openmm.Force()
        An OpenMM force object which does not affect atomic motion, but
        allows us to conveniently monitor a function of atomic position.
    """
    boundary_force = cv.make_force_object()
    # Group 1 lets the plugin query this force separately from the dynamics.
    boundary_force.setForceGroup(1)
    cv.add_parameters(boundary_force)
    milestone_values = cv.get_variable_values_list(milestone)
    cv.add_groups_and_variables(boundary_force, milestone_values)
    return boundary_force
def mask(inputs, queries=None, keys=None, type=None):
    """Masks paddings on keys or queries to inputs

    inputs: 3d tensor. (N, T_q, T_k)
    queries: 3d tensor. (N, T_q, d)
    keys: 3d tensor. (N, T_k, d)

    Raises ValueError for an unrecognised `type` (the original printed a
    warning and then crashed with UnboundLocalError on the return).

    e.g.,
    >> queries = tf.constant([[[1.],
                        [2.],
                        [0.]]], tf.float32) # (1, 3, 1)
    >> keys = tf.constant([[[4.],
                     [0.]]], tf.float32)  # (1, 2, 1)
    >> inputs = tf.constant([[[4., 0.],
                               [8., 0.],
                               [0., 0.]]], tf.float32)
    >> mask(inputs, queries, keys, "key")
    array([[[ 4.0000000e+00, -4.2949673e+09],
        [ 8.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09]]], dtype=float32)
    >> inputs = tf.constant([[[1., 0.],
                             [1., 0.],
                              [1., 0.]]], tf.float32)
    >> mask(inputs, queries, keys, "query")
    array([[[1., 0.],
        [1., 0.],
        [0., 0.]]], dtype=float32)
    """
    # Large negative instead of -inf so softmax stays numerically defined.
    padding_num = -2 ** 32 + 1
    if type in ("k", "key", "keys"):
        # Generate masks: a key position is padding when its embedding is
        # all zeros.
        masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1))  # (N, T_k)
        masks = tf.expand_dims(masks, 1)  # (N, 1, T_k)
        masks = tf.tile(masks, [1, tf.shape(queries)[1], 1])  # (N, T_q, T_k)
        # Apply masks to inputs
        paddings = tf.ones_like(inputs) * padding_num
        outputs = tf.where(tf.equal(masks, 0), paddings, inputs)  # (N, T_q, T_k)
    elif type in ("q", "query", "queries"):
        # Generate masks: zero out rows belonging to padded query positions.
        masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1))  # (N, T_q)
        masks = tf.expand_dims(masks, -1)  # (N, T_q, 1)
        masks = tf.tile(masks, [1, 1, tf.shape(keys)[1]])  # (N, T_q, T_k)
        # Apply masks to inputs
        outputs = inputs*masks
    elif type in ("f", "future", "right"):
        # Causal mask: hide positions to the right of the diagonal.
        diag_vals = tf.ones_like(inputs[0, :, :])  # (T_q, T_k)
        tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (T_q, T_k)
        masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(inputs)[0], 1, 1])  # (N, T_q, T_k)
        paddings = tf.ones_like(masks) * padding_num
        outputs = tf.where(tf.equal(masks, 0), paddings, inputs)
    else:
        # Fail fast instead of printing and then raising UnboundLocalError
        # on the return below.  (`type` shadows the builtin, but renaming
        # it would break keyword callers.)
        raise ValueError(
            "type must be one of 'key'/'keys', 'query'/'queries' or "
            "'future'/'right', got {!r}".format(type))
    return outputs
def is_leaf_module(module):
    """Utility function to determine if the given module is a leaf module - that is, does not have children modules

    ``module.modules()`` always yields the module itself first, so a leaf
    is exactly a module whose iterator yields a single element.

    :return:
        True if the module is a leaf, False otherwise
    """
    return len(list(module.modules())) == 1
import os
def get_dir_url(user_path):
    """
    Gets the URL for a directory by joining the user path (leading slashes
    stripped) under the module-level DIR_URL_ROOT.
    """
    relative_path = user_path.lstrip('/')
    return os.path.join(DIR_URL_ROOT, relative_path)
import macfs, MACFS
def _gettempdir_inner():
    """Function to calculate the directory to use.

    Legacy Python 2 implementation (backquote repr, ``raise E, msg``
    syntax, ``os.environ.has_key``, Mac OS 9 / RISC OS branches): builds a
    candidate list from platform defaults plus TMPDIR/TEMP/TMP, returns
    the first directory where a probe file can actually be created, and
    caches the result in the module-level ``tempdir``.
    """
    global tempdir
    if tempdir is not None:
        return tempdir
    try:
        pwd = os.getcwd()
    except (AttributeError, os.error):
        pwd = os.curdir
    attempdirs = ['/tmp', '/var/tmp', '/usr/tmp', pwd]
    if os.name == 'nt':
        attempdirs.insert(0, 'C:\\TEMP')
        attempdirs.insert(0, '\\TEMP')
    elif os.name == 'mac':
        try:
            refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
                                             MACFS.kTemporaryFolderType, 1)
            dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
            attempdirs.insert(0, dirname)
        except macfs.error:
            pass
    elif os.name == 'riscos':
        scrapdir = os.getenv('Wimp$ScrapDir')
        if scrapdir:
            attempdirs.insert(0, scrapdir)
    # Environment variables take priority over the platform defaults.
    for envname in 'TMPDIR', 'TEMP', 'TMP':
        if os.environ.has_key(envname):
            attempdirs.insert(0, os.environ[envname])
    testfile = gettempprefix() + 'test'
    for dir in attempdirs:
        try:
            filename = os.path.join(dir, testfile)
            if os.name == 'posix':
                # On posix, create the probe with O_EXCL and mode 0700 to
                # avoid symlink attacks in world-writable directories.
                try:
                    fd = os.open(filename,
                                 os.O_RDWR | os.O_CREAT | os.O_EXCL, 0700)
                except OSError:
                    pass
                else:
                    fp = os.fdopen(fd, 'w')
                    fp.write('blat')
                    fp.close()
                    os.unlink(filename)
                    del fp, fd
                    tempdir = dir
                    break
            else:
                fp = open(filename, 'w')
                fp.write('blat')
                fp.close()
                os.unlink(filename)
                tempdir = dir
                break
        except IOError:
            pass
    if tempdir is None:
        msg = "Can't find a usable temporary directory amongst " + `attempdirs`
        raise IOError, msg
    return tempdir | 89f9d1f68b892552f6e0f4759959b6d0b2d4423e | 3,630,018 |
def edit_subgroup_purchases(request, delivery, subgroup):
    """Allows to change the purchases of user's subgroup. Subgroup staff only.

    GET renders the edit form; POST parses the submitted purchases via
    ``_parse_form`` and redirects to the subgroup-purchases view.  Access
    is restricted to staff of the subgroup or of the delivery's network
    (the French HttpResponseForbidden message is user-facing; kept as-is).
    """
    delivery = get_delivery(delivery)
    user = request.user
    subgroup = get_subgroup(subgroup)
    # Authorisation: subgroup staff or network staff only.
    if user not in subgroup.staff.all() and user not in delivery.network.staff.all():
        return HttpResponseForbidden('Réservé aux administrateurs du réseau ' + delivery.network.name + \
            ' ou du sous-groupe '+subgroup.name)
    if request.method == 'POST':
        _parse_form(request)
        return redirect("circuitscourts:view_subgroup_purchases_html", delivery=delivery.id, subgroup=subgroup.id)
    else:
        vars = delivery_description(delivery, [subgroup], user=user)
        vars.update(csrf(request))
        return render_to_response('edit_subgroup_purchases.html', vars) | 799e068aad1eef95cbccb69f5a30ee78b2c526f8 | 3,630,019 |
def get_biggest_pv_to_exchange_ratio(dataset):
    """Return the largest ratio of production volume to exchange amount.

    NOTE(review): the original docstring claims only exchanges with the
    ``allocatable product`` classification are considered, but the code
    filters solely on ``type == 'reference product'`` and a non-zero
    amount — confirm which is intended.

    In theory, this ratio should always be the same in a multioutput
    dataset.  However, this is quite often not the case, and when
    calculating production volume for other exchanges (byproducts,
    activity links) we need one number for the dataset.  So, we look for
    the biggest absolute value.  This may not be perfect, but it is
    consistent.

    :param dataset: dict with 'exchanges' (list of exchange dicts) and 'name'.
    :return: float — the ratio with the largest absolute value.
    :raises ZeroProduction: if no suitable reference product exchange exists.
    """
    ratios = [
        exc['production volume']['amount'] / exc['amount']
        for exc in dataset['exchanges']
        if exc['type'] == 'reference product'
        and exc['amount']
    ]
    if not ratios:
        message = "No suitable reference product exchanges in {}"
        raise ZeroProduction(message.format(dataset['name']))
    # max(key=abs) replaces sorting the whole list just to take the first
    # element; ties resolve to the earliest exchange, as before.
    return max(ratios, key=abs)
def sequence_processing_pipeline(qclient, job_id, parameters, out_dir):
    """Sequence Processing Pipeline command

    Runs the full BCL -> fastq -> QC -> FastQC -> prep-file pipeline for
    one sequencing run and stages results for Qiita: convert BCLs, trim,
    generate reports and prep files, then tar logs/reports and move the
    per-project trimmed files into the Qiita upload directories.

    Parameters
    ----------
    qclient : tgp.qiita_client.QiitaClient
        The Qiita server client
    job_id : str
        The job id
    parameters : dict
        The parameter values for this job
    out_dir : str
        The path to the job's output directory
    Returns
    -------
    bool, list, str
        The results of the job
    """
    run_identifier = parameters.pop('run_identifier')
    sample_sheet = parameters.pop('sample_sheet')
    job_pool_size = 30
    # checking if this is running as part of the unittest
    # https://stackoverflow.com/a/25025987
    skip_exec = True if [x for x in stack() if
                         'unittest' in x.filename] else False
    success = True
    ainfo = None
    msg = None
    qclient.update_job_step(job_id, "Step 1 of 6: Setting up pipeline")
    # A sample sheet uploaded through Qiita arrives as exactly this
    # three-key dict; anything else is rejected in the else branch below.
    if {'body', 'content_type', 'filename'} == set(sample_sheet):
        # Create a Pipeline object
        try:
            pipeline = Pipeline(CONFIG_FP, run_identifier, out_dir, job_id)
        except PipelineError as e:
            # Pipeline is the object that finds the input fp, based on
            # a search directory set in configuration.json and a run_id.
            if str(e).endswith("could not be found"):
                msg = f"A path for {run_identifier} could not be found."
                return False, None, msg
            else:
                raise e
        outpath = partial(join, out_dir)
        final_results_path = outpath('final_results')
        makedirs(final_results_path, exist_ok=True)
        # the user is uploading a sample-sheet to us, but we need the
        # sample-sheet as a file to pass to the Pipeline().
        sample_sheet_path = outpath(sample_sheet['filename'])
        with open(sample_sheet_path, 'w') as f:
            f.write(sample_sheet['body'])
        msgs, val_sheet = pipeline.validate(sample_sheet_path)
        if val_sheet is None:
            # only pass the top message to update_job_step, due to
            # limited display width.
            msg = str(msgs[0]) if msgs else "Sample sheet failed validation."
            qclient.update_job_step(job_id, msg)
            raise ValueError(msg)
        else:
            # if we're passed a val_sheet, assume any msgs are warnings only.
            # unfortunately, we can only display the top msg.
            msg = msgs[0] if msgs else None
            qclient.update_job_step(job_id, f'warning: {msg}')
        # get project names and their associated qiita ids
        bioinformatics = val_sheet.Bioinformatics
        lst = bioinformatics.to_dict('records')
        sifs = pipeline.generate_sample_information_files(sample_sheet_path)
        # find the uploads directory all trimmed files will need to be
        # moved to.
        results = qclient.get("/qiita_db/artifacts/types/")
        # trimmed files are stored by qiita_id. Find the qiita_id
        # associated with each project and ensure a subdirectory exists
        # for when it comes time to move the trimmed files.
        special_map = []
        for result in lst:
            project_name = result['Sample_Project']
            qiita_id = result['QiitaID']
            upload_path = join(results['uploads'], qiita_id)
            makedirs(upload_path, exist_ok=True)
            special_map.append((project_name, upload_path))
        # Create a SequenceDirectory object
        sdo = SequenceDirectory(pipeline.run_dir, sample_sheet_path)
        qclient.update_job_step(job_id,
                                "Step 2 of 6: Converting BCL to fastq")
        config = pipeline.configuration['bcl-convert']
        convert_job = ConvertJob(pipeline.run_dir,
                                 pipeline.output_path,
                                 sdo.sample_sheet_path,
                                 config['queue'],
                                 config['nodes'],
                                 config['nprocs'],
                                 config['wallclock_time_in_hours'],
                                 config['per_process_memory_limit'],
                                 config['executable_path'],
                                 config['modules_to_load'],
                                 job_id)
        # if skip_execution is True, then each Pipeline object will be
        # initialized, their assertions tested, and an ainfo will be
        # returned to the caller. However the Jobs will not actually
        # be executed. This is useful for testing.
        if not skip_exec:
            convert_job.run()
        qclient.update_job_step(job_id,
                                "Step 3 of 6: Adaptor & Host [optional] "
                                "trimming")
        raw_fastq_files_path = join(pipeline.output_path, 'ConvertJob')
        config = pipeline.configuration['qc']
        qc_job = QCJob(raw_fastq_files_path,
                       pipeline.output_path,
                       sdo.sample_sheet_path,
                       config['mmi_db'],
                       config['queue'],
                       config['nodes'],
                       config['nprocs'],
                       config['wallclock_time_in_hours'],
                       config['job_total_memory_limit'],
                       config['fastp_executable_path'],
                       config['minimap2_executable_path'],
                       config['samtools_executable_path'],
                       config['modules_to_load'],
                       job_id,
                       job_pool_size,
                       config['job_max_array_length'])
        if not skip_exec:
            qc_job.run()
        qclient.update_job_step(job_id, "Step 4 of 6: Generating FastQC & "
                                        "MultiQC reports")
        config = pipeline.configuration['fastqc']
        raw_fastq_files_path = join(pipeline.output_path, 'ConvertJob')
        processed_fastq_files_path = join(pipeline.output_path, 'QCJob')
        fastqc_job = FastQCJob(pipeline.run_dir,
                               pipeline.output_path,
                               raw_fastq_files_path,
                               processed_fastq_files_path,
                               config['nprocs'],
                               config['nthreads'],
                               config['fastqc_executable_path'],
                               config['modules_to_load'],
                               job_id,
                               config['queue'],
                               config['nodes'],
                               config['wallclock_time_in_hours'],
                               config['job_total_memory_limit'],
                               job_pool_size,
                               config['multiqc_config_file_path'],
                               config['job_max_array_length'])
        if not skip_exec:
            fastqc_job.run()
        project_list = fastqc_job.project_names
        qclient.update_job_step(job_id, "Step 5 of 6: Generating Prep "
                                        "Information Files")
        config = pipeline.configuration['seqpro']
        gpf_job = GenPrepFileJob(
            pipeline.run_dir,
            raw_fastq_files_path,
            processed_fastq_files_path,
            pipeline.output_path,
            sdo.sample_sheet_path,
            config['seqpro_path'],
            project_list,
            config['modules_to_load'],
            job_id)
        if not skip_exec:
            gpf_job.run()
        qclient.update_job_step(job_id, "Step 6 of 6: Copying results to "
                                        "archive")
        cmds = [f'cd {out_dir}; tar zcvf logs-ConvertJob.tgz ConvertJob/logs',
                f'cd {out_dir}; tar zcvf reports-ConvertJob.tgz '
                'ConvertJob/Reports ConvertJob/Logs',
                f'cd {out_dir}; tar zcvf logs-QCJob.tgz QCJob/logs',
                f'cd {out_dir}; tar zcvf logs-FastQCJob.tgz '
                'FastQCJob/logs',
                f'cd {out_dir}; tar zcvf reports-FastQCJob.tgz '
                'FastQCJob/fastqc',
                f'cd {out_dir}; tar zcvf logs-GenPrepFileJob.tgz '
                'GenPrepFileJob/logs',
                f'cd {out_dir}; tar zcvf prep-files.tgz '
                'GenPrepFileJob/PrepFiles']
        # just use the filenames for tarballing the sifs.
        # the sifs should all be stored in the {out_dir} by default.
        if sifs:
            tmp = [basename(x) for x in sifs]
            # convert sifs into a list of filenames.
            tmp = ' '.join(tmp)
            cmds.append(f'cd {out_dir}; tar zcvf sample-files.tgz {tmp}')
        csv_fps = []
        for root, dirs, files in walk(join(gpf_job.output_path, 'PrepFiles')):
            for csv_file in files:
                csv_fps.append(join(root, csv_file))
        for project, upload_dir in special_map:
            cmds.append(f'cd {out_dir}; tar zcvf reports-QCJob.tgz '
                        f'QCJob/{project}/fastp_reports_dir')
            if exists(f'{out_dir}/QCJob/{project}/filtered_sequences'):
                cmds.append(f'cd {out_dir}; mv '
                            f'QCJob/{project}/filtered_sequences/* '
                            f'{upload_dir}')
            else:
                cmds.append(f'cd {out_dir}; mv '
                            f'QCJob/{project}/trimmed_sequences/* '
                            f'{upload_dir}')
            for csv_file in csv_fps:
                if project in csv_file:
                    cmds.append(f'cd {out_dir}; mv {csv_file} {upload_dir}')
                    break
        cmds.append(f'cd {out_dir}; mv *.tgz final_results')
        cmds.append(f'cd {out_dir}; mv FastQCJob/multiqc final_results')
        if sifs:
            # NOTE(review): upload_dir here is the leftover loop variable
            # from the projects loop above, i.e. the LAST project's upload
            # directory — confirm the sample-files tarball belongs there
            # rather than in every project's upload dir.
            cmds.append(f'cd {out_dir}; mv sample-files.tgz {upload_dir}')
        if skip_exec:
            cmds = []
        # Log every shell command before executing, for post-mortem review.
        cmd_log_fp = join(out_dir, 'cmds.log')
        with open(cmd_log_fp, 'w') as cmd_log_f:
            for cmd in cmds:
                cmd_log_f.write(f'{cmd}\n')
        for cmd in cmds:
            p = Popen(cmd, universal_newlines=True, shell=True,
                      stdout=PIPE, stderr=PIPE)
            std_out, std_err = p.communicate()
            return_code = p.returncode
            if return_code != 0:
                raise PipelineError(f"'{cmd}' returned {return_code}")
        ainfo = [
            ArtifactInfo('output', 'job-output-folder',
                         [(f'{final_results_path}/', 'directory')])
        ]
    else:
        success = False
        msg = "This doesn't appear to be a valid sample sheet; please review."
    qclient.update_job_step(job_id, "Main Pipeline Finished, processing "
                                    "results")
    return success, ainfo, msg | aa95360945183293a6f80adcff9ec8726c217219 | 3,630,021 |
def Reorder(x, params, output=None, **kwargs):
    """Reorder a tuple into another tuple.

    For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
    The output argument specifies how to re-order, using integers that refer
    to indices in the input tuple. For example, if
        input = (x, y, z)
    then
        Reorder(input, output=(1, 0, 2)) = (y, x, z)
        Reorder(input, output=(0, 0)) = (x, x)
        Reorder(input, output=(0, (1, 1))) = (x, (y, y))
        Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
    By default (if no output is given) Reorder does nothing (Identity).

    Args:
        x: the input tuple to re-order.
        params: layer parameters (unused).
        output: the specification of the output tuple: a nested tuple of ints.
        **kwargs: other arguments (unused).

    Returns:
        The re-ordered tuple with the same shape as output.
    """
    del params, kwargs  # unused, but part of the layer call signature
    return x if output is None else base.nested_map(output, lambda idx: x[idx])
def _is_fix_comment(line, isstrict):
""" Check if line is a comment line in fixed format Fortran source.
References
----------
:f2008:`3.3.3`
"""
if line:
if line[0] in '*cC!':
return True
if not isstrict:
i = line.find('!')
if i!=-1:
start = line[:i].lstrip()
if not start:
if i==5:
# line continuation
return False
return True
else:
# inline comment or ! is used in character context
# inline comments are handled elsewhere
pass
elif line=='':
return True
return False | 8ac7f74f2b4e57b9fb65183a46ed3dbfc0f7ef79 | 3,630,023 |
def parseConfigFile(configFilePath):
    """
    Parse a simple ``name = value`` configuration file.

    Blank lines and lines starting with '#' are ignored.  Only the FIRST
    '=' separates name from value, so values may themselves contain '='
    (the original ``split("=")[1]`` silently truncated such values).

    :param configFilePath: path to the configuration file.
    :return: a hash map of the parameters defined in the given file.
        Each entry is organized as <parameter name, parameter value>.
    :raises Exception: if a non-comment line contains no '='.
    """
    params = {}
    with open(configFilePath) as f:
        for raw_line in f:
            line = raw_line.strip()
            if line == '':  # ignore empty line
                continue
            if line.startswith('#'):  # ignore the comment in config file
                continue
            if "=" not in line:
                raise Exception("Invalid parameter definition as \"" + line + "\" in file " + configFilePath)
            # partition splits on the first '=' only.
            paramName, _, value = line.partition("=")
            params[paramName.strip()] = value.strip()
    return params
def powerset(iterable):
    """
    powerset([1,2,3]) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]

    Subsets are ordered by size, then by position of their elements in the
    input.

    Args:
        iterable : iterable (e.g. list, tuple,...) - set to generate possible subsets of
    Returns:
        list - list of possible subsets (as tuples)
    """
    elems = list(iterable)
    subsets = []
    for size in range(len(elems) + 1):
        subsets.extend(combinations(elems, size))
    return subsets
def request_artifact_published(etos, artifact_id):
    """Request an artifact published event from graphql.

    :param etos: ETOS library instance.
    :type etos: :obj:`etos_lib.etos.Etos`
    :param artifact_id: ID of artifact created the artifact published links to.
    :type artifact_id: str
    :return: Response from graphql or None
    :rtype: dict or None
    """
    query = """
    {
      artifactPublished(last: 1, search: "{'links.type': 'ARTIFACT', 'links.target': '%s'}") {
        edges {
          node {
            data {
              locations {
                type
                uri
              }
            }
          }
        }
      }
    }
    """
    # Return the first truthy response, or None once the generator is
    # exhausted.
    responses = request(etos, query % artifact_id)
    return next((reply for reply in responses if reply), None)
import requests
from datetime import datetime
def get_stats(selected_sensors=None):
    """ return a dictionary of { sensorname: {stats} }
    NOTE: runs immediately (blocking HTTP) - used when we _need_ a result

    Queries each registered sensor's /debug/stats endpoint over HTTPS with
    client certificates and parses the "key value" response lines.

    :param selected_sensors: a single sensor name, an iterable of names,
        or None to query every instance in the module-level _INSTANCES.
    :return: dict mapping sensor name to its stats dict, or to an error
        string ("Connection Timeout"/"Read Timeout") on timeout.
    """
    # Fix: timedelta is used below but only `datetime` was imported at the
    # top of the file, so converting oldest_timestamp raised NameError.
    from datetime import timedelta
    Config.logger.debug("Get Stats: {}".format(selected_sensors))
    if is_str(selected_sensors):
        selected_sensors = (selected_sensors, )
    datas = {}
    global _INSTANCES
    for instance in _INSTANCES:
        if ((selected_sensors is not None) and
                (instance['sensor'] not in selected_sensors)):
            continue
        Config.logger.debug("query instance: {}".format(instance['sensor']))
        url = "https://{}:{}/debug/stats".format(
            instance['host'], instance['port'])
        data = None
        try:
            rq = requests.get(
                url,
                cert=(instance['cert'], instance['key']),
                verify=instance['ca'],
                timeout=STAT_TIMEOUT
            )
            if rq.status_code != requests.codes.ok:
                Config.logger.error("stats failed({}): {} {}".format(rq.status_code,
                                                                     instance['sensor'],
                                                                     rq.text))
                continue
        except requests.exceptions.ConnectTimeout:
            Config.logger.error("Stats: {}:{} Connection timeout after {} seconds".format(
                instance['host'], instance['port'], STAT_TIMEOUT))
            data = "Connection Timeout"
        except requests.exceptions.ReadTimeout:
            Config.logger.error("Stats: {}:{} Didn't provide stats in {} seconds".format(
                instance['host'], instance['port'], STAT_TIMEOUT))
            data = "Read Timeout"
        except requests.exceptions.ConnectionError as ex:
            Config.logger.error("Stats: {}:{} Connection Error? {}".format(
                instance['host'], instance['port'], str(ex)))
            raise
        except requests.exceptions.SSLError as ex:
            Config.logger.error("Stats: {}:{} SSL Error - check conf {}".format(
                instance['host'], instance['port'], str(ex)))
            raise
        else:
            # Request succeeded: parse "key value" lines into the
            # instance's persistent stats dict.
            Config.logger.debug("response code: {}".format(rq.status_code))
            lines = rq.text.split('\n')
            # NOTE(review): assumes instance['stats'] already exists; a
            # missing key makes `data` None and the assignments below raise
            # TypeError - confirm instances are seeded with 'stats'.
            data = instance.get('stats')
            for line in lines:
                if line.strip():
                    k, v = line.split()
                    data[k] = int(v)
            if 'oldest_timestamp' in data:
                # ot/1000 -> microseconds suggests the endpoint reports
                # nanoseconds since the epoch - TODO confirm the unit.
                ot = data['oldest_timestamp']
                dt = datetime.utcfromtimestamp(0) + timedelta(microseconds=ot/1000)
                data['oldest_timestamp'] = dt.strftime(ISOFORMAT)
            if data.get('indexfile_current_reads') == 0:
                instance['idle'] = datetime.utcnow()
        datas[instance['sensor']] = data
    return datas
from scipy.signal import find_peaks
def count_abs_peak(arr1d, threshold):
    """
    Count the peaks in a radar backscatter time series whose height is at
    least ``threshold``.

    NOTE(review): the original docstring described counting how often the
    signal *drops beneath* the threshold, but ``find_peaks(height=...)``
    selects peaks at or ABOVE it — confirm which semantics the callers
    expect.  (To count dips, pass ``-arr1d`` with ``height=-threshold``.)

    Bug fixed: ``scipy.signal.find_peaks`` returns a
    ``(peak_indices, properties)`` tuple, so ``len(find_peaks(...))`` was
    always 2 regardless of the data.

    ----------
    arr1d: numpy.array
        1D array representing the time series for one pixel
    threshold: float
        radar backscatter value - depends on type of polarization (smaller for VH than for VV)

    Returns
    ----------
    int
        the number of peaks with height >= threshold
    """
    peak_indices, _ = find_peaks(arr1d, height=threshold)
    return len(peak_indices)
from typing import List
def torch_to_numpy(data: Dataset) -> List[np.ndarray]:
    """Convert data from torch dataset to list of numpy arrays [input, target]."""
    # Pre-allocate based on the first sample's shape.
    sample_count = len(data)
    images = np.zeros((sample_count, *data[0][0].shape))
    labels = np.zeros(sample_count)
    # Samples in torch datasets may change between iterations because of
    # random transforms, so the dataset is materialised exactly once here.
    # TODO: What to do if data is too large for memory?
    for idx, (image, label) in enumerate(data):
        images[idx] = image
        labels[idx] = label
    return [images, labels]
def planck_taper(tlist, t1, t2):
    """Planck-taper window over the times in tlist.

    For t <= t1 the window is 0; for t >= t2 it is 1; in between it is
    1./(exp((t2-t1)/(t-t1)+(t2-t1)/(t-t2))+1), which rises smoothly
    from 0 to 1. Returns a numpy array of the same length as tlist.
    """
    width = None  # computed per-call below for clarity

    def _taper(t):
        # Flat regions outside the transition band.
        if t <= t1:
            return 0.
        if t >= t2:
            return 1.
        # Smooth transition between t1 and t2.
        return 1. / (exp((t2 - t1) / (t - t1) + (t2 - t1) / (t - t2)) + 1)

    return asarray([_taper(t) for t in tlist])
def has_annotations(doc):
    """ Check if document has any mutation mention saved. """
    # A non-empty 'annotations' list in any part means the document
    # carries at least one saved mention.
    return any(part['annotations'] for part in doc.values())
def xmon_to_arc(xmon: XmonDevice) -> Architecture:
    """Generates a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture` object for a Cirq :py:class:`XmonDevice` .

    :param xmon: The device to convert

    :return: The corresponding :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture`
    """
    nodes = len(xmon.qubits)
    indexed_qubits = _sort_row_col(xmon.qubits)
    # Map each qubit to its index once; the repeated list.index() calls in
    # the original made this quadratic in the number of qubits.
    index_of = {qb: i for i, qb in enumerate(indexed_qubits)}
    pairs = []
    for qb in indexed_qubits:
        i = index_of[qb]
        for neighbour in xmon.neighbors_of(qb):
            j = index_of[neighbour]
            # Only keep higher-index neighbours to avoid double counting edges.
            if j > i:
                pairs.append((i, j))
    return Architecture(pairs, nodes)
def LoadAcqSA():
    """Acquisition Loading per Sum Assured"""
    # Base loading plus a term-dependent component capped at 10 years.
    base = SpecLookup("LoadAcqSAParam1", Product())
    slope = SpecLookup("LoadAcqSAParam2", Product())
    term_factor = min(PolicyTerm / 10, 1)
    return base + slope * term_factor
def DEFAULT_APPLICANT_SCRUBBER(raw):
    """Remove all personal data."""
    # Whitelist approach: only non-personal bookkeeping fields survive.
    kept = ("id", "href", "created_at")
    scrubbed = {}
    for key, value in raw.items():
        if key in kept:
            scrubbed[key] = value
    return scrubbed
import io
def _has_fileno(f):
# type: (Any) -> bool
""" test that a file-like object is really a filehandle
Only filehandles can be given to apt_pkg.TagFile.
"""
try:
f.fileno()
return True
except (AttributeError, io.UnsupportedOperation):
return False | 07e7ffb43886775125d314196c5b0acc20d090a6 | 3,630,035 |
import binascii
def _sign_rsa(hash_algorithm_name: str,
              sig_base_str: str,
              rsa_private_key: str) -> str:
    """
    Calculate the signature for an RSA-based signature method.

    The ``alg`` is used to calculate the digest over the signature base string.
    For the "RSA_SHA1" signature method, the alg must be SHA-1. While OAuth 1.0a
    only defines the RSA-SHA1 signature method, this function can be used for
    other non-standard signature methods that only differ from RSA-SHA1 by the
    digest algorithm.

    Signing for the RSA-SHA1 signature method is defined in
    `section 3.4.3`_ of RFC 5849.

    The RSASSA-PKCS1-v1_5 signature algorithm used defined by
    `RFC3447, Section 8.2`_ (also known as PKCS#1), with the `alg` as the
    hash function for EMSA-PKCS1-v1_5.  To
    use this method, the client MUST have established client credentials
    with the server that included its RSA public key (in a manner that is
    beyond the scope of this specification).

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: https://tools.ietf.org/html/rfc3447#section-8.2
    """
    # Get the implementation of RSA-hash

    alg = _get_jwt_rsa_algorithm(hash_algorithm_name)

    # Check private key

    if not rsa_private_key:
        raise ValueError('rsa_private_key required for RSA with ' +
                         alg.hash_alg.name + ' signature method')

    # Convert the "signature base string" into a sequence of bytes (M)
    #
    # The signature base string, by definition, only contain printable US-ASCII
    # characters. So encoding it as 'ascii' will always work. It will raise a
    # ``UnicodeError`` if it can't encode the value, which will never happen
    # if the signature base string was created correctly. Therefore, using
    # 'ascii' encoding provides an extra level of error checking.

    m = sig_base_str.encode('ascii')

    # Perform signing: S = RSASSA-PKCS1-V1_5-SIGN (K, M)

    key = _prepare_key_plus(alg, rsa_private_key)
    s = alg.sign(m, key)

    # base64-encoded per RFC2045 section 6.8.
    #
    # 1. While b2a_base64 implements base64 defined by RFC 3548. As used here,
    #    it is the same as base64 defined by RFC 2045.
    # 2. b2a_base64 includes a "\n" at the end of its result ([:-1] removes it)
    # 3. b2a_base64 produces a binary string. Use decode to produce a str.
    #    It should only contain only printable US-ASCII characters.

    return binascii.b2a_base64(s)[:-1].decode('ascii')
def partition(my_list: list, part: int) -> list:
    """Rearrange *my_list* in place around the pivot value *part*.

    Elements smaller than *part* end up before elements that are
    greater than or equal to it.  The same (mutated) list is returned.
    """
    lo = 0
    hi = len(my_list) - 1
    while lo < hi:
        low_ok = my_list[lo] < part      # front element already placed?
        high_ok = my_list[hi] >= part    # back element already placed?
        if low_ok or high_ok:
            # Advance whichever pointer already sits on a correct element.
            if low_ok:
                lo += 1
            if high_ok:
                hi -= 1
        else:
            # Both elements are on the wrong side: exchange them.
            my_list[lo], my_list[hi] = my_list[hi], my_list[lo]
    return my_list
import numpy
import logging
import multiprocessing
def GenerateHoverDatabase():
  """Generates a hover aerodynamics database in the DVL format.

  A hover aerodynamic database models all the aerodynamic surfaces as
  independent airfoils and accounts for the effect of the propwash on
  these surfaces.  The database is output in the DVL format which
  includes the following coefficients and derivatives:

    cx, cy, cz, cl, cm, cn,
    dcx1, dcy1, dcz1, dcl1, dcm1, dcn1,
    ...
    dcx8, dcy8, dcz8, dcl8, dcm8, dcn8,
    dcx/dp, dcy/dp, dcz/dp, dcl/dp, dcm/dp, dcn/dp
    dcx/dq, dcy/dq, dcz/dq, dcl/dq, dcm/dq, dcn/dq
    dcx/dr, dcy/dr, dcz/dr, dcl/dr, dcm/dr, dcn/dr
    dcx1/dp, dcy1/dp, dcz1/dp, dcl1/dp, dcm1/dp, dcn1/dp
    dcx1/dq, dcy1/dq, dcz1/dq, dcl1/dq, dcm1/dq, dcn1/dq
    dcx1/dr, dcy1/dr, dcz1/dr, dcl1/dr, dcm1/dr, dcn1/dr
    ...
  """
  # NOTE(review): dict.iteritems() below means this module targets Python 2.
  # Angles come in as degrees from FLAGS and are converted to radians here.
  alpha_list = _VariableLinspaceFromStrings(
      FLAGS.alpha_intervals_deg, FLAGS.num_alphas) * numpy.pi / 180.0
  beta_list = _VariableLinspaceFromStrings(
      FLAGS.beta_intervals_deg, FLAGS.num_betas) * numpy.pi / 180.0
  delta1s = numpy.pi / 180.0 * FLAGS.delta1s_deg
  delta2s = numpy.pi / 180.0 * FLAGS.delta2s_deg
  delta3s = numpy.pi / 180.0 * FLAGS.delta3s_deg
  delta4s = numpy.pi / 180.0 * FLAGS.delta4s_deg
  delta5s = numpy.pi / 180.0 * FLAGS.delta5s_deg
  delta6s = numpy.pi / 180.0 * FLAGS.delta6s_deg
  delta7s = numpy.pi / 180.0 * FLAGS.delta7s_deg
  delta8s = numpy.pi / 180.0 * FLAGS.delta8s_deg

  logging.info('Building wing parameters and airfoils for %s:%s.',
               FLAGS.wing_model, FLAGS.wing_serial)
  params = hover_model.GetParams(FLAGS.wing_model, FLAGS.wing_serial,
                                 use_wake_model=FLAGS.use_wake_model)

  def _ProcessTasks(tasks, group_name=''):
    """Iterates through a dictionary of tasks executing them in parallel."""
    def _CreateTaskTarget(func):
      # Wrap func so its return value is shipped back through a queue.
      def _WrapFun(args, pipe):
        pipe.put(func(*args))
        pipe.close()
      return _WrapFun

    queues = [multiprocessing.Queue() for _ in tasks]
    task_names = []
    processes = []
    for i, (name, (func, args)) in enumerate(tasks.iteritems()):
      task_names += [name]
      processes.append(multiprocessing.Process(
          target=_CreateTaskTarget(func), args=(args, queues[i])))
    pending = [True for _ in range(len(task_names))]
    running = [False for _ in range(len(task_names))]
    results = {
        task_name: None for task_name in task_names
    }
    # Poll loop: keep up to FLAGS.num_workers processes in flight and
    # harvest results from their queues as they complete.
    while numpy.sum(running) > 0 or numpy.sum(pending) > 0:
      # Fill the running queue.
      while numpy.sum(running) < FLAGS.num_workers and numpy.sum(pending) > 0:
        start_idx = numpy.argwhere(pending)[0, 0]
        processes[start_idx].start()
        logging.info('Calculating %s%s...',
                     group_name + ': ' if group_name else '',
                     task_names[start_idx])
        pending[start_idx] = False
        running[start_idx] = True

      done = numpy.logical_and(running, [not queue.empty() for queue in queues])
      for done_idx in numpy.argwhere(done):
        results[task_names[done_idx[0]]] = queues[done_idx[0]].get(
            block=True, timeout=None)
        running[done_idx[0]] = False

    # Wait for work to finish.
    for process in processes:
      process.join()

    return results

  # Static metadata recorded alongside the computed aero coefficients.
  database = {
      'reynolds_number': (params['phys']['rho'] * FLAGS.apparent_wind_speed
                          * params['wing']['c']
                          / params['phys']['dynamic_viscosity']),
      'alphas': alpha_list,
      'betas': beta_list,
      'delta1s': delta1s,
      'delta2s': delta2s,
      'delta3s': delta3s,
      'delta4s': delta4s,
      'delta5s': delta5s,
      'delta6s': delta6s,
      'delta7s': delta7s,
      'delta8s': delta8s,
  }

  # Pre-compute the local apparent wind speeds for each sampling point
  # on each surface for all the different alphas, betas, and angular
  # rates.  This is the most expensive part of the database
  # calculation, so there are huge gains from pre-computing these.
  alphas, betas = numpy.meshgrid(alpha_list, beta_list, indexing='ij')
  omega_hat_step = 1e-2
  tasks = {
      'nominal': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [0.0, 0.0, 0.0], FLAGS.apparent_wind_speed, params)),
      'positive_p': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [omega_hat_step / 2.0, 0.0, 0.0],
           FLAGS.apparent_wind_speed, params)),
      'negative_p': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [-omega_hat_step / 2.0, 0.0, 0.0],
           FLAGS.apparent_wind_speed, params)),
      'positive_q': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [0.0, omega_hat_step / 2.0, 0.0],
           FLAGS.apparent_wind_speed, params)),
      'negative_q': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [0.0, -omega_hat_step / 2.0, 0.0],
           FLAGS.apparent_wind_speed, params)),
      'positive_r': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [0.0, 0.0, omega_hat_step / 2.0],
           FLAGS.apparent_wind_speed, params)),
      'negative_r': (
          hover_model.PrecomputeLocalApparentWindSph,
          (alphas, betas, [0.0, 0.0, -omega_hat_step / 2.0],
           FLAGS.apparent_wind_speed, params))
  }
  local_apparent_wind_sph = _ProcessTasks(tasks, 'local apparent wind')

  # The delta increment tasks require the base CFM so we run it first.
  tasks = {
      'cfm': (CalcZeroDeflectionCoeffs, (alpha_list, beta_list,
                                         local_apparent_wind_sph['nominal'],
                                         params))
  }
  # Central differences about zero angular rate for the p/q/r derivatives.
  rate_steps = {
      'p': [omega_hat_step, 0.0, 0.0],
      'q': [0.0, omega_hat_step, 0.0],
      'r': [0.0, 0.0, omega_hat_step]
  }
  for name, step in rate_steps.iteritems():
    derivative_name = 'dcfm_d%s' % name
    tasks.update({
        derivative_name: (CalcAngularRateDerivativeCoeffs,
                          (alpha_list, beta_list, step,
                           local_apparent_wind_sph['positive_%s' % name],
                           local_apparent_wind_sph['negative_%s' % name],
                           params))
    })
  database.update(_ProcessTasks(tasks))

  # The delta increment angular rate tasks require the results of the
  # previous computations so we run them in a separate sweep.
  tasks = {}
  deltas_list = [delta1s, delta2s, delta3s, delta4s, delta5s, delta6s,
                 delta7s, delta8s]
  for delta_index, delta_list in enumerate(deltas_list):
    derivative_name = 'dcfm%d' % (delta_index + 1)
    tasks.update({
        derivative_name: (CalcDeltaIncrementCoeffs,
                          (alpha_list, beta_list, delta_list, delta_index,
                           database['cfm'], local_apparent_wind_sph['nominal'],
                           params))
    })
  for rate_name, step in rate_steps.iteritems():
    for delta_index, delta_list in enumerate(deltas_list):
      nominal_derivative_coeffs = database['dcfm_d%s' % rate_name]
      derivative_name = 'dcfm%d_d%s' % (delta_index + 1, rate_name)
      tasks.update({
          derivative_name: (CalcDeltaIncrementAngularRateDerivativeCoeffs,
                            (alpha_list, beta_list, delta_list, delta_index,
                             step, nominal_derivative_coeffs,
                             local_apparent_wind_sph['positive_%s' % rate_name],
                             local_apparent_wind_sph['negative_%s' % rate_name],
                             params))
      })
  database.update(_ProcessTasks(tasks))

  logging.info('Writing database to file %s.', FLAGS.filename)
  WriteJsonFile(FLAGS.filename, params, database)
def silhouette_k(data, n_clusters):
    """Generates a silhouette plot for n_clusters

    Fits KMeans with ``n_clusters`` on ``data``, draws the per-cluster
    silhouette profiles, and marks the average silhouette score with a
    dashed red line.

    :param data: feature matrix accepted by sklearn KMeans, shape
        (n_samples, n_features)
    :param n_clusters: number of clusters to fit
    :return: tuple of (matplotlib figure, average silhouette score)
    """
    fig, ax1 = plt.subplots(1)
    # Silhouette coefficients range over [-1, 1]; pad left for labels.
    ax1.set_xlim([-.1, 1])
    # Leave vertical gaps (10 units) between consecutive cluster bands.
    ax1.set_ylim([0, data.shape[0] + (n_clusters + 1) * 10])

    clusterer = KMeans(n_clusters=n_clusters)
    cluster_labels = clusterer.fit_predict(data)
    silhouette_avg = silhouette_score(data, cluster_labels)
    silhouette_values = silhouette_samples(data, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Sorted silhouette values of the samples belonging to cluster i.
        ith_cluster_silhouette_values = silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        # NOTE(review): cm.spectral was removed in modern matplotlib
        # (use cm.nipy_spectral there) — confirm the pinned version.
        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values, facecolor=color,
                          edgecolor=color, alpha=.7)
        # Label each band with its cluster index.
        ax1.text(-0.05, y_lower + .05 * size_cluster_i, str(i))
        y_lower = y_upper + 10
    # Vertical line at the average silhouette score of all samples.
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    return fig, silhouette_avg
from typing import Tuple
import torch
def _get_value(point: Tuple[int, int, int], volume_data: torch.Tensor) -> float:
"""
Gets the value at a given coordinate point in the scalar field.
Args:
point: data of shape (3) corresponding to an xyz coordinate.
volume_data: a Tensor of size (D, H, W) corresponding to
a 3D scalar field
Returns:
data: scalar value in the volume at the given point
"""
x, y, z = point
return volume_data[z][y][x] | 7353d43b4d1cc4375a38c3cf616a083690d71ab8 | 3,630,040 |
import hashlib
from datetime import datetime
from io import BytesIO, StringIO
def generate(request, name):
    """
    Generate initialcons for a given name as a .png.

    Accepts custom size and font as query parameters.

    Args:
        request: Django request; may carry 'size' and 'font' GET params.
        name: display name the initials and color are derived from.

    Returns:
        HttpResponse with image/png content and cache headers.
    """
    if name == '':
        name = '?'
    name = name.encode('utf-8').upper()
    # Custom size
    size = request.GET.get('size', False)
    if size and size.isdigit():
        size = int(size)
    else:
        size = size_default
    # Stop from being crazy big
    size = min(size, size_max)
    # Custom font
    font = request.GET.get('font', False)
    if font and font in fonts:
        font = fonts[font]
    else:
        # dict.values() is a view in Python 3 and cannot be indexed with
        # [0]; take the first configured font via iteration instead.
        font = next(iter(fonts.values()))
    # Consistent color based on name
    encoded = hashlib.md5(name)
    color_index = int(encoded.hexdigest(), 16)
    color = colors[color_index % len(colors)]
    # Take the first two initals
    initials = get_initials(name)
    font = ImageFont.truetype(font, int(size * font_size))
    img = Image.new("RGBA", (size, size), color)
    draw = ImageDraw.Draw(img)
    w, h = font.getsize(initials)
    # Account for vertical offset to center
    h = h + font.getoffset(initials)[1]
    x = (size - w) / 2
    y = (size - h) / 2
    # Draw
    draw.text((x, y), initials, font=font, fill=font_color)
    draw = ImageDraw.Draw(img)
    # Output as PNG.  PNG data is binary, so a BytesIO buffer is required;
    # the original StringIO.StringIO() call fails outright under
    # `from io import StringIO` and a text buffer cannot hold PNG bytes.
    output = BytesIO()
    img.save(output, format="PNG")
    response = HttpResponse(output.getvalue(), content_type="image/png")
    # Attempt to cache, fix later
    now = datetime.now()
    expires_at = now + expires_time
    total_seconds = int((expires_at - now).total_seconds())
    cache.patch_response_headers(response, total_seconds)
    return response
from typing import List
from typing import Dict
from typing import Any
import time
import yaml
import copy
import ray
import re
from typing import Counter
def run_learning_tests_from_yaml(
        yaml_files: List[str],
        *,
        max_num_repeats: int = 2,
        smoke_test: bool = False,
) -> Dict[str, Any]:
    """Runs the given experiments in yaml_files and returns results dict.

    Args:
        yaml_files (List[str]): List of yaml file names.
        max_num_repeats (int): How many times should we repeat a failed
            experiment?
        smoke_test (bool): Whether this is just a smoke-test. If True,
            set time_total_s to 5min and don't early out due to rewards
            or timesteps reached.

    Returns:
        Dict with keys "time_taken", "trial_states", "last_update",
        "stats", "passed" and "failures".
    """
    print("Will run the following yaml files:")
    for yaml_file in yaml_files:
        print("->", yaml_file)

    # All trials we'll ever run in this test script.
    all_trials = []
    # The experiments (by name) we'll run up to `max_num_repeats` times.
    experiments = {}
    # The results per experiment.
    checks = {}
    # Metrics per experiment.
    stats = {}

    start_time = time.monotonic()

    # Loop through all collected files and gather experiments.
    # Augment all by `torch` framework.
    for yaml_file in yaml_files:
        # Use a context manager so the file handle is closed promptly;
        # the original `open(yaml_file).read()` leaked the handle.
        with open(yaml_file) as f:
            tf_experiments = yaml.safe_load(f)

        # Add torch version of all experiments to the list.
        for k, e in tf_experiments.items():
            # If framework explicitly given, only test for that framework.
            # Some algos do not have both versions available.
            if "frameworks" in e:
                frameworks = e["frameworks"]
            else:
                # By default we don't run tf2, because tf2's multi-gpu support
                # isn't complete yet.
                frameworks = ["tf", "torch"]
            # Pop frameworks key to not confuse Tune.
            e.pop("frameworks", None)

            e["stop"] = e["stop"] if "stop" in e else {}
            e["pass_criteria"] = e[
                "pass_criteria"] if "pass_criteria" in e else {}

            # For smoke-tests, we just run for n min.
            if smoke_test:
                # 0sec for each(!) experiment/trial.
                # This is such that if there are many experiments/trials
                # in a test (e.g. rllib_learning_test), each one can at least
                # create its trainer and run a first iteration.
                e["stop"]["time_total_s"] = 0
            else:
                # We also stop early, once we reach the desired reward.
                min_reward = e.get("pass_criteria",
                                   {}).get("episode_reward_mean")
                if min_reward is not None:
                    e["stop"]["episode_reward_mean"] = min_reward

            # Generate `checks` dict for all experiments
            # (tf, tf2 and/or torch).
            for framework in frameworks:
                k_ = k + "-" + framework
                ec = copy.deepcopy(e)
                ec["config"]["framework"] = framework
                if framework == "tf2":
                    ec["config"]["eager_tracing"] = True

                checks[k_] = {
                    "min_reward": ec["pass_criteria"].get(
                        "episode_reward_mean", 0.0),
                    "min_throughput": ec["pass_criteria"].get(
                        "timesteps_total", 0.0) /
                    (ec["stop"].get("time_total_s", 1.0) or 1.0),
                    "time_total_s": ec["stop"].get("time_total_s"),
                    "failures": 0,
                    "passed": False,
                }
                # This key would break tune.
                ec.pop("pass_criteria", None)

                # One experiment to run.
                experiments[k_] = ec

    # Print out the actual config.
    print("== Test config ==")
    print(yaml.dump(experiments))

    # Keep track of those experiments we still have to run.
    # If an experiment passes, we'll remove it from this dict.
    experiments_to_run = experiments.copy()

    try:
        ray.init(address="auto")
    except ConnectionError:
        ray.init()

    for i in range(max_num_repeats):
        # We are done.
        if len(experiments_to_run) == 0:
            print("All experiments finished.")
            break

        print(f"Starting learning test iteration {i}...")

        # Run remaining experiments.
        trials = run_experiments(
            experiments_to_run,
            resume=False,
            verbose=2,
            progress_reporter=CLIReporter(
                metric_columns={
                    "training_iteration": "iter",
                    "time_total_s": "time_total_s",
                    "timesteps_total": "ts",
                    "episodes_this_iter": "train_episodes",
                    "episode_reward_mean": "reward_mean",
                },
                sort_by_metric=True,
                max_report_frequency=30,
            ))

        all_trials.extend(trials)

        # Check each experiment for whether it passed.
        # Criteria is to a) reach reward AND b) to have reached the throughput
        # defined by `timesteps_total` / `time_total_s`.
        for experiment in experiments_to_run.copy():
            print(f"Analyzing experiment {experiment} ...")
            # Collect all trials within this experiment (some experiments may
            # have num_samples or grid_searches defined).
            trials_for_experiment = []
            for t in trials:
                trial_exp = re.sub(".+/([^/]+)$", "\\1", t.local_dir)
                if trial_exp == experiment:
                    trials_for_experiment.append(t)
            print(f" ... Trials: {trials_for_experiment}.")

            # If we have evaluation workers, use their rewards.
            # This is useful for offline learning tests, where
            # we evaluate against an actual environment.
            check_eval = experiments[experiment]["config"].get(
                "evaluation_interval", None) is not None

            # Error: Increase failure count and repeat.
            if any(t.status == "ERROR" for t in trials_for_experiment):
                print(" ... ERROR.")
                checks[experiment]["failures"] += 1
            # Smoke-tests always succeed.
            elif smoke_test:
                print(" ... SMOKE TEST (mark ok).")
                checks[experiment]["passed"] = True
                del experiments_to_run[experiment]
            # Experiment finished: Check reward achieved and timesteps done
            # (throughput).
            else:
                if check_eval:
                    episode_reward_mean = np.mean([
                        t.last_result["evaluation"]["episode_reward_mean"]
                        for t in trials_for_experiment
                    ])
                else:
                    episode_reward_mean = np.mean([
                        t.last_result["episode_reward_mean"]
                        for t in trials_for_experiment
                    ])
                desired_reward = checks[experiment]["min_reward"]

                timesteps_total = np.mean([
                    t.last_result["timesteps_total"]
                    for t in trials_for_experiment
                ])
                total_time_s = np.mean([
                    t.last_result["time_total_s"]
                    for t in trials_for_experiment
                ])

                # TODO(jungong) : track trainer and env throughput separately.
                throughput = timesteps_total / (total_time_s or 1.0)
                desired_throughput = checks[experiment]["min_throughput"]

                # Record performance.
                stats[experiment] = {
                    "episode_reward_mean": episode_reward_mean,
                    "throughput": throughput,
                }

                print(f" ... Desired reward={desired_reward}; "
                      f"desired throughput={desired_throughput}")

                # We failed to reach desired reward or the desired throughput.
                if (desired_reward and
                        episode_reward_mean < desired_reward) or \
                        (desired_throughput and
                         throughput < desired_throughput):
                    print(" ... Not successful: Actual "
                          f"reward={episode_reward_mean}; "
                          f"actual throughput={throughput}")
                    checks[experiment]["failures"] += 1
                # We succeeded!
                else:
                    print(" ... Successful: (mark ok).")
                    checks[experiment]["passed"] = True
                    del experiments_to_run[experiment]

    ray.shutdown()

    time_taken = time.monotonic() - start_time

    # Create results dict and write it to disk.
    result = {
        "time_taken": time_taken,
        "trial_states": dict(Counter([trial.status for trial in all_trials])),
        "last_update": time.time(),
        "stats": stats,
        "passed": [k for k, exp in checks.items() if exp["passed"]],
        "failures": {
            k: exp["failures"]
            for k, exp in checks.items() if exp["failures"] > 0
        }
    }

    return result
def make_interpolant(a, b, func, order, error, basis="chebyshev",
                     adapt_type="Remez", dtype='64', accurate=True, optimizations=None):
    """
    Takes an interval from a to b, a function, an interpolant order, and a
    maximum allowed error and returns an Approximator class representing
    a monomial interpolant that fits those parameters.

    :param a: interval start
    :param b: interval end
    :param func: function to approximate
    :param order: interpolant order
    :param error: maximum allowed error
    :param basis: polynomial basis name (default "chebyshev")
    :param adapt_type: adaptation strategy (default "Remez")
    :param dtype: bit width of the target float type as a string
    :param accurate: whether to run the accurate adaptation path
    :param optimizations: optional list of optimization flags
    :raises Exception: if dtype maps to no supported C float type
    """
    # Avoid the shared-mutable-default pitfall of `optimizations=[]`:
    # normalize to a fresh list per call instead.
    if optimizations is None:
        optimizations = []
    my_adapt = adapt.Interpolant(func, order, error, basis, dtype, accurate, optimizations=optimizations)
    my_adapt.run_adapt(a, b, adapt_type)
    approximation = app.Approximator(my_adapt, optimizations=optimizations)
    # Map the requested bit width to the matching C float type name.
    dt = int(dtype)
    if dt <= 32:
        approximation.dtype_name = "float"
    elif dt <= 64:
        approximation.dtype_name = "double"
    elif dt <= 80:
        approximation.dtype_name = "long double"
    else:
        raise Exception("Incorrect data type specified")
    return approximation
def handle_UnknownLanguageError(exc, *args):
    """Build the translated error message for an UnknownLanguageError."""
    _ = gettext_lang.lang
    # exc.args carries (requested_language, known_languages).
    lang = exc.args[0]
    all_langs = exc.args[1]
    template = _(
        "    AvantPy exception: UnknownLanguageError\n\n"
        "    The following unknown language was requested: {lang}.\n\n"
        "    The known languages are: {all_langs}.\n\n"
    )
    return template.format(
        exception_name=exc.__class__.__name__, lang=lang, all_langs=all_langs
    )
import requests
def get_title(bot, trigger):
    """ Get the title of the page referred to by a chat message URL """
    # Rewrite known mirror hosts to their canonical domain before fetching.
    DOMAIN_REMAPS = [("mobile.twitter.com", "twitter.com")]
    url = trigger.group(1)
    for substr, repl in DOMAIN_REMAPS:
        url = url.replace(substr, repl)
    host = urlparse(url).hostname
    # Refuse to fetch anything on the block list.
    if any(badhost.lower() in host.lower() for badhost in FORBIDDEN_HOSTS):
        return False
    try:
        with requests.get(url, stream=True, **REQUEST_PARAMS) as response:
            if not response.ok:
                return
            # only read the first MAXSIZE bytes to find the <title>
            content = ""
            for chunk in response.iter_content(MAXSIZE, decode_unicode=True):
                content += chunk
                if len(content) >= MAXSIZE:
                    break
            # NOTE(review): this tests for the substring "encoding" in the
            # *page text* (not the headers) and then re-encodes to bytes for
            # lxml — looks fragile; confirm the intent.
            if "encoding" in content:
                content = content.encode("utf-8")
            try:
                doc = lxml.html.fromstring(content)
                title = doc.find(".//title").text
                bot.say("title: {title}".format(title=title.strip()))
            # NOTE(review): bare except silently swallows all parse errors
            # (and KeyboardInterrupt); narrow to Exception at minimum.
            except:
                return
    except TypeError:
        return False
def assumption_html():
    """Produces an HTML list of all assumption descriptions."""
    pretty = [_prettyprint(assumption) for assumption in ASSUMPTIONS_TO_DISPLAY]
    items = _list_items(pretty)
    return ASSUMPTION_TEXT.format("\n".join(items))
from typing import OrderedDict
def edit_page_element(project, pagenumber, pchange, location, tag_name, brief, hide_if_empty, attribs):
    """Given an element at project, pagenumber, location
       sets the element values, returns page change uuid """
    proj, page = get_proj_page(project, pagenumber, pchange)
    element = page.location_item(location)
    element.tag_name = tag_name
    element.brief = brief
    # Keep attributes deterministically ordered by key.
    sorted_pairs = sorted(attribs.items(), key=lambda item: item[0])
    element.attribs = OrderedDict(sorted_pairs)
    # Only full Parts carry the hide_if_empty flag.
    if element.__class__.__name__ == "Part":
        element.hide_if_empty = hide_if_empty
    # save the altered page, and return the page.change uuid
    return proj.save_page(page)
import os
def check_predicted_masks():
    """
    Returns the list of images for which we have a mask.

    The length of this list must be equal to the total number of original
    images, because there should be a black-and-white mask for every image.

    :return: python list of strings
    """
    masks_dir = os.path.join(SEGMENTATION_DATA_PATH, "predicted_masks")
    return [entry for entry in os.listdir(masks_dir)
            if entry.endswith("_pred.jpg")]
def _sparse_elm_mul(spmat_csr, col):
"""
spmat (n, m)
col (n,)
"""
for i in range(spmat_csr.shape[0]):
i0, i1 = spmat_csr.indptr[i], spmat_csr.indptr[i+1]
if i1 == i0:
continue
spmat_csr.data[i0:i1] *= col[i]
return spmat_csr | 55c7ca7f848989eaa95a5917cfa40edf2d7e1372 | 3,630,049 |
def list_ports():
    """
    Return a list of current port trees managed by poudriere

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.list_ports
    """
    _check_config_exists()
    # `poudriere ports -l` prints one port tree per line.
    output = __salt__["cmd.run"]("poudriere ports -l")
    return output.splitlines()
def _make_vc_curves(ch_data_cache: analyzer.CalcCache):
    """
    Format the VC curves of the main accelerometer channel into a pandas object.

    Returns None when the cache holds no VC data; otherwise a Series
    indexed by an (axis, frequency (Hz)) MultiIndex.
    """
    # Unit conversion of the cached per-axis curves.
    df_vc = ch_data_cache._VCCurveData * analyzer.MPS_TO_UMPS  # (m/s) -> (μm/s)
    # Resultant column = L2 norm across the per-axis columns.
    df_vc["Resultant"] = calc_stats.L2_norm(df_vc.to_numpy(), axis=1)
    if df_vc.size == 0:
        return None

    # Stack to a Series keyed by (axis, frequency (Hz)).
    return df_vc.stack(level="axis").reorder_levels(["axis", "frequency (Hz)"])
from typing import Any
from typing import Optional
def match_attribute_node(obj: Any, name: Optional[str] = None) -> bool:
    """
    Returns `True` if the first argument is an attribute node matching the name, `False` otherwise.
    Raises a ValueError if the argument name has to be used, but it's in a wrong format.

    :param obj: the node to be tested.
    :param name: a fully qualified name, a local name or a wildcard. The accepted wildcard formats \
    are '*', '*:*', '*:local-name' and '{namespace}*'.
    """
    # No name, or a full wildcard: any attribute node matches.
    if name is None or name == '*' or name == '*:*':
        return isinstance(obj, (AttributeNode, TypedAttribute))
    elif not isinstance(obj, (AttributeNode, TypedAttribute)):
        return False
    elif isinstance(obj, TypedAttribute):
        # Unwrap to the raw AttributeNode so .name comparisons work below.
        obj = obj.attribute

    if not name:
        # Empty name only matches an attribute with an empty name.
        return not obj.name
    elif name[0] == '*':
        # '*:local-name' wildcard: compare only the local part.
        try:
            _, _name = name.split(':')
        except (ValueError, IndexError):
            raise ElementPathValueError("unexpected format %r for argument 'name'" % name)
        else:
            if obj.name.startswith('{'):
                return obj.name.split('}')[1] == _name
            else:
                return obj.name == _name

    elif name[-1] == '*':
        # '{namespace}*' wildcard: compare only the namespace URI.
        if name[0] != '{' or '}' not in name:
            raise ElementPathValueError("unexpected format %r for argument 'name'" % name)
        elif obj.name.startswith('{'):
            return obj.name.split('}')[0][1:] == name.split('}')[0][1:]
        else:
            return False
    else:
        # Exact fully-qualified or local name match.
        return obj.name == name
def get_data_accessor_predicate(
        data_type: DataTypeLike = None,
        format_id: str = None,
        storage_id: str = None
) -> ExtensionPredicate:
    """
    Get a predicate that checks if a data accessor extensions's name is
    compliant with *data_type*, *format_id*, *storage_id*.

    :param data_type: Optional data data type to be supported.
        May be given as type alias name, as a type,
        or as a DataType instance.
    :param format_id: Optional data format identifier to be supported.
    :param storage_id: Optional data storage identifier to be supported.
    :return: A filter function.
    :raise DataStoreError: If an error occurs.
    """
    if any((data_type, format_id, storage_id)):
        data_type = DataType.normalize(data_type) \
            if data_type is not None else None

        # Extension names have the form
        # '<data_type>:<format_id>:<storage_id>[:...]'; a '*' component
        # in the name means "matches anything".
        def _predicate(extension: Extension) -> bool:
            extension_parts = extension.name.split(':', maxsplit=4)
            if storage_id is not None:
                ext_storage_id = extension_parts[2]
                if ext_storage_id != '*' and ext_storage_id != storage_id:
                    return False
            if format_id is not None:
                ext_format_id = extension_parts[1]
                if ext_format_id != '*' and ext_format_id != format_id:
                    return False
            if data_type is not None:
                ext_data_type = DataType.normalize(extension_parts[0])
                # Accept any extension whose data type is a subtype of the
                # requested one.
                if not data_type.is_super_type_of(ext_data_type):
                    return False
            return True
    else:
        # No constraints given: every extension passes.
        # noinspection PyUnusedLocal
        def _predicate(extension: Extension) -> bool:
            return True

    return _predicate
from typing import Any
from typing import Type
import dataclasses
from typing import is_typeddict
from typing import get_origin
from typing import Literal
from typing import Union
def check(value: Any, ty: Type[Any]) -> Result:
    """
    Check that *value* conforms to the type annotation *ty*; returns None
    on success or an error object describing the mismatch.

    # Examples
    >>> assert is_error(check(1, str))
    >>> assert not is_error(check(1, int))
    >>> assert is_error(check(1, list))
    >>> assert is_error(check(1.3, int))
    >>> assert is_error(check(1.3, Union[str, int]))
    """
    # Dataclass *instances* (not dataclass types) get field-wise checking.
    if not isinstance(value, type) and dataclasses.is_dataclass(ty):
        # dataclass
        return check_dataclass(value, ty)
    elif is_typeddict(ty):
        # should use `typing.is_typeddict` in future
        return check_typeddict(value, ty)
    else:
        to = get_origin(ty)
        if to is not None:
            # generics: validate the origin first, then the parameters.
            err = check(value, to)
            if is_error(err):
                return err

            if to is list or to is set or to is frozenset:
                err = check_mono_container(value, ty)
            elif to is dict:
                err = check_dict(value, ty)  # type: ignore
            elif to is tuple:
                err = check_tuple(value, ty)
            elif to is Literal:
                err = check_literal(value, ty)
            elif to is Union:
                err = check_union(value, ty)
            return err
        elif isinstance(ty, type):
            # concrete type
            # bool is checked before int because bool subclasses int.
            if issubclass(ty, bool):
                if not isinstance(value, ty):
                    return Error0(ty=ty, value=value)
            elif issubclass(ty, int):  # For boolean
                return check_int(value, ty)
            elif not isinstance(value, ty):
                return Error0(ty=ty, value=value)
    # Fall through: no error found.
    return None
def construct_feature_columns():
    """Construct the TensorFlow Feature Columns.

    Returns:
        A set of feature columns
    """
    # There are 784 pixels in each image.
    pixels_column = tf.feature_column.numeric_column('pixels', shape=784)
    return {pixels_column}
def is_array_str(obj):
    """
    Check if obj is a list of strings or a tuple of strings or a set of strings

    :param obj: an object
    :return: flag: True or False
    """
    # TODO: modify the use of is_array_str(obj) in the code to is_array_of(obj, classinfo)
    # A bare string is explicitly rejected even though it iterates as chars.
    if isinstance(obj, str):
        return False
    return all(isinstance(item, str) for item in obj)
def generate_sample_path(n):
    """
    Generates a sample path

    :param n: path length
    :returns x, y: state and observations sample path
    """
    # x: hidden states, y: noisy observations; both get n+1 rows
    # (row 0 holds the initial condition).
    x = np.zeros((n + 1, m_W.shape[0]))
    y = np.zeros((n + 1, m_Nu.shape[0]))
    # w = np.random.normal(self.m_w, self.std_w, (n + 1, self.m_w.shape[0]))
    # nu = np.random.normal(self.m_nu, self.std_nu, (n + 1, self.m_nu.shape[0]))
    # NOTE(review): m_W/std_W, m_Nu/std_Nu, phi, psi, sample_X0 and
    # sample_normal are module-level globals — presumably the state
    # transition/observation model; confirm their shapes are consistent.
    x[0, :] = sample_X0()
    y[0, :] = psi(x[0, :]) + sample_normal(m_Nu, std_Nu)
    # x_{i+1} = phi(x_i) + process noise; y_{i+1} = psi(x_{i+1}) + obs noise.
    for i in range(0, n):
        x[i + 1, :] = phi(x[i, :]) + sample_normal(m_W, std_W)
        y[i + 1, :] = psi(x[i + 1, :]) + sample_normal(m_Nu, std_Nu)
    return x, y
def get_pod_by_label_selector(
    kube_client, label_selector, pod_namespace=namespace
) -> str:
    """Return the name of a pod found by label selector."""
    listing = kube_client.list_namespaced_pod(
        pod_namespace, label_selector=label_selector
    )
    matching = listing.items
    assert matching, f"Expected to find at least one pod with labels '{label_selector}'"
    # The first match wins when several pods share the labels.
    return matching[0].metadata.name
import os
def make_file_path(file, args):
    """Create any directories and subdirectories needed to store data in the
    specified file, based on inputs_dir and inputs_subdir arguments.

    :param file: file name to append to the directory path
    :param args: dict optionally containing 'inputs_dir' and 'inputs_subdir'
    :return: pathname to the file, with parent directories created
    """
    # extract extra path information from args (if available)
    # and build a path to the specified file.
    path = os.path.join(args.get('inputs_dir', ''), args.get('inputs_subdir', ''))
    if path != '':
        # exist_ok avoids the race between the old os.path.exists() check and
        # makedirs() when several processes build the same tree concurrently.
        os.makedirs(path, exist_ok=True)
    return os.path.join(path, file)
import re
def get_ticket_refs(text, prefixes=None):
    """Returns a list of ticket IDs referenced in given text.

    Args:
        prefixes (list of unicode):
            Prefixes allowed before the ticket number.
            For example, prefixes=['app-', ''] would recognize
            both 'app-1' and '1' as ticket IDs.
            By default, prefixes is a regex of '[A-Z-]*'
    Returns:
        set of unicode
        The set of recognized issue numbers.
    """
    verbs = ('closed', 'closes', 'close', 'fixed', 'fixes', 'fix',
             'addresses', 're', 'references', 'refs', 'see',
             'issue', 'bug', 'ticket')
    if prefixes is None:
        prefix_pattern = '[A-Z-]*'
    else:
        prefix_pattern = '|'.join(re.escape(prefix) for prefix in prefixes)
    ticket_id = '#?((?:' + prefix_pattern + r')\d+)'
    trigger = '(?:' + '|'.join(verbs) + r')\s*(?:ticket|bug)?:*\s*'
    joiner = r'\s*(?:,|and|, and)\s*'
    # Allow up to ten extra comma/"and"-separated IDs after the first one.
    pattern = trigger + ticket_id + ('(?:' + joiner + ticket_id + ')?') * 10
    found = re.findall(pattern, text, flags=re.IGNORECASE)
    ids = {group for match in found for group in match if group}
    return sorted(ids)
import builtins
def xsh_session():
    """Return the currently active xonsh session instance."""
    # The xonsh runtime stores its session object on the builtins module.
    return getattr(builtins, "__xonsh__")
def is_date_valid(keyid, date, lookup_dict=None):
    """
    Function to see if a date is valid for a given key.
    :param keyid: key into the dictionary
    :param date: a date to check for in the list corresponding to 'keyid'
    :param lookup_dict: lookup dictionary; defaults to the module-level
        movie_dict (resolved lazily so custom dicts never touch it)
    Note, the date is treated as an exact date string
    """
    # BUG FIX: the body used the global movie_dict everywhere and ignored
    # the lookup_dict parameter entirely.
    if lookup_dict is None:
        lookup_dict = movie_dict
    if keyid not in lookup_dict.keys() or type(lookup_dict[keyid]) != list:
        return False
    return date in lookup_dict[keyid]
import string
def format_filename(name):
    """Take a string and return a valid filename constructed from the string.

    Uses a whitelist approach: any characters not present in the allowed set
    are removed, and spaces are replaced with underscores.

    Note: this method may produce invalid filenames such as ``, `.` or `..`
    When I use this method I prepend a date string like '2009_01_15_19_46_32_'
    and append a file extension like '.txt', so I avoid the potential of using
    an invalid filename.
    """
    allowed = set("-_.() " + string.ascii_letters + string.digits)
    cleaned = "".join(ch for ch in name if ch in allowed)
    # I don't like spaces in filenames.
    return cleaned.replace(" ", "_")
def bin_plot(frame, x = None, target = 'target', iv = True):
    """plot for bins

    Draws, for each bin of column ``x``: the bin's sample proportion as bars
    and its bad rate (mean of ``target``) as a line on a twin y-axis.

    :param frame: DataFrame containing the binned column and the target
    :param x: name of the binned column to group by
    :param target: name of the binary target column
    :param iv: if True, annotate the plot with the Information Value of x
    :return: the matplotlib Axes holding the bar plot
    """
    group = frame.groupby(x)
    # Per-bin bad count ('sum', assumes target is 0/1 -- TODO confirm) and size.
    table = group[target].agg(['sum', 'count']).reset_index()
    table['badrate'] = table['sum'] / table['count']
    table['prop'] = table['count'] / table['count'].sum()
    prop_ax = tadpole.barplot(
        x = x,
        y = 'prop',
        data = table,
        color = '#82C6E2',
    )
    prop_ax = add_annotate(prop_ax)
    # Second y-axis shares the x-axis so the bad-rate line overlays the bars.
    badrate_ax = prop_ax.twinx()
    badrate_ax.grid(False)
    badrate_ax = tadpole.lineplot(
        x = x,
        y = 'badrate',
        data = table,
        color = '#D65F5F',
        ax = badrate_ax,
    )
    badrate_ax = add_annotate(badrate_ax)
    if iv:
        # Make room for the IV text box, then print the Information Value.
        prop_ax = reset_ylim(prop_ax)
        prop_ax = add_text(prop_ax, 'IV: {:.5f}'.format(IV(frame[x],frame[target])))
    return prop_ax
import os
import re
def getCtimeOfFile(fileName):
    """
    input: string
    output: string
    description: Get the first line of the file to determine whether there
                 is a date,
                 if any, change to the specified date format(yyyymmddHHMM)
                 and return,
                 if not, return an empty string
    :raises Exception: GAUSS_50201 if the file does not exist
    """
    if not os.path.exists(fileName):
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % str(fileName))
    # Each rule matches one known timestamp layout found in log headers:
    # 2018-08-26 14:18:40
    rule1 = r'\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-6]\d:[0-6]\d'
    # 2018/08/25 20:40:16
    rule2 = r'\d{4}/[0-1]\d/[0-3]\d [0-2]\d:[0-6]\d:[0-6]\d'
    # Wed Aug 29 00:00:03 CST 2018
    rule3 = r'(Mon|Tue|Wed|Thu|Fri|Sat|Sun)\b (' \
            r'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b [0-3]\d [' \
            r'0-2]\d:[0-6]\d:[0-6]\d CST \d{4}'
    # 2018-08-25T20:49:05+08:00
    rule4 = r'\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-6]\d:[0-6]\d'
    # defining rules and partitioning method key-value pairs;
    # the value is the converter that reformats a match to yyyymmddHHMM
    # (d_timeToString / e_timeToString are defined elsewhere in the project).
    rule_dict = {rule1: d_timeToString,
                 rule2: d_timeToString,
                 rule3: e_timeToString,
                 rule4: d_timeToString
                 }
    # open file
    with open(fileName, "r") as f:
        # get the first line of the file
        line = f.readline().strip()
        # match according to known rules; first rule that matches wins
        # (dict iteration order is insertion order on Python 3.7+).
        for rule in rule_dict.keys():
            result = re.search(rule, line)
            if result:
                # change to the specified date format and return
                return rule_dict[rule](result.group())
    return ""
import time
def Get_ConfusionMatrix(TrueLabels, PredictedLabels, Classes, Normal=False, Title='Confusion matrix', ColorMap='rainbow',
                        FigSize=(30,30), save=False):
    """ Function designed to plot the confusion matrix of the predicted labels versus the true labels.

    INPUT: vector containing the actual true labels, vector containing the predicted labels,
           list of class names, flag for normalizing the data (default False),
           name of the title for the graph, color map (default 'rainbow'),
           figure size, and flag to save or not the figure (default False).
    OUTPUT: a pd.DataFrame containing the confusion matrix values, or None on failure.
    """
    # Colormap reference -> https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
    try:
        ConfMatrix = metrics.confusion_matrix(TrueLabels, PredictedLabels)  # Calculating confusion matrix
        if(Normal==True):
            # Normalize each row so it sums to 1 (per-true-class proportions).
            ConfMatrix = ConfMatrix.astype('float') / ConfMatrix.sum(axis=1)[:, np.newaxis]
        ConfMatrix_DF = pd.DataFrame(data=ConfMatrix, index=Classes, columns=Classes)
        fig, ax = plt.subplots(figsize=FigSize)
        sb.heatmap(ConfMatrix_DF, annot=True, cmap=ColorMap)
        ax.set_title(Title, fontsize=26)
        ax.set_xlabel('Predicted labels', fontsize = 20)
        ax.set_ylabel('True labels', fontsize = 20)
        # Work around the matplotlib/seaborn heatmap clipping issue.
        ax.set_ylim(len(ConfMatrix)+0.25, -0.25)
        plt.xticks(rotation=45)
        plt.yticks(rotation=45)
        plt.show()
        if(save==True):
            timestr = time.strftime("%y-%m-%d_%Hh%Mm%Ss_")
            fig = ax.get_figure()
            fig.savefig(timestr+Title+".png")
        return ConfMatrix_DF
    except Exception as err:
        # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt
        # and hid the actual error; report it while keeping the best-effort
        # contract (return None on failure).
        print("CONFUSION-MATRIX_ERROR: %s\n" % err)
def _noncentrality_chisquare(chi2_stat, df, alpha=0.05):
    """noncentrality parameter for chi-square statistic

    `nc` is zero-truncated umvue

    Parameters
    ----------
    chi2_stat : float
        Chisquare-statistic, for example from a hypothesis test
    df : int or float
        Degrees of freedom
    alpha : float in (0, 1)
        Significance level for the confidence interval, coverage is 1 - alpha.

    Returns
    -------
    HolderTuple
        The main attributes are

        - ``nc`` : estimate of noncentrality parameter
        - ``confint`` : lower and upper bound of confidence interval for `nc``

        Other attributes are estimates for nc by different methods.

    References
    ----------
    .. [1] Kubokawa, T., C.P. Robert, and A.K.Md.E. Saleh. 1993. "Estimation of
       Noncentrality Parameters."
       Canadian Journal of Statistics 21 (1): 45-57.
       https://doi.org/10.2307/3315657.
    .. [2] Li, Qizhai, Junjian Zhang, and Shuai Dai. 2009. "On Estimating the
       Non-Centrality Parameter of a Chi-Squared Distribution." S
       tatistics & Probability Letters 79 (1): 98-104.
       https://doi.org/10.1016/j.spl.2008.07.025.
    """
    alpha_half = alpha / 2

    # UMVUE of nc is chi2 - df; truncate at zero since nc must be nonnegative.
    nc_umvue = chi2_stat - df
    nc = np.maximum(nc_umvue, 0)
    # Alternative truncated estimators from the cited literature.
    nc_lzd = np.maximum(nc_umvue, chi2_stat / (df + 1))
    nc_krs = np.maximum(nc_umvue, chi2_stat * 2 / (df + 2))
    # Median-unbiased estimate and equal-tailed CI via the inverse
    # noncentral chi-square CDF in its nc argument.
    nc_median = special.chndtrinc(chi2_stat, df, 0.5)
    ci = special.chndtrinc(chi2_stat, df, [1 - alpha_half, alpha_half])

    res = Holder(nc=nc,
                 confint=ci,
                 nc_umvue=nc_umvue,
                 nc_lzd=nc_lzd,
                 nc_krs=nc_krs,
                 nc_median=nc_median,
                 name="Noncentrality for chisquare-distributed random variable"
                 )
    return res
import os
import subprocess
import threading
def delete_helm_release(release):
    """Delete helm release

    This method deletes a helm release without --purge which removes
    all associated resources from kubernetes but not from the store(ETCD)

    In the scenario of updating application, the method is needed to clean
    up the releases if there were deployed releases in the old application
    but not in the new application

    :param release: the name of the helm release
    :return: True on success (including already-deleted releases)
    :raises exception.HelmTillerFailure: on deletion failure or timeout
    """
    # NOTE: This mechanism deletes armada/tiller managed releases.
    # This could be adapted to also delete helm v3 releases using
    # 'helm uninstall'.
    env = os.environ.copy()
    env['PATH'] = '/usr/local/sbin:' + env['PATH']
    env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF
    helm_cmd = subprocess.Popen(
        ['helmv2-cli', '--',
         'helm', 'delete', release, '--tiller-connection-timeout', '5'],
        env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    timer = threading.Timer(20, kill_process_and_descendants, [helm_cmd])

    try:
        timer.start()
        out, err = helm_cmd.communicate()
        if err and not out:
            # BUG FIX: the original tested
            #   ("deletion completed" or "not found" or ...) in err
            # which only ever checked the first string; test each message.
            benign = ("deletion completed", "not found", "is already deleted")
            if any(msg in err for msg in benign):
                LOG.debug("Release %s not found or deleted already" % release)
                return True
            raise exception.HelmTillerFailure(
                reason="Failed to delete release: %s" % err)
        elif not err and not out:
            err_msg = "Failed to delete release. " \
                      "Helm tiller response timeout."
            raise exception.HelmTillerFailure(reason=err_msg)
        return True
    except Exception as e:
        LOG.error("Failed to delete release: %s" % e)
        raise exception.HelmTillerFailure(
            reason="Failed to delete release: %s" % e)
    finally:
        timer.cancel()
from typing import cast
import select
def get_subjects():
    """
    Get all subjects from the database

    Returns a JSON array with one object per row of the ``subject`` table.
    Date/timestamp columns are additionally cast to Text so they serialize
    cleanly into JSON.
    """
    connection = db_engine.connect()
    subject = get_table("subject")
    # NOTE(review): each date column appears twice (a Text cast and a
    # labelled raw column); the later duplicate key wins in dict(row) --
    # confirm which representation the API consumers expect.
    columns = [
        subject.c.id, cast(subject.c.date_created, Text), subject.c.date_created.label('date_created'),
        cast(subject.c.date_modified, Text), subject.c.date_modified.label('date_modified'),
        subject.c.deleted, subject.c.type, subject.c.first_name, subject.c.last_name,
        subject.c.place_of_birth, subject.c.occupation, subject.c.preposition,
        subject.c.full_name, subject.c.description, subject.c.legacy_id,
        cast(subject.c.date_born, Text), subject.c.date_born.label('date_born'),
        cast(subject.c.date_deceased, Text), subject.c.date_deceased.label('date_deceased'),
        subject.c.project_id, subject.c.source
    ]
    stmt = select(columns)
    rows = connection.execute(stmt).fetchall()
    result = []
    for row in rows:
        result.append(dict(row))
    connection.close()
    return jsonify(result)
def get_rest_api(*, config: Config) -> FastAPI:
    """Creates a FastAPI app.

    Wires the dependency-injection container into the FastAPI inbound
    adapter module, mounts the router and applies app-level configuration.

    :param config: application configuration (keyword-only)
    :return: a fully configured FastAPI application
    """
    container = setup_container(config=config)
    # Dependency injection: patch the fastapi adapter module's providers.
    container.wire(modules=["ucs.adapters.inbound.fastapi_"])
    api = FastAPI()
    api.include_router(router)
    configure_app(api, config=config)
    return api
def read_expression_profiles(pro_file):
    """
    Return a DataFrame containing data from a FluxSimulator .pro file.

    Return a DataFrame encapsulating the data from a FluxSimulator
    transcriptome profile (.pro) file.

    pro_file: Path to a FluxSimulator transcriptome profile file.
    """
    # sep=r"\s+" replaces delim_whitespace=True, which is deprecated and
    # removed in modern pandas; both split on arbitrary runs of whitespace.
    return pd.read_csv(pro_file, sep=r"\s+",
                       header=None, names=_PRO_FILE_COLS)
def format_list(lst):
    """
    Format a list as a string, ignore if it is string
    :param lst: the list to format
    :return: the formatted string
    """
    # BUG FIX: `basestring` only exists on Python 2 and raises NameError on
    # Python 3; `str` gives the same "leave strings untouched" behavior.
    if not isinstance(lst, str):
        return " ".join(str(i) for i in lst)
    return lst
def sampler_paraphrase(sentence, sampling_temp=1.0):
    """Paraphrase by sampling a distribution

    Args:
        sentence (str): A sentence input that will be paraphrased by
            sampling from distribution.
        sampling_temp (float) : A number between 0 and 1

    Returns:
        str: a candidate paraphrase of the `sentence`
    """
    # NOTE(review): relies on module-level graph, model, idx_to_word, end_id
    # and infer() -- assumed to be the loaded seq2seq model artifacts.
    # A fresh session is opened per call; consider reusing one if this is hot.
    with tf.Session(graph=graph) as sess:
        # The literal 1 is presumably the batch size -- TODO confirm
        # against infer()'s signature.
        return infer(sess, model, 1, sentence, idx_to_word, end_id, sampling_temp)
def create_graph(tensorboard_scope, mode_scope, input_file, input_len=2, output_len=1, batch_size=1, verbose=True, reuse=None, n_threads=2):
    """
    create or reuse graph
    :param tensorboard_scope: variable scope name
    :param mode_scope: 'train', 'valid', 'test'
    :param input_file: train or valid or test file path
    :param input_len: x1, x2
    :param output_len: y
    :param batch_size: batch size > 0
    :param verbose: print graph nodes
    :param reuse: reuse graph or not
    :param n_threads: number of example enqueue threads (2 is enough)
    :return: tensorflow graph nodes
    """
    # Input pipeline and learning-rate placeholder are NOT shared between
    # train/valid/test graphs (name_scope), only the weights are.
    with tf.name_scope(mode_scope):  # don't share
        x, y = input_pipeline([input_file], batch_size=batch_size, delim='\t', splits=3, n_threads=n_threads)
        learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
    # Single linear layer y_hat = x @ W1 + b1; variable_scope + reuse shares
    # W1/b1 across the train/valid/test graphs.
    with tf.variable_scope('layers%d' % 1, reuse=reuse):  # share W, b
        W1 = tf.get_variable(dtype=tf.float32, shape=[input_len, output_len], initializer=tf.random_normal_initializer(), name='W1')
        b1 = tf.get_variable(dtype=tf.float32, initializer=tf.constant(0.0, shape=[output_len]), name='b1')
        y_hat = tf.add(tf.matmul(x, W1), b1, name='y_hat')
    with tf.variable_scope('cost', reuse=reuse):  # share W, b
        # Mean squared error + Adam.
        cost = tf.reduce_mean(tf.square(y_hat - y), name='cost')
        train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, name='train_step')
    # Per-mode TensorBoard summaries for weights, bias and cost.
    with tf.name_scope(tensorboard_scope):  # don't share
        _W1 = tf.summary.histogram(values=W1, name='_W1')
        _b1 = tf.summary.histogram(values=b1, name='_b1')
        _cost = tf.summary.scalar(tensor=cost, name='_cost')
        summary = tf.summary.merge([_W1, _b1, _cost], name='summary')  # tf.summary.merge_all()
    if verbose:
        log.info('')
        log.info('mode_scope: %s' % mode_scope)
        log.info(x)
        log.info(W1)
        log.info(b1)
        log.info(y)
        log.info(y_hat)
        log.info(cost)
        log.info(train_step.name)
    return x, y, learning_rate, W1, b1, y_hat, cost, train_step, summary
def update_trip_public(request, trip_id):
    """
    Makes given trip public
    :param request: request whose authenticated user must belong to the trip
    :param trip_id: primary key of the trip to publish
    :return: 401 if the signed-in user is not part of the trip
    :return: 404 if trip does not exist
    :return: 200 successful
    """
    try:
        trip = Trip.objects.get(pk=trip_id)
        # if signed-in user not associated with requested trip
        if request.user not in trip.users.all():
            error_message = "User not a part of trip"
            return Response(error_message, status=status.HTTP_401_UNAUTHORIZED)
        trip.is_public = True
        trip.save()
    except Trip.DoesNotExist:
        error_message = "Trip does not exist"
        return Response(error_message, status=status.HTTP_404_NOT_FOUND)
    return Response(status=status.HTTP_200_OK)
def get_peer_count(ihash):
    """Return count of all participating peers we've seen.

    Uses Redis SCARD on the ``<ihash>:peers:N`` set, so each peer is
    counted at most once.
    """
    return g.redis.scard("%s:peers:N" % ihash)
import hashlib
def calc_checksum(filename):
    """
    Calculates an MD5 checksum of the contents of the given file.

    :param filename: path of the file to hash
    :return: hex digest string, or None if the file cannot be read
    """
    try:
        # 'with' guarantees the handle is closed; the original leaked the
        # file descriptor on the success path.
        with open(filename, "rb") as f:
            return hashlib.md5(f.read()).hexdigest()
    except IOError:
        # Preserve the original contract: unreadable/missing file -> None.
        return None
def alignment_matrix(subject, target, precision=BASE_PREC, verbosity=0, max_steps=BASE_STEPS):
    """
    Numerically find the rotation matrix necessary to rotate the `subject` vector to the `target` direction.

    Args:
        subject (np.ndarray): Length-3 vector to rotate.
        target (np.ndarray): Length-3 vector to rotate to.
        precision (float): Mostly for whether two unit vectors are close enough (L2-norm) to be the same.
        verbosity (bool): Whether to print info from Newton's method search.
        max_steps (int): Maximum allowable steps in Newton's method when searching for the rotation matrix.

    Returns:
        3-element tuple containing

        - (*np.ndarray*): 3x3 rotation matrix to bring `subject` into alignment with `target`.
        - (*np.ndarray*): Length-3 axis of rotation (None for the trivial parallel/antiparallel cases).
        - (*float*): Rotation angle (radians).
    """
    subject = l2normalize(subject)
    target = l2normalize(target)
    # Degenerate cases: no unique rotation axis exists when the vectors are
    # (anti)parallel, so return the identity / point reflection directly.
    if vectors_are_parallel(subject, target):
        return np.identity(3), None, 0
    elif vectors_are_antiparallel(subject, target):
        # NOTE(review): -I is a point reflection (det = -1), not a proper
        # rotation about an axis -- confirm callers accept this.
        return -np.identity(3), None, np.pi
    perp_vec = l2normalize(np.cross(subject, target))  # Mutually perpendicular to rotate about
    def err_func(theta):
        # L2 distance between the rotated subject and the target; the root
        # of this function is the sought rotation angle.
        rot = rotation(theta, perp_vec)
        subject_guess = matvec_mult(rot, subject)
        err = np.linalg.norm(subject_guess - target)
        return err
    solved_theta, _, _ = newton1d(err_func, precision=precision, verbosity=verbosity, max_steps=max_steps)
    rot_mat = rotation(solved_theta, perp_vec)
    return rot_mat, perp_vec, solved_theta
def make_batch(sentences):
    """
    create batch data from sentences (list)

    For each sentence, all words but the last become the one-hot encoded
    input sequence and the last word becomes the one-hot target.
    Relies on module-level ``word_dict`` (word -> index) and ``n_class``
    (vocabulary size).
    """
    input_batch = []
    target_batch = []
    for sen in sentences:
        word = sen.split()
        # Input = indices of all words except the last one.
        input = [word_dict[n] for n in word[:-1]]
        # Target = index of the final word to be predicted.
        target = word_dict[word[-1]]
        input_batch.append(np.eye(n_class)[input])   # one-hot
        target_batch.append(np.eye(n_class)[target]) # one-hot
    return input_batch, target_batch
def query_to_str(statement, bind=None):
    """
    returns a string of a sqlalchemy.orm.Query with parameters bound

    WARNING: this is dangerous and ONLY for testing, executing the results
    of this function can result in an SQL Injection attack.

    :param statement: a sqlalchemy.orm.Query or a Core statement
    :param bind: engine/connection used to pick the SQL dialect; required
        when the statement itself is unbound
    :return: the rendered SQL prefixed with 'TESTING ONLY BIND: '
    """
    if isinstance(statement, sqlalchemy.orm.Query):
        # ORM Query: take the bind from its session, then unwrap to Core.
        if bind is None:
            bind = statement.session.get_bind()
        statement = statement.statement
    elif bind is None:
        bind = statement.bind

    if bind is None:
        raise Exception('bind param (engine or connection object) required when using with an'
                        ' unbound statement')

    dialect = bind.dialect
    compiler = statement._compiler(dialect)

    class LiteralCompiler(compiler.__class__):
        # Render bound parameters as inline literals instead of placeholders.
        def visit_bindparam(
                self, bindparam, within_columns_clause=False,
                literal_binds=False, **kwargs
        ):
            return super(LiteralCompiler, self).render_literal_bindparam(
                bindparam, within_columns_clause=within_columns_clause,
                literal_binds=literal_binds, **kwargs
            )

    compiler = LiteralCompiler(dialect, statement)
    return 'TESTING ONLY BIND: ' + compiler.process(statement)
def __filter_card_id(cards: list[str]):
    """Filter a list of card ids.

    Keeps only entries that parse as integers, removing repeated ids while
    preserving the order in which they first appeared.
    """
    unique_ids: list[str] = []
    for card in cards:
        try:
            int(card)
        except ValueError:
            # Not a numeric id -- drop it.
            continue
        if card not in unique_ids:
            unique_ids.append(card)
    return unique_ids
import time
def get_elapsed_time(start_time) -> str:
    """Return the timespan from *start_time* to now as 'HH:MM:SS.ss'."""
    elapsed = time.time() - start_time
    hours = int(elapsed // 3600)
    minutes = int((elapsed % 3600) // 60)
    seconds = elapsed % 60
    return "{:0>2}:{:0>2}:{:05.2f}".format(hours, minutes, seconds)
def predict_causalforest(cforest, X, num_workers):
    """Predicts individual treatment effects for a causal forest.

    Predicts individual treatment effects for new observed features *X*
    on a fitted causal forest *cforest*. Predictions are made in parallel with
    *num_workers* processes.

    Args:
        cforest (pd.DataFrame): Fitted causal forest represented in a multi-
            index pd.DataFrame consisting of several fitted causal trees
        X (np.array): 2d array of new observations for which we predict the
            individual treatment effect.
        num_workers (int): Number of workers for parallelization.

    Returns:
        predictions (np.array): 1d array of treatment predictions.
    """
    # Outer index level of the MultiIndex identifies the individual trees.
    num_trees = len(cforest.groupby(level=0))
    n, _ = X.shape

    # One parallel prediction task per tree (predict_causaltree defined
    # elsewhere in the project).
    predictions = Parallel(n_jobs=num_workers)(
        delayed(predict_causaltree)(cforest.loc[i], X)
        for i in range(num_trees)
    )
    # Stack per-tree predictions into (num_trees, n) and average over trees.
    predictions = [arr.reshape((1, n)) for arr in predictions]
    predictions = np.concatenate(predictions, axis=0)
    predictions = predictions.mean(axis=0)
    return predictions
def sectnum(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """Automatic section numbering.

    Docutils directive: registers a pending transform (parts.SectNum) that
    numbers sections after the document tree is built. The directive
    options are passed through to the transform via pending.details.
    """
    pending = nodes.pending(parts.SectNum)
    pending.details.update(options)
    state_machine.document.note_pending(pending)
    return [pending]
import logging
import re
def make_vectorized_optimizer_class(cls):
  """Constructs a vectorized DP optimizer class from an existing one.

  The returned subclass clips per-pipeline-stage gradients to
  ``l2_norm_clip`` and adds Gaussian noise before averaging.
  """
  child_code = cls.compute_gradients.__code__
  if child_code is not parent_code:
    logging.warning(
        'WARNING: Calling make_optimizer_class() on class %s that overrides '
        'method compute_gradients(). Check to ensure that '
        'make_optimizer_class() does not interfere with overridden version.',
        cls.__name__)

  class DPOptimizerClass(cls):
    """Differentially private subclass of given class cls."""

    def __init__(
        self,
        l2_norm_clip,
        noise_std, effective_batch_size, #use_pipeline,
        *args,  # pylint: disable=keyword-arg-before-vararg, g-doc-args
        **kwargs):
      """Initialize the DPOptimizerClass.

      Args:
        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
        noise_std: Base noise standard deviation; scaled below by the
          clipping norm and the square root of the effective batch size.
        effective_batch_size: Number of examples the noised gradient is
          effectively averaged over.
      """
      super(DPOptimizerClass, self).__init__(*args, **kwargs)
      self._l2_norm_clip = l2_norm_clip
      self._effective_batch_size = effective_batch_size
      # BUG FIX: the original assignment ended with a trailing comma, which
      # silently made _noise_std a 1-tuple and broke its use as the scalar
      # stddev argument of tf.random.normal() below.
      self._noise_std = noise_std * self._l2_norm_clip / np.sqrt(
          self._effective_batch_size)

    def compute_gradients(self,
                          loss,
                          var_list,
                          gate_gradients=GATE_OP,
                          aggregation_method=None,
                          colocate_gradients_with_ops=True,
                          grad_loss=None,
                          gradient_tape=None):
      if callable(loss):
        # TF is running in Eager mode
        raise NotImplementedError('Vectorized optimizer unavailable for TF2.')
      else:
        # TF is running in graph mode, check we did not receive a gradient tape.
        if gradient_tape:
          raise ValueError('When in graph mode, a tape should not be passed.')

        self._num_microbatches = 1
        microbatch_losses = tf.reshape(loss, [self._num_microbatches, -1])

        if var_list is None:
          var_list = (
              tf.trainable_variables() + tf.get_collection(
                  tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))

        grads, var_list = zip(*super(DPOptimizerClass, self).compute_gradients(
            microbatch_losses,
            var_list,
            gate_gradients,
            aggregation_method,
            colocate_gradients_with_ops,
            grad_loss))
        # Replace None gradients with zeros so every variable has an entry.
        grads_list = [
            g if g is not None else tf.zeros_like(v)
            for (g, v) in zip(list(grads), var_list)
        ]

        # Group gradients by the pipeline stage encoded in their op names.
        pipeline_stages = []
        _vars = []
        grads_pipeline_stages = []
        pipeline_stages_to_grads = {}
        for grad, var in zip(grads_list, var_list):
          _vars.append(var)
          m = re.match(r'.*(Pipeline_stage_[0-9]*).*', grad.op.name)
          if m:
            pipeline_stage = m.group(1)
            grads_pipeline_stages.append(pipeline_stage)
            if not pipeline_stage in pipeline_stages:
              pipeline_stages.append(pipeline_stage)
            if not pipeline_stage in pipeline_stages_to_grads:
              pipeline_stages_to_grads[pipeline_stage] = []
            pipeline_stages_to_grads[pipeline_stage].append(grad)

        normalized_grads = []
        for pipeline_stage in pipeline_stages:
          # Clip by the joint L2 norm of the whole stage, colocated with
          # the stage's first gradient op.
          with ops.get_default_graph().colocate_with(pipeline_stages_to_grads[pipeline_stage][0].op):
            squared_l2_norms = [
                math_ops.reduce_sum(input_tensor=gen_math_ops.square(g)) for g in pipeline_stages_to_grads[pipeline_stage]
            ]
            pipeline_stage_norm = math_ops.sqrt(math_ops.add_n(squared_l2_norms))
            for grad in pipeline_stages_to_grads[pipeline_stage]:
              div = tf.maximum(pipeline_stage_norm / self._l2_norm_clip, 1.)
              grad = grad / div
              # Gaussian mechanism: fp32 noise cast down to the fp16 grads.
              noise = tf.random.normal(tf.shape(input=grad), stddev=self._noise_std, dtype=tf.float32)
              noise = tf.cast(noise, tf.float16)
              grad += noise
              normalized_grads.append(grad)

        grads_and_vars = list(zip(normalized_grads, _vars))
        return grads_and_vars

  return DPOptimizerClass
import fnmatch
def filename_matches_pattern(filepath, pattern):
    """Return True if *filepath* matches any of the given glob pattern(s).

    :param filepath: path string to test
    :param pattern: a single glob pattern or an iterable of patterns
    :return: True if any pattern matches, else False
    """
    if isinstance(pattern, string_types):
        pattern = (pattern, )
    for p in tuple(pattern):
        # BUG FIX: the original called the fnmatch MODULE itself
        # (`fnmatch(filepath, p)`), which raises TypeError; the function
        # is fnmatch.fnmatch.
        if fnmatch.fnmatch(filepath, p):
            return True
    return False
import time
def gmt_time():
    """
    Return the time in the GMT timezone

    @rtype:  string
    @return: Time in GMT timezone, formatted 'YYYY-MM-DD HH:MM:SS GMT'
    """
    now_utc = time.gmtime()
    return time.strftime('%Y-%m-%d %H:%M:%S GMT', now_utc)
import string
def convertbase(num, base=10):
    """
    Convert a number in base 10 to another base

    :type num: number
    :param num: The number to convert.

    :type base: integer
    :param base: The base to convert to.

    >>> convertbase(20, 6)
    '32'
    """
    if num == 0:
        # BUG FIX: the original treated 0 as negative and, since the loop
        # body never ran, returned '-' instead of '0'.
        return '0'
    digits = (string.digits + string.ascii_lowercase)[:base]
    sign = '' if num > 0 else '-'
    num = abs(num)
    res = ''
    while num:
        num, mod = divmod(num, base)
        res += digits[mod]
    return sign + res[::-1]
import inspect
def get_riskmodel(taxonomy, oqparam, **extra):
    """
    Return an instance of the correct riskmodel class, depending on the
    attribute `calculation_mode` of the object `oqparam`.

    :param taxonomy:
        a taxonomy string
    :param oqparam:
        an object containing the parameters needed by the riskmodel class
    :param extra:
        extra parameters to pass to the riskmodel class
    :raises TypeError: if a required constructor parameter cannot be found
    """
    riskmodel_class = registry[oqparam.calculation_mode]
    # arguments needed to instantiate the riskmodel class;
    # getfullargspec replaces inspect.getargspec, which was removed in
    # Python 3.11. Skip (self, taxonomy, ...) -- the first three args.
    argnames = inspect.getfullargspec(riskmodel_class.__init__).args[3:]

    # arguments extracted from oqparam
    known_args = set(name for name, value in
                     inspect.getmembers(oqparam.__class__)
                     if isinstance(value, valid.Param))
    all_args = {}
    for argname in argnames:
        if argname in known_args:
            all_args[argname] = getattr(oqparam, argname)

    if 'hazard_imtls' in argnames:  # special case
        all_args['hazard_imtls'] = oqparam.imtls
    all_args.update(extra)
    missing = set(argnames) - set(all_args)
    if missing:
        raise TypeError('Missing parameter: %s' % ', '.join(missing))
    return riskmodel_class(taxonomy, **all_args)
import os
def is_subdirectory(path_a, path_b):
    """Returns True if `path_a` is `path_b` itself or a subdirectory of it."""
    path_a = os.path.realpath(path_a)
    path_b = os.path.realpath(path_b)
    try:
        relative = os.path.relpath(path_a, path_b)
    except ValueError:
        # Different mounts on Windows:
        # ValueError: path is on mount 'c:', start on mount 'd:'
        return False
    # BUG FIX: when path_a is the direct PARENT of path_b the relative path
    # is exactly '..', which does not start with '../' and was therefore
    # wrongly reported as a subdirectory.
    if relative == os.pardir:
        return False
    return not relative.startswith(os.pardir + os.sep)
def add_str(arg1, arg2):
    """concatenate arg1 & arg2

    Using in template:
    '{{ arg1|add_str:arg2 }}'
    """
    parts = (str(arg1), str(arg2))
    return "".join(parts)
def clean_string_columns(df):
    """Clean string columns in a dataframe.

    Lowercases email/website, title-cases name/trade_name/city/county,
    normalizes LLC spellings and strips whitespace. Columns that are
    missing are silently skipped.

    :param df: DataFrame to clean in place
    :return: the same DataFrame
    """
    try:
        df.email = df.email.str.lower()
        df.website = df.website.str.lower()
    except AttributeError:
        # Frame without email/website columns -- nothing to lowercase.
        pass
    str_columns = ["name", "trade_name", "city", "county"]
    for column in str_columns:
        try:
            df[column] = df[column].astype(str).str.title()
            # BUG FIX: regex=False makes these literal replacements;
            # otherwise "L.L.C." is a regex whose '.' matches any character
            # (and newer pandas changed the regex default, so be explicit).
            df[column] = df[column].astype(str).str.replace("Llc", "LLC", regex=False)
            df[column] = df[column].astype(str).str.replace("L.L.C.", "LLC", regex=False)
            df[column] = df[column].astype(str).str.strip()
        except (AttributeError, KeyError):
            pass
    return df
def build_profile(first, last, **user_info):
    """Build a dictionary containing everything we know about a user.

    The first and last names are stored under 'first_name'/'last_name';
    any extra keyword arguments are merged in afterwards.
    """
    return {'first_name': first, 'last_name': last, **user_info}
def _QueryForUser(user, role=None, target=None):
    """Gets all _Permissions for the user's ID and e-mail domain.

    Combines two _Query result lists: one keyed by the user's id and one
    keyed by the user's e-mail domain (list concatenation, not set union,
    so duplicates are possible -- TODO confirm callers de-duplicate).
    """
    return _Query(user.id, role, target) + _Query(user.email_domain, role, target)
def decode_transfer2(instruction: TransactionInstruction) -> Transfer2Params:
    """Decode a transfer2 token transaction and retrieve the instruction params.

    Validates that the instruction carries at least 4 account keys and is of
    type TRANSFER2, then maps the account keys positionally:
    source, mint, dest, owner, followed by any additional signers.
    """
    parsed_data = __parse_and_validate_instruction(instruction, 4, InstructionType.TRANSFER2)
    return Transfer2Params(
        program_id=instruction.program_id,
        amount=parsed_data.args.amount,
        decimals=parsed_data.args.decimals,
        source=instruction.keys[0].pubkey,
        mint=instruction.keys[1].pubkey,
        dest=instruction.keys[2].pubkey,
        owner=instruction.keys[3].pubkey,
        # Keys beyond the owner are extra multisig signers.
        signers=[signer.pubkey for signer in instruction.keys[4:]],
    )
import os
import gzip
def postprocess(annotator, genomes, working_dir):
    """
    Finds eCIS-screen output files and creates file with annotations for upload into DB

    :param annotator: object whose config provides 'cgcms.temp_dir'
    :param genomes: iterable of genome names; each must have a gzipped
        GenBank file under working_dir/genomes/<genome>/
    :param working_dir: directory containing eCIS-screen results
    :return: path of the generated annotation file
    """
    output_file = os.path.join(annotator.config['cgcms.temp_dir'], 'ecis-screen-plugin-output.txt')
    # Map each "<genome>_<accession>" sequence ID back to its genome name by
    # scanning ACCESSION lines of the GenBank files.
    accessions = {}
    for genome in genomes:
        gbk_file = os.path.join(working_dir, 'genomes', genome, genome + '_genomic.gbff.gz')
        with gzip.open(gbk_file, 'rt') as infile:
            for line in infile:
                if line.startswith('ACCESSION '):
                    accession = line[12:].rstrip('\n\r').split(' ')[0]
                    accessions[genome + '_' + accession] = genome
    with open(output_file, 'w') as outfile:
        with open(os.path.join(working_dir, 'ecis-screen_summary.txt'), 'r') as infile:
            # Header row holds the eCIS subunit family names per column.
            header = infile.readline().rstrip('\n\r').split('\t')
            for line in infile:
                row = line.rstrip('\n\r').split('\t')
                try:
                    genome = accessions[row[0]]
                except KeyError:
                    print ('Unable to find genome name for sequence ID ' + row[0])
                    raise
                for ind, genes in enumerate(row):
                    if ind == 0:
                        continue
                    # '-' marks "no genes found" for this family.
                    if genes == '-':
                        continue
                    family = header[ind]
                    description = family + ' subunit of extracellular contractile injection system (eCIS).'
                    # One annotation line per locus tag in the cell.
                    for locus_tag in genes.split(','):
                        outfile.write('\t'.join([locus_tag, genome, 'eCIS-screen',
                        'https://github.com/ipb-jianyang/eCIS-screen',
                        'eCIS gene',
                        family,
                        description]) + '\n')
    _cleanup(working_dir)
    return output_file
def deep_predict(data: pd.DataFrame, model, kernel: str):
    """
    Deep Predict Method.

    :param data: input frame with a 'time' column plus monitored features
    :param model: fitted Keras-style model with a .name attribute
    :param kernel: key selecting '<kernel>_params' in forecast_params
        (provides IN_STEPS, the model's input window length)
    :return: DataFrame of de-normalized forecasts, one column per
        MonitorItem.INDEX entry
    """
    data = feature_time(data)
    print(data.shape)
    print(data.tail(5))
    # normalization: z-score each feature using this window's own statistics.
    data = data.set_index('time')
    data_norm = (data - data.mean()) / data.std()
    print("name: ", model.name)
    # Reshape to (batch=1, IN_STEPS, n_features) as the model expects.
    this_predict = model_predict(
        model,
        data_norm.values.reshape((1, forecast_params['{0}_params'.format(kernel)]['IN_STEPS'], -1))
    )
    print(this_predict[0].shape)
    # De-normalize each predicted series back to the original scale.
    temp = {}
    for i, col in enumerate(MonitorItem.INDEX.value):
        temp[col] = this_predict[0][:,i] * data[col].std() + data[col].mean()
    this_forecast = pd.DataFrame(temp)
    return this_forecast
def create(user_id):
    """
    Create User Function

    Creates a user with the given guid from the JSON request body.

    :param user_id: guid to assign to the new user
    :return: 400 if a user with this guid already exists, else 201 with
        the creation message
    """
    req_data = request.get_json()
    isuser = UserModel.get_one_user(user_id)
    # Check if user exist
    if isuser:
        return custom_response({'error': 'User already exist'}, 400)
    req_data["user_guid"] = user_id
    user = UserModel(req_data)
    message = user.create_user()
    return custom_response({"message" :message}, 201)
def ordinal(n: int) -> str:
    """Return *n* followed by its English ordinal suffix (1st, 2nd, 3rd, 4th,
    11th, 21st, ...).

    from: https://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd#answer-4712
    """
    # 11, 12, 13 (and 111, 212, ...) are the "teen" exceptions: always 'th'.
    if n % 100 in (11, 12, 13):
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.