content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_geometry_origin_mesh(obj):
    """
    Return the mesh best suited as origin for the visual mesh of ``obj``.

    Looks among the siblings of ``obj`` for an 'inertial' object named
    ``'inertial_' + obj.name``; falls back to ``obj`` itself when no such
    sibling exists.
    obj: Object(ID)
    """
    # TODO: Use better way than this in finding relative inertial mesh
    wanted_name = 'inertial_' + obj.name
    matches = [sibling for sibling in obj.parent.children
               if sibling.phobostype == 'inertial' and sibling.name == wanted_name]
    if not matches:
        return obj
    # The naming scheme should make the match unique.
    assert len(matches) == 1
    inertial = matches[0]
    fix_parented_location(inertial)
    return inertial
def read_fchk(in_name):
    """
    Read a Gaussian .fchk.

    Returns the total energy, gradients and ground state energy.

    Parameters
    ----------
    in_name : str
        Name of the file to read
    Returns
    -------
    energy : float
        Gaussian total calculated energy in Hartree
    grad : numpy.ndarray of floats
        The gradients in form x1,y1,z1,x2,y2,z2 etc. Hartree/Bohr
    scf_energy : float
        Gaussian ground state calculated energy in Hartree
    """
    with open(in_name) as data:
        lines = data.readlines()
    grad = []
    reading = False
    for line in lines:
        # A line starting with a letter marks a new fchk field, which ends
        # any in-progress gradient read. line[:1] (not line[0]) is safe
        # even on a hypothetical empty string.
        if line[:1].isalpha():
            reading = False
        if reading:
            grad.extend(map(float, line.split()))
        if line.startswith("Cartesian Gradient"):
            reading = True
        if line.startswith("Total Energy"):
            energy = float(line.split()[3])
        if line.startswith("SCF Energy"):
            scf_energy = float(line.split()[3])
    grad = np.array(grad)
    return energy, grad, scf_energy
def miles(kilometers=0, meters=0, feet=0, nautical=0):
    """
    Convert distance to miles.

    All given contributions (kilometres, metres, feet, nautical miles)
    are summed and expressed in statute miles.
    """
    km_total = kilometers
    if nautical:
        km_total += nautical / nm(1.)
    if feet:
        km_total += feet / ft(1.)
    if meters:
        km_total += meters / 1000.
    return km_total / 1.609344
def LookupScope(scope):
    """Helper to produce a more readable scope line.

    Args:
      scope: String url that reflects the authorized scope of access.

    Returns:
      Line of text with more readable explanation of the scope with the scope.
    """
    friendly = _SCOPE_MAP.get(scope.rstrip('/'))
    if not friendly:
        # Unknown scope: return it untouched.
        return scope
    return '{0} [{1}]'.format(friendly, scope)
def label2binary(y, label):
    """
    Map entries equal to ``label`` to +1 and every other entry to -1.

    Parameters:
    ----------
    y : `numpy.ndarray`
        (nData,) The labels of two classes.
    label : `int`
        The label to map to +1.

    Returns:
    --------
    y : `numpy.ndarray`
        (nData,) +1 where ``y == label``, -1 elsewhere.
    """
    matches = (y == label).astype(int)
    return 2 * matches - 1
from typing import Tuple
def detect_corners(R: np.array, threshold: float = 0.1) -> Tuple[np.array, np.array]:
    """Computes key-points from a Harris response image.

    Key points are all points where the harris response is significant
    (greater than ``threshold``) and strictly greater than all 8 neighbors.

    Args:
        R: A float image with the harris response
        threshold: A float determining which Harris response values are significant.

    Returns:
        A tuple of two 1D integer arrays containing the x and y coordinates of
        key-points in the image.
    """
    # The previous implementation referenced undefined names (a, b, c, d),
    # overwrote the builtin `list`, and called np.logical_and with a single
    # argument — it raised at runtime. Reimplemented below.
    # Pad with -inf so border pixels can still win against their (virtual)
    # out-of-image neighbours.
    padded = np.pad(R, 1, mode='constant', constant_values=-np.inf)
    # One shifted view per 3x3 neighbour offset (excluding the centre).
    offsets = [(dy, dx)
               for dy in (-1, 0, 1) for dx in (-1, 0, 1)
               if (dy, dx) != (0, 0)]
    h, w = R.shape
    neighbors = np.stack([padded[1 + dy:1 + dy + h, 1 + dx:1 + dx + w]
                          for dy, dx in offsets])
    # Greatest neighbour of every pixel.
    neighbor_max = neighbors.max(axis=0)
    # Significant local maxima only.
    is_keypoint = np.logical_and(R > threshold, R > neighbor_max)
    ys, xs = np.nonzero(is_keypoint)
    return xs, ys
import os
def allocate_transects(mmt, transect_type='Q', checked=False):
    """Method to load transect data. Changed from Matlab approach by Greg to allow possibility
    of multi-thread approach.

    Parameters
    ----------
    mmt: MMT_TRDI
        Object of MMT_TRDI
    transect_type: str
        Type of transect (Q: discharge or MB: moving-bed test)
    checked: bool
        Determines if all files are loaded (False) or only checked files (True)

    Returns
    -------
    processed_transects: list
        TransectData objects for each valid transect file that contained
        water-track data.
    """
    # DEBUG, set threaded to false to get manual serial commands
    multi_threaded = False
    file_names = []
    file_idx = []
    # Setup processing for discharge or moving-bed transects
    if transect_type == 'Q':
        # Identify discharge transect files to load
        if checked:
            for idx, transect in enumerate(mmt.transects):
                if transect.Checked == 1:
                    file_names.append(transect.Files[0])
                    file_idx.append(idx)
        else:
            file_names = [transect.Files[0] for transect in mmt.transects]
            file_idx = list(range(0, len(file_names)))
    elif transect_type == 'MB':
        file_names = [transect.Files[0] for transect in mmt.mbt_transects]
        file_idx = list(range(0, len(file_names)))
    # Determine if any files are missing
    valid_files = []
    valid_indices = []
    for index, name in enumerate(file_names):
        fullname = os.path.join(mmt.path, name)
        if os.path.exists(fullname):
            valid_files.append(fullname)
            valid_indices.append(file_idx[index])
    # Multi-thread for Pd0 files
    # -------------------------
    # Seems like this section belongs in Pd0TRDI.py
    # Initialize thread variables
    pd0_data = []
    pd0_threads = []
    thread_id = 0

    # DSM 1/24/2018 could this be moved to Pd0TRDI.py as a method
    def add_pd0(file_name):
        pd0_data.append(Pd0TRDI(file_name))

    if multi_threaded:
        # TODO this belongs in the pd0 class
        for file in valid_files:
            pd0_thread = MultiThread(thread_id=thread_id, function=add_pd0, args={'file_name': file})
            thread_id += 1
            pd0_thread.start()
            pd0_threads.append(pd0_thread)
    else:
        for file in valid_files:
            pd0_data.append(Pd0TRDI(file))
    for thrd in pd0_threads:
        thrd.join()
    # Multi-thread for transect data
    # Initialize thread variables
    processed_transects = []
    transect_threads = []
    thread_id = 0

    # DSM 1/24/2018 couldn't this be added to the TransectData class
    def add_transect(transect_data, mmt_transect, mt_pd0_data, mt_mmt):
        transect_data.trdi(mmt=mt_mmt,
                           mmt_transect=mmt_transect,
                           pd0_data=mt_pd0_data)
        processed_transects.append(transect_data)

    # Process each transect
    for k in range(len(pd0_data)):
        transect = TransectData()
        if pd0_data[k].Wt is not None:
            if transect_type == 'MB':
                # Process moving-bed transect
                if multi_threaded:
                    # Bug fix: the args key must match add_transect's
                    # 'transect_data' parameter (was 'transect').
                    t_thread = MultiThread(thread_id=thread_id,
                                           function=add_transect,
                                           args={'transect_data': transect,
                                                 'mmt_transect': mmt.mbt_transects[valid_indices[k]],
                                                 'mt_pd0_data': pd0_data[k],
                                                 'mt_mmt': mmt})
                    t_thread.start()
                    transect_threads.append(t_thread)
                else:
                    # (removed a redundant second TransectData() creation here)
                    add_transect(transect_data=transect,
                                 mmt_transect=mmt.mbt_transects[valid_indices[k]],
                                 mt_pd0_data=pd0_data[k],
                                 mt_mmt=mmt)
            else:
                # Process discharge transects
                if multi_threaded:
                    # Bug fix: same args-key mismatch as above.
                    t_thread = MultiThread(thread_id=thread_id,
                                           function=add_transect,
                                           args={'transect_data': transect,
                                                 'mmt_transect': mmt.transects[valid_indices[k]],
                                                 'mt_pd0_data': pd0_data[k],
                                                 'mt_mmt': mmt})
                    t_thread.start()
                    transect_threads.append(t_thread)
                else:
                    add_transect(transect_data=transect,
                                 mmt_transect=mmt.transects[valid_indices[k]],
                                 mt_pd0_data=pd0_data[k],
                                 mt_mmt=mmt)
    if multi_threaded:
        for x in transect_threads:
            x.join()
    return processed_transects
from typing import Union
from typing import Iterable
def add_file_replica_records(
    files: Union[Iterable[File], QuerySet],
    compute_resource: Union[str, ComputeResource],
    set_as_default=False,
) -> int:
    """
    Register a new laxy+sftp:// file location on every given file for the
    supplied ComputeResource. With set_as_default=True the new replica
    location becomes the default location.

    NOTE: This task doesn't actually move any data - it's intended for
    situations where data has been moved out-of-band (eg rsynced manually,
    not via an internal Laxy task) but the database records need updating.

    Returns the number of newly created FileLocation records.
    """
    if isinstance(compute_resource, str):
        compute_resource = ComputeResource.objects.get(id=compute_resource)
    location_prefix = f"laxy+sftp://{compute_resource.id}"
    created_count = 0
    # All locations are created (or skipped) atomically.
    with transaction.atomic():
        for file_obj in files:
            url = f"{location_prefix}/{file_obj.fileset.job.id}/{file_obj.full_path}"
            location, was_created = FileLocation.objects.get_or_create(
                file=file_obj, url=url)
            if set_as_default:
                location.set_as_default(save=True)
            if was_created:
                created_count += 1
    return created_count
from typing import List
import os
def generate_config(imgs_dir: str, cursor_sizes: List[Num], hotspots: any) -> str:
    """
    Generate helpers files.

    ``hotspots`` is JSON data providing ``xhot`` & ``yhot`` for each cursor;
    pass ``None`` to place every hotspot at the middle of its cursor.
    Returns the absolute path of ``imgs_dir``.
    """
    for build in (helpers.generate_static_cursor,
                  helpers.generate_animated_cursor):
        build(imgs_dir, sizes=cursor_sizes, hotspots=hotspots)
    return os.path.abspath(imgs_dir)
import os
import fnmatch
def index_returns_directory():
    """
    Prior to compiling a master, it is useful to get the order of projects
    by their file name, as the compile.run() function traverses the directory
    top to bottom to build the master. We can then use this to compare with the
    order of projects (columns) in the old master document we are comparing
    the current compile. This is pretty hackish but needs must...
    """
    project_names = []
    summary_sheet_name = runtime_config["TemplateSheets"]["summary_sheet"]
    for entry in os.listdir(RETURNS_DIR):
        if not fnmatch.fnmatch(entry, "*.xlsm"):
            continue
        workbook = load_workbook(os.path.join(RETURNS_DIR, entry))
        summary = workbook[summary_sheet_name]
        # Cell B5 of the summary sheet holds the project name.
        project_names.append(summary["B5"].value)
    return project_names
def in_suit3(list, list0):
    """
    Test whether two street-number ranges share at least one number.

    For example: "22-27" and "21-24" returns True.

    :param list: str range such as "22-27" (first and last numbers are used)
    :param list0: str range such as "21-24"
    :return: True if the ranges overlap, False otherwise (also False when
        either argument is not a pure numeric range — the original
        implicitly returned None in that case).
    """
    # Rebind immediately: the parameter names shadow builtins but are kept
    # for backward compatibility with keyword callers.
    range_a, range_b = list, list0
    if "-" not in range_a or "-" not in range_b:
        return False
    if not (range_a.replace("-", "").isdigit()
            and range_b.replace("-", "").isdigit()):
        return False

    def _to_set(text):
        # Expand "a-...-b" to the inclusive set {a..b}; when a > b the
        # original code only produced the two endpoints, which we preserve.
        parts = text.split("-")
        lo, hi = int(parts[0]), int(parts[-1])
        if lo <= hi:
            return set(range(lo, hi + 1))
        return {lo, hi}

    return not _to_set(range_a).isdisjoint(_to_set(range_b))
def mp_wfs_110_nometadata(monkeypatch):
    """Monkeypatch the call to the remote GetCapabilities request of WFS
    version 1.1.0, not containing MetadataURLs.

    Parameters
    ----------
    monkeypatch : pytest.fixture
        PyTest monkeypatch fixture.
    """
    capabilities_path = \
        'tests/resources/wfs_dov_getcapabilities_110_nometadata.xml'

    def read(*args, **kwargs):
        # Serve the canned capabilities document instead of a remote request.
        with open(capabilities_path, 'r') as f:
            contents = f.read()
        if not isinstance(contents, bytes):
            contents = contents.encode('utf-8')
        return etree.fromstring(contents)

    monkeypatch.setattr(
        owslib.feature.common.WFSCapabilitiesReader, 'read', read)
def extract_power(eeg, D=3, dt=0.2, start=0):
""" extract power vaules for image
Parameters
----------
seizure : EEG | dict
eeg data
D : int, optional
epoch duration, by default 3
dt : float, optional
time step (seconds), by default 0.2
start : int, optional
time to start, by default 0
Returns
-------
baseline_ex_power : ndarray
baseline power
seizure_ex_power : ndarray
seizure power
"""
assert int(D/dt)*dt == D
num_steps = int(D/dt)
seiz = eeg['seizure']['eeg']
sfreq = seiz.info['sfreq']
onset = seiz.annotations.onset[0] - (seiz.first_samp/sfreq) + start
first = int(onset/dt)
baseline_ex_power = eeg['baseline']['ave_power'][:, :num_steps]
seizure_ex_power = eeg['seizure']['ave_power'][:, first:first+num_steps]
return baseline_ex_power, seizure_ex_power | 04c3fed38fa2a2d46ba7edee4bb3f04011d9d2a7 | 3,632,212 |
import time
import tqdm
def extract_HOG(generator, category='species', image_size=256, train=True, verbose=True):
    """
    Extract HOG features for specified dataset (train/test).

    Args:
        generator (Generator class object): Generator class object.
        train (bool): Am I working with train or test data?
        verbose (bool): Should I print some additional info?
        category (str): What category do you want: species or breeds?

    Returns:
        (ndarray of size [images, features_no], ndarray of size [images]) Features and labels.
    """
    batch_size = 64
    feature_chunks = []
    label_chunks = []
    started = time.time()
    n_batches = generator.images_count(train=train) / batch_size
    print("Calculating HOG featues..")
    batch_iter = generator.generate_batch(
        train=train, batch_size=batch_size, category=category,
        image_size=image_size)
    for images, labels in tqdm.tqdm(batch_iter, total=n_batches):
        feature_chunks.append(_HOG(images, image_size))
        label_chunks.append(labels)
    features = np.concatenate(feature_chunks, axis=0)
    all_labels = np.concatenate(label_chunks, axis=0)
    finished = time.time()
    if verbose:
        print ("Features calculated in ", finished - started, " seconds")
    return features, all_labels
def maybe_convert_platform(values):
    """ try to do platform conversion, allow ndarray or list here """
    # Listlikes get boxed into a 1d object ndarray first.
    if isinstance(values, (list, tuple, range)):
        values = construct_1d_object_array_from_listlike(values)
    # Only object dtype needs type inference.
    if getattr(values, "dtype", None) != np.object_:
        return values
    # Unwrap Index/Series-like wrappers to their raw values before inferring.
    underlying = getattr(values, "_values", values)
    return lib.maybe_convert_objects(underlying)
def build_training_response(mongodb_result, hug_timer, remaining_count):
    """
    For reducing the duplicate lines in the 'get_single_training_movie' function.
    """
    first_result = list(mongodb_result)[0]
    response = {
        'movie_result': first_result,
        'remaining': remaining_count,
        'success': True,
        'valid_key': True,
        'took': float(hug_timer),
    }
    return response
def calc_fm_perp_for_fm_loc(k_loc_i, fm_loc):
    """Calculate perpendicular component of fm to scattering vector.

    Computed component-wise as (k x fm) x k, which also works element-wise
    on numpy arrays.
    """
    k_1, k_2, k_3 = k_loc_i[0], k_loc_i[1], k_loc_i[2]
    m_1, m_2, m_3 = fm_loc[0], fm_loc[1], fm_loc[2]
    # c = k x fm
    c_1 = k_2*m_3 - k_3*m_2
    c_2 = k_3*m_1 - k_1*m_3
    c_3 = k_1*m_2 - k_2*m_1
    # result = c x k
    perp_1 = c_2*k_3 - c_3*k_2
    perp_2 = c_3*k_1 - c_1*k_3
    perp_3 = c_1*k_2 - c_2*k_1
    return perp_1, perp_2, perp_3
def generate(
        song_name,
        raw_data_name,
        base_path=utils.BASE_PATH,
        chunk_size=100,
        context_size=7,
        drop_diffs=[],
        log=False):
    """
    Generate an SMDataset from SM/wav files.
    Only creates datasets with no step predictions.

    Parameters
    ----------
    song_name : name of the song/simfile loaded via SMData.SMFile.
    raw_data_name : name of the raw data collection the song belongs to.
    base_path : root directory for the raw data (default utils.BASE_PATH).
    chunk_size, context_size : passed through to SMDataset.SMDataset.
    drop_diffs : difficulties to exclude (in addition to 'Edit').
        NOTE(review): mutable default argument ([]); safe only because it
        is never mutated here — confirm before changing.
    log : forwarded to wavutils.gen_fft_features.
    """
    sm = SMData.SMFile(song_name, raw_data_name, base_path)
    # May want to save the time mapping later.
    btc = BTC.BeatTimeConverter(sm.offset, sm.bpms, sm.stops)
    # Will want to mantain order.
    # List of strings, not ints.
    diffs = list(filter(lambda x: x != 'Edit', sm.note_charts.keys()))
    if drop_diffs is not None:
        diffs = list(filter(lambda x: x not in drop_diffs, diffs))
    notes = {} # Contains only a list of notes for each difficulty.
    times = {} # List of times per diff.
    frames = {}
    # labels = {} # List of note aligned labels for note events. {0, 1} for now.
    # Track first and last notes for wav padding.
    first_frame = np.inf
    last_frame = -np.inf
    # Find note times and frames for alignment to features.
    for diff in diffs:
        times[diff], notes[diff] = \
            btc.gen_time_notes(sm.note_charts[diff].notes)
        frames[diff] = btc.align_to_frame(times[diff])
        if frames[diff][0] < first_frame:
            first_frame = frames[diff][0]
        if frames[diff][-1] > last_frame:
            last_frame = frames[diff][-1]
    # Test this!
    # Test by writing beeps again.
    # Pad the wav so the FFT features cover the first and last note frames.
    front_pad_frames, padded_wav = \
        wavutils.pad_wav(first_frame, last_frame, sm.wavdata)
    fft_features = wavutils.gen_fft_features(padded_wav, log=log)
    # N_channels = 3 (1024, 2048, 4096)
    # N_frames ~ song length * 44100 / 512
    # N_freqs = 80 (Number of mel coefs per frame)
    N_channels, N_frames, N_freqs = fft_features.shape
    # One label row per difficulty; 4 step-type columns per frame
    # (presumably one per arrow direction — confirm against note format).
    step_pos_labels = np.zeros((len(diffs), N_frames))
    step_type_labels = np.zeros((len(diffs), N_frames, 4))
    for i, diff in enumerate(diffs):
        # Adjusting for the new frames added on to the front.
        frames[diff] += front_pad_frames
        # Generating final frame-aligned labels for note event:
        step_pos_labels[i, frames[diff]] = 1
        for j, note in zip(frames[diff], notes[diff]):
            step_type_labels[i, j, :] = np.array(list(map(int, note)))
    return SMDataset.SMDataset(
        song_name, diffs, fft_features, step_pos_labels, step_type_labels,
        chunk_size, context_size)
from functools import reduce
def cmap_map(function, cmap):
    """ from scipy cookbook

    Applies function (which should operate on vectors of shape 3: [r, g, b]) on colormap cmap.
    This routine will break any discontinuous points in a colormap.
    """
    cdict = cmap._segmentdata
    step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red', 'green', 'blue'):
        step_dict[key] = [x[0] for x in cdict[key]]
    step_list = reduce(lambda x, y: x + y, list(step_dict.values()))
    step_list = np.array(list(set(step_list)))
    # Then compute the LUT, and apply the function to the LUT.
    # Bug fix: `reduced_cmap` had been commented out while still being used
    # (NameError), and `function` was being applied to the step positions
    # instead of the old LUT's RGB values. Both are corrected here.
    def reduced_cmap(step):
        return np.array(cmap(step)[0:3])
    old_lut = np.array([reduced_cmap(step) for step in step_list])
    new_lut = np.array([function(rgb) for rgb in old_lut])
    # Now try to make a minimal segment definition of the new LUT
    cdict = {}
    for i, key in enumerate(('red', 'green', 'blue')):
        this_cdict = {}
        for j, step in enumerate(step_list):
            if step in step_dict[key]:
                this_cdict[step] = new_lut[j, i]
            elif new_lut[j, i] != old_lut[j, i]:
                this_cdict[step] = new_lut[j, i]
        colorvector = [x + (x[1],) for x in list(this_cdict.items())]
        colorvector.sort()
        cdict[key] = colorvector
    return mc.LinearSegmentedColormap('colormap', cdict, 1024)
from datetime import datetime
from typing import Dict
from typing import Any
def get_legislation_passed_legislature_within_time_frame(
    begin_date: datetime, end_date: datetime
) -> Dict[str, Any]:
    """See: http://wslwebservices.leg.wa.gov/legislationservice.asmx?op=GetLegislationPassedLegislatureWithinTimeFrame"""
    arguments: Dict[str, Any] = {"beginDate": begin_date, "endDate": end_date}
    # Per-field converters applied to the service response.
    converters: Dict[str, Any] = {
        "billnumber": int,
        "substituteversion": int,
        "engrossedversion": int,
        "active": lambda boolstr: boolstr.lower() == "true",
    }
    return waleg.call(
        "Legislation",
        "GetLegislationPassedLegislatureWithinTimeFrame",
        arguments,
        converters,
    )
import glob
import os
def find_output(directory, extension="out", abspath=True):
    """ Find output file in a directory.

    Parameters
    ----------
    directory : str
        Path to folder in which output should be located.
    extension : str
        File extension of output file (default: 'out').
    abspath : bool
        Whether to return absolute path (default: True).

    Returns
    -------
    outpath : str
        Path to output file (relative or absolute, default: absolute).

    Raises
    ------
    FileNotFoundError
        If zero, or more than one, matching (non-slurm) file is found.
    """
    matches = [fn for fn in glob.glob(directory + "/*." + extension)
               if not os.path.basename(fn).startswith("slurm")]
    if len(matches) != 1:
        err = f"Could not determine unique .{extension} file in {directory}/ !"
        raise FileNotFoundError(err)
    outpath = matches[0]
    if abspath:
        # Bug fix: the original joined os.path.abspath(directory) with a
        # glob result that already contained the directory prefix, producing
        # a duplicated path segment (e.g. /cwd/dir/dir/file.out).
        outpath = os.path.abspath(outpath)
    return outpath
def trim_mismatches(gRNA):
    """
    Trim off 3' mismatches.

    1. First trim past the right-most mismatch before position -40, to
       prevent long alignments past the end of normally expressed gRNAs.
    2. Then keep trimming mismatches that sit within 4 nt of the current
       trim point.
    """
    pairing = gRNA['pairing']
    # 1. position just after the right-most mismatch ('.') before index -40;
    #    0 when no such mismatch exists
    trim = pairing.rfind('.', 0, -40) + 1
    # 2. walk forward over closely-spaced mismatches
    while '.' in pairing[trim:]:
        next_mm = pairing.find('.', trim)
        if next_mm - trim >= 4:
            # next mismatch is far enough away; stop trimming
            break
        trim = next_mm + 1
    # shift alignment coordinates and cut the aligned strings
    if trim > 0:
        if gRNA['strand'] == 'coding':
            gRNA['circle_end'] -= trim
        else:
            gRNA['circle_start'] += trim
        gRNA['mRNA_start'] += trim
        gRNA['length'] -= trim
        for key in ('mRNA_seq', 'gRNA_seq', 'pairing'):
            gRNA[key] = gRNA[key][trim:]
        gRNA['mismatches'] = gRNA['pairing'].count('.')
    return gRNA
def validateRequest(endpoint, params):
    """Validate that a valid argument with the correct number of parameters has been supplied.

    Prints a usage hint and returns False for an unknown endpoint or a
    parameter-count mismatch; returns True otherwise.
    """
    if endpoint not in endpoints:
        helpInfo = ", ".join(endpoints.keys())
        print(f"Invalid endpoint. Please use one of the following: {helpInfo}")
        return False
    if len(params) != len(endpoints[endpoint]["params"]):
        helpInfo = ", ".join(endpoints[endpoint]["params"])
        print(f"Invalid params. {endpoint} requires the following: {helpInfo}")
        # Bug fix: this branch previously fell through and returned None.
        return False
    return True
import random
def augment_sequence(seq):
    """ Flip / rotate a sequence with some random variations.

    seq is modified in place and also returned. seq[frame][m] is expected to
    be an array-like of shape (2, 7): an x row and a y row for 7 keypoints
    (m indexes two tracked instances — presumably two animals; confirm
    against the caller). With p=0.5 x is mirrored, with p=0.5 y is mirrored,
    and with p=0.3 all points are rotated by a random angle about the image
    centre.
    """
    imw, imh = 1024, 570
    if random.random()>0.5:
        for frame in range(len(seq)):
            for m in [0, 1]:
                seq[frame][m][0] = imw-seq[frame][m][0] # X flip
    if random.random()>0.5:
        for frame in range(len(seq)):
            for m in [0, 1]:
                seq[frame][m][1] = imh-seq[frame][m][1] # Y flip (comment previously said "X flip")
    if random.random()>0.7:
        # random rotation about the image centre (imw/2, imh/2)
        angle = (np.random.rand()-0.5) * (np.pi * 2)
        c, s = np.cos(angle), np.sin(angle)
        rot = np.array([[c, -s], [s, c]])
        for frame in range(len(seq)):
            for m in [0, 1]:
                seq[frame][m] -= np.array([[imw/2]*7, [imh/2]*7])
                seq[frame][m] = np.dot(np.array(seq[frame][m]).T, rot).T
                seq[frame][m] += np.array([[imw/2]*7, [imh/2]*7])
    return seq
from mapalign import embed
from sklearn import metrics
from sklearn.utils.extmath import _deterministic_vector_sign_flip
def dme(network, threshold=90, n_components=10, return_result=False, **kwargs):
    """
    Threshold, cosine similarity, and diffusion map embed `network`

    Parameters
    ----------
    network : (N, N) array_like
        Symmetric network on which to perform diffusion map embedding
    threshold : [0, 100] float, optional
        Threshold used to "sparsify" `network` prior to embedding. Default: 90
    n_components : int, optional
        Number of components to retain from embedding of `network`. Default: 10
    return_result : bool, optional
        Whether to return result dictionary including eigenvalues, original
        eigenvectors, etc. from embedding. Default: False
    kwargs : key-value pairs, optional
        Passed directly to :func:`mapalign.embed.compute_diffusion_map`

    Returns
    -------
    embedding : (N, C) numpy.ndarray
        Embedding of `N` samples in `C`-dimensional spaces
    res : dict
        Only if `return_result=True`
    """
    # Sparsify: zero out entries below each row's percentile cut-off.
    net = network.copy()
    cutoffs = np.percentile(net, threshold, axis=1, keepdims=True)
    net[net < cutoffs] = 0
    # Affinity via cosine similarity of the sparsified rows.
    affinity = metrics.pairwise.cosine_similarity(net)
    # Embed, then make eigenvector signs deterministic across runs.
    emb, res = embed.compute_diffusion_map(affinity, n_components=n_components,
                                           return_result=True, **kwargs)
    emb = _deterministic_vector_sign_flip(emb.T).T
    return (emb, res) if return_result else emb
def constant_substitution(text, constants_dict=None):
    """
    Substitute $-style constants in the text.

    :param text: template text containing ``$name`` placeholders
    :param constants_dict: mapping of constant names to values (may be None)
    :return: text with known constants substituted; unknown placeholders
        (and text given without constants) are left unchanged
    """
    if constants_dict:
        return Template(text).safe_substitute(constants_dict)
    # No constants, so return the same text.
    return text
def zticks(model_name, dat, let):
    """
    Read model name and return the corresponding
    array of z-coordinates of front side of cells.

    The result has one more entry than there are cells:
    [0, d0, d0+d1, ..., sum(d)] — the cumulative cell thicknesses with a
    leading zero.
    """
    delzarr = delz(model_name, dat, let)
    # Cumulative sum replaces the original O(n^2) per-prefix summation.
    return np.concatenate(([0], np.cumsum(delzarr)))
import copy
def reformat_args(args):
    """
    reformat_args(args)

    Returns reformatted args for analyses, specifically
        - Sets stimulus parameters to "none" if they are irrelevant to the
          stimtype
        - Changes stimulus parameters from "both" to actual values
        - Changes grps string values to a list
        - Sets seed, though doesn't seed
        - Modifies analyses (if "all" or "all_" in parameter)

    Adds the following args:
        - dend (str)     : type of dendrites to use ("allen" or "extr")
        - omit_sess (str): sess to omit
        - omit_mice (str): mice to omit

    Required args:
        - args (Argument parser): parser with the following attributes:
            visflow_dir (str)        : visual flow direction values to include
                                       (e.g., "right", "left" or "both")
            visflow_size (int or str): visual flow size values to include
                                       (e.g., 128, 256, "both")
            gabfr (int)              : gabor frame value to start sequences at
                                       (e.g., 0, 1, 2, 3)
            gabk (int or str)        : gabor kappa values to include
                                       (e.g., 4, 16 or "both")
            gab_ori (int or str)     : gabor orientation values to include
                                       (e.g., 0, 45, 90, 135, 180, 225 or "all")
            runtype (str)            : runtype ("pilot" or "prod")
            stimtype (str)           : stimulus to analyse (visflow or gabors)
            grps (str)               : set or sets of groups to plot,
                                       (e.g., "all change no_change reduc incr").

    Returns:
        - args (Argument parser): input parser, with the following attributes
                                  modified:
                                      visflow_dir, visflow_size, gabfr, gabk,
                                      gab_ori, grps, analyses, seed
                                  and the following attributes added:
                                      omit_sess, omit_mice, dend
    """
    # Work on a copy so the caller's parser namespace is left untouched.
    args = copy.deepcopy(args)
    # somatic data only supports the "allen" dendrite type
    if args.plane == "soma": args.dend = "allen"
    # resolve stimulus parameter shorthands ("both", "all") into real values
    [args.visflow_dir, args.visflow_size, args.gabfr,
        args.gabk, args.gab_ori] = sess_gen_util.get_params(
        args.stimtype, args.visflow_dir, args.visflow_size, args.gabfr,
        args.gabk, args.gab_ori)
    # convert the grps string into a list of group names
    args.grps = gen_util.str_to_list(args.grps)
    # sessions/mice to exclude for this stimulus/runtype combination
    args.omit_sess, args.omit_mice = sess_gen_util.all_omit(
        args.stimtype, args.runtype, args.visflow_dir, args.visflow_size,
        args.gabk)
    # choose a seed if none is provided (i.e., args.seed=-1), but seed later
    args.seed = rand_util.seed_all(
        args.seed, "cpu", log_seed=False, seed_now=False)
    # collect analysis letters
    all_analyses = "".join(get_analysis_fcts().keys())
    if "all" in args.analyses:
        if "_" in args.analyses:
            # "all_xy" means every analysis except letters x and y
            excl = args.analyses.split("_")[1]
            args.analyses, _ = gen_util.remove_lett(all_analyses, excl)
        else:
            args.analyses = all_analyses
    elif "_" in args.analyses:
        raise ValueError("Use '_' in args.analyses only with 'all'.")
    return args
def load_xlsx_to_plan_list(
    filename, sort_by=["sample_num"], rev=[False], retract_when_done=False
):
    """
    Build a queue-server plan list from all sample dictionaries in an xlsx file.

    @param filename: spreadsheet to load with load_samplesxls
    @param sort_by: list of strings determining the sorting of scans
        strings include project, configuration, sample_id, plan, plan_args,
        spriority, apriority within which all of one acquisition, etc
    @param rev: list the same length as sort_by, of booleans, whether to
        reverse that sort
    @param retract_when_done: go to throughstation mode at the end of all runs.
    @return: list of queue-server plan dicts, or None when sort_by is invalid
    """
    bar = load_samplesxls(filename)
    list_out = []
    for samp_num, s in enumerate(bar):
        sample = s
        sample_id = s["sample_id"]
        sample_project = s["project_name"]
        for acq_num, a in enumerate(s["acquisitions"]):
            if "priority" not in a.keys():
                a["priority"] = 50
            list_out.append(
                [
                    sample_id,  # 0 X
                    sample_project,  # 1 X
                    a["configuration"],  # 2 X
                    a["plan_name"],  # 3
                    avg_scan_time(a["plan_name"], 50),  # 4 calculated plan time
                    sample,  # 5 full sample dict
                    a,  # 6 full acquisition dict
                    samp_num,  # 7 sample index
                    acq_num,  # 8 acq index
                    a["args"],  # 9 X
                    s["density"],  # 10
                    s["proposal_id"],  # 11 X
                    s["sample_priority"],  # 12 X
                    a["priority"],  # 13 X
                ]
            )
    switcher = {
        "sample_id": 0,
        "project": 1,
        "config": 2,
        "plan": 3,
        "plan_args": 9,
        "proposal": 11,
        "spriority": 12,
        "apriority": 13,
        "sample_num": 7,
    }
    # add anything to the above list, and make a key in the above dictionary,
    # using that element to sort by something else
    if isinstance(sort_by, str):
        # single sort key given as a bare string; wrap both arguments
        sort_keys = [sort_by]
        reverses = [rev]
    elif isinstance(sort_by, list) and isinstance(rev, list):
        # Bug fix: the original called sort_by.reverse()/rev.reverse(),
        # mutating the caller's lists AND the shared mutable default
        # arguments, flipping the sort order on every other call.
        # Reverse copies instead.
        sort_keys = list(reversed(sort_by))
        reverses = list(reversed(rev))
    else:
        print(
            "sort_by needs to be a list of strings\n"
            "such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
        )
        return
    try:
        # sort by each key in turn; the last key listed is the primary sort
        for k, r in zip(sort_keys, reverses):
            list_out = sorted(list_out, key=itemgetter(switcher[k]), reverse=r)
    except KeyError:
        print(
            "sort_by needs to be a list of strings\n"
            "such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
        )
        return
    plan_list = []
    for step in list_out:
        kwargs = step[6]["kwargs"]
        sample_md = step[5]
        # del sample_md['acquisitions']
        if hasattr(rsoxs_queue_plans, step[3]):
            kwargs.update(
                {
                    "configuration": step[2],
                    "sample_md": sample_md,
                    "acquisition_plan_name": step[3],
                }
            )
            plan = {"name": "run_queue_plan", "kwargs": kwargs, "item_type": "plan"}
            plan_list.append(plan)
        else:
            print(f"Invalid acquisition:{step[3]}, skipping")
    if retract_when_done:
        plan_list.append({"name": "all_out", "item_type": "plan"})
    return plan_list
def WTERMSIG(status):
    """Return the signal which caused the process to exit.

    NOTE(review): stub implementation — always returns 0 regardless of
    ``status`` (presumably a shim for platforms without POSIX wait-status
    semantics; the POSIX computation would be ``status & 0x7f``). Confirm
    this is intentional before relying on the result.
    """
    return 0
import io
def create_toplevel_function_string(args_out, args_in, pm_or_pf):
    """
    Create a string for a function of the form:
        def hl_func(pm, pf):
            ...
            outputs = (...) = applied_f(...)
            header = [...]
            return DataFrame(data, columns=header)

    Parameters
    ----------
    args_out: iterable of the out arguments
    args_in: iterable of the in arguments
    pm_or_pf: iterable of strings for object that holds each arg

    Returns
    -------
    a String representing the function
    """
    fstr = io.StringIO()
    fstr.write("def hl_func(pm, pf")
    fstr.write("):\n")
    fstr.write("    from pandas import DataFrame\n")
    fstr.write("    import numpy as np\n")
    fstr.write("    import pandas as pd\n")
    fstr.write("    def get_values(x):\n")
    fstr.write("        if isinstance(x, pd.Series):\n")
    fstr.write("            return x.values\n")
    fstr.write("        else:\n")
    fstr.write("            return x\n")
    fstr.write("    outputs = \\\n")
    # Left-hand side: attributes receiving applied_f's results.
    # (A dead loop that built an unused `outs` list from args_out + args_in,
    # immediately overwritten by this comprehension, has been removed.)
    outs = [m_or_f + "." + arg for m_or_f, arg in zip(pm_or_pf, args_out)]
    fstr.write("        (" + ", ".join(outs) + ") = \\\n")
    fstr.write("        " + "applied_f(")
    for ppp, attr in zip(pm_or_pf, args_out + args_in):
        # Bring Policy parameter values down a dimension.
        if ppp == "pm":
            attr += "[0]"
        fstr.write("get_values(" + ppp + "." + attr + ")" + ", ")
    fstr.write(")\n")
    fstr.write("    header = [")
    col_headers = ["'" + out + "'" for out in args_out]
    fstr.write(", ".join(col_headers))
    fstr.write("]\n")
    if len(args_out) == 1:
        fstr.write("    return DataFrame(data=outputs,"
                   "columns=header)")
    else:
        fstr.write("    return DataFrame(data=np.column_stack("
                   "outputs),columns=header)")
    return fstr.getvalue()
def remove_duplicates(df: pd.DataFrame, **kwargs: dict) -> pd.DataFrame:
    """
    Drop duplicated rows from a DataFrame.

    Args:
        df (pd.DataFrame): DataFrame to deduplicate
        **kwargs: forwarded to ``DataFrame.duplicated`` (e.g. ``subset``, ``keep``)

    Returns:
        pd.DataFrame: the rows of ``df`` that are not duplicates
    """
    duplicate_mask = df.duplicated(**kwargs)
    return df[~duplicate_mask]
import sys
import imp
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if pycompat.safehasattr(sys, "frozen"):  # new py2exe
        return True
    if pycompat.safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
from npc_engine.exporters.base_exporter import Exporter
import click
def test_model(models_path: str, model_id: str):
    """Send test request to the model and print reply.

    Returns 1 (and prints an error) when the model is invalid, 0 otherwise.
    """
    # Guard clause: reject anything that is not a valid local model.
    if not validate_local_model(models_path, model_id):
        click.echo(
            click.style(f"{(model_id)} is not a valid npc-engine model.", fg="red",)
        )
        return 1
    target_type = get_model_type_name(models_path, model_id)
    # Every exporter registered for this model type gets to run its test.
    for candidate in Exporter.get_exporters():
        if candidate.get_model_name() == target_type:
            candidate.test_model(models_path, model_id)
    return 0
def create_x11_client_listener(loop, display, auth_path):
    """Create a listener to accept X11 connections forwarded over SSH

    :param loop: event loop passed through to the xauth lookup and listener
    :param display: X11 display string, parsed for host and display number
    :param auth_path: path used to look up the X11 authentication protocol
        and cookie for this display
    :returns: an ``SSHX11ClientListener`` for the resolved display

    NOTE(review): this is an old-style (pre-``async def``) coroutine — it
    uses ``yield from`` with a ``return`` value, so it presumably carried an
    ``@asyncio.coroutine`` decorator in its original context; confirm upstream.
    """
    # The screen component of the display is not needed for listening.
    host, dpynum, _ = _parse_display(display)
    auth_proto, auth_data = yield from lookup_xauth(loop, auth_path,
                                                   host, dpynum)
    return SSHX11ClientListener(loop, host, dpynum, auth_proto, auth_data)
def expand_tile(expand_info):
    """Tile expander

    Builds a one-op graph that either broadcasts or tiles the input tensor,
    depending on whether the computed output shape is broadcast-compatible.
    """
    input_desc = expand_info['input_desc'][0]
    # Pull the 'multiples' attribute out of the attr list, if present.
    multiples = next(
        (item['multiples'] for item in expand_info['attr'] if 'multiples' in item),
        None,
    )
    output_shape, _, _, shape_compatible = _get_tile_output_shape(
        input_desc['shape'], multiples)
    graph_builder = builder.GraphBuilder()
    with graph_builder.graph_scope('main') as graph_scope:
        input_x = graph_builder.tensor(
            input_desc['shape'], input_desc['data_type'], input_desc['format'])
        if shape_compatible:
            # A compatible shape can be produced by broadcasting alone.
            result = graph_builder.emit('BroadcastTo', [input_x], attrs={'shape': output_shape})
        else:
            result = graph_builder.emit('Tile', [input_x], attrs={'multiples': multiples})
        graph_scope.set_output(result)
    return graph_builder.get()[0]
def compare_system_and_attributes_csv(self, file_number):
    """compare systems and associated attributes"""
    # reference objects the imported systems should be linked to
    analysisstatus_1 = Analysisstatus.objects.get(
        analysisstatus_name='analysisstatus_1'
    )
    systemstatus_1 = Systemstatus.objects.get(systemstatus_name='systemstatus_1')
    system_names = [
        f'system_csv_{file_number}_{suffix}' for suffix in ('001', '002', '003')
    ]
    # compare - existence of objects
    for system_name in system_names:
        self.assertTrue(
            System.objects.filter(system_name=system_name).exists()
        )
    # compare - analysis status of each system
    for system_name in system_names:
        self.assertEqual(
            System.objects.get(system_name=system_name).analysisstatus,
            analysisstatus_1,
        )
    # compare - system status of each system
    for system_name in system_names:
        self.assertEqual(
            System.objects.get(system_name=system_name).systemstatus,
            systemstatus_1,
        )
    # return to test function
    return self
def is_live_request(request):
    """
    Helper to differentiate between live requests and scripts.
    Requires :func:`~.request_is_live_tween_factory`, which stamps the
    WSGI environ with ``LIVE_REQUEST``.
    """
    environ = request.environ
    return environ.get("LIVE_REQUEST", False)
import requests
from bs4 import BeautifulSoup, SoupStrainer
def get_videos(episode):
    """
    Get the list of (name, url) video links for an episode page.

    :param episode: URL of the episode page to scrape
    :return: list of (vid_name, vid_url) tuples
    """
    videos = []
    html = requests.get(episode).text
    # First pass: anchor links inside the "vidLinksContent" paragraph.
    mlink = SoupStrainer('p', {'class':'vidLinksContent'})
    soup = BeautifulSoup(html, parseOnlyThese=mlink)
    items = soup.findAll('a')
    for item in items:
        # Prefer the title attribute; fall back to the link text when absent.
        try:
            vid_name = item['title']
        except KeyError:
            vid_name = item.text
        vid_url = item['href']
        videos.append((vid_name, vid_url))
    # Second pass: embedded iframes inside the post content.
    mlink = SoupStrainer('div', {'class':'post-content'})
    soup = BeautifulSoup(html, parseOnlyThese=mlink)
    #items = soup.findAll('div', {'class':'video-shortcode'})
    items = soup.findAll('iframe')
    for item in items:
        try:
            vid_name = item['title']
        except KeyError:
            vid_name = item['class']
        vid_url = item['src']
        videos.append((vid_name, vid_url))
    return videos
from typing import List
import logging
def validate_documentation_files(documentation_dir: str,
                                 files_to_validate: List[str] = None):
    """Validate documentation files in a directory.

    When ``files_to_validate`` is given, only those (relative) paths are
    checked and each matching model is also smoke tested.
    Returns the number of files validated.
    """
    # Smoke testing is only enabled when an explicit file list was given.
    do_smoke_test = bool(files_to_validate)
    validated = 0
    for file_path in filesystem_utils.recursive_list_dir(documentation_dir):
        relative_path = file_path[len(documentation_dir) + 1:]
        if files_to_validate and relative_path not in files_to_validate:
            continue
        logging.info("Validating %s.", file_path)
        DocumentationParser(documentation_dir).validate(file_path, do_smoke_test)
        validated += 1
    logging.info("Found %d matching files - all validated successfully.",
                 validated)
    if not do_smoke_test:
        logging.info(
            "No models were smoke tested. To download and smoke test a specific "
            "model, specify files directly in the command line, for example: "
            "'python tools/validator.py vtab/models/wae-ukl/1.md'")
    return validated
def _add_reference_resources(data):
    """Add genome reference information to the item to process.

    Populates ``align_ref``, ``sam_ref`` and ``genome_resources`` on the
    item dict, using the configured aligner (if any) to pick references.
    """
    aligner = data["config"]["algorithm"].get("aligner", None)
    refs = genome.get_refs(data["genome_build"], aligner, data["dirs"]["galaxy"])
    data["align_ref"], data["sam_ref"] = refs
    data["genome_resources"] = genome.get_resources(
        data["genome_build"], data["sam_ref"])
    return data
def parse_solid_selection(pipeline_def, solid_selection):
    """Resolve a list of solid-selection queries to qualified solid names.

    Takes a pipeline definition and a list of selection queries (which may be
    plain invocation names) and returns the set of qualified solid names they
    select. Only top-level solids are supported.

    Query syntax examples:
    - "some_solid": select "some_solid" itself
    - "*some_solid": select "some_solid" and all ancestors (upstream dependencies)
    - "some_solid*": select "some_solid" and all descendants (downstream dependencies)
    - "*some_solid*": select "some_solid" and all of its ancestors and descendants
    - "+some_solid": select "some_solid" and its ancestors at 1 level up
    - "some_solid+++": select "some_solid" and its descendants within 3 levels down

    Note:
    - If one of the query clauses is invalid, we will skip that one and continue
      to parse the valid ones.

    Args:
        pipeline_def (PipelineDefinition): the pipeline to execute.
        solid_selection (List[str]): selection queries (including single solid
            names) to execute.

    Returns:
        FrozenSet[str]: deduplicated qualified solid names; raises when a
            clause selects nothing.
    """
    check.list_param(solid_selection, "solid_selection", of_type=str)
    dependency_graph = generate_dep_graph(pipeline_def)
    selected = set()
    for query in solid_selection:
        matches = clause_to_subset(dependency_graph, query)
        if not matches:
            raise DagsterInvalidSubsetError(
                "No qualified solids to execute found for solid_selection={requested}".format(
                    requested=solid_selection
                )
            )
        selected.update(matches)
    return frozenset(selected)
def _equal(v1, v2):
"""Same type as well."""
if isinstance(v2, float) and np.isinf(v2):
return True
if isinstance(v2, str):
v2 = th.string(v2)
return v1 == v2 | 0e5803da376019b93b6f3084cfb10fc0c36d873a | 3,632,242 |
def dec_prefix(value, restricted=True):
    """Get an appropriate decimal prefix for a number.

    :param value: the number
    :type value: int or float
    :param bool restricted: if ``True`` only integer powers of 1000 are used,
        i.e. *hecto, deca, deci, centi* are skipped
    :return: decimal prefix (the smallest considered prefix when the value is
        below every prefix factor; ``NO_PREFIX`` for zero)
    :rtype: Prefix
    :raises TypeError: if value is not of type int or float
    """
    check_type(value, (int, float), 'value')
    if value == 0:
        return NO_PREFIX
    magnitude = abs(value)
    non_thousand = ('hecto', 'deca', 'deci', 'centi')
    for prefix in DECIMAL_PREFIXES:
        if restricted and prefix.name in non_thousand:
            continue
        if magnitude / prefix.factor >= 1.0:
            return prefix
    # Value is smaller than every prefix factor: use the last one considered.
    return prefix
import datetime
def _download_coaching(
        loc_id: str,
        start_date: datetime.datetime,
        end_date: datetime.datetime = None,
        collection: str = "CoachingActionEntries",
        base = "prod",
        pipeline_name = "sleep_quality"):
    """Queries the database for given location id, source id and in given datarange.
    Returns the matching documents as a list.

    Note: the module previously did ``from datetime import datetime`` while the
    body called ``datetime.datetime.utcnow()``, which raises AttributeError;
    importing the module itself fixes both the call and the annotations.
    """
    if not end_date:
        # Default the range end to "now" in UTC.
        end_date = datetime.datetime.utcnow()
    assert start_date < end_date, "Start_date should be less than end_date."
    db = _get_db(base)
    coll = db[collection]
    results = coll.find({
        "LocationId": loc_id,
        "PipelineName": pipeline_name,
        "Timestamp": {"$gt": start_date.timestamp(),
                      "$lt": end_date.timestamp()
                      }
    })
    return list(results)
import random
def Make_Random(sents):
    """
    Make random parses (from LG-parser "any"), to use as baseline

    For each tokenized sentence in ``sents``, parses a same-length dummy
    sentence with the link-grammar "any" dictionary and picks one random
    linkage, returning its links as
    ``[[left_idx, left_token, right_idx, right_token], ...]`` per sentence.
    """
    any_dict = Dictionary('any') # Opens dictionary only once
    po = ParseOptions(min_null_count=0, max_null_count=999)
    po.linkage_limit = 100
    options = 0x00000000 | BIT_STRIP #| BIT_ULL_IN
    options |= BIT_CAPS
    random_parses = []
    for sent in sents:
        num_words = len(sent)
        curr_parse = []
        # subtitute words with numbers, as we only care about the parse tree
        fake_words = ["w{}".format(x) for x in range(1, num_words + 1)]
        # restore final dot to maintain --ignore functionality
        if sent[-1] == ".":
            fake_words[-1] = "."
        sent_string = " ".join(fake_words)
        sentence = Sentence(sent_string, any_dict, po)
        linkages = sentence.parse()
        num_parses = len(linkages) # check nbr of linkages in sentence
        if num_parses > 0:
            idx = random.randint(0, num_parses - 1) # choose a random linkage index
            linkage = Linkage(idx, sentence, po._obj) # get the random linkage
            # parse_postscript turns the linkage postscript into tokens + links
            tokens, links = parse_postscript(linkage.postscript().replace("\n", ""), options)
            for link in links:
                llink = link[0]
                rlink = link[1]
                curr_parse.append([str(llink), tokens[llink], str(rlink), tokens[rlink]])
            random_parses.append(curr_parse)
    return random_parses
def get_published_online_date(crossref_data):
    """
    This function pulls the published online date out of the crossref data and
    returns it as an arrow date object.

    :param crossref_data: parsed CrossRef API response (dict), or None
    :returns: arrow date object for published online date if it exists,
        otherwise None
    """
    published_online_date = None
    if crossref_data and crossref_data.get('status') == 'ok':
        published_online = crossref_data.get('message', {}).get('published-online')
        if published_online:
            # 'date-parts' is a list of [year, month, day] lists; it may be
            # absent or empty, so guard before indexing (the previous code
            # crashed with TypeError/IndexError in those cases).
            date_parts_lists = published_online.get('date-parts') or []
            online_date_parts = date_parts_lists[0] if date_parts_lists else None
            if not online_date_parts:
                online_date = None
            elif len(online_date_parts) >= 3:
                online_date = arrow.get(online_date_parts[0], online_date_parts[1], online_date_parts[2])
            elif len(online_date_parts) == 2:
                # Day missing: default to the first of the month.
                online_date = arrow.get(online_date_parts[0], online_date_parts[1], 1)
            else:
                online_date = None
            published_online_date = online_date
    return published_online_date
def ingredients_available(menu_item):
    """Accepts a dictionary of ingedients and checks if the machine has enough to make the drink"""
    recipe = MENU[menu_item]['ingredients']
    # First verify every ingredient is in stock; report the first shortage.
    for item, required in recipe.items():
        if resources[item] < required:
            print(f"There is not enough {item} to make your order.")
            return False
    # All available: deduct the recipe amounts from the machine's stock.
    for item, required in recipe.items():
        resources[item] -= required
    # for debugging
    print(resources)
    return True
from typing import OrderedDict
def pyvcf_calls_to_sample_info_list(calls):
    """
    Given pyvcf.model._Call instances, return a dict mapping each sample
    name to its per-sample info:
        sample name -> field -> value
    """
    mapping = OrderedDict()
    for call in calls:
        mapping[call.sample] = call.data._asdict()
    return mapping
def DesignPatch(Er, h, Freq):
    """
    Returns the patch_config parameters for standard lambda/2 rectangular microstrip patch. Patch length L and width W are calculated and returned together with supplied parameters Er and h.
    Returned values are in the same format as the global patchr_config variable, so can be assigned directly. The patchr_config variable is of the following form [Er,W,L,h].
    Usage: patchr_config=design_patchr(Er,h,Freq)
    Er.....Relative dielectric constant
    h......Substrate thickness (m)
    Freq...Frequency (Hz)
    e.g. patchr_config=design_patchr(3.43,0.7e-3,2e9)

    Returns (W, L, h, Er).
    """
    # (Removed: an unused vacuum-permittivity constant and a dead first
    # assignment of lambdag that was recomputed with Ereff below.)
    lambd = light_velocity / Freq
    W = (light_velocity / (2 * Freq)) * sqrt(2 / (Er + 1))
    # Effective dielectric constant for a microstrip line of width W on
    # dielectric material of constant Er.
    Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5
    # Length increase dL due to fringing fields at each end,
    # giving actual length L = Lambda/2 - 2*dL.
    F1 = (Ereff + 0.3) * (W / h + 0.264)
    F2 = (Ereff - 0.258) * (W / h + 0.8)
    dL = h * 0.412 * (F1 / F2)
    lambdag = lambd / sqrt(Ereff)
    L = (lambdag / 2) - 2 * dL
    print('Rectangular Microstrip Patch Design')
    print("Frequency (GHz): " + str(1e-9*Freq))
    print("Dielec Const, Er : " + str(Er))
    print("Patch Width,  W: " + str(W) + "m")
    print("Patch Length, L: " + str(L) + "m")
    print("Patch Height, h: " + str(h) + "m")
    return W, L, h, Er
from typing import Optional
def injection_file_name(
    science_case: str, num_injs_per_redshift_bin: int, task_id: Optional[int] = None
) -> str:
    """Build the file name (without path) for raw injection data.

    Args:
        science_case: Science case.
        num_injs_per_redshift_bin: Number of injections per redshift major bin.
        task_id: Task ID; when given, a ``_TASK_<id>`` suffix is inserted
            before the extension.
    """
    base_name = (
        f"injections_SCI-CASE_{science_case}"
        f"_INJS-PER-ZBIN_{num_injs_per_redshift_bin}.npy"
    )
    if task_id is None:
        return base_name
    return base_name.replace(".npy", f"_TASK_{task_id}.npy")
def focal_general_triplet_loss(embs, labels, minibatch_size, alpha=0.2):
    """
    Focal-weighted triplet loss over all same/different-class pairs.

    embs: 2-D tensor of embeddings, one row per minibatch element.
    labels: integer class labels, one per minibatch element.
    minibatch_size: depth used to one-hot encode the labels.
    alpha: triplet margin.

    NOTE: In order for this loss to work properly, it is prefered that
    labels contains several repetitions. In other word:
    len(np.unique(labels))!=len(labels)
    """
    classes = tf.one_hot(labels,depth=minibatch_size)
    # Classes matrix, inner product of the classes
    class_mat = tf.matmul(classes, tf.transpose(classes))
    # Create mask for the same and different classes
    # The class matrix is symetric, so only the upper/lower triangular matrix
    # is important.
    # Upper triangle without the diagonal: pairs of distinct elements that
    # share a class.
    mask_same = tf.matrix_band_part(class_mat - tf.matrix_band_part(class_mat,0,0), 0,-1)
    mask_same = tf.reshape(mask_same,[-1])
    # Upper triangle (incl. diagonal) of the complement: cross-class pairs.
    mask_diff = tf.matrix_band_part(1-class_mat, 0,-1)
    mask_diff = tf.reshape(mask_diff,[-1])
    # Predictions matrix, inner product of the predictions
    # Pairwise squared Euclidean distances between all embeddings.
    tiled_emb = tf.expand_dims(embs,0)
    dist_mat = tf.reduce_sum(tf.square(tiled_emb-tf.transpose(tiled_emb,(1,0,2))),axis=-1)
    dist_mat = tf.reshape(dist_mat,[-1])
    # Distances and loss
    # Broadcasting same-pair (row) against diff-pair (column) distances gives
    # every anchor-positive / anchor-negative combination.
    dist_same = tf.expand_dims(tf.boolean_mask(dist_mat,mask_same),0)
    dist_diff = tf.expand_dims(tf.boolean_mask(dist_mat,mask_diff),1)
    loss = dist_same - dist_diff + alpha
    eps = 1e-16
    # Normalize the margin loss into (0, 1), then sharpen with a steep
    # sigmoid centred at 0.6 before applying the focal weighting.
    rectified_loss = tf.clip_by_value(loss/(2+alpha+eps),eps,1-eps)
    rectified_loss = 1/(1+tf.exp(-10*(rectified_loss-0.6)))
    p = 1-rectified_loss
    # Focal term: -p^2 * log(p) down-weights easy (high-p) pairs.
    new_loss = - (tf.square(p)*tf.log(p))
    return tf.reduce_mean(new_loss)
def angle_between(v1, v2):
    """ Returns the angle in degrees between vectors 'v1' and 'v2'."""
    # Clip guards against floating-point drift pushing the cosine out of [-1, 1].
    cos_theta = np.dot(unit_vector(v1), unit_vector(v2))
    return np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0)))
def SUM(A: pd.DataFrame, n) -> pd.DataFrame:
    """Rolling sum over the last ``n`` observations (time series).

    Args:
        A (pd.DataFrame): factor data with multi-index
        n: window length in days (at least ``n // 2`` observations required)

    Returns:
        pd.DataFrame: rolling-sum data with multi-index
    """
    wide = pivot_table(A)
    rolled = wide.rolling(n, min_periods=int(n/2)).sum()
    return stack_table(rolled)
def load_properties(filepath, sep='=', comment_char='#'):
    """
    Read the file passed as parameter as a properties file.

    Blank lines and lines starting with ``comment_char`` are skipped; the
    value keeps any further ``sep`` characters and is stripped of
    surrounding double quotes.
    """
    props = {}
    with open(filepath, 'rt') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped or stripped.startswith(comment_char):
                continue
            # Split on the first separator only; the remainder is the value.
            key, _, remainder = stripped.partition(sep)
            props[key.strip()] = remainder.strip().strip('"')
    return props
import posixpath
try:
    from urllib import parse as urlparse  # Python 3
except ImportError:  # pragma: no cover - Python 2 fallback
    import urlparse
def resolveComponents(url):
    """
    Collapse '.' and '..' segments in a URL path.

    >>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
    'http://www.example.com/baz/bux/'
    >>> resolveComponents('http://www.example.com/some/path/../file.ext')
    'http://www.example.com/some/file.ext'
    """
    # posixpath (not os.path) so URL paths normalize identically on every OS;
    # the previous implementation returned early through os.path.normpath,
    # losing the trailing slash and leaving the code below unreachable.
    parsed = urlparse.urlparse(url)
    new_path = posixpath.normpath(parsed.path)
    if parsed.path.endswith('/'):
        # Compensate for issue1707768: normpath drops the trailing slash.
        new_path += '/'
    cleaned = parsed._replace(path=new_path)
    return cleaned.geturl()
def query_add(session, *objs):
    """Add every object in *objs* to *session*, commit once, and return *objs*."""
    for entity in objs:
        session.add(entity)
    session.commit()
    return objs
from typing import Iterator, TypeVar

# Element type of the stream.  (Previously imported as ``from re import T``,
# which is a regex flag, not a type variable.)
T = TypeVar("T")
def scale_streams(s: Stream[T], factor: T) -> Stream[T]:
    """
    Scale streams: lazily multiply every element of ``s`` by ``factor``.
    """
    def scale_generator(g: Iterator[T]) -> Iterator[T]:
        """
        Yield each element of ``g`` scaled by ``factor``.
        """
        # A plain loop replaces the previous per-element recursion, which
        # grew the call stack with stream length and, under PEP 479,
        # turned exhaustion (StopIteration) into a RuntimeError.
        for value in g:
            yield value * factor
    return make_stream(scale_generator(iter(s)))
def sd_title(bs4_object, target=None):
    """
    :param bs4_object: An object of class BeautifulSoup
    :param target: Target HTML tag attributes dict. Defaults to
        ``{"class": "title-text"}``.
    :return: Returns paper title text from Science Direct
    """
    selector = {"class": "title-text"} if target is None else target
    matches = bs4_object.find_all("span", selector)
    return matches[0].text
def generate_word_feat(sentence,
                       word_vocab_index,
                       word_max_size,
                       word_pad):
    """process words for sentence

    Splits the sentence on spaces, pads/truncates to ``word_max_size``
    tokens, maps tokens to vocabulary ids and returns a float tensor of
    shape ``[word_max_size, 1]``.

    sentence: scalar string tensor.
    word_vocab_index: TF lookup table from token string to id.
    word_max_size: fixed number of word slots per sentence.
    word_pad: padding token string used to fill short sentences.
    """
    sentence_words = tf.string_split([sentence], delimiter=' ').values
    # Append word_max_size pad tokens so truncation below always succeeds,
    # even for sentences shorter than word_max_size.
    sentence_words = tf.concat([sentence_words[:word_max_size],
        tf.constant(word_pad, shape=[word_max_size])], axis=0)
    sentence_words = tf.reshape(sentence_words[:word_max_size], shape=[word_max_size])
    # Lookup ids and cast to float for downstream layers.
    sentence_words = tf.cast(word_vocab_index.lookup(sentence_words), dtype=tf.float32)
    # Add a trailing channel dimension: [word_max_size] -> [word_max_size, 1].
    sentence_words = tf.expand_dims(sentence_words, axis=-1)
    return sentence_words
def split_formula(formula, net_names_list):
    """
    Splits the formula into two parts - the structured and unstructured part.

    Parameters
    ----------
    formula : string
        The formula to be split, e.g. '~ 1 + bs(x1, df=9) + dm1(x2, df=9)'.
    net_names_list : list of strings
        A list of all network names defined by the user.

    Returns
    -------
    structured_part : string
        The structured terms of the formula, re-joined with '+'.
    unstructured_terms : list of strings
        The unstructured (network) terms of the formula.
    """
    # Strip whitespace and the leading tilde before splitting into terms.
    cleaned = formula.replace(' ', '').replace('~', '')
    structured_terms = []
    unstructured_terms = []
    for term in cleaned.split('+'):
        # The name before the first '(' decides which bucket the term goes to.
        name = term.split('(', 1)[0]
        if name in net_names_list:
            unstructured_terms.append(term)
        else:
            structured_terms.append(term)
    return '+'.join(structured_terms), unstructured_terms
from sys import intern
def joinHostmask(nick, ident, host):
    """nick, user, host => hostmask

    Joins the nick, ident, host into a user hostmask. The result is
    interned because hostmasks are compared and hashed frequently.
    """
    assert nick and ident and host
    return intern(f'{nick}!{ident}@{host}')
from math import ceil
from struct import pack
def message(
    command=0,
    payload_size=0,
    data_type=0,
    data_count=0,
    parameter1=0,
    parameter2=0,
    payload=b"",
):
    """Assemble a Channel Access message datagram for network transmission.

    ``command`` may be a name (looked up in the module-level ``commands``
    table) or a numeric code. The payload is zero-padded to ``payload_size``
    (which defaults to the payload length rounded up to a multiple of 8)
    and truncated, with a warning, if it exceeds the 16-bit size field.
    """
    if type(command) == str:
        command = commands[command]
    assert data_type is not None
    assert data_count is not None
    assert parameter1 is not None
    assert parameter2 is not None
    if payload_size == 0 and len(payload) > 0:
        # Default the declared size to the payload length, rounded up to 8.
        payload_size = int(ceil(len(payload) / 8.0) * 8)
    # Zero-pad up to the declared size.
    payload = payload.ljust(payload_size, b"\0")
    max_payload_size = 2**16-8
    if payload_size > max_payload_size:
        warning("Truncating oversized payload of %s bytes (max allowed %d)"
            % (payload_size,max_payload_size))
        payload_size = max_payload_size
        payload = payload[0:payload_size]
    # 16-byte header: four 16-bit integers followed by two 32-bit integers,
    # all in big-endian byte order.
    header = pack(
        ">HHHHII", command, payload_size, data_type, data_count, parameter1, parameter2
    )
    return header + payload
def polynomial(x, degree=1, add_bias_coefs=False):
    """Expand an array with polynomial feature columns.

    Args:
        x (numpy.ndarray): 2-D input array whose columns are the base features.
        degree (int, optional): highest polynomial degree. Defaults to 1.
        add_bias_coefs (bool, optional): set True if you want to add bias
            coefficients to the input array. This will add a column to the
            input array containing only ones, equivalent to
            ``np.ones(x.shape[0])``. Defaults to False.

    Returns:
        Array
    """
    if degree > 1:
        base = x
        for i in range(2, degree + 1):
            # Raise the ORIGINAL columns to the i-th power. The previous
            # code exponentiated the already-augmented array, producing
            # wrong columns (e.g. x^6) and wrong shape for degree >= 3.
            x = np.hstack((x, base ** i))
    if add_bias_coefs:
        x = np.hstack((np.ones((x.shape[0], 1)), x))
    return x
def plot_energy_group_comparison(df: pd.DataFrame, reverse_axes: bool = False, size: float = 5.0) -> \
        sns.axisgrid.FacetGrid:
    """
    Plot energy level recovery for a group of conformation sets.

    :param df: DataFrame where the columns are Energy, Method, and Discovery. Energy is the energy level (kcal/mol),
        Method is a label for a sampler, and Discovery indicates whether or not a given Energy was found by a Method.
    :param reverse_axes: Whether or not to reverse axes, i.e., put Method on y-axis instead of x-axis.
    :param size: Plot marker size.
    :return: Seaborn FacetGrid.
    """
    # Swap which variable sits on each axis according to reverse_axes.
    if reverse_axes:
        axis_kwargs = {"x": "Energy (kcal/mol)", "y": "Method"}
    else:
        axis_kwargs = {"x": "Method", "y": "Energy (kcal/mol)"}
    return sns.catplot(hue="Discovery", kind="swarm", data=df, s=size, **axis_kwargs)
def get_summary_mapping(inputs_df, oed_hierarchy, is_fm_summary=False):
    """
    Create a DataFrame with linking information between Ktools `OasisFiles`
    And the Exposure data

    :param inputs_df: datafame from gul_inputs.get_gul_input_items(..) / il_inputs.get_il_input_items(..)
    :type inputs_df: pandas.DataFrame

    :param is_fm_summary: Indicates whether an FM summary mapping is required
    :type is_fm_summary: bool

    :return: Subset of columns from gul_inputs_df / il_inputs_df
    :rtype: pandas.DataFrame
    """
    # Case GUL+FM (based on il_inputs_df)
    if is_fm_summary:
        # One row per (gul_input_id, layer_id) pair from the xref data.
        summary_mapping = get_xref_df(inputs_df).drop_duplicates(subset=['gul_input_id', 'layer_id'], keep='first')
        summary_mapping['agg_id'] = summary_mapping['gul_input_id']
        summary_mapping = summary_mapping.reindex(sorted(summary_mapping.columns), axis=1)
        # output_id enumerates distinct (gul_input_id, layer_id) combinations.
        summary_mapping['output_id'] = factorize_ndarray(
            summary_mapping.loc[:, ['gul_input_id', 'layer_id']].values,
            col_idxs=range(2)
        )[0]
    # GUL Only
    else:
        summary_mapping = inputs_df.copy(deep=True)
        # GUL-only runs have a single implicit layer.
        summary_mapping['layer_id']=1
        summary_mapping['agg_id'] = summary_mapping['item_id']
    # Keep only the columns relevant to summary reporting.
    summary_mapping.drop(
        [c for c in summary_mapping.columns if c not in get_usefull_summary_cols(oed_hierarchy)],
        axis=1,
        inplace=True
    )
    # OED hierarchy column names (lower-cased to match the dataframe).
    acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
    loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
    policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()
    portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
    # Normalize dtypes for downstream joins and file generation.
    dtypes = {
        **{t: 'str' for t in [portfolio_num, policy_num, acc_num, loc_num, 'peril_id']},
        **{t: 'uint8' for t in ['coverage_type_id']},
        **{t: 'uint32' for t in [SOURCE_IDX['loc'], SOURCE_IDX['acc'], 'loc_id', 'item_id', 'layer_id', 'coverage_id', 'agg_id', 'output_id']},
        **{t: 'float64' for t in ['tiv']}
    }
    summary_mapping = set_dataframe_column_dtypes(summary_mapping, dtypes)
    return summary_mapping
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary after continued application of the constraint strategies
        no longer produces any changes, or False if the puzzle is unsolvable
    """
    while True:
        # Count boxes with a single determined value before applying strategies.
        solved_before = sum(1 for box in values if len(values[box]) == 1)
        # Apply each constraint-propagation strategy in turn.
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_after = sum(1 for box in values if len(values[box]) == 1)
        # Sanity check: a box with no candidates means the puzzle is unsolvable.
        if any(len(candidates) == 0 for candidates in values.values()):
            return False
        # Stop once a full pass makes no progress.
        if solved_before == solved_after:
            return values
def run_tests():
    """Run test suite.

    Activates the project's virtualenv, changes to the repo and runs nose.
    """
    with virtualenv("benlew.is"), cd('~/repos/me'):
        return run("nosetests")
from typing import Dict
import requests
def ls(  # pylint: disable=invalid-name
    url: str, resource_type: str, headers: Dict[str, str]
) -> requests.Response:
    """
    Get a list of all of the resources of a certain type.
    """
    return requests.get(
        generate_resource_url(url, resource_type), headers=headers
    )
def uniform(iterable):
    """
    Returns a random variable that takes each value in `iterable` with equal
    probability.
    """
    # Materialize once so generators are not exhausted mid-construction.
    values = tuple(iterable)
    return RandomVariable(dict.fromkeys(values, 1))
def remove_id3v2_footer( data ):
    """Remove ID3v2 footer tag if present

    Scans backwards from 10 bytes before the end of ``data`` for an "ID3"
    marker and, if found, splices out the tag whose length is decoded from a
    synchsafe size field.

    NOTE(review): the size is decoded from ``data[6:10]`` (offsets relative
    to the start of the buffer, not to ``pos``), and the v2.4 footer magic is
    "3DI" rather than "ID3" — presumably intentional for this caller's data
    layout, but worth confirming against the ID3v2 spec.
    """
    pos = len( data ) - 10
    while pos > 0:
        # Candidate tag: "ID3" magic with a zero at the minor-version byte.
        if data[pos:pos+3] == b'ID3' and data[pos+4] == 0:
            # Major version 2.2 / 2.3: tag is header (10) + synchsafe size.
            if data[pos+3] == 2 or data[pos+3] == 3:
                return data[:pos] + data[pos+decode_synchsafe_int( data[6:10] )+10:]
            elif data[pos+3] == 4:
                # v2.4: flag bit 0x10 signals a 10-byte footer after the tag.
                if data[pos+5] & 0x10:
                    return data[:pos] + data[pos+decode_synchsafe_int( data[6:10] )+20:]
                else:
                    return data[:pos] + data[pos+decode_synchsafe_int( data[6:10] )+10:]
        pos -= 1
    # No footer found: return the data unchanged.
    return data
def get_ilorest_client(oneview_client, server_hardware):
    """Generate an instance of the iLORest library client.

    :param oneview_client: an instance of a python-hpOneView
    :param: server_hardware: a server hardware uuid or uri
    :returns: an instance of the iLORest client
    :raises: InvalidParameterValue if mandatory information is missing on the
             node or on invalid input.
    """
    console_url = oneview_client.server_hardware.get_remote_console_url(
        server_hardware
    )
    # The remote-console URL embeds the iLO host and a single-use session token.
    host_ip, ilo_token = _get_ilo_access(console_url)
    return redfish.rest_client(
        base_url="https://%s:%s" % (host_ip, ILOREST_BASE_PORT),
        sessionkey=ilo_token,
    )
def get_metric(metric):
    """Resolve a metric identifier to a metric instance.

    Arguments:
        metric: str or classicML.metrics.Metric instance.

    Raises:
        AttributeError: when the argument is neither a recognised name
            nor a Metric instance.
    """
    if isinstance(metric, str):
        known_metrics = {
            'binary_accuracy': metrics.BinaryAccuracy,
            'categorical_accuracy': metrics.CategoricalAccuracy,
            'accuracy': metrics.Accuracy,
        }
        if metric in known_metrics:
            return known_metrics[metric]()
        # Unknown metric names fall through and return None, matching the
        # original behaviour.
    elif isinstance(metric, metrics.Metric):
        return metric
    else:
        CLASSICML_LOGGER.error('评估函数调用错误')
        raise AttributeError
import numpy
def compute_ld(chromosome, position, genotype_name, N=20):
    """
    Return the top-N SNP positions in highest LD with a target SNP.

    Reads the HDF5 genotype file ``<genotype_name>.hdf5``, locates the SNP at
    ``position`` on ``chromosome``, computes the pairwise LD correlation r
    against every SNP in a +/-250-SNP window, and returns the positions and r
    values sorted by decreasing r, truncated to N.

    :param chromosome: chromosome identifier (matched as "Chr<chromosome>")
    :param position: base-pair position of the SNP of interest
    :param genotype_name: genotype file name without the ".hdf5" suffix
    :param N: number of top LD SNPs to return (default 20)
    :returns: (ordered_positions, ordered_ld) lists of equal length <= N
    :raises FileNotFoundError: when the genotype file cannot be opened
    :raises ValueError: when the chromosome or position is not in the file
    """
    # Load hdf5 genotype file:
    try:
        genotype_file = h5py.File(genotype_name + ".hdf5", 'r')
    except OSError:
        # Narrowed from a bare except so programming errors still propagate.
        raise FileNotFoundError("Impossible to find the appropriate genotype ({})".format(genotype_name))
    # Get SNP position in file
    h5gen = genotype_file['Genotype']
    n_snps = len(h5gen['chr_index'])
    # Find chromosome sub-portion:
    started = False
    completed = False
    chr_string = "Chr{}".format(chromosome)
    for idx, c in enumerate(h5gen['chr_index']):
        if c == numpy.bytes_(chr_string):
            if not started:
                started = True
                start_idx = idx
            continue
        if started:
            end_idx = idx
            completed = True
            break
    if not completed:
        raise ValueError("No values matching chromosome {} in genotype {}".format(chromosome, genotype_name))
    found = False
    for idx, c in enumerate(h5gen['position_index'][start_idx:end_idx]):
        if c == position:
            snp_idx = idx
            found = True
            break
    if not found:
        raise ValueError("No values matching the position {} in chromosome {} on genotype {}".format(position,chromosome,genotype_name))
    # Window of up to 250 SNPs either side, clipped to the chromosome bounds.
    idx_window = [max(snp_idx - 250, start_idx), min(snp_idx + 251, end_idx)]
    # Retrieve genotype data for SNPs in window !!! FOR NOW ALL SAMPLES ARE CONSIDERED!!! IF WE WANT TO ADD ONLY SPECIFIC SAMPLES, WE NEED TO STORE THE SAMPLE LIST (IDS) ASSOCIATED WITH A STUDY SOMEWHERE...
    # Alphabetic raw calls (A/C/G/T) need encoding; numeric calls are used as-is.
    if h5gen['raw'][:, snp_idx][0].decode('UTF-8').isalpha():
        transform = True
    else:
        transform = False
    genotype_data_dict = dict()
    freq_dict = dict()
    snp_positions = []
    # Genotype is stored in its encoded form (0,1, no 2 because all samples are homozygous) in a dictionary
    for idx in range(idx_window[0], idx_window[1]):
        snp_positions.append(h5gen['position_index'][idx])
        if transform:
            gen_str = ""
            acgt = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
            for a in h5gen['raw'][:, idx]:
                acgt[a.decode('UTF-8').upper()] += 1
                gen_str += a.decode('UTF-8').upper()
            # Find major and minor alleles
            sorted_acgt = sorted(acgt.items(), key=lambda x: x[1])
            if sorted_acgt[1][1] != 0:
                raise Warning("Three or more alleles")
            maj_a = sorted_acgt[3][0]
            min_a = sorted_acgt[2][0]
            # Save the minor allele frequencies
            freq_dict[h5gen['position_index'][idx]] = sorted_acgt[2][1] / len(h5gen['raw'][:, idx])
            genotype_encoded = numpy.zeros(len(h5gen['raw'][:, idx]))
            for string_idx, a in enumerate(gen_str):
                if a == min_a:
                    genotype_encoded[string_idx] = 1
        else:
            genotype_encoded = []
            for a in h5gen['raw'][:, idx]:
                genotype_encoded.append(int(a.decode('UTF-8')))
        genotype_data_dict[h5gen['position_index'][idx]] = genotype_encoded
    # Compute correlation vector against the target SNP.
    n_typed_snps = idx_window[1] - idx_window[0]
    ld_vector = []
    # Need to add some filtering for low freq
    # Estimate sigma_tt
    main_snp_pos = h5gen['position_index'][snp_idx]
    pi = freq_dict[main_snp_pos]
    for position_index in snp_positions:
        pj = freq_dict[position_index]
        pij = 0.
        n_samples = len(genotype_data_dict[main_snp_pos])
        for sample_idx in range(n_samples):
            # BUGFIX: index into the per-sample genotype vectors. The previous
            # code compared the whole list to 1, which is always False, so
            # pij (and therefore r) was always computed from pij == 0.
            if (genotype_data_dict[position_index][sample_idx] == 1
                    and genotype_data_dict[main_snp_pos][sample_idx] == 1):
                pij += 1
        pij = pij / n_samples
        # Pearson correlation between the two 0/1 genotype vectors.
        r = (pij - pi * pj) / numpy.sqrt(pi * (1.0 - pi) * pj * (1.0 - pj))
        ld_vector.append(r)
    # Sort highest values
    sorted_lists = reversed(sorted(zip(ld_vector, snp_positions)))
    ordered_ld = []
    ordered_positions = []
    for i in sorted_lists:
        ordered_ld.append(i[0])
        ordered_positions.append(i[1])
    ordered_ld = ordered_ld[:N]
    ordered_positions = ordered_positions[:N]
    # Return ordered lists
    return ordered_positions, ordered_ld
def load_user(userid):
    """Flask-Login ``user_loader`` callback.

    Flask-Login keeps only the user id in the session; on every request it
    calls this function to turn that id back into a ``User`` object (or
    ``None`` when the id is unknown).  The returned object is exposed as
    ``current_user`` for the duration of the request.
    """
    user = User.get(userid)
    return user
def flow_read(input_file, format=None):
    """Read an optical flow field from a file.

    Parameters
    ----------
    input_file: {str, pathlib.Path, file}
        Path of the file to read or file object.
    format: str, optional
        Format the flow is stored in, accepted formats: "png" or "flo".
        If None, it is guessed from the file extension.

    Returns
    -------
    flow: numpy.ndarray
        3D flow in the HWF (Height, Width, Flow) layout.
        flow[..., 0] is the x-displacement and flow[..., 1] is the
        y-displacement.  Invalid/occluded values are converted to NaN.

    Notes
    -----
    The flo format is dedicated to optical flow and was first used in the
    Middlebury optical flow database
    (http://vision.middlebury.edu/flow/code/flow-code/flowIO.cpp).
    The png format stores the flow in a 16-bit RGB png, as introduced by
    the KITTI Vision Benchmark Suite
    (http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow).
    Both formats can mark invalid values (e.g. occlusions), which the
    readers convert to NaN.

    See Also
    --------
    flow_write
    """
    fmt = guess_extension(input_file, override=format)
    # Dispatch to the concrete reader for the detected container format.
    reader = flow_read_png if fmt == "png" else flow_read_flo
    with FileManager(input_file, "rb") as handle:
        return reader(handle)
from itertools import combinations
from typing import List
from functools import reduce
from operator import mul


def max_triple_product_bare_bones(nums: List[int]) -> int:
    """Return the largest product of any three numbers in the list.

    A bare-bones O(n^3) reference implementation: every 3-combination is
    evaluated and the maximum product is returned.  Negative numbers are
    handled correctly (two negatives times the largest positive may win).

    :param nums: the list of numbers (at least three items)
    :return: the highest product of any three distinct elements
    :raises ValueError: if ``nums`` contains fewer than three elements
        (same exception type the original raised via ``max()`` on an
        empty sequence, now with a clearer message)
    """
    if len(nums) < 3:
        raise ValueError("need at least three numbers")
    # Generator expression instead of a materialized list: same result,
    # no throwaway list of all C(n, 3) products.
    return max(reduce(mul, triple, 1) for triple in combinations(nums, 3))
def is_number(s: str):
    """
    Args:
        s: (str) string to test if it can be converted into float
    Returns:
        True or False
    """
    # EAFP: attempt the conversion and let ValueError signal failure.
    try:
        float(s)
    except ValueError:
        return False
    return True
def getProxyVirtualHostConfig( nodename, proxyname,):
    """Gets or creates a ProxyVirtualHostConfig object.

    Resolves the proxy server identified by nodename/proxyname via
    AdminConfig and returns the config id of a ProxyVirtualHostConfig,
    creating an empty one under the proxy server when none exists.
    (wsadmin/Jython script; AdminConfig, sop and emptyString come from
    the surrounding scripting environment.)
    """
    m = "getProxyVirtualHostConfig:"  # prefix for sop() trace messages
    sop(m,"Entry. nodename=%s proxyname=%s" % ( nodename, proxyname, ))
    # Config id of the proxy server inside the given node.
    proxy_id = AdminConfig.getid( '/Node:%s/Server:%s' % ( nodename, proxyname ) )
    sop(m,"proxy_id=%s" % ( proxy_id, ))
    # NOTE(review): this lookup is cell-wide, not scoped to proxy_id — it
    # appears to assume at most one ProxyVirtualHostConfig exists; confirm.
    pvhc_id = AdminConfig.getid('/ProxyVirtualHostConfig:/')
    sop(m,"pvhc_id=%s" % ( pvhc_id, ))
    if emptyString(pvhc_id):
        # No existing object: create an empty one under the proxy server.
        #sopAdminConfigCreate( 'ProxyVirtualHostConfig', proxy_id, [] )
        pvhc_id = AdminConfig.create( 'ProxyVirtualHostConfig', proxy_id, [] )
        sop(m,"Created new ProxyVirtualHostConfig object. pvhc_id=%s" % ( pvhc_id, ))
    else:
        sop(m,"Referenced existing ProxyVirtualHostConfig object. pvhc_id=%s" % ( pvhc_id, ))
    sop(m,"Exit. Returning pvhc_id=%s" % ( pvhc_id ))
    return pvhc_id
def relu6(name=None, collect=False):
    """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

    Args:
        name: operation name.
        collect: whether to collect this metric under the metric collection.
    """
    # Delegate wrapping/collection to the shared activation builder.
    activation_fn = tf.nn.relu6
    return built_activation(activation_fn, name, collect)
def announcements(soup):
    """
    ** Announcements Tab**
    """
    # Best-effort scrape: any failure is reported as (False, [message])
    # rather than raised, matching the other tab handlers.
    try:
        container = soup.find('div', {'class':'ex1'})
        links = container.find_all('a')
        return True, collection(links)
    except Exception as e:
        return False, [str(e)]
import requests


def analyze_comments_page(username, repo, per_page, page, print_comments, print_stage_results):
    """
    Analyzes one page of GitHub comments. Helping function.
    Parameters
    ----------
    username : str
        The GitHub alias of the repository owner
    repo : str
        The GitHub repository name
    per_page : int
        The number of comments on the page (from 0 to 100)
    page : int
        The page number of the results to fetch
    print_comments : bool
        If True, each fetched comment and its analysis will be printed
    print_stage_results : bool
        If True, final statistics of the analyzed comments will be printed in the end
    Returns
    -------
    total : int
        The number of comments fetched (if number of comments on the page is less than per_page parameter all the available comments will be processed and their number will be returned. Else, equal to per_page)
    pos : int
        The number of positive comments fetched
    neg : int
        The number of negative comments fetched
    neut : int
        The number of neutral comments fetched
    """
    total = 0
    pos = 0
    neg = 0
    neut = 0
    print("Processing page #"+str(page)+"...\n")
    # Fetch one page of issue comments from the (unauthenticated) GitHub
    # REST API; rate limits apply but are not handled here.
    query = {'per_page': per_page, 'page': page}
    resp = requests.get("https://api.github.com/repos/" +
                        username+"/"+repo+"/issues/comments", params=query)
    comments = resp.json()
    for comment in comments:
        total = total+1
        if print_comments:
            print(str(total) + '. ' + comment.get("body"))
        # Classify the comment body with the text-processing.com sentiment
        # API; its response carries a "label" field ('pos', 'neg', or other).
        query = {'text': comment.get("body")}
        response = requests.post(
            "http://text-processing.com/api/sentiment/", data=query)
        if print_comments:
            print(response.json())
            print('\n')
        sentiment = response.json().get("label")
        if sentiment == 'pos':
            pos = pos+1
        elif sentiment == 'neg':
            neg = neg+1
        else:
            # Any label other than 'pos'/'neg' (including missing) counts
            # as neutral.
            neut = neut+1
    if print_stage_results:
        print('Processed: '+str(total))
        print('Positive comments: '+str(pos))
        print('Negative comments: '+str(neg))
        print('Neutral comments: '+str(neut))
    return total, pos, neg, neut
def get_remotes(y, x):
    """
    For a given pair of ``y`` (tech) and ``x`` (location), return
    ``(y_remote, x_remote)``, a tuple giving the corresponding indices
    of the remote location a transmission technology is connected to.
    Example: for ``(y, x) = ('hvdc:region_2', 'region_1')``,
    returns ``('hvdc:region_1', 'region_2')``
    """
    # ``y`` is "<tech>:<remote location>"; split once instead of twice.
    parts = y.split(':')
    remote_y = parts[0] + ':' + x
    remote_x = parts[1]
    return (remote_y, remote_x)
from typing import List


def ingrid(x: float, y: float, subgrid: List[int]) -> bool:
    """Check if position (x, y) is in a subgrid"""
    i0, i1, j0, j1 = subgrid
    # Bounds are inclusive of i0/j0 and of i1-1/j1-1.  `&` is kept (rather
    # than `and`) so the check also works element-wise on array inputs.
    in_x = (i0 <= x) & (x <= i1 - 1)
    in_y = (j0 <= y) & (y <= j1 - 1)
    return in_x & in_y
from typing import Union
from pathlib import Path
from typing import Any
import json


def load_jsonl(path: Union[Path, str]) -> list[dict[str, Any]]:
    """ Load from jsonl.

    Args:
        path: path to the jsonl file
    """
    # One JSON document per line; decode each line independently.
    lines = Path(path).read_text().splitlines()
    return [json.loads(line) for line in lines]
def getComUser(userId):
    """Invoke the mapper that fetches user information.

    Catches the service layer's exception and handles it by aborting
    the request with HTTP 500.

    :param userId: user data ID
    :return: the user record returned by the mapper
    """
    try:
        result = __selectUser(userId)
        return result
    except OperationalError:
        # Database-level failure (e.g. lost connection): surface as HTTP 500.
        abort(500)
def to_rgb_array(image):
    """Convert a CARLA raw image to a RGB numpy array."""
    bgra = to_bgra_array(image)
    # Drop the alpha channel and reverse B,G,R -> R,G,B in a single slice
    # (equivalent to [:, :, :3][:, :, ::-1]).
    return bgra[:, :, 2::-1]
import subprocess
import re


def git_version():
    """ Get the full and python standardized version from Git tags (if possible)

    Returns:
        (full_version, std_version): the raw `git describe --dirty` output
        and a PEP-440-style "major.minor.patch.post<build>" string, or
        (None, None) when the version cannot be determined (not a Git
        checkout, git unavailable, or unparsable tag).
    """
    try:
        # Full version includes the Git commit hash
        full_version = subprocess.check_output('git describe --dirty', shell=True).decode("utf-8").strip(" \n")
        # Python standardized version in form major.minor.patch.post<build>
        version_regex = re.compile(r"v?(\d+\.\d+(\.\d+)?(-\d+)?).*")
        match = version_regex.match(full_version)
        if match:
            std_version = match.group(1).replace("-", ".post")
        else:
            raise RuntimeError("Failed to parse version string %s" % full_version)
        return full_version, std_version
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.  Any other failure means we may not be in a
        # Git repo at all; signal that with (None, None).
        return None, None
import typing


def extract_features(data: typing.Union[list, np.ndarray],
                     attributes: list = None,
                     nvd_attributes: list = None,
                     nltk_feed_attributes: list = None,
                     share_hooks=True,
                     **kwargs):
    """Extract data by fitting the extraction pipeline.

    :param data: input data to the pipeline
    :param attributes: list, attributes for NLTKPreprocessor
        List of attributes which will be extracted from NVD and passed to NLTK
        preprocessor.
    :param nvd_attributes: list, attributes to output by NVDPreprocessor
        The attributes are outputed by NVDPreprocessor and passed
        to FeatureExtractor.
        By default same as `attributes`.
    :param nltk_feed_attributes: list, attributes for NLTKPreprocessor
        List of attributes which will be fed to NLTKPreprocessor.
        By default same as `attributes`.
    :param share_hooks: bool, whether to reuse hooks
    :param kwargs: optional, key word arguments
        :feature_hooks: list of feature hooks to be used for feature extraction

    :raises ValueError: when none of the three attribute lists is provided

    :returns: ndarray, featureset
    """
    # At least one attribute list is required; the others fall back to
    # `attributes` below.
    if not any([attributes, nvd_attributes, nltk_feed_attributes]):
        raise ValueError("No attributes were provided.")
    # Optional feature hooks are forwarded to the pipeline factory.
    feature_hooks = kwargs.get('feature_hooks', None)
    extraction_pipeline = get_extraction_pipeline(
        feature_hooks=feature_hooks,
        share_hooks=share_hooks
    )
    # Double-underscore kwargs route each argument to the named pipeline step.
    featureset = extraction_pipeline.fit_transform(
        data,
        # it is important not to filter the data by the handler here
        nvd_feed_preprocessor__attributes=nvd_attributes or attributes,
        nvd_feed_preprocessor__use_filter=False,
        nltk_preprocessor__feed_attributes=nltk_feed_attributes or attributes,
        nltk_preprocessor__output_attributes=nvd_attributes
    )
    return featureset
import os
import json


def load_json(path, api_version=None):
    """
    Loads JSON to the given path, ignoring any C-style comments.
    """
    path = os.path.abspath(path)
    with open(path, 'r') as fd:
        raw = fd.read()
    # Strip C-style comments before handing the text to the JSON parser.
    document = json.loads(minify_json.json_minify(raw))
    if api_version is not None:
        # Guard clauses: missing, too-old, or too-new version each abort.
        if 'version' not in document:
            raise RuntimeError(
                "No version specified in {0}.".format(path))
        if document['version'] < api_version:
            raise RuntimeError(
                "{0} is stored in an old file format. Run "
                "`asv update` to update it.".format(path))
        if document['version'] > api_version:
            raise RuntimeError(
                "{0} is stored in a format that is newer than "
                "what this version of asv understands. Update "
                "asv to use this file.".format(path))
        del document['version']
    return document
def wrap(get_io_helper_func):
    """A decorator that takes one argument. The argument should be an instance
    of the helper class returned by new_helper(). This decorator wraps a method
    so that is may perform asynchronous IO using the helper instance. The
    method being wrapped should take a keyword argument 'io_helper' which will
    be set to the helper instance passed in."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            # Only build a helper when the caller did not supply one
            # (lazy: get_io_helper_func() must not run otherwise).
            if "io_helper" not in kwargs:
                kwargs["io_helper"] = get_io_helper_func()
            io_helper = kwargs["io_helper"]
            # The wrapped function is a generator; park it on the helper
            # and kick off its first step.
            io_helper._generator = func(*args, **kwargs)
            io_helper._next()
        wrapper.__doc__ = func.__doc__
        wrapper.__name__ = func.__name__
        return wrapper
    return decorator
def loads(fn, sdata=None):
    """
    Load compressed pickle

    Reads a gzip-compressed cPickle file.  (Python 2 code: note the
    `print` statements and the `cPickle` module.)

    :param fn: path to the gzipped pickle file
    :param sdata: optional dict-like store (presumably a shared-memory
        proxy — confirm with callers); when given, the loaded data is
        stored under key `fn` and 0 is returned instead of the data
    :return: the unpickled data, or 0 when `sdata` was supplied
    """
    print " loading", fn
    fhd = gzip.open(fn, 'rb')
    print " loading", fn, 'opened'
    data = cPickle.load( fhd )
    print " loading", fn, 'loaded'
    fhd.close()
    #print " loading", fn, 'closed', len(data), data.keys()
    if sdata is not None:
        # Publish into the shared store instead of returning the payload.
        print " saving", fn,"in shared memory"
        sdata[fn] = data
        print " saved", fn,"in shared memory"
    else:
        print " returning", fn
        return data
    print " returning", fn
    return 0
from typing import Set
import os
import shutil
def copy_to_build_dir(variables: dict) -> Set[int]:
    """
    Copy files from the source directory and paste
    them in the destination directory.
    Args:
        `variables` dict keys used:
            workspace_path: The path of the workspace we are working within (absolute).
            build_path: The path of the build directory (Relative to the workspace or absolute).
            code_path: The path to the code directory.
                       (Relative to the build directory or absolute).
            build_name: (str)
                Unique name for the build.
    Returns:
        Set[int]:
            ``{0}`` if the copy succeeded, ``{1}`` if any step failed
            (the exception is logged, nothing is raised to the caller).
    """
    try:
        LOGGER.info("UUID (%s) assigned to packager build for code %s",
                    variables['build_name'], variables['code_path'])
        full_build_path = join(
            variables['workspace_path'],
            variables['build_path'],
            variables['build_name']
        )
        # First remove the build directory if it exists
        if os.path.exists(full_build_path):
            shutil.rmtree(full_build_path)
        # Then copy the files over to the build directory.
        shutil.copytree(
            join(variables['workspace_path'], variables['code_path']),
            full_build_path,
            ignore=shutil.ignore_patterns('*.zip')
        )
        return {0}
    except Exception:  # pylint: disable=broad-except
        # Deliberate catch-all: a build-copy failure is reported via the
        # error-code set rather than propagated.
        LOGGER.exception("Copying files to build directory failed:")
        return {1}
def internal_superset_url():
    """The URL under which the Superset instance can be reached from mara (usually circumventing SSOs etc.)"""
    url = 'http://localhost:8088'
    return url
import os
import tempfile


def temp():
    """
    Create a temporary file

    Returns
    -------
    str
        Path of temporary file
    """
    handle, name = tempfile.mkstemp()
    # mkstemp returns an open OS-level file descriptor that the caller is
    # responsible for; close it here so each call no longer leaks an fd.
    os.close(handle)
    return name
def ignoreNamePath(path):
    """
    Build an ignore callback for shutil.copytree.

    :param path: list of name suffixes to ignore; '.idea', '.git' and
        '.pyc' are always added.  The caller's list is NOT modified
        (the previous `path += [...]` mutated the argument in place).
    :return: a callable (directory, contents) -> list of entries to skip,
        suitable for shutil.copytree's `ignore` argument.
    """
    # Build a fresh tuple instead of extending the caller's list in place.
    suffixes = tuple(path) + ('.idea', '.git', '.pyc')

    def ignoref(directory, contents):
        # str.endswith accepts a tuple of suffixes: one call per entry.
        return [f for f in contents if f.endswith(suffixes)]
    return ignoref
import numpy as np


def cos_np(data1, data2):
    """Cosine similarity between all row pairs of two matrices.

    :param data1: array of shape (n1, d)
    :param data2: array of shape (n2, d); it is transposed internally, so
        pass the simpler/smaller matrix second to save time.
    :return: array of shape (n1, n2) where entry (i, j) is the cosine
        similarity between data1[i] and data2[j]
    """
    print("warning: the second matrix will be transposed, so try to put the simpler matrix as the second argument in order to save time.")
    dotted = np.dot(data1, np.transpose(data2))
    norm1 = np.linalg.norm(data1, axis=1)
    norm2 = np.linalg.norm(data2, axis=1)
    # Bug fix: pair the norms with np.outer so entry (i, j) is divided by
    # ||data1[i]|| * ||data2[j]||.  The previous element-wise multiply
    # divided column j by norm1[j] * norm2[j] (wrong values) and raised a
    # broadcasting error whenever the two matrices had different row counts.
    matrix_vector_norms = np.outer(norm1, norm2)
    neighbors = np.divide(dotted, matrix_vector_norms)
    return neighbors
import logging
def log_scale_dataset(df, add_small_value=1, set_NaNs_to=-10):
    """
    Takes the log10 of a DF + a small value (to prevent -infs),
    and replaces NaN values with a predetermined value.
    Adds the new columns to the dataset, and renames the original ones.

    :param df: pandas DataFrame; modified in place and also returned
    :param add_small_value: offset added before log10 to avoid log(0)
    :param set_NaNs_to: fill value for NaNs produced by the transform
    :return: the same DataFrame, with LOG10_* / POSIX_LOG10_* columns
        added and the originals renamed to RAW_* / POSIX_RAW_*
    """
    number_columns = get_number_columns(df)
    # Skip percentage columns; only absolute-valued columns are log-scaled.
    columns = [x for x in number_columns if "perc" not in x.lower()]
    logging.info("Applying log10() to the columns {}".format(columns))
    for c in columns:
        if c == 'runtime' or c == 'nprocs':
            # Non-POSIX columns get a LOG10_/RAW_ prefix...
            df["LOG10_" + c] = np.log10(df[c] + add_small_value).fillna(value=set_NaNs_to)
            df.rename(columns={c: "RAW_" + c}, inplace=True)
        else:
            # ...while POSIX counters embed the marker inside the name.
            df[c.replace("POSIX", "POSIX_LOG10")] = np.log10(df[c] + add_small_value).fillna(value=set_NaNs_to)
            df.rename(columns={c: c.replace("POSIX", "POSIX_RAW")}, inplace=True)
    return df
def isoparse(s: str) -> tzdatetime:
    """
    Parses timestamps formatted like 2020-05-01T10:32:02.925961Z
    """
    # TODO could use dateutil? but it's quite slow as far as I remember..
    # TODO support non-utc.. somehow?
    assert s.endswith('Z'), s
    # Replace the trailing 'Z' with an explicit UTC offset so the
    # fromisoformat-style parser accepts it.
    normalized = s[:-1] + '+00:00'
    return fromisoformat(normalized)
def ul_model_evaluation(classifier, train_set, test_set, attack_set, beta=20):
    """
    Evaluates performance of supervised and unsupervised learning algorithms

    :param classifier: fitted model exposing .predict(); predictions equal
        to 1 are treated as "benign", anything else as "attack"
        (one-class classifier convention)
    :param train_set: not used by this function — presumably kept so all
        evaluation helpers share a signature; confirm with callers
    :param test_set: benign evaluation samples (ground truth +1)
    :param attack_set: attack/outlier samples (ground truth -1)
    :param beta: beta of the F-beta score; the large default (20) weights
        recall far more heavily than precision
    :return: tuple (f_beta, tnr, tpr_test): F-beta over all samples,
        fraction of attacks flagged, fraction of benign samples accepted
    """
    # Predicted labels: 1 = benign, anything else = attack.
    y_pred_test = classifier.predict(test_set).astype(float)
    y_pred_outliers = classifier.predict(attack_set).astype(float)
    n_accurate_test = y_pred_test[y_pred_test == 1].size
    n_accurate_outliers = y_pred_outliers[y_pred_outliers != 1].size
    # F-beta over concatenated ground truth: +1 for benign, -1 for attack.
    f_beta = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]),
                                         -1 * np.ones(y_pred_outliers.shape[0])]),
                         np.concatenate([y_pred_test, y_pred_outliers]),
                         beta=beta,
                         pos_label=1)
    tnr = n_accurate_outliers / attack_set.shape[0]
    tpr_test = n_accurate_test / test_set.shape[0]
    return f_beta, tnr, tpr_test
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.