| content | sha1 | id |
|---|---|---|
import time
def acquire_lease(lease_id, client_id, ttl=DEFAULT_LOCK_DURATION_SECONDS, timeout=DEFAULT_ACQUIRE_TIMEOUT_SECONDS):
"""
    Try to acquire the lease. Returns the lease object on success, or None on failure.
    The timeout is crudely implemented with retries and backoffs, so it is not precise.
    :param lease_id: the lease name to acquire
    :param client_id: the id used to set the lease to the caller's identity
    :param ttl: lease duration in seconds
    :param timeout: int (seconds) to keep retrying and waiting before giving up.
:return: expiration time of the acquired lease or None if not acquired
"""
logger.debug('Attempting to acquire lease {} for {}'.format(lease_id, client_id))
t = 0
while t < timeout:
try:
with session_scope() as db:
                ent = db.query(Lease).with_for_update(nowait=False, of=Lease).get(lease_id)
if not ent:
raise KeyError(lease_id)
if ent.do_acquire(client_id, duration_sec=ttl):
return ent.to_json()
except Exception as e:
if not is_lock_acquisition_error(e):
logger.exception('Unexpected exception during lease acquire. Will retry')
logger.debug('Retrying acquire of lease {} for {}'.format(lease_id, client_id))
t += 1
time.sleep(1)
else:
# Exceeded retry count, so failed to get lease
logger.info('Failed to get lease {} for {} after {} retries'.format(lease_id, client_id, t))
return None
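# Hedged usage sketch (assumes the leases table already holds a row named
# 'gc-task' and that a matching `release_lease` helper exists in this module):
#
#   lease = acquire_lease('gc-task', client_id='worker-1', ttl=60, timeout=10)
#   if lease is not None:
#       try:
#           ...  # do work while holding the lease
#       finally:
#           release_lease('gc-task', client_id='worker-1')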
|
77de69b03dab5d529872c66bc2a5648d656c829c
| 3,642,000
|
def create_index(conn, column_list, table='perfdata', unique=False):
"""Creates one index on a list of/one database column/s.
"""
table = base2.filter_str(table)
index_name = u'idx_{}'.format(base2.md5sum(table + column_list))
c = conn.cursor()
if unique:
sql = u'CREATE UNIQUE INDEX IF NOT EXISTS {} ON "{}" ({});'.format(
index_name, table, column_list
)
else:
sql = u'CREATE INDEX IF NOT EXISTS {} ON "{}" ({});'.format(
index_name, table, column_list
)
try:
c.execute(sql)
except Exception as e:
return(False, u'Query failed: {}, Error: {}'.format(sql, e))
return (True, True)
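# Hedged usage sketch (assumes `column_list` is a comma-separated string and
# that `base2.filter_str`/`base2.md5sum` are the helpers used above):
#
#   import sqlite3
#   conn = sqlite3.connect('perfdata.db')
#   ok, err = create_index(conn, 'host, check_name', table='perfdata')
#   if not ok:
#       print(err)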
|
38b71cededb8500452cd3eac53609cc4ee384566
| 3,642,001
|
from typing import Type
from typing import Any
from hashlib import sha256
import inspect
def produce_hash(self: Type[PCell], extra: Any = None) -> str:
"""Produces a hash of a PCell instance based on:
1. the source code of the class and its bases.
2. the non-default parameter with which the pcell method is called
3. the name of the pcell
"""
# copy source code of class and all its ancestors
source_code = "".join(
[inspect.getsource(klass) for klass in self.__class__.__mro__ if issubclass(klass, PCell)]
)
diff_params = dict(self.params)
# str(diff_params) calls __repr__ in inner values, instead of __str__ ()
# therefore it would fail for instances without readable __repr__ methods
str_diff_params = "{%s}" % ", ".join("%r: %s" % p for p in diff_params.items())
long_hash_pcell = sha256(
(source_code + str_diff_params + self.name + str(extra)).encode()
).hexdigest()
short_hash_pcell = long_hash_pcell[0:7]
return short_hash_pcell
|
859f51600a9cc4da8ba546afebf0ecdf20959ec8
| 3,642,002
|
import numpy as np
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """
    Args:
        boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
Returns:
boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
"""
w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
boxes3d_lidar[:, 2] += h[:, 0] / 2
return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
|
139a3da3ac8e6c09a376d976d0e96a8d91f9a74a
| 3,642,003
|
def combine_per_choice(*args):
"""
Combines two or more per-choice analytics results into one.
"""
args = list(args)
result = args.pop()
new_weight = None
new_averages = None
while args:
other = args.pop()
for key in other:
if key not in result:
result[key] = other[key]
else:
old_weight, old_averages = result[key]
other_weight, other_averages = other[key]
if (
new_averages
and set(old_averages.keys()) != set(new_averages.keys())
):
raise ValueError(
"Can't combine per-choice results which used different sets of "
"player models."
)
new_weight = old_weight + other_weight
new_averages = {}
for pmn in old_averages:
new_averages[pmn] = (
old_averages[pmn] * old_weight
+ other_averages[pmn] * other_weight
) / new_weight
result[key] = (new_weight, new_averages)
return result
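# Worked example (assumes each result maps a choice key to a
# (weight, {player_model_name: average}) tuple):
#
#   r1 = {'choice_a': (2, {'greedy': 0.5, 'random': 0.1})}
#   r2 = {'choice_a': (4, {'greedy': 0.8, 'random': 0.4})}
#   combine_per_choice(r1, r2)
#   # -> {'choice_a': (6, {'greedy': 0.7, 'random': 0.3})}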
|
63e482a60b521744c94d80b0b8a740ff74f4b197
| 3,642,004
|
import string
def prepare_input(dirty: str) -> str:
"""
Prepare the plaintext by up-casing it
and separating repeated letters with X's
"""
dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
clean = ""
if len(dirty) < 2:
return dirty
for i in range(len(dirty) - 1):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean) & 1:
clean += "X"
return clean
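# Example: prepare_input("Hello") -> "HELXLO" (the doubled L is split by an X;
# an odd-length result would additionally be padded with a trailing X).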
|
5c55ba770e024b459d483fd168978437b8d48c21
| 3,642,005
|
import numpy as np
def calc_center_from_box(box_array):
"""calculate center point of boxes
Args:
box_array (array): N*4 [left_top_x, left_top_y, right_bottom_x, right_bottom_y]
Returns:
array N*2: center points array [x, y]
"""
center_array=[]
for box in box_array:
center_array.append([(box[0]+box[2])//2, (box[1]+box[3])//2])
return np.array(center_array)
|
1f713a2f6900678ad1760ea60456bbf44b9f06af
| 3,642,006
|
import scipy
def getW3D(coords):
"""
#################################################################
    The calculation of the 3-D Wiener index based on the
    geometrical distance matrix optimized
    by MOPAC (not including Hs)
-->W3D
#################################################################
"""
temp = []
for i in coords:
if i[3] != 'H':
temp.append([float(i[0]), float(i[1]), float(i[2])])
DistanceMatrix = GetGementricalDistanceMatrix(temp)
return scipy.sum(DistanceMatrix) / 2.0
|
25fb1596b0d818d7f7f120289a79ccbf9b4f1ae4
| 3,642,007
|
from typing import Any
from typing import Sequence
def state_vectors(
draw: Any,
max_num_qudits: int = 3,
allowed_bases: Sequence[int] = (2, 3),
min_num_qudits: int = 1,
) -> StateVector:
"""Hypothesis strategy for generating `StateVector`'s."""
num_qudits, radixes = draw(
num_qudits_and_radixes(
max_num_qudits, allowed_bases, min_num_qudits,
),
)
return StateVector.random(num_qudits, radixes)
|
b81059843f94cb1c36581ac0a7dd5d3c17f39839
| 3,642,008
|
def ProcessChainsAndLigandsOptionsInfo(ChainsAndLigandsInfo, ChainsOptionName, ChainsOptionValue, LigandsOptionName = None, LigandsOptionValue = None):
"""Process specified chain and ligand IDs using command line options.
Arguments:
        ChainsAndLigandsInfo (dict): A dictionary containing information
            about existing chains and ligands.
ChainsOptionName (str): Name of command line chains option.
ChainsOptionValue (str): Value for command line chains option.
LigandsOptionName (str): Name of command line ligands option.
LigandsOptionValue (str): Value for command line ligands option.
Returns:
dict: A dictionary containing list of chain identifiers and dictionaries
of chains containing lists of ligand names for each chain.
Examples:
ChainsAndLigandsInfo = ProcessChainsAndLigandsOptionsInfo(Infile,
MolName)
for ChainID in ChainsAndLigandsInfo["ChainIDs"]:
for LigandID in ChainsAndLigandsInfo["LigandIDs"][ChainID]:
MiscUtil.PrintInfo("ChainID: %s; LigandID: %s" % (ChainID,
LigandID))
"""
SpecifiedChainsAndLigandsInfo = {}
SpecifiedChainsAndLigandsInfo["ChainIDs"] = []
SpecifiedChainsAndLigandsInfo["LigandIDs"] = {}
if ChainsOptionValue is None:
return SpecifiedChainsAndLigandsInfo
_ProcessChainIDs(ChainsAndLigandsInfo, SpecifiedChainsAndLigandsInfo, ChainsOptionName, ChainsOptionValue)
if LigandsOptionValue is None:
return SpecifiedChainsAndLigandsInfo
_ProcessLigandIDs(ChainsAndLigandsInfo, SpecifiedChainsAndLigandsInfo, LigandsOptionName, LigandsOptionValue)
return SpecifiedChainsAndLigandsInfo
|
0bdadbc08512957269a179df24c1202a56870d42
| 3,642,009
|
import numpy as np
from numpy import fft
def oa_filter(x, h, N, mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import oa_filter
>>> n = np.arange(0,100)
>>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)
    >>> N = 32
>>> y = oa_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = oa_filter(x,h,N,1)
"""
P = len(h)
L = int(N) - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
|
41fa7aaf7a57e0f6c363eb1efa7413b2a1a34d47
| 3,642,010
|
from typing import Optional
from typing import Iterable
from os import listdir
import re
def get_languages(translation_dir: str, default_language: Optional[str] = None) -> Iterable[str]:
"""
Get a list of available languages.
The default language is (generic) English and will always be included. All other languages will be read from
the folder `translation_dir`. A folder within that directory is considered to contain a language for the
application if its name is either two lowercase letters, or two lowercase letters, a dash, and two uppercase
letters.
:Example:
The following list contains *valid* language codes:
* ``de``
* ``de-DE``
* ``de-AT``
* ``de-CH``
:Example:
The following list contains *invalid* language codes:
* ``EN``
* ``EN-us``
* ``EN-US``
* ``en-us``
* ``en-USA``
:param default_language: The default language as used in the GetText functions within the code. If not given,
the default language from :meth:`get_default_language` will be used.
:param translation_dir: The directory within which the translation folders can be found.
:return: A list of language codes supported by the application.
"""
if not default_language:
default_language = get_default_language()
# Get a list of all entries in the translations folder and filter it. If the given folder could not be read, do not
# include any additional languages.
pattern = re.compile('^([a-z]{2})(-[A-Z]{2})?$')
try:
languages = [language for language in listdir(translation_dir) if pattern.match(language)]
except OSError:
languages = []
return [default_language] + languages
|
a25c9c2672f4f8d96cffc363aa922424b031aea7
| 3,642,011
|
import numpy as np
import numpy.ma as ma
def calculate_direction(a, b):
"""Calculates the direction vector between two points.
Args:
a (list): the position vector of point a.
b (list): the position vector of point b.
Returns:
array: The (unnormalised) direction vector between points a and b. The smallest magnitude of an element is 1 (eg: [1,1,2]).
"""
difference = np.subtract(a, b)
if np.count_nonzero(difference) < 1:
print("The two k-points are equal")
return np.array([0, 0, 0])
# we need to find the smallest non-zero value within a-b
a = np.array(a)
b = np.array(b)
direction_masked = ma.masked_equal(
a - b, 0) # return array with invalid entries where values are equal
    direction_filled = ma.filled(
        direction_masked,
        10**6)  # fill invalid elements of the array with a large number
direction_absolute = np.absolute(
direction_filled) # return absolute values of each element
smallest = np.amin(direction_absolute)
direction = (
b - a) / smallest # use the minimum absolute value as a divisor a-b
if -1 in direction:
direction = np.multiply(direction, -1)
return direction
|
9e0297560bb48d57cd4e1d5f12788a62ae7d9b3b
| 3,642,012
|
import numpy as np
def argmin(a, axis=None, out=None, keepdims=None, combine_size=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input tensor.
axis : int, optional
By default, the index is into the flattened tensor, otherwise
along the specified axis.
out : Tensor, optional
If provided, the result will be inserted into this tensor. It should
be of the appropriate shape and dtype.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input tensor.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`Tensor`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
combine_size: int, optional
The number of chunks to combine.
Returns
-------
index_array : Tensor of ints
Tensor of indices into the tensor. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
Tensor.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> import mars.tensor as mt
>>> from mars.session import new_session
>>> sess = new_session().as_default()
>>> a = mt.arange(6).reshape(2,3)
>>> a.execute()
array([[0, 1, 2],
[3, 4, 5]])
>>> mt.argmin(a).execute()
0
>>> mt.argmin(a, axis=0).execute()
array([0, 0, 0])
>>> mt.argmin(a, axis=1).execute()
array([0, 0])
Indices of the minimum elements of a N-dimensional tensor:
>>> ind = mt.unravel_index(mt.argmin(a, axis=None), a.shape)
>>> sess.run(ind)
(0, 0)
>>> a[ind] # TODO(jisheng): accomplish when fancy index on tensor is supported
>>> b = mt.arange(6)
>>> b[4] = 0
>>> b.execute()
array([0, 1, 2, 3, 0, 5])
>>> mt.argmin(b).execute() # Only the first occurrence is returned.
0
"""
op = TensorArgmin(axis=axis, dtype=np.dtype(int), keepdims=keepdims, combine_size=combine_size)
return op(a, out=out)
|
d289cf508720c5d93ef1622d3e960e9063ab5131
| 3,642,013
|
import json
def parse_game_state(gs_json: dict) -> game_state_pb2.GameState:
"""Deserialize a JSON-formatted game state to protobuf."""
if 'provider' not in gs_json:
raise InvalidGameStateException(gs_json)
try:
map_ = parse_map(gs_json.get('map'))
provider = parse_provider(gs_json['provider'])
round_ = parse_round(gs_json.get('round'))
player = parse_player(gs_json['player'])
allplayers = [
parse_allplayers_entry(steam_id, allplayers_entry)
for steam_id, allplayers_entry in gs_json.get('allplayers', {}).items()
]
previously = parse_previously(gs_json['previously']) \
if 'previously' in gs_json else None
added = parse_added(gs_json['added']) if 'added' in gs_json else None
return game_state_pb2.GameState(
provider=provider, map=map_, round=round_, player=player,
allplayers=allplayers, previously=previously, added=added)
except DeserializationError as e:
logger.error('Failed to deserialize game_state: %s', json.dumps(gs_json))
raise e
|
d78868c24807a3cac469b87990e5a74f88876eb5
| 3,642,014
|
def state2bin(s, num_bins, limits):
"""
:param s: a state. (possibly multidimensional) ndarray, with dimension d =
dimensionality of state space.
:param num_bins: the total number of bins in the discretization
:param limits: 2 x d ndarray, where row[0] is a row vector of the lower
limit of each discrete dimension, and row[1] are corresponding upper
limits.
Returns the bin number (index) corresponding to state s given a
discretization num_bins between each column of limits[0] and limits[1].
The return value has same dimensionality as ``s``. \n
Note that ``s`` may be continuous. \n
\n
Examples: \n
s = 0, limits = [-1,5], num_bins = 6 => 1 \n
s = .001, limits = [-1,5], num_bins = 6 => 1 \n
s = .4, limits = [-.5,.5], num_bins = 3 => 2 \n
"""
if s == limits[1]:
return num_bins - 1
width = limits[1] - limits[0]
if s > limits[1]:
        print('Tools.py: WARNING: ', s, ' > ', limits[1], '. Using the chopped value of s')
        print('Ignoring', limits[1] - s)
        s = limits[1]
    elif s < limits[0]:
        print('Tools.py: WARNING: ', s, ' < ', limits[0], '. Using the chopped value of s')
# print("WARNING: %s is out of limits of %s . Using the chopped value of s" %(str(s),str(limits)))
s = limits[0]
return int((s - limits[0]) * num_bins / (width * 1.))
|
8c0a1d559a332b1a015bde78c9eca413eeae942c
| 3,642,015
|
from copy import deepcopy
def _fix_json_agents(ag_obj):
"""Fix the json representation of an agent."""
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
ret = deepcopy(ag_obj)
text = ret.pop('TEXT')
ret['db_refs']['TEXT'] = text
else:
ret = ag_obj
return ret
|
be73467edc1dc30ac0be1f6804cdb19cf5f942bf
| 3,642,016
|
import cv2
def vflip(img):
"""Vertically flip the given CV Image.
Args:
img (CV Image): Image to be flipped.
Returns:
CV Image: Vertically flipped image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    return cv2.flip(img, 0)  # flipCode 0 flips around the x-axis (vertical flip)
|
7b678327a15876a98e0622eacc012f86a8ffd432
| 3,642,017
|
def list_pending_tasks():
"""List all pending tasks in celery cluster."""
inspector = celery_app.control.inspect()
return inspector.reserved()
|
3d1785dd9ac8fd91f1f0ceb72eeb7df5671b55d5
| 3,642,018
|
from random import randint
def get_attack(attacker, defender):
"""
Returns a value for an attack roll.
Args:
attacker (obj): Character doing the attacking
defender (obj): Character being attacked
Returns:
attack_value (int): Attack roll value, compared against a defense value
to determine whether an attack hits or misses.
Notes:
By default, returns a random integer from 1 to 100 without using any
properties from either the attacker or defender.
This can easily be expanded to return a value based on characters stats,
equipment, and abilities. This is why the attacker and defender are passed
        to this function, even though nothing from either one is used in this example.
"""
# For this example, just return a random integer up to 100.
attack_value = randint(1, 100)
return attack_value
|
3ec24ab34a02c2572ee62d1cc18079bdb7ef10ee
| 3,642,019
|
def colored(s, color=None, attrs=None):
"""Call termcolor.colored with same arguments if this is a tty and it is available."""
if HAVE_COLOR:
return colored_impl(s, color, attrs=attrs)
return s
|
a8c4f56e55721ec464728fbe9af4453cd98400ba
| 3,642,020
|
import numpy as np
import pandas as pd
from anndata import AnnData
def _combine_by_cluster(ad, clust_key='leiden'):
    """
    Given an AnnData object, create a new AnnData object where each
    element is not a cell, but rather a cluster.
    """
clusters = []
X_mean_clust = []
for clust in sorted(set(ad.obs[clust_key])):
cells = ad.obs.loc[ad.obs[clust_key] == clust].index
X_clust = ad[cells,:].X
x_clust = _aggregate_expression(X_clust)
X_mean_clust.append(x_clust)
clusters.append(str(clust))
X_mean_clust = np.array(X_mean_clust)
ad_mean_clust = AnnData(
X=X_mean_clust,
var=ad.var,
obs=pd.DataFrame(
data=clusters,
index=clusters
)
)
return ad_mean_clust
|
2d48a9f504050604a679f35c06916c175e813ffe
| 3,642,021
|
import numpy as np
def subsample_data(features, scaled_features, labels, subsamp):  # This is only for the poker dataset
""" Subsample the data. """
# k is class, will iterate from class 0 to class 1
# v is fraction to sample, i.e. 0.1, sample 10% of the current class being iterated
for k, v in subsamp.items():
ix = np.where(labels == k)[0]
ix_rest = np.where(labels != k)[0]
sample_ix = np.random.choice(ix, int(v * len(ix)), replace=False)
keep_ix = np.union1d(ix_rest, sample_ix)
# subsample
features = features[keep_ix, :]
scaled_features = scaled_features[keep_ix, :]
labels = labels[keep_ix]
return features, scaled_features, labels
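# Example: subsamp={0: 0.1} keeps a random 10% of the class-0 rows and leaves
# every other class untouched.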
|
3981fb0c793f64d52fc21cf2bbf87975bf97c786
| 3,642,022
|
import numpy as np
import pandas as pd
def anomary_scores_ae(df_original, df_reduced):
    """Compute anomaly scores from the features reconstructed by the autoencoder.
    The anomaly score is the per-sample reconstruction (squared) error.
    Args:
        df_original(array-like): training data of shape (n_samples, n_features)
        df_reduced(array-like): prediction of shape (n_samples, n_features)
    Returns:
        pd.Series: anomaly score per sample (squared error rescaled with min-max scaling)
    """
    # squared error between each sample and its reconstruction
    loss = np.sum((np.array(df_original) - np.array(df_reduced)) ** 2, axis=1)
    # convert loss to a pd.Series indexed like the input
    loss = pd.Series(data=loss, index=df_original.index)
    # min-max scale the squared error to a score in [0, 1]
    min_max_normalized_loss = (loss - np.min(loss)) / (np.max(loss) - np.min(loss))
    return min_max_normalized_loss
|
f6fe2dca7c10e19ee1e0a03a8d94c663ef5d77fd
| 3,642,023
|
import os
import pandas as pd
def merge_runs(input_dir1, input_dir2, map_df, output, arguments):
    """
    :param input_dir1: Directory 1 containing mutation counts files
    :param input_dir2: Directory 2 containing mutation counts files
    :param map_df: A dataframe that maps the samples to be merged
    :param output: output directory to save merged mut_counts
    :param arguments: parsed command line arguments (only `covOverride` is used here)
    """
# make a new file
new_samples = []
for index, row in map_df.iterrows():
merged_header = ""
mut_count_f1 = os.path.join(input_dir1, f"counts_sample_{row['Sample ID_run1']}.csv")
mut_count_f2 = os.path.join(input_dir2, f"counts_sample_{row['Sample ID_run2']}.csv")
new_samples.append(f"{row['Sample ID_run1']}-{row['Sample ID_run2']}")
# join headers
with open(mut_count_f1) as file1, open(mut_count_f2) as file2:
for line1, line2 in zip(file1, file2):
if "#" not in line1: break
if "Final read-depth:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1+n_depth_2
merged_header+= f"#Final read-depth:{total_depth}\n"
continue
if "Number of read pairs without mutations:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1 + n_depth_2
merged_header += f"#Number of read pairs without mutations:{total_depth}\n"
continue
if "Total read pairs with mutations:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1 + n_depth_2
merged_header += f"#Total read pairs with mutations:{total_depth}\n"
continue
if "Number of read pairs did not map to gene:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1 + n_depth_2
merged_header += f"#Number of read pairs did not map to gene:{total_depth}\n"
continue
if "Number of reads outside of the tile:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1 + n_depth_2
merged_header += f"#Number of reads outside of the tile:{total_depth}\n"
continue
if "Raw read depth:" in line1:
n_depth_1 = int(line1.split(":")[1])
n_depth_2 = int(line2.split(":")[1])
total_depth = n_depth_1 + n_depth_2
merged_header += f"#Raw read depth:{total_depth}\n"
continue
merged_header += line1.strip() + "; " + line2
output_f = open(os.path.join(output, f"counts_sample_{row['Sample ID_run1']}-{row['Sample ID_run2']}.csv"), "w")
output_f.write(merged_header)
# skip header and read the mutations
df1 = pd.read_csv(mut_count_f1, skiprows=20)
df2 = pd.read_csv(mut_count_f2, skiprows=20)
# for each file find the corresponded wt file and read the wt
merge = [df1, df2]
merge = pd.concat(merge)
merge_counts = merge.groupby(by=["HGVS"], as_index=False)["count"].sum().sort_values("count", ascending=False)
# save header and counts to file
merge_counts.to_csv(output_f, mode="a", index=False)
output_f.close()
if not arguments.covOverride:
cov_f1 = os.path.join(input_dir1, f"coverage_{row['Sample ID_run1']}.csv")
cov_f2 = os.path.join(input_dir2, f"coverage_{row['Sample ID_run2']}.csv")
# read coverage files
# sum up two df
cov_d1 = pd.read_csv(cov_f1).set_index("pos")
cov_d2 = pd.read_csv(cov_f2).set_index("pos")
df_sum = cov_d1.add(cov_d2, fill_value=0)
output_cov = os.path.join(output, f"coverage_{row['Sample ID_run1']}-{row['Sample ID_run2']}.csv")
df_sum.to_csv(output_cov)
return new_samples
|
d2e27bb99d7e6e75110a1ed9a51ab285ee2c39d2
| 3,642,024
|
import logging
import pandas as pd
def latest_res_ords():
"""Get last decade from reso and ords table"""
filename = 'documentum_scs_council_reso_ordinance_v.csv'
save_path = f"{conf['prod_data_dir']}/documentum_scs_council_reso_ordinance_v"
df = pd.read_csv(f"{conf['prod_data_dir']}/{filename}",
low_memory=False)
df['DOC_DATE'] = pd.to_datetime(df['DOC_DATE'],errors='coerce')
df_current = df.loc[df['DOC_DATE'] >= f"01/01/2016"]
general.pos_write_csv(df_current, f"{save_path}_2016_current.csv")
logging.info(f"Wrote 2016_current")
return f"Successfully extracted this decade of resos and ords"
|
c23b26e878887758c6822164bcac33eb7c28f765
| 3,642,025
|
def langevin_coefficients(temperature, dt, friction, masses):
"""
Compute coefficients for langevin dynamics
Parameters
----------
temperature: float
units of Kelvin
dt: float
units of picoseconds
friction: float
collision rate in 1 / picoseconds
masses: array
mass of each atom in standard mass units. np.inf masses will
effectively freeze the particles.
Returns
-------
tuple (ca, cb, cc)
ca is scalar, and cb and cc are n length arrays
that are used during langevin dynamics as follows:
during heat-bath update
v -> ca * v + cc * gaussian
during force update
v -> v + cb * force
"""
kT = BOLTZ * temperature
nscale = np.sqrt(kT / masses)
ca = np.exp(-friction * dt)
cb = dt / masses
cc = np.sqrt(1 - np.exp(-2 * friction * dt)) * nscale
return ca, cb, cc
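# Hedged usage sketch (assumes BOLTZ is the Boltzmann constant in this package's
# unit system, `v` and `forces` are (N, 3) arrays and `masses` is (N,)):
#
#   ca, cb, cc = langevin_coefficients(300.0, 1.5e-3, 1.0, masses)
#   v = ca * v + cc[:, None] * np.random.normal(size=v.shape)  # heat-bath update
#   v = v + cb[:, None] * forces                               # force update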
|
a95ba22bda908fdd10171ed63eba1dc7906c0c1f
| 3,642,026
|
def get_description():
"""
Read full description from 'README.md'
:return: description
:rtype: str
"""
with open('README.md', 'r', encoding='utf-8') as f:
return f.read()
|
9a73c9dbaf88977f8c96eee056f92a7d5ff938fd
| 3,642,027
|
def qt_matrices(matrix_dim, selected_pp_indices=[0, 5, 10, 11, 1, 2, 3, 6, 7]):
"""
Get the elements of a special basis spanning the density-matrix space of
a qutrit.
The returned matrices are given in the standard basis of the
density matrix space. These matrices form an orthonormal basis
under the trace inner product, i.e. Tr( dot(Mi,Mj) ) == delta_ij.
Parameters
----------
matrix_dim : int
Matrix-dimension of the density-matrix space. Must equal 3
        (present just to maintain consistency with other routines)
Returns
-------
list
A list of 9 numpy arrays each of shape (3, 3).
"""
if matrix_dim == 1: # special case of just identity mx
return [_np.identity(1, 'd')]
assert(matrix_dim == 3)
A = _np.array([[1, 0, 0, 0],
[0, 1. / _np.sqrt(2), 1. / _np.sqrt(2), 0],
[0, 0, 0, 1]], 'd') # projector onto symmetric space
def _toQutritSpace(inputMat):
return _np.dot(A, _np.dot(inputMat, A.transpose()))
qt_mxs = []
pp_mxs = pp_matrices(4)
#selected_pp_indices = [0,5,10,11,1,2,3,6,7] #which pp mxs to project
# labels = ['II', 'XX', 'YY', 'YZ', 'IX', 'IY', 'IZ', 'XY', 'XZ']
qt_mxs = [_toQutritSpace(pp_mxs[i]) for i in selected_pp_indices]
# Normalize so Tr(BiBj) = delta_ij (done by hand, since only 3x3 mxs)
qt_mxs[0] *= 1 / _np.sqrt(0.75)
#TAKE 2 (more symmetric = better?)
q1 = qt_mxs[1] - qt_mxs[0] * _np.sqrt(0.75) / 3
q2 = qt_mxs[2] - qt_mxs[0] * _np.sqrt(0.75) / 3
qt_mxs[1] = (q1 + q2) / _np.sqrt(2. / 3.)
qt_mxs[2] = (q1 - q2) / _np.sqrt(2)
#TAKE 1 (XX-II and YY-XX-II terms... not symmetric):
#qt_mxs[1] = (qt_mxs[1] - qt_mxs[0]*_np.sqrt(0.75)/3) / _np.sqrt(2.0/3.0)
#qt_mxs[2] = (qt_mxs[2] - qt_mxs[0]*_np.sqrt(0.75)/3 + qt_mxs[1]*_np.sqrt(2.0/3.0)/2) / _np.sqrt(0.5)
for i in range(3, 9): qt_mxs[i] *= 1 / _np.sqrt(0.5)
return qt_mxs
|
8e444fae5b936f4e20f615404712c91a5bbe3f4c
| 3,642,028
|
def get_signed_value(bit_vector):
"""
    This function will generate the signed value for a given bit vector
    bit_vector : dict mapping bit positions (int) to bit values
"""
signed_value = 0
for i in sorted(bit_vector.keys()):
if i == 0:
signed_value = int(bit_vector[i])
else:
signed_value += ((2 << 7) << (int(i) - 1)) * int(bit_vector[i])
return signed_value
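# Worked example: get_signed_value({0: 1, 1: 1}) == 1 + (2 << 7) == 257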
|
6b2b9a968576256738f396eeefba844561e2d2c7
| 3,642,029
|
def values_to_colors(values, cmap, vmin=None, vmax=None):
"""
Function to map a set of values through a colormap
to get RGB values in order to facilitate coloring of meshes.
Parameters
----------
values: array-like, (n_vertices, )
values to pass through colormap
cmap: array-like, (n_colors, 3)
colormap describing the RGB values from vmin to vmax
vmin : float
(optional) value that should receive minimum of colormap.
default to minimum of values
vmax : float
(optional) values that should receive maximum of colormap
default to maximum of values
Output
------
colors: array-like, (n_vertices, 3)
RGB values for each entry in values (as np.uint8 [0-255])
Example
-------
Assuming mesh object and 'values' have been calculated already
::
import seaborn as sns
cmap = np.array(sns.color_palette('viridis', 1000))
clrs = trimesh_vtk.values_to_colors(values, cmap)
mesh_actor = trimesh_io.mesh_actor(mesh, vertex_colors = clrs, opacity=1.0)
trimesh_vtk.render_actors([mesh_actor])
"""
n_colors = cmap.shape[0]
if vmin is None:
vmin = np.nanmin(values)
if vmax is None:
vmax = np.nanmax(values)
values = np.clip(values, vmin, vmax)
r = np.interp(x=values, xp=np.linspace(vmin, vmax, n_colors), fp=cmap[:, 0])
g = np.interp(x=values, xp=np.linspace(vmin, vmax, n_colors), fp=cmap[:, 1])
b = np.interp(x=values, xp=np.linspace(vmin, vmax, n_colors), fp=cmap[:, 2])
colors = np.vstack([r, g, b]).T
colors = (colors * 255).astype(np.uint8)
return colors
|
d26bcf4daaeb5247a11576547cdce8417b8d4b19
| 3,642,030
|
from typing import List
import sys
def _clean_sys_argv(pipeline: str) -> List[str]:
"""Values in sys.argv that are not valid option values in Where
"""
reserved_opts = {pipeline, "label", "id", "only_for_rundate", "session", "stage", "station", "writers"}
return [o for o in sys.argv[1:] if o.startswith("--") and o[2:].split("=")[0] not in reserved_opts]
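# Example: with sys.argv == ['where', '--session=abc', '--profile=sisre'] and
# pipeline='gnss', only '--profile=sisre' is returned ('session' is reserved).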
|
551f4fdca5d7cc276b03943f3679cc2eff8ce89e
| 3,642,031
|
def get_number_from_user_input(prompt: str, min_value: int, max_value: int) -> int:
"""gets a int integer from user input"""
# input loop
user_input = None
while user_input is None or user_input < min_value or user_input > max_value:
raw_input = input(prompt + f" ({min_value}-{max_value})? ")
try:
user_input = int(raw_input)
if user_input < min_value or user_input > max_value:
print("Invalid input, please try again")
except ValueError:
print("Invalid input, please try again")
return user_input
|
c9df4ac604b3bf8f0f9c2a35added1f23e88048e
| 3,642,032
|
def word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths):
"""
Calculate word saliency according to [Chuang2012]_ as ``saliency(w) = p(w) * distinctiveness(w)`` for a word ``w``.
.. [Chuang2012] J. Chuang, C. Manning, J. Heer. 2012. Termite: Visualization Techniques for Assessing Textual Topic
Models
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
:param doc_topic_distrib: document-topic distribution; shape NxK, where N is the number of documents, K is the
number of topics
:param doc_lengths: array of size N (number of docs) with integers indicating the number of terms per document
:return: array of size M (vocabulary size) with word saliency
"""
p_t = marginal_topic_distrib(doc_topic_distrib, doc_lengths)
p_w = marginal_word_distrib(topic_word_distrib, p_t)
return p_w * word_distinctiveness(topic_word_distrib, p_t)
|
47acaa848601192837eceef210389ada090b1fec
| 3,642,033
|
import json
def parse_cl_items(s):
"""Take a json string of checklist items and make a dict of item objects keyed on
item name (id)"""
dispatch = {"floating":Floating,
"weekly":Weekly,
"monthly": Monthly,
"daily":Daily
}
if len(s) == 0:
return []
raw = json.loads(s)
il = []
for d in raw:
t = d.pop('type')
t = t.lower()
il.append(dispatch[t](**d))
return il
|
274374f8ad3048f7bf9f17ed7d43740a83900a63
| 3,642,034
|
def get_queue(queue, flags=FLAGS.ALL, **conn):
"""
Orchestrates all the calls required to fully fetch details about an SQS Queue:
{
"Arn": ...,
"Region": ...,
"Name": ...,
"Url": ...,
"Attributes": ...,
"Tags": ...,
"DeadLetterSourceQueues": ...,
"_version": 1
}
:param queue: Either the queue name OR the queue url
:param flags: By default, set to ALL fields.
:param conn: dict containing enough information to make a connection to the desired account. Must at least have
'assume_role' key.
:return: dict containing a fully built out SQS queue.
"""
# Check if this is a Queue URL or a queue name:
if queue.startswith("https://") or queue.startswith("http://"):
queue_name = queue
else:
queue_name = get_queue_url(QueueName=queue, **conn)
sqs_queue = {"QueueUrl": queue_name}
return registry.build_out(flags, sqs_queue, **conn)
|
b39ea959835fc3ae32042cabac4bc4f9b5f1c425
| 3,642,035
|
def mixed_float_frame():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
df = DataFrame(tm.getSeriesData())
df.A = df.A.astype('float32')
df.B = df.B.astype('float32')
df.C = df.C.astype('float16')
df.D = df.D.astype('float64')
return df
|
aaef420666cf714c45bb87bf7e1eb484a4c06f69
| 3,642,036
|
import unittest
def create_parsetestcase(durationstring, expectation, format, altstr):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
PARSE_TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestParseDuration(unittest.TestCase):
'''
A test case template to parse an ISO duration string into a
timedelta or Duration object.
'''
def test_parse(self):
'''
Parse an ISO duration string and compare it to the expected value.
'''
result = parse_duration(durationstring)
self.assertEqual(result, expectation)
def test_format(self):
'''
Take duration/timedelta object and create ISO string from it.
This is the reverse test to test_parse.
'''
if altstr:
self.assertEqual(duration_isoformat(expectation, format),
altstr)
else:
# if durationstring == '-P2W':
# import pdb; pdb.set_trace()
self.assertEqual(duration_isoformat(expectation, format),
durationstring)
return unittest.TestLoader().loadTestsFromTestCase(TestParseDuration)
|
b74dbb969743bc98e22bfd80677da7f02891391e
| 3,642,037
|
import time
import traceback
def draw_data_from_db(host, port=None, pid=None, startTime=None, endTime=None, system=None, disk=None):
"""
Get data from InfluxDB, and visualize
:param host: client IP, required
:param port: port, visualize port data; optional, choose one from port, pid and system
:param pid: pid, visualize pid data; optional, choose one from port, pid and system
:param startTime: Start time; optional
:param endTime: end time; optional
:param system: visualize system data; optional, choose one from port, pid and system
:param disk: disk number; optional
:return:
"""
post_data = {
'types': 'system',
'cpu_time': [],
'cpu': [],
'iowait': [],
'usr_cpu': [],
'mem': [],
'mem_available': [],
'jvm': [],
'io_time': [],
'io': [],
'disk_r': [],
'disk_w': [],
'disk_d': [],
'rec': [],
'trans': [],
'nic': [],
'tcp': [],
'close_wait': [],
'time_wait': [],
'retrans': [],
'disk': disk}
res = {'code': 1, 'flag': 1, 'message': 'Successful!'}
connection = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
cfg.getInflux('password'), cfg.getInflux('database'))
try:
if startTime and endTime: # If there is a start time and an end time
pass
elif startTime is None and endTime is None: # If the start time and end time do not exist, use the default time.
startTime = '2020-05-20 20:20:20'
endTime = time.strftime('%Y-%m-%d %H:%M:%S')
else: # If the end time does not exist, the current time is used
endTime = time.strftime('%Y-%m-%d %H:%M:%S')
s_time = time.time()
if port:
sql = f"select cpu, wait_cpu, mem, tcp, jvm, rKbs, wKbs, iodelay, close_wait, time_wait from \"{host}\" " \
f"where time>'{startTime}' and time<'{endTime}' and type='{port}' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
post_data['types'] = 'port'
for data in datas.get_points():
post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
post_data['cpu'].append(data['cpu'])
post_data['iowait'].append(data['wait_cpu'])
post_data['mem'].append(data['mem'])
post_data['tcp'].append(data['tcp'])
post_data['jvm'].append(data['jvm'])
post_data['io'].append(data['iodelay'])
post_data['disk_r'].append(data['rKbs'])
post_data['disk_w'].append(data['wKbs'])
post_data['close_wait'].append(data['close_wait'])
post_data['time_wait'].append(data['time_wait'])
else:
res['message'] = f'No monitoring data of the port {port} is found, ' \
f'please check the port or time setting.'
res['code'] = 0
if disk:
sql = f"select rec, trans, net from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
f"type='system' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
for data in datas.get_points():
post_data['nic'].append(data['net'])
post_data['rec'].append(data['rec'])
post_data['trans'].append(data['trans'])
else:
res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
res['code'] = 0
if pid:
pass
if system and disk:
disk_n = disk.replace('-', '')
disk_r = disk_n + '_r'
disk_w = disk_n + '_w'
disk_d = disk_n + '_d'
sql = f"select cpu, iowait, usr_cpu, mem, mem_available, {disk_n}, {disk_r}, {disk_w}, {disk_d}, rec, trans, " \
f"net, tcp, retrans from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
f"type='system' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
post_data['types'] = 'system'
for data in datas.get_points():
post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
post_data['cpu'].append(data['cpu'])
post_data['iowait'].append(data['iowait'])
post_data['usr_cpu'].append(data['usr_cpu'])
post_data['mem'].append(data['mem'])
post_data['mem_available'].append(data['mem_available'])
post_data['rec'].append(data['rec'])
post_data['trans'].append(data['trans'])
post_data['nic'].append(data['net'])
post_data['io'].append(data[disk_n])
post_data['disk_r'].append(data[disk_r])
post_data['disk_w'].append(data[disk_w])
post_data['disk_d'].append(data[disk_d])
post_data['tcp'].append(data['tcp'])
post_data['retrans'].append(data['retrans'])
else:
res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
res['code'] = 0
res.update({'post_data': post_data})
logger.info(f'Time consuming to query is {time.time() - s_time}')
# lines = get_lines(post_data) # Calculate percentile, 75%, 90%, 95%, 99%
# res.update(lines)
except Exception as err:
logger.error(traceback.format_exc())
res['message'] = str(err)
res['code'] = 0
del connection, post_data
return res
|
73aa86b18dff59fdf88eff0b173e32fa4f3ed3ed
| 3,642,038
|
def get_sample_generator(filenames, batch_size, model_config):
"""Set data loader generator according to different tasks.
Args:
filenames(list): filenames of the input data.
batch_size(int): size of the each batch.
model_config(dict): the dictionary containing model configuration.
Raises:
NameError: if key ``task`` in ``model_config`` is invalid.
Returns:
reader(func): data reader.
"""
task = model_config['task']
if task == 'pretrain':
return pretrain_sample_reader(filenames, batch_size)
elif task == 'seq_classification':
label_name = model_config.get('label_name', 'labels')
return sequence_sample_reader(filenames, batch_size, label_name)
elif task in ['classification', 'regression']:
label_name = model_config.get('label_name', 'labels')
return normal_sample_reader(filenames, batch_size, label_name)
else:
        raise NameError('Task %s is unsupported.' % task)
|
2033e081addf26a8f9074591b2f8992f39ed86c1
| 3,642,039
|
def execute_batch(table_type, bulk, count, topic_id, topic_name):
"""
    Execute bulk operation. Return False if the operation completed successfully,
    True if any errors occurred.
"""
errors = False
try:
result = bulk.execute()
if result['nModified'] != count:
print(
"bulk execute of {} data for {}:{}.\nnumber of op sent to "
"bulk execute ({}) does not match nModified count".format(
table_type, topic_id, topic_name, count))
print ("bulk execute result {}".format(result))
errors = True
except BulkWriteError as ex:
print(str(ex.details))
errors = True
return errors
|
954de6b5bfefcea7a7bfdebdc7cb7b1b1ba1dd95
| 3,642,040
|
def process_domain_assoc(url, domain_map):
"""
Replace domain name with a more fitting tag for that domain.
User defined. Mapping comes from provided config file
Mapping in yml file is as follows:
tag:
- url to map to tag
- ...
A small example domain_assoc.yml is included
"""
if not domain_map:
return url
for key in domain_map:
if url in domain_map[key]:
return key
return url
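# Example: with domain_map == {'news': ['bbc.com', 'nytimes.com']},
# process_domain_assoc('bbc.com', domain_map) returns 'news'; any unmapped URL
# is returned unchanged.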
|
29c0f81a4959d97cd91f839cbe511eb46872b5ec
| 3,642,041
|
def process_auc(gt_list, pred_list):
"""
Process AUC (AUROC) over lists.
:param gt_list: Ground truth list
:type gt_list: np.array
:param pred_list: Predictions list
:type pred_list: np.array
    :return: Mean F1 score over the lists (the AUROC computation is currently commented out)
:rtype: float
"""
res = []
for i, gt in enumerate(gt_list):
if np.amax(gt) != 0:
pred = pred_list[i].flatten()
gt = gt.flatten()
# res.append(roc_auc_score(gt, pred))
res.append(f1_score(gt, pred))
return np.mean(res)
|
2f1de3ba0d5f1154ef4a5888d01d398fd8533793
| 3,642,042
|
import numpy as np
def transform(
Y,
transform_type=None,
dtype=np.float32):
""" Transform STFT feature
Args:
Y: STFT
(n_frames, n_bins)-shaped np.complex array
transform_type:
None, "log"
dtype: output data type
np.float32 is expected
Returns:
Y (numpy.array): transformed feature
"""
Y = np.abs(Y)
if transform_type == 'log':
Y = np.log(np.maximum(Y, 1e-06))
return Y.astype(dtype)
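# Hedged usage sketch (assumes `Y` is a complex (n_frames, n_bins) STFT array
# produced elsewhere):
#
#   feat = transform(Y, transform_type='log')  # log-magnitude spectrogram, float32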
|
96613eb77c20c1a09a3e41af176a36d2ce4d8080
| 3,642,043
|
import pandas as pd
def tol_vif_table(df, n=5):
"""
:param df: dataframe
:param n: number of pairs to show
:return: table of correlations, tolerances, and VIF
"""
cor = get_top_abs_correlations(df, n)
tol = 1 - cor ** 2
vif = 1 / tol
cor_table = pd.concat([cor, tol, vif], axis=1)
cor_table.columns = ['Correlation', 'Tolerance', 'VIF']
return cor_table
|
94cf5715951892375e92ada019476b9ae3d09577
| 3,642,044
|
import random
def shuffled(iterable):
"""Randomly shuffle a copy of iterable."""
items = list(iterable)
random.shuffle(items)
return items
|
cd554d4a31e042dc1d2b4c7b246528a5184d558e
| 3,642,045
|
from ray.autoscaler._private.constants import RAY_PROCESSES
from typing import Optional
from typing import List
from typing import Tuple
import psutil
import subprocess
import tempfile
import yaml
def get_local_ray_processes(archive: Archive,
processes: Optional[List[Tuple[str, bool]]] = None,
verbose: bool = False):
"""Get the status of all the relevant ray processes.
Args:
archive (Archive): Archive object to add process info files to.
processes (list): List of processes to get information on. The first
element of the tuple is a string to filter by, and the second
element is a boolean indicating if we should filter by command
name (True) or command line including parameters (False)
verbose (bool): If True, show entire executable command line.
If False, show just the first term.
Returns:
Open archive object.
"""
if not processes:
# local import to avoid circular dependencies
processes = RAY_PROCESSES
process_infos = []
for process in psutil.process_iter(["pid", "name", "cmdline", "status"]):
try:
with process.oneshot():
cmdline = " ".join(process.cmdline())
process_infos.append(({
"executable": cmdline
if verbose else cmdline.split("--", 1)[0][:-1],
"name": process.name(),
"pid": process.pid,
"status": process.status(),
}, process.cmdline()))
except Exception as exc:
raise LocalCommandFailed(exc) from exc
relevant_processes = {}
for process_dict, cmdline in process_infos:
for keyword, filter_by_cmd in processes:
if filter_by_cmd:
corpus = process_dict["name"]
else:
corpus = subprocess.list2cmdline(cmdline)
if keyword in corpus and process_dict["pid"] \
not in relevant_processes:
relevant_processes[process_dict["pid"]] = process_dict
with tempfile.NamedTemporaryFile("wt") as fp:
for line in relevant_processes.values():
fp.writelines([yaml.dump(line), "\n"])
fp.flush()
with archive.subdir("meta") as sd:
sd.add(fp.name, "process_info.txt")
return archive
|
7ed76d4e93198ce0afef0fbf5cf5b37a7b92e0ed
| 3,642,046
|
def test_rule(rule_d, ipv6=False):
""" Return True if the rule is a well-formed dictionary, False otherwise """
try:
_encode_iptc_rule(rule_d, ipv6=ipv6)
return True
    except Exception:
return False
|
7435cb900117e4c273b4157b2f25ba56aefd1355
| 3,642,047
|
def intersects(hp, sphere):
"""
The closed, upper halfspace intersects the sphere
(i.e. there exists a spatial relation between the two)
"""
return signed_distance(sphere.center, hp) + sphere.radius >= 0.0
|
9366824f03a269d0fa9e96f34260c621ad610d16
| 3,642,048
|
def invert_center_scale(X_cs, X_center, X_scale):
"""
This function inverts whatever centering and scaling was done by
``center_scale`` function:
.. math::
\mathbf{X} = \mathbf{X_{cs}} \\cdot \mathbf{D} + \mathbf{C}
**Example:**
.. code:: python
from PCAfold import center_scale, invert_center_scale
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Center and scale:
(X_cs, X_center, X_scale) = center_scale(X, 'range', nocenter=False)
# Uncenter and unscale:
X = invert_center_scale(X_cs, X_center, X_scale)
:param X_cs:
centered and scaled data set :math:`\mathbf{X_{cs}}`.
:param X_center:
vector of centers :math:`\mathbf{C}` applied on the original data set :math:`\mathbf{X}`.
:param X_scale:
vector of scales :math:`\mathbf{D}` applied on the original data set :math:`\mathbf{X}`.
:return:
- **X** - original data set :math:`\mathbf{X}`.
"""
try:
(_, n_variables) = np.shape(X_cs)
    except Exception:
n_variables = 1
if n_variables == 1:
X = X_cs * X_scale + X_center
else:
X = np.zeros_like(X_cs, dtype=float)
for i in range(0, n_variables):
X[:, i] = X_cs[:, i] * X_scale[i] + X_center[i]
return(X)
|
e37d82e7da932ea760981bb5a472799cd5a4d3d9
| 3,642,049
|
def weighted_smoothing(image, diffusion_weight=1e-4, data_weight=1.0,
weight_function_parameters={}):
"""Weighted smoothing of images: smooth regions, preserve sharp edges.
Parameters
----------
image : NumPy array
diffusion_weight : float or NumPy array, optional
The weight of the diffusion for smoothing. It can be provided
as an array the same shape as the image, or as a scalar that
will multiply the default weight matrix obtained from the edge
indicator function.
data_weight : float or NumPy array, optional
The weight of the image data to preserve fidelity to the image.
It can be provided as an array the same shape as the image, or
as a scalar that will multiply the default weight matrix obtained
by subtracting the edge indicator function from 1.0.
weight_function_parameters : dict, optional
The parameters sigma and rho for the edge indicator function.
Sigma is the standard deviation for the gaussian gradient
applied to the image: dI = N * gaussian_gradient_magnitude(image),
where N = min(image.shape) - 1, and rho is the scaling weight in
the definition of the edge indicator function: 1/(1 + (dI/rho)**2)
The default values are: {'sigma': 3.0, 'rho': None}.
If rho is None, it is calculated by rho = 0.23 * dI.max().
Returns
-------
smoothed_image : NumPy array
Raises
------
ValueError
If diffusion_weight or data_weight have the wrong type.
"""
if type( diffusion_weight ) is np.ndarray:
beta = diffusion_weight
elif not np.isscalar( diffusion_weight ):
raise ValueError("data_weight can only be None or a scalar number "\
"or a NumPy array the same shape as the image.")
else:
sigma = weight_function_parameters.get('sigma', 3.0)
rho = weight_function_parameters.get('rho', None)
if rho is None:
N = min(image.shape) - 1.0
dI = N * gaussian_gradient_magnitude( image, sigma, mode='nearest' )
rho = 0.23 * dI.max()
g = EdgeIndicatorFunction( image, rho, sigma )
G = g._g
beta = (G - G.min()) / (G.max() - G.min())
beta *= diffusion_weight
if type(data_weight) is np.ndarray:
alpha = data_weight
elif np.isscalar(data_weight):
alpha = data_weight * (beta.max() - beta) / (beta.max() - beta.min())
else:
raise ValueError("data_weight can only be None or a scalar number "\
"or a NumPy array the same shape as the image.")
rhs = alpha * image
grid = Grid2d( image.shape )
smooth_image = fem.solve_elliptic_pde( grid, alpha, beta, rhs )
return smooth_image
|
3c861b9a8878f2d85d581d8f8d1f62cf017a7920
| 3,642,050
|
import random
from copy import deepcopy
def get_random_tablature(tablature : Tablature, constants : Constants):
"""make a copy of the tablature under inspection and generate new random tablatures"""
new_tab = deepcopy(tablature)
for tab_instance, new_tab_instance in zip(tablature.tablature, new_tab.tablature):
if tab_instance.string == 6:
string, fret = random.choice(determine_combinations(tab_instance.fundamental, constants))
new_tab_instance.string, new_tab_instance.fret = string, fret
elif constants.init_mutation_rate > random.random():
string, fret = get_random_position(tab_instance.string, tab_instance.fret, constants)
new_tab_instance.string, new_tab_instance.fret = string, fret
return new_tab
|
befa3a488e2ca53e37032ed102a12b250594bd90
| 3,642,051
|
def _staticfy(value):
"""
    Keeps backward compatibility with OpenWISP instances that
    were using the previous implementation of OPENWISP_ADMIN_THEME_LINKS
    and OPENWISP_ADMIN_THEME_JS, which didn't automatically pre-process
    those lists of static files with django.templatetags.static.static()
    and hence were not configured to allow those files to be found
    by the staticfile loaders. If static() raises ValueError, we assume
    one of two cases:
    1. An old instance has upgraded and we keep returning the old value,
       so the file will continue being found, although unprocessed by
       django's static file machinery.
    2. The value passed is wrong; instead of failing loudly, we fail silently.
"""
try:
return static(value)
# maintain backward compatibility
except ValueError:
return value
|
2ac932a178a86d301dbb15602f3b59edb39cf3c1
| 3,642,052
|
def compare_rep(topic, replication_factor):
# type: (str, int) -> bool
"""Compare replication-factor in the playbook with the one actually set.
Keyword arguments:
topic -- topicname
replication_factor -- number of replications
Return:
bool -- True if change is needed, else False
"""
try:
metadata = admin.list_topics() # type(metadata.topics) = dict
except KafkaException as e:
msg = (
"Can not get metadata of topic %s: %s"
% (topic, e)
)
fail_module(msg)
    old_rep = len(metadata.topics[topic].partitions[0].replicas)  # partitions is a dict keyed by partition id; each entry exposes .replicas
if replication_factor != old_rep:
if module.params['zookeeper'] is None:
msg = (
"For modifying the replication_factor of a topic,"
" you also need to set the zookeeper-parameter."
" At the moment, replication_factor is set to %s"
" and you tried to set it to %s."
% (old_rep, replication_factor)
)
fail_module(msg)
diff['before']['replication_factor'] = old_rep
diff['after']['replication_factor'] = replication_factor
return True
# if replication_factor == old_rep:
return False
|
882b028d078e507f9673e3ead6549da100a83226
| 3,642,053
|
def verbosity_option_parser() -> ArgumentParser:
"""
Creates a parser suitable to parse the verbosity option in different subparsers
"""
parser = ArgumentParser(add_help=False)
parser.add_argument('--verbosity', dest=VERBOSITY_ARGNAME, type=str.upper,
choices=ALLOWED_VERBOSITY,
help='verbosity level to use for this command and subsequent ones.')
return parser
|
c24f0704f1632cc0af416cf2c0a3e65c6845566f
| 3,642,054
|
import snappi
def b2b_config(api):
"""Demonstrates creating a back to back configuration of tx and rx
ports, devices and a single flow using those ports as endpoints for
transmit and receive.
"""
    config = api.config()
config.options.port_options.location_preemption = True
tx_port, rx_port = config.ports \
.port(name='Tx Port', location='10.36.74.26;02;13') \
.port(name='Rx Port', location='10.36.74.26;02;14')
tx_device, rx_device = (config.devices \
.device(name='Tx Devices')
.device(name='Rx Devices')
)
tx_device.ethernets.ethernet(port_name=tx_port.name)
rx_device.ethernets.ethernet(port_name=rx_port.name)
tx_device.ethernets[-1].name = 'Tx Eth'
tx_device.ethernets[-1].mac = '00:00:01:00:00:01'
tx_device.ethernets[-1].ipv4_addresses.ipv4()
tx_device.ethernets[-1].ipv4_addresses[-1].name = 'Tx Ipv4'
tx_device.ethernets[-1].ipv4_addresses[-1].address = '1.1.1.1'
tx_device.ethernets[-1].ipv4_addresses[-1].gateway = '1.1.2.1'
tx_device.ethernets[-1].ipv4_addresses[-1].prefix = 16
vlan1, vlan2 = tx_device.ethernets[-1].vlans.vlan(name='v1').vlan(name='v2')
vlan1.id = 1
vlan2.id = 2
rx_device.ethernets[-1].name = 'Rx Eth'
rx_device.ethernets[-1].mac = '00:00:01:00:00:02'
flow = config.flows.flow(name='Tx -> Rx Flow')[0]
flow.tx_rx.port.tx_name = tx_port.name
flow.tx_rx.port.rx_name = rx_port.name
flow.size.fixed = 128
flow.rate.pps = 1000
flow.duration.fixed_packets.packets = 10000
eth, vlan, ip, tcp = flow.packet.ethernet().vlan().ipv4().tcp()
eth.src.value = '00:00:01:00:00:01'
eth.dst.values = ['00:00:02:00:00:01', '00:00:02:00:00:01']
eth.dst.metric_group = 'eth dst mac'
ip.src.increment.start = '1.1.1.1'
ip.src.increment.step = '0.0.0.1'
ip.src.increment.count = 10
ip.dst.decrement.start = '1.1.2.200'
ip.dst.decrement.step = '0.0.0.1'
ip.dst.decrement.count = 10
ip.priority.dscp.phb.values = [8, 16, 32]
ip.priority.dscp.ecn.value = 1
tcp.src_port.increment.start = 10
tcp.dst_port.increment.start = 1
return config
|
60a838885c058f5c65d3b331082f525b2e04b5c7
| 3,642,055
|
import os
def apply_patch(ffrom, fpatch, fto):
"""Apply given normal patch `fpatch` to `ffrom` to create
`fto`. Returns the size of the created to-data.
All arguments are file-like objects.
>>> ffrom = open('foo.mem', 'rb')
>>> fpatch = open('foo.patch', 'rb')
>>> fto = open('foo.new', 'wb')
>>> apply_patch(ffrom, fpatch, fto)
2780
"""
compression, to_size = read_header_normal(fpatch)
if to_size == 0:
return to_size
patch_reader = PatchReader(fpatch, compression)
dfdiff, ffrom = create_data_format_readers(patch_reader, ffrom, to_size)
to_pos = 0
while to_pos < to_size:
# Diff data.
for chunk_size, patch_data in iter_diff_chunks(patch_reader,
to_pos,
to_size):
from_data = ffrom.read(chunk_size)
if dfdiff is not None:
dfdiff_data = dfdiff.read(chunk_size)
data = bytearray(
(pb + fb + db) & 0xff for pb, fb, db in zip(patch_data,
from_data,
dfdiff_data)
)
else:
data = bytearray(
(pb + fb) & 0xff for pb, fb in zip(patch_data, from_data)
)
fto.write(data)
to_pos += chunk_size
# Extra data.
for chunk_size, patch_data in iter_extra_chunks(patch_reader,
to_pos,
to_size):
if dfdiff is not None:
dfdiff_data = dfdiff.read(chunk_size)
data = bytearray(
(dd + db) & 0xff for dd, db in zip(patch_data, dfdiff_data)
)
else:
data = patch_data
fto.write(data)
to_pos += chunk_size
# Adjustment.
size = unpack_size(patch_reader)
ffrom.seek(size, os.SEEK_CUR)
if not patch_reader.eof:
raise Error('End of patch not found.')
return to_size
|
b847f7641bdd34c935d2f88fd8ca114abb0b4033
| 3,642,056
|
def fib(n):
"""Return the n'th Fibonacci number."""
if n < 0:
raise ValueError("Fibonacci number are only defined for n >= 0")
return _fib(n)
|
4aee5fbb4c9a497ffc4f63529b226ad3b08c0ef4
| 3,642,057
|
def gen(n):
"""
Compute the n-th generator polynomial.
That is, compute (x + 2 ** 1) * (x + 2 ** 2) * ... * (x + 2 ** n).
"""
p = Poly([GF(1)])
two = GF(1)
for i in range(1, n + 1):
two *= GF(2)
p *= Poly([two, GF(1)])
return p
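# Worked example (assuming GF implements GF(2^8) arithmetic where addition is
# XOR, and Poly stores coefficients constant-term first):
#
#   gen(2) == (x + 2) * (x + 4) == x**2 + 6*x + 8, i.e. Poly([GF(8), GF(6), GF(1)])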
|
29e6b1f164d93b21a2d98352ff60c8cf7a8d5864
| 3,642,058
|
def get_prefix(bot, message):
"""A callable Prefix for our bot. This could be edited to allow per server prefixes."""
# Notice how you can use spaces in prefixes. Try to keep them simple though.
prefixes = ['!']
# If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list.
return commands.when_mentioned_or(*prefixes)(bot, message)
|
35567d49b747f51961fad861e00d9f9524126641
| 3,642,059
|
def parse_discontinuous_phrase(phrase: str) -> str:
"""
Transform discontinuous phrase into a regular expression. Discontinuity is
interpreted as taking place at any whitespace outside of terms grouped by
parentheses. That is, the whitespace indicates that anything can be in between
the left side and right side.
    Example: `x1 (x2 x3)` becomes `x1.+(x2 x3)`; only whitespace at
    parenthesis level 0 is replaced by `.+`.
"""
level = 0
parsed_phrase = ""
for index, char in enumerate(phrase):
if char == "(":
level += 1
elif char == ")":
level -= 1
elif char == " " and level == 0:
char = ".+"
parsed_phrase += char
return parsed_phrase
|
58fe394a08931e7e79afc00b9bb0e8e9981f3c81
| 3,642,060
|
def preprocess(frame):
"""
Preprocess the images before they are sent into the model
"""
    # convert the frame to float32
    bgr_img = frame.astype(np.float32)
    # the image is laid out HWC; keep the original height and width
    orig_shape = bgr_img.shape[:2]
#Normalize the picture
bgr_img = bgr_img / 255.0
#Convert the picture to Lab space
lab_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2Lab)
#Gets the L
orig_l = lab_img[:, :, 0]
if not orig_l.flags['C_CONTIGUOUS']:
orig_l = np.ascontiguousarray(orig_l)
#resize
lab_img = cv.resize(lab_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.float32)
l_data = lab_img[:, :, 0]
if not l_data.flags['C_CONTIGUOUS']:
l_data = np.ascontiguousarray(l_data)
#The L-part minus the average
l_data = l_data - 50
return orig_shape, orig_l, l_data
|
33a24a31ae9e25efb080037a807897f2762656c0
| 3,642,061
|
import matplotlib.pyplot as plt
from sklearn.metrics import auc, roc_curve
def draw_roc_curve(y_true, y_score, annot=True, name=None, ax=None):
"""Draws a ROC (Receiver Operating Characteristic) curve using class rankings predicted by a classifier.
Args:
y_true (array-like): True class labels (0: negative; 1: positive)
y_score (array-like): Predicted probability of positive-class membership
annot (bool, optional): Whether to create and add a label to the curve with the computed AUC
name (str, optional): Name of the curve to add to the AUC label
ax (Matplotlib.Axes, optional): The axes on which to draw the ROC curve
Returns:
ax (Matplotlib.Axes): The axes containing the ROC curve
"""
fpr, tpr, _ = roc_curve(y_true, y_score)
if ax is None:
ax = plt.gca()
# Add a label displaying the computed area under the curve
if annot:
roc_auc = auc(fpr, tpr)
if name is not None:
label = f'{name} AUC = {roc_auc:.3f}'
else:
label = f'AUC = {roc_auc:.3f}'
else:
label=None
ax.plot(fpr, tpr, label=label)
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.legend(loc='best')
return ax
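# Example usage (a sketch with toy inputs chosen here for illustration):
# ax = draw_roc_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8], name="demo model")
# plt.show()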
|
cf59a02c5f72f728b0d9179a4eee3f012da564df
| 3,642,062
|
from urllib.parse import urljoin
def make_links_absolute(soup, base_url):
"""
Replace relative links with absolute links.
This one modifies the soup object.
"""
assert base_url is not None
#
for tag in soup.findAll('a', href=True):
tag['href'] = urljoin(base_url, tag['href'])
return soup
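# Example usage (a sketch; it assumes a BeautifulSoup soup object, which provides the
# findAll interface used above):
# from bs4 import BeautifulSoup
# soup = BeautifulSoup('<a href="/docs">docs</a>', 'html.parser')
# make_links_absolute(soup, 'https://example.com/')
# # soup now renders as <a href="https://example.com/docs">docs</a>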
|
52d328c944d4a80b4f0a027a3b72f2fcebc152a9
| 3,642,063
|
def get_predictions(model, dataloader):
"""takes a trained model and validation or test dataloader
and applies the model on the data producing predictions
binary version
"""
model.eval()
all_y_hats = []
all_preds = []
all_true = []
all_attention = []
for batch_id, (data, label) in enumerate(dataloader):
label = label.squeeze()
bag_label = label[0]
bag_label = bag_label.cpu()
y_hat, preds, attention = model(data.to("cuda:0"))
y_hat = y_hat.squeeze(dim=0) # for binary setting
y_hat = y_hat.cpu()
preds = preds.squeeze(dim=0) # for binary setting
preds = preds.cpu()
all_y_hats.append(y_hat.numpy().item())
all_preds.append(preds.numpy().item())
all_true.append(bag_label.numpy().item())
attention_scores = np.round(attention.cpu().data.numpy()[0], decimals=3)
all_attention.append(attention_scores)
print("Bag Label:" + str(bag_label))
print("Predicted Label:" + str(preds.numpy().item()))
print("attention scores (unique ones):")
print(np.unique(attention_scores))
# print(attention_scores)
del data, bag_label, label
return all_y_hats, all_preds, all_true
|
f58a5863c211a24665db7348606d39232ad8af19
| 3,642,064
|
def _getPymelType(arg, name) :
""" Get the correct Pymel Type for an object that can be a MObject, PyNode or name of an existing Maya object,
if no correct type is found returns DependNode by default.
If the name of an existing object is passed, the name and MObject will be returned
If a valid MObject is passed, the name will be returned as None
If a PyNode instance is passed, its name and MObject will be returned
"""
obj = None
results = {}
isAttribute = False
#--------------------------
# API object testing
#--------------------------
if isinstance(arg, _api.MObject) :
results['MObjectHandle'] = _api.MObjectHandle( arg )
obj = arg
elif isinstance(arg, _api.MObjectHandle) :
results['MObjectHandle'] = arg
obj = arg.object()
elif isinstance(arg, _api.MDagPath) :
results['MDagPath'] = arg
obj = arg.node()
elif isinstance(arg, _api.MPlug) :
isAttribute = True
obj = arg
results['MPlug'] = obj
if _api.isValidMPlug(arg):
pymelType = Attribute
else :
            raise MayaAttributeError("Unable to determine Pymel type: the passed MPlug is not valid")
# #---------------------------------
# # No Api Object : Virtual PyNode
# #---------------------------------
# elif objName :
# # non existing node
# pymelType = DependNode
# if '.' in objName :
# # TODO : some better checking / parsing
# pymelType = Attribute
else :
raise ValueError( "Unable to determine Pymel type for %r" % (arg,) )
if not isAttribute:
pymelType = _getPymelTypeFromObject( obj, name )
return pymelType, results
|
b303bebfc5c97ac6a969c151cb78de7f28523476
| 3,642,065
|
def _parse_objective(objective):
"""
Modified from deephyper/nas/run/util.py function compute_objective
"""
if isinstance(objective, str):
negate = (objective[0] == '-')
if negate:
objective = objective[1:]
split_objective = objective.split('__')
kind = split_objective[1] if len(split_objective) > 1 else 'last'
mname = split_objective[0]
# kind: min/max/last
if negate:
if kind == 'min':
kind = 'max'
elif kind == 'max':
kind = 'min'
return mname, kind
elif callable(objective):
logger.warn('objective is a callable, not a str, setting kind="last"')
return None, 'last'
else:
raise TypeError(f'unknown objective type {type(objective)}')
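# Quick checks of the string form (inputs chosen here for illustration): a leading '-'
# negates the objective, and an optional '__min'/'__max'/'__last' suffix selects the kind.
assert _parse_objective('val_acc__max') == ('val_acc', 'max')
assert _parse_objective('-val_loss__min') == ('val_loss', 'max')
assert _parse_objective('val_loss') == ('val_loss', 'last')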
|
df8f23464cd04be9a3c61a2969200a0c98c4471e
| 3,642,066
|
def redirect_path_context_processor(request):
"""Procesador para generar el redirect_to para la localización en el selector de idiomas"""
return {'language_select_redirect_to': translate_url(request.path, settings.LANGUAGE_CODE)}
|
fdb62f3079079d63c280d2b887468c7893aafcf8
| 3,642,067
|
def RightCenter(cell=None):
"""Take up horizontal and vertical space, and place the cell on the right center of it."""
return FillSpace(cell, "right", "center")
|
ce64a346658813ab281168864e357cec1ba09c0b
| 3,642,068
|
def name_standard(name):
""" return the Standard version of the input word
:param name: the name that should be standard
:return name: the standard form of word
"""
reponse_name = name[0].upper() + name[1:].lower()
return reponse_name
|
65273cafaaa9aceb803877c2071dc043a0d598eb
| 3,642,069
|
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
"""
This method takes a parent element as input and finds all the sub elements (children)
containing specified tag and an attribute with the specified value.
Returns a list of child elements.
Arguments:
parent = parent element
tag = tag value of the sub-element(child) to be searched for.
attrib = attribute name for the sub-element with above given tag should have.
value = attribute value that the sub-element with above given tag, attribute should have.
"""
child_elements = parent.findall(".//%s[@%s='%s']" % (tag, attrib, value))
return child_elements
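# Example usage (a sketch using xml.etree.ElementTree, assumed here because it provides
# the findall() interface the helper relies on):
# import xml.etree.ElementTree as ET
# suite = ET.fromstring('<suite><case name="a"/><case name="b"/><step name="a"/></suite>')
# matches = getChildElementsListWithTagAttribValueMatch(suite, 'case', 'name', 'a')
# # matches holds the single <case name="a"/> element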
|
cae87e6548190ad0a675019b397eeb88289533ee
| 3,642,070
|
def f_engine (air_volume, energy_MJ):
"""Прямоточный воздушно реактивный двигатель.
Набегающий поток воздуха попадает в нагреватель, где расширяется,
А затем выбрасывается из сопла реактивной струёй.
"""
# Рабочее вещество, это атмосферный воздух:
working_mass = air_volume * AIR_DENSITY
# Делим энергию на полезную и бесполезную:
useful_energy_MJ = energy_MJ * ENGINE_USEFUL_ENERGY
useless_energy_MJ = energy_MJ - useful_energy_MJ
useful_energy_KJ = useful_energy_MJ * 1000
# Полезную энергию пускаем на разогрев воздуха:
working_mass_heat = AIR_HEAT_CAPACITY * useful_energy_KJ / working_mass
# Делаем поправку на температуру воздуха и переводим градусы в шкалу Кельвина:
working_mass_heat = AIR_TEMPERATURE + working_mass_heat + KELVIN_SCALE
# Давление разогретого воздуха увеличивается:
working_mass_pressure = f_heated_gas_pressure(working_mass_heat)
# Воздух подаётся на сопло Лаваля, так мы получаем скорость реактивной струи:
reactive_speed = f_de_laval_nozzle(working_mass_heat,
working_mass_pressure, ATMOSPHERIC_PRESSURE)
# Максимальная тяга, это масса рабочего вещества умноженная на его скорость:
max_engine_thrust = f_jet_force(working_mass, reactive_speed)
# Бесполезную тепловую энергию тоже требуется куда-то отводить:
engine_output = (max_engine_thrust, working_mass, reactive_speed, useless_energy_MJ)
return engine_output
|
3d307622fca17f16f03e49bd7f8b5b742217ee37
| 3,642,071
|
def not_numbers():
"""Non-numbers for (i)count."""
return [None, [1, 2], {-3, 4}, (6, 9.7)]
|
31f935916c8463f6192d0b2770c1034ee70a4fc5
| 3,642,072
|
import requests
def get_agol_token():
"""requests and returns an ArcGIS Token for the pre-registered application.
Client id and secrets are managed through the ArcGIS Developer's console.
"""
params = {
'client_id': app.config['ESRI_APP_CLIENT_ID'],
'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],
'grant_type': "client_credentials"
}
request = requests.get(
'https://www.arcgis.com/sharing/oauth2/token',
params=params
)
token = request.json()
print("AGOL token acquired: {0}".format(token))
return token
|
7b240ef57264c1a88f10f4c06c9492a71dac8c11
| 3,642,073
|
def default_validate(social_account):
"""
    Default function for ONESOCIAL_VALIDATE_FUNC. Does nothing.
"""
return None
|
634382dbfe64eeed38225f8dca7e16105c40f7c2
| 3,642,074
|
import pandas as pd
import requests
def pull_early_late_by_stop(line_number,SWIFTLY_API_KEY, dateRange, timeRange):
"""
Pulls from the Swiftly APIS to get OTP.
Follow the docs: http://dashboard.goswift.ly/vta/api-guide/docs/otp
"""
line_table = pd.read_csv('line_table.csv')
line_table.rename(columns={"DirNum":"direction_id","DirectionName":"DIRECTION_NAME"},inplace=True)
line_table['direction_id'] = line_table['direction_id'].astype(str)
headers = {'Authorization': SWIFTLY_API_KEY}
payload = {'agency': 'vta', 'route': line_number, 'dateRange': dateRange,'timeRange': timeRange, 'onlyScheduleAdherenceStops':'True'}
url = 'https://api.goswift.ly/otp/by-stop'
r = requests.get(url, headers=headers, params=payload)
try:
swiftly_df = pd.DataFrame(r.json()['data'])
swiftly_df.rename(columns={"stop_id":"STOP_ID"},inplace=True)
swiftly_df = pd.merge(swiftly_df,line_table.query('lineabbr==%s'%line_number)[['direction_id','DIRECTION_NAME']])
swiftly_df['STOP_ID'] = swiftly_df['STOP_ID'].astype(int)
return swiftly_df
except KeyError:
print(r.json())
|
a64ad2e5fe84ee5ab5a49c8122f28b693382cf8e
| 3,642,075
|
def create_build_job(user, project, config, code_reference):
"""Get or Create a build job based on the params.
If a build job already exists, then we check if the build has already an image created.
If the image does not exists, and the job is already done we force create a new job.
Returns:
tuple: (build_job, image_exists[bool], build_status[bool])
"""
build_job, rebuild = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference)
if build_job.succeeded and not rebuild:
        # Check if the image was built less than 6 hours ago
return build_job, True, False
if check_image(build_job=build_job):
# Check if image exists already
return build_job, True, False
if build_job.is_done:
build_job, _ = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference,
nocache=True)
if not build_job.is_running:
# We need to build the image first
auditor.record(event_type=BUILD_JOB_STARTED_TRIGGERED,
instance=build_job,
actor_id=user.id,
actor_name=user.username)
build_status = start_dockerizer(build_job=build_job)
else:
build_status = True
return build_job, False, build_status
|
879ed02f142326b4a793bef2d8bcbc1de4faf64c
| 3,642,076
|
import json
def create(ranger_client: RangerClient, config: str):
"""
Creates a new Apache Ranger service repository.
"""
return ranger_client.create_service(json.loads(config))
|
a53fa80f94960f89410a60aae04a04417491f332
| 3,642,077
|
def populate_glue_catalogue_from_metadata(table_metadata, db_metadata, check_existence = True):
"""
Take metadata and make requisite calls to AWS API using boto3
"""
database_name = db_metadata["name"]
database_description = ["description"]
table_name = table_metadata["table_name"]
tbl_def = metadata_to_glue_table_definition(table_metadata, db_metadata)
if check_existence:
try:
glue_client.get_database(Name=database_name)
except glue_client.exceptions.EntityNotFoundException:
            overwrite_or_create_database(database_name, database_description)
try:
glue_client.delete_table(DatabaseName=database_name, Name=table_name)
except glue_client.exceptions.EntityNotFoundException:
pass
return glue_client.create_table(
DatabaseName=database_name,
TableInput=tbl_def)
|
c09af0344b523213010af8fdbcc0ed35328f165e
| 3,642,078
|
def choose_komoot_tour_live():
"""
Login with user credentials, download tour information,
choose a tour, and download it. Can be passed to
:func:`komoog.gpx.convert_tour_to_gpx_tracks`
afterwards.
"""
tours, session = get_tours_and_session()
for idx in range(len(tours)):
print(f"({idx+1}) {tours[idx]['name']}")
tour_id = int(input("Tour ID: "))
tour_id -= 1
tour = get_tour(tours,tour_id,session)
return tour
|
3be625643d6861c9aaff910506dce53e3f336e40
| 3,642,079
|
def root():
"""
The root stac page links to each collection (product) catalog
"""
return _stac_response(
dict(
**stac_endpoint_information(),
links=[
dict(
title="Collections",
description="All product collections",
rel="children",
type="application/json",
href=url_for(".collections"),
),
dict(
title="Arrivals",
description="Most recently added items",
rel="child",
type="application/json",
href=url_for(".arrivals"),
),
dict(
title="Item Search",
rel="search",
type="application/json",
href=url_for(".stac_search"),
),
dict(rel="self", href=request.url),
# Individual Product Collections
*(
dict(
title=product.name,
description=product.definition.get("description"),
rel="child",
href=url_for(".collection", collection=product.name),
)
for product, product_summary in _model.get_products_with_summaries()
),
],
conformsTo=[
"https://api.stacspec.org/v1.0.0-beta.1/core",
"https://api.stacspec.org/v1.0.0-beta.1/item-search",
],
)
)
|
0904122654a1ce71264489590a2a1813dad31689
| 3,642,080
|
def recursive_dict_of_lists(d, helper=None, prev_key=None):
"""
Builds dictionary of lists by recursively traversing a JSON-like
structure.
Arguments:
d (dict): JSON-like dictionary.
prev_key (str): Prefix used to create dictionary keys like: prefix_key.
Passed by recursive step, not intended to be used.
helper (dict): In case d contains nested dictionaries, you can specify
a helper dictionary with 'key' and 'value' keys to specify where to
look for keys and values instead of recursive step. It helps with
cases like: {'action': {'type': 'step', 'amount': 1}}, by passing
{'key': 'type', 'value': 'amount'} as a helper you'd get
{'action_step': [1]} as a result.
"""
d_o_l = {}
if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
if prev_key is not None:
key = f"{prev_key}_{helper['key']}"
else:
key = helper['key']
if key not in d_o_l.keys():
d_o_l[key] = []
d_o_l[key].append(d[helper['value']])
return d_o_l
for k, v in d.items():
if isinstance(v, dict):
d_o_l.update(recursive_dict_of_lists(v, helper=helper, prev_key=k))
else:
if prev_key is not None:
key = f'{prev_key}_{k}'
else:
key = k
if key not in d_o_l.keys():
d_o_l[key] = []
if isinstance(v, list):
d_o_l[key].extend(v)
else:
d_o_l[key].append(v)
return d_o_l
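# Illustrative call (inputs chosen here): nested keys are flattened with an underscore
# and every leaf value ends up inside a list.
_expected = {'a': [1], 'b_c': [2], 'b_d': [3, 4]}
assert recursive_dict_of_lists({'a': 1, 'b': {'c': 2, 'd': [3, 4]}}) == _expected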
|
c615582febbd043adae6788585d004aabf1ac7e3
| 3,642,081
|
def same_shape(shape1, shape2):
"""
Checks if two shapes are the same
Parameters
----------
shape1 : tuple
First shape
shape2 : tuple
Second shape
Returns
-------
flag : bool
True if both shapes are the same (same length and dimensions)
"""
if len(shape1) != len(shape2):
return False
for i in range(len(shape1)):
if shape1[i] != shape2[i]:
return False
return True
|
9452f7973e510532cee587f2bf49a146fb8cc46e
| 3,642,082
|
def get_reference():
"""Get DrugBank references."""
return _get_model(drugbank.Reference)
|
b4d26c24559883253f3894a1c56151e1809e4050
| 3,642,083
|
import collections.abc
def __parse_search_results(collected_results, raise_exception_finally):
"""
Parses locally the results collected from the __mapped_pattern_matching:
- list of ( scores, matched_intervals, unprocessed_interval,
rank_superwindow, <meta info>, <error_info>)
This parsing
- evaluates the errors: logs and raise IkatsException if raise_exception_finally is True
- evaluates, encodes the unprocessed intervals
:param collected_results: the results collected from the __mapped_pattern_matching
:type list
:param raise_exception_finally: flag is True when it is demanded to raise
IkatsException when errors exist in the collected fields <error_info>
:type raise_exception_finally: bool
:return: unprocessed_info: dict stocking unprocessed intervals [sd, ed] by superwindow rank
:rtype: dict
"""
# On the driver/manager
# - driver: local computing of the re-evaluated intervals:
# => set unprocessed_info
# => logs errors
unprocessed_info = {}
error_exists = False
for status_cell in collected_results:
# logs error (when needed)
error = status_cell[-1]
        cell_has_errors = False
        if isinstance(error, collections.abc.Iterable) and len(error) > 0:
            cell_has_errors = True
            for error_item in error:
                LOGGER.error(error_item)
        error_exists = error_exists or cell_has_errors
# unprocessed_info[ <rank_superwindow> ] = <unprocessed_interval>
unprocessed_info[status_cell[3]] = status_cell[2]
LOGGER.debug("Unprocessed_info[rank=%s]=%s", status_cell[3], status_cell[2])
if raise_exception_finally and error_exists:
raise IkatsException("At least one error has been collected: see driver/manager logs")
return unprocessed_info
|
5e7bad5a745d76b6d0a9b45a3c36df846051c1bc
| 3,642,084
|
def decode_matrix_fbs(fbs):
"""
    Given an FBS-encoded Matrix, return a Pandas DataFrame that contains the data and indices.
"""
matrix = Matrix.Matrix.GetRootAsMatrix(fbs, 0)
n_rows = matrix.NRows()
n_cols = matrix.NCols()
if n_rows == 0 or n_cols == 0:
return pd.DataFrame()
if matrix.RowIndexType() is not TypedArray.TypedArray.NONE:
raise ValueError("row indexing not supported for FBS Matrix")
columns_length = matrix.ColumnsLength()
columns_index = deserialize_typed_array((matrix.ColIndexType(), matrix.ColIndex()))
if columns_index is None:
columns_index = range(0, n_cols)
# sanity checks
if len(columns_index) != n_cols or columns_length != n_cols:
raise ValueError("FBS column count does not match number of columns in underlying matrix")
columns_data = {}
columns_type = {}
for col_idx in range(0, columns_length):
col = matrix.Columns(col_idx)
tarr = (col.UType(), col.U())
data = deserialize_typed_array(tarr)
columns_data[columns_index[col_idx]] = data
if len(data) != n_rows:
raise ValueError("FBS column length does not match number of rows")
if col.UType() is TypedArray.TypedArray.JSONEncodedArray:
columns_type[columns_index[col_idx]] = "category"
df = pd.DataFrame.from_dict(data=columns_data).astype(columns_type, copy=False)
# more sanity checks
if not df.columns.is_unique or len(df.columns) != n_cols:
raise KeyError("FBS column indices are not unique")
return df
|
d3ffdd5f0d74a6e07b47fac175dae4ad6035cda8
| 3,642,085
|
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for sensors."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
return True
|
eaeee6df6c8b632fb331884236f7b3b47d551683
| 3,642,086
|
def _all_lists_equal_lenght(values: t.List[t.List[str]]) -> bool:
"""
    Tests whether all the inner lists have the same length
"""
for vn in values:
if len(values[0]) != len(vn):
return False
return True
|
9fd7db874658822d7a48d5f27340e0dfd5a2d177
| 3,642,087
|
def wind_shear(
shear: str, unit_alt: str = "ft", unit_wind: str = "kt", spoken: bool = False
) -> str:
"""Translate wind shear into a readable string
Ex: Wind shear 2000ft from 140 at 30kt
"""
if not shear or "WS" not in shear or "/" not in shear:
return ""
shear = shear[2:].rstrip(unit_wind.upper()).split("/")
wdir = core.spoken_number(shear[1][:3], True) if spoken else shear[1][:3]
return f"Wind shear {int(shear[0])*100}{unit_alt} from {wdir} at {shear[1][3:]}{unit_wind}"
|
b7aaf5253e251393a508de2d8080a5b3458c45f6
| 3,642,088
|
from hashlib import sha256
def root_hash(hashes):
"""
Compute the root hash of a merkle tree with the given list of leaf hashes
"""
# the number of hashes must be a power of two
assert len(hashes) & (len(hashes) - 1) == 0
while len(hashes) > 1:
hashes = [sha256(l + r).digest() for l, r in zip(*[iter(hashes)] * 2)]
return hashes[0]
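# Worked example (leaves chosen here for illustration): with four leaves the root is
# sha256(sha256(l0 + l1) + sha256(l2 + l3)).
_leaves = [sha256(bytes([i])).digest() for i in range(4)]
assert root_hash(_leaves) == sha256(sha256(_leaves[0] + _leaves[1]).digest()
                                    + sha256(_leaves[2] + _leaves[3]).digest()).digest()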
|
9036414c71e192a62968a9939d56f9523359c877
| 3,642,089
|
import json
def DumpStr(obj, pretty=False, newline=None, **json_dumps_kwargs):
"""Serialize a Python object to a JSON string.
Args:
obj: a Python object to be serialized.
pretty: True to output in human-friendly pretty format.
      newline: True to append a newline at the end of the result; defaults to the
        value of the ``pretty`` argument.
json_dumps_kwargs: Any allowable arguments to json.dumps.
Returns:
The serialized JSON string.
"""
if newline is None:
newline = pretty
if pretty:
kwargs = dict(indent=2, separators=(',', ': '), sort_keys=True)
else:
kwargs = {}
kwargs.update(json_dumps_kwargs)
result = json.dumps(obj, **kwargs)
if newline:
result += '\n'
return result
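# Quick checks (inputs chosen here): the pretty form is indented, key-sorted, and ends
# with a trailing newline by default.
assert DumpStr({'b': 1, 'a': 2}) == '{"b": 1, "a": 2}'
assert DumpStr({'b': 1, 'a': 2}, pretty=True) == '{\n  "a": 2,\n  "b": 1\n}\n'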
|
97ed8c722d8d9e545f29214fbc8a817e6cf4ca1a
| 3,642,090
|
def snake_case(s: str):
"""
Transform into a lower case string with underscores between words.
Parameters
----------
s : str
Original string to transform.
Returns
-------
Transformed string.
"""
return _change_case(s, '_', str.lower)
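# _change_case is defined elsewhere in the original module; a minimal sketch of what it is
# assumed to do (split the string into words, join with the separator, apply the case
# function) could look like this -- an illustration, not the original implementation:
# import re
# def _change_case(s: str, sep: str, case_fn) -> str:
#     words = re.findall(r'[A-Z]+(?=[A-Z][a-z])|[A-Z]?[a-z]+|[A-Z]+|\d+', s)
#     return sep.join(case_fn(w) for w in words)
# # snake_case('MyHTTPRequest2') would then give 'my_http_request_2'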
|
c4dc65445e424101b3b5264c2f14e0aa0d7bcd22
| 3,642,091
|
import queue
import threading
def multiple_workers_thread(worker_fn, queue_capacity_input=1000, queue_capacity_output=1000, n_worker=3):
    """
    :param worker_fn: callable taking (tid, queue_input, queue_output)
    :param queue_capacity_input: maximum size of the input queue
    :param queue_capacity_output: maximum size of the output queue
    :param n_worker: number of worker threads to start
    :return: (queue_input, queue_output, threads)
    """
    threads = []
    queue_input = queue.Queue(queue_capacity_input)
    queue_output = queue.Queue(queue_capacity_output)
    for i in range(n_worker):
        t = threading.Thread(target=worker_fn, args=(i, queue_input, queue_output))
        threads.append(t)
        t.start()
    print('multiple_workers_thread: started %s worker threads' % n_worker)
    return queue_input, queue_output, threads
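# Example usage (a sketch): each worker pulls items from queue_input and pushes results
# to queue_output; a None item is used here as a stop signal (a convention chosen for
# this illustration, not part of the helper above).
# def echo_worker(tid, queue_input, queue_output):
#     while True:
#         item = queue_input.get()
#         if item is None:
#             break
#         queue_output.put((tid, item))
#
# q_in, q_out, threads = multiple_workers_thread(echo_worker, n_worker=2)
# q_in.put('job'); q_in.put(None); q_in.put(None)
# for t in threads:
#     t.join()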
|
b9a989404b6f7e3aa6b028af5e55719364c62fd8
| 3,642,092
|
from typing import List
from typing import Any
def reorder(list_1: List[Any]) -> List[Any]:
"""This function takes a list and returns it in sorted order"""
new_list: list = []
for ele in list_1:
new_list.append(ele)
temp = new_list.index(ele)
while temp > 0:
if new_list[temp - 1] > new_list[temp]:
new_list[temp - 1], new_list[temp] = new_list[temp], new_list[temp-1]
else:
break
temp = temp - 1
return new_list
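# A quick sanity check (inputs chosen here): the input list itself is left untouched.
_sample = [3, 1, 2]
assert reorder(_sample) == [1, 2, 3]
assert _sample == [3, 1, 2]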
|
2e7dad8fa138b1a9a140deab4223eea4a09cdf91
| 3,642,093
|
def is_extended_markdown(view):
"""True if the view contains 'Markdown Extended'
syntax'ed text.
"""
return view.settings().get("syntax").endswith(
"Markdown Extended.sublime-syntax")
|
5c870fd277910f6fa48f2b8ae0dfd304fdbddff0
| 3,642,094
|
from typing import Dict
from typing import List
from typing import Any
import random
import logging
def _generate_graph(rule_dict: Dict[int, List[PartRule]], upper_bound: int) -> Any:
"""
Create a new graph from the VRG at random
    Returns (None, None) if the number of nodes in the generated graph exceeds upper_bound
:return: newly generated graph
"""
node_counter = 1
new_g = LightMultiGraph()
new_g.add_node(0, label=0)
non_terminals = {0}
rule_ordering = [] # list of rule ids in the order they were fired
while len(non_terminals) > 0: # continue until no more non-terminal nodes
if new_g.order() > upper_bound: # early stopping
return None, None
node_sample = random.sample(non_terminals, 1)[0] # choose a non terminal node at random
lhs = new_g.nodes[node_sample]['label']
rhs_candidates = rule_dict[lhs]
if len(rhs_candidates) == 1:
rhs = rhs_candidates[0]
else:
weights = np.array([rule.frequency for rule in rhs_candidates])
weights = weights / np.sum(weights) # normalize into probabilities
idx = int(np.random.choice(range(len(rhs_candidates)), size=1, p=weights)) # pick based on probability
rhs = rhs_candidates[idx]
logging.debug(f'firing rule {rhs.id}, selecting node {node_sample} with label: {lhs}')
rule_ordering.append(rhs.id)
broken_edges = find_boundary_edges(new_g, {node_sample})
assert len(broken_edges) == lhs
new_g.remove_node(node_sample)
non_terminals.remove(node_sample)
nodes = {}
for n, d in rhs.graph.nodes(data=True): # all the nodes are internal
new_node = node_counter
nodes[n] = new_node
label = None
if 'label' in d: # if it's a new non-terminal add it to the set of non-terminals
non_terminals.add(new_node)
label = d['label']
node_color = None
if 'node_colors' in d.keys():
node_color = random.sample(d['node_colors'], 1)[0]
if label is None and node_color is None:
new_g.add_node(new_node, b_deg=d['b_deg'])
elif label is not None and node_color is None:
new_g.add_node(new_node, b_deg=d['b_deg'], label=label)
elif label is None and node_color is not None:
new_g.add_node(new_node, b_deg=d['b_deg'], node_color=node_color)
else:
new_g.add_node(new_node, b_deg=d['b_deg'], label=label, node_color=node_color)
node_counter += 1
# randomly assign broken edges to boundary edges
random.shuffle(broken_edges)
# randomly joining the new boundary edges from the RHS to the rest of the graph - uniformly at random
for n, d in rhs.graph.nodes(data=True):
num_boundary_edges = d['b_deg']
if num_boundary_edges == 0: # there are no boundary edges incident to that node
continue
assert len(broken_edges) >= num_boundary_edges
edge_candidates = broken_edges[:num_boundary_edges] # picking the first batch of broken edges
broken_edges = broken_edges[num_boundary_edges:] # removing them from future consideration
for e in edge_candidates: # each edge is either (node_sample, v) or (u, node_sample)
if len(e) == 2:
u, v = e
else:
u, v, d = e
if u == node_sample:
u = nodes[n]
else:
v = nodes[n]
logging.debug(f'adding broken edge ({u}, {v})')
if len(e) == 2:
new_g.add_edge(u, v)
else:
new_g.add_edge(u, v, attr_dict=d)
# adding the rhs to the new graph
for u, v, d in rhs.graph.edges(data=True):
#edge_multiplicity = rhs.graph[u][v]['weight'] #
edge_multiplicity = d['weight']
if 'edge_colors' in d.keys():
edge_color = random.sample(d['edge_colors'], 1)[0]
new_g.add_edge(nodes[u], nodes[v], weight=edge_multiplicity, edge_color=edge_color)
else:
new_g.add_edge(nodes[u], nodes[v], weight=edge_multiplicity)
logging.debug(f'adding RHS internal edge ({nodes[u]}, {nodes[v]}) wt: {edge_multiplicity}')
return new_g, rule_ordering
|
8bcc3c93c0ff7f1f895f3970b9aa16c7f6293e68
| 3,642,095
|
def match_command_to_alias(command, aliases, match_multiple=False):
"""
    Match the command text against the given action aliases and return the
    matching alias formats.
    """
results = []
for alias in aliases:
formats = list_format_strings_from_aliases([alias], match_multiple)
for format_ in formats:
try:
extract_parameters(format_str=format_['representation'],
param_stream=command)
except ParseException:
continue
results.append(format_)
return results
|
74712b70cb5995c30c7948991d60954732e4bc16
| 3,642,096
|
import numpy as np
def dan_acf(x, axis=0, fast=False):
"""
DFM's acf function
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
    return acf / acf[tuple(m)]
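# Example usage (inputs chosen here): the autocorrelation is normalised so that lag 0 is 1;
# for white noise every other lag stays close to zero.
# rng = np.random.default_rng(0)
# rho = dan_acf(rng.normal(size=4096), fast=True)
# # rho[0] == 1.0 and abs(rho[1:]).max() is small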
|
ec258af743184c09e1962f1445ec6971b4d0cfab
| 3,642,097
|
import re
def set_selenium_local_session(proxy_address,
proxy_port,
proxy_username,
proxy_password,
proxy_chrome_extension,
headless_browser,
use_firefox,
browser_profile_path,
disable_image_load,
page_delay,
logger):
"""Starts local session for a selenium server.
Default case scenario."""
browser = None
err_msg = ''
if use_firefox:
firefox_options = Firefox_Options()
if headless_browser:
firefox_options.add_argument('-headless')
if browser_profile_path is not None:
firefox_profile = webdriver.FirefoxProfile(
browser_profile_path)
else:
firefox_profile = webdriver.FirefoxProfile()
# set English language
firefox_profile.set_preference('intl.accept_languages', 'en')
if disable_image_load:
# permissions.default.image = 2: Disable images load,
# this setting can improve pageload & save bandwidth
firefox_profile.set_preference('permissions.default.image', 2)
if proxy_address and proxy_port:
firefox_profile.set_preference('network.proxy.type', 1)
firefox_profile.set_preference('network.proxy.http',
proxy_address)
firefox_profile.set_preference('network.proxy.http_port',
proxy_port)
firefox_profile.set_preference('network.proxy.ssl',
proxy_address)
firefox_profile.set_preference('network.proxy.ssl_port',
proxy_port)
browser = webdriver.Firefox(firefox_profile=firefox_profile,
options=firefox_options)
# converts to custom browser
# browser = convert_selenium_browser(browser)
# authenticate with popup alert window
if (proxy_username and proxy_password):
proxy_authentication(browser,
logger,
proxy_username,
proxy_password)
else:
chromedriver_location = get_chromedriver_location()
chrome_options = Options()
chrome_options.add_argument('--mute-audio')
chrome_options.add_argument('--dns-prefetch-disable')
chrome_options.add_argument('--lang=en-US')
chrome_options.add_argument('--disable-setuid-sandbox')
chrome_options.add_argument('--no-sandbox')
# this option implements Chrome Headless, a new (late 2017)
# GUI-less browser. chromedriver 2.9 and above required
if headless_browser:
chrome_options.add_argument('--headless')
if disable_image_load:
chrome_options.add_argument(
'--blink-settings=imagesEnabled=false')
# replaces browser User Agent from "HeadlessChrome".
user_agent = "Chrome"
chrome_options.add_argument('user-agent={user_agent}'
.format(user_agent=user_agent))
capabilities = DesiredCapabilities.CHROME
# Proxy for chrome
if proxy_address and proxy_port:
prox = Proxy()
proxy = ":".join([proxy_address, str(proxy_port)])
if headless_browser:
chrome_options.add_argument(
'--proxy-server=http://{}'.format(proxy))
else:
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = proxy
prox.socks_proxy = proxy
prox.ssl_proxy = proxy
prox.add_to_capabilities(capabilities)
# add proxy extension
if proxy_chrome_extension and not headless_browser:
chrome_options.add_extension(proxy_chrome_extension)
# using saved profile for chrome
if browser_profile_path is not None:
chrome_options.add_argument(
'user-data-dir={}'.format(browser_profile_path))
chrome_prefs = {
'intl.accept_languages': 'en-US',
}
if disable_image_load:
chrome_prefs['profile.managed_default_content_settings.images'] = 2
chrome_options.add_experimental_option('prefs', chrome_prefs)
try:
browser = webdriver.Chrome(chromedriver_location,
desired_capabilities=capabilities,
chrome_options=chrome_options)
# gets custom instance
# browser = convert_selenium_browser(browser)
except WebDriverException as exc:
logger.exception(exc)
err_msg = 'ensure chromedriver is installed at {}'.format(
Settings.chromedriver_location)
return browser, err_msg
# prevent: Message: unknown error: call function result missing 'value'
matches = re.match(r'^(\d+\.\d+)',
browser.capabilities['chrome'][
'chromedriverVersion'])
if float(matches.groups()[0]) < Settings.chromedriver_min_version:
err_msg = 'chromedriver {} is not supported, expects {}+'.format(
float(matches.groups()[0]), Settings.chromedriver_min_version)
return browser, err_msg
browser.implicitly_wait(page_delay)
message = "Session started!"
highlight_print('browser', message, "initialization", "info", logger)
print('')
return browser, err_msg
|
4f70baeea220c8e4e21097a5a9d9d54a47f55c2e
| 3,642,098
|
def match():
"""Show a timer of the match length and an upload button"""
player_west = request.form['player_west']
player_east = request.form['player_east']
start_time = dt.datetime.now().strftime('%Y%m%d%H%M')
# generate filename to save video to
filename = '{}_vs_{}_{}.h264'.format(player_west, player_east, start_time)
GAMESAVER.filename = filename
GAMESAVER.start_recording(filename)
return render_template('match.html',
player_west=player_west,
player_east=player_east,
filename=filename)
|
7d2e51ddfaafdff903a31b5662093ab423d8f3a1
| 3,642,099
|