| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def load_data_file(filename):
"""loads a single file into a DataFrame"""
regexp = '^.*/results/([^/]+)/([^/]+)/([^/]+).csv$'
optimizer, blackbox, seed = re.match(regexp, filename).groups()
f = ROOT + '/results/{}/{}/{}.csv'.format(optimizer, blackbox, seed)
result = np.genfromtxt(f, delimiter=',')
return get_best(result)
| 22,700
|
def get_hub_manager():
"""Generate Hub plugin structure"""
global _HUB_MANAGER
if not _HUB_MANAGER:
_HUB_MANAGER = _HubManager(_plugins)
return _HUB_MANAGER
| 22,701
|
def extract_stimtype(
data: pd.DataFrame, stimtype: str, columns: list
) -> pd.DataFrame:
"""
Get trials with matching label under stimType
"""
if stimtype not in accept_stimtype:
raise ValueError(f"invalid {stimtype}, only accept {accept_stimtype}")
get = columns.copy()
get += ["participant_id"]
get += [i for i in identity_entity if i in data.columns]
stimresp = data.query(f"stimType == '{stimtype}'")
return stimresp.loc[:, get]
| 22,702
|
def compute_rank_clf_loss(hparams, scores, labels, group_size, weight):
"""
Compute ranking/classification loss
    Note that the tfr loss is slightly different from our implementation: the tfr loss is the sum over all losses
    divided by the number of queries; our implementation is the sum over all losses divided by the number of
    labels greater than 0.
"""
# Classification loss
if hparams.num_classes > 1:
labels = tf.cast(labels, tf.int32)
labels = tf.squeeze(labels, -1) # Last dimension is max_group_size, which should be 1
return tf.losses.sparse_softmax_cross_entropy(logits=scores, labels=labels, weights=weight)
# Expand weight to [batch size, 1] so that in inhouse ranking loss it can be multiplied with loss which is
# [batch_size, max_group_size]
expanded_weight = tf.expand_dims(weight, axis=-1)
# Ranking losses
# tf-ranking loss
if hparams.use_tfr_loss:
weight_name = "weight"
loss_fn = tfr.losses.make_loss_fn(hparams.tfr_loss_fn, lambda_weight=hparams.tfr_lambda_weights,
weights_feature_name=weight_name)
loss = loss_fn(labels, scores, {weight_name: expanded_weight})
return loss
# our own implementation
if hparams.ltr_loss_fn == 'pairwise':
lambdarank = LambdaRank()
pairwise_loss, pairwise_mask = lambdarank(scores, labels, group_size)
loss = tf.reduce_sum(tf.reduce_sum(pairwise_loss, axis=[1, 2]) * expanded_weight) / tf.reduce_sum(pairwise_mask)
elif hparams.ltr_loss_fn == 'softmax':
loss = compute_softmax_loss(scores, labels, group_size) * expanded_weight
is_positive_label = tf.cast(tf.greater(labels, 0), dtype=tf.float32)
loss = tf.div_no_nan(tf.reduce_sum(loss), tf.reduce_sum(is_positive_label))
elif hparams.ltr_loss_fn == 'pointwise':
loss = compute_sigmoid_cross_entropy_loss(scores, labels, group_size) * expanded_weight
loss = tf.reduce_mean(loss)
else:
raise ValueError('Currently only support pointwise/pairwise/softmax/softmax_cls.')
return loss
| 22,703
|
def get_org_image_url(url, insert_own_log=False):
""" liefert gegebenenfalls die URL zum Logo der betreffenden Institution """
#n_pos = url[7:].find('/') # [7:] um http:// zu ueberspringen
#org_url = url[:n_pos+7+1] # einschliesslich '/'
item_containers = get_image_items(ELIXIER_LOGOS_PATH)
image_url = image_url_url = ''
image_url_extern = True
for ic in item_containers:
        arr = ic.item.sub_title.split('|')
for a in arr:
b = a.strip()
if b != '' and url.find(b) >= 0:
image_url = ELIXIER_LOGOS_URL + ic.item.name
image_url_url = ic.item.title
image_url_extern = True
break
if image_url != '':
break
if insert_own_log and image_url == '':
image_url = EDUFOLDER_INST_LOGO_URL
image_url_url = get_base_site_url()
image_url_extern = False
return image_url, image_url_url, image_url_extern
| 22,704
|
async def download_page(url, file_dir, file_name, is_binary=False):
"""
Fetch URL and save response to file
Args:
url (str): Page URL
file_dir (pathlib.Path): File directory
file_name (str): File name
is_binary (bool): True if should download binary content (e.g. images)
Returns:
HttpResponse: HTTP response content and extension
"""
response = await fetch(url, is_binary)
path = file_dir.joinpath('{}{}'.format(file_name, response.ext))
try:
with ThreadPoolExecutor() as pool:
await asyncio.get_running_loop().run_in_executor(
pool, write_file, str(path), is_binary, response.content
)
except OSError:
logging.error('Can\'t save file: {}'.format(path))
return response
| 22,705
|
def normal_coffee():
"""
when the user decides to pick a normal or large cup of coffee
:return: template that explains how to make normal coffee
"""
return statement(render_template('explanation_large_cup', product='kaffee'))
| 22,706
|
def _transitive_closure_dense_numpy(A, kind='metric', verbose=False):
"""
Calculates Transitive Closure using numpy dense matrix traversing.
"""
C = A.copy()
n, m = A.shape
# Check if diagonal is all zero
if sum(np.diagonal(A)) > 0:
raise ValueError("Diagonal has to be zero for matrix computation to be correct")
# Compute Transitive Closure
for i in range(0, n):
if verbose:
print('calc row:', i + 1, 'of', m)
for j in range(0, n):
if kind == 'metric':
vec = C[i, :] + C[:, j]
C[i, j] = vec.min()
elif kind == 'ultrametric':
vec = np.maximum(C[i, :], C[:, j])
C[i, j] = vec.min()
return np.array(C)
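A minimal usage sketch for the dense closure above, assuming NumPy is available and the input is a symmetric distance matrix with a zero diagonal; the values are illustrative only:
import numpy as np

# 3-node proximity graph: the direct 0-2 distance (0.9) is longer than the
# two-hop path through node 1 (0.2 + 0.3 = 0.5), so the 'metric' closure
# shortens it.
A = np.array([[0.0, 0.2, 0.9],
              [0.2, 0.0, 0.3],
              [0.9, 0.3, 0.0]])
C = _transitive_closure_dense_numpy(A, kind='metric')
print(C[0, 2])  # 0.5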
| 22,707
|
def _walk_to_root(path):
"""
Yield directories starting from the given directory up to the root
"""
if not os.path.exists(path): # pragma: no cover
raise IOError('Starting path not found')
if os.path.isfile(path): # pragma: no cover
path = os.path.dirname(path)
last_dir = None
current_dir = os.path.abspath(path)
while last_dir != current_dir:
yield current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
last_dir, current_dir = current_dir, parent_dir
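A sketch of the typical dotenv-style lookup this generator supports; the '.env' file name below is only an illustrative choice:
import os

# Search the current directory and every ancestor for a config file.
for directory in _walk_to_root(os.getcwd()):
    candidate = os.path.join(directory, '.env')
    if os.path.isfile(candidate):
        print('found config at', candidate)
        break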
| 22,708
|
def convert_date(raw_dates: pd.Series) -> pd.Series:
"""Automatically converts series containing raw dates
to specific format.
Parameters
----------
raw_dates:
Series to be converted.
Returns
-------
    Series converted to timezone-aware UTC datetimes.
"""
raw_dates = pd.to_datetime(raw_dates, utc=True)
return raw_dates
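A quick illustration of the conversion, assuming pandas is imported; mixed raw date strings come back as one timezone-aware UTC datetime series:
import pandas as pd

raw = pd.Series(["2021-01-01", "2021-06-15 12:30"])
print(convert_date(raw))
# 0   2021-01-01 00:00:00+00:00
# 1   2021-06-15 12:30:00+00:00
# dtype: datetime64[ns, UTC]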
| 22,709
|
def _feed_subs_to_queue(monitored_sub, queue):
"""Stream all submissions and put them into the queue.
Submissions are stored in a tuple with a 0.
Error logging should be established as needed."""
subreddit = reddit.subreddit(monitored_sub)
while True:
try:
for sub in subreddit.stream.submissions():
queue.append((sub, 0))
except Exception as e:
print('Submission stream error. Resetting')
print(e)
continue
| 22,710
|
async def get_scorekeeper_by_id(scorekeeper_id: conint(ge=0, lt=2**31)):
"""Retrieve a Scorekeeper object, based on Scorekeeper ID,
containing: Scorekeeper ID, name, slug string, and gender."""
try:
scorekeeper = Scorekeeper(database_connection=_database_connection)
scorekeeper_info = scorekeeper.retrieve_by_id(scorekeeper_id)
if not scorekeeper_info:
raise HTTPException(status_code=404,
detail=f"Scorekeeper ID {scorekeeper_id} not found")
else:
return scorekeeper_info
except ValueError:
raise HTTPException(status_code=404,
detail=f"Scorekeeper ID {scorekeeper_id} not found")
except ProgrammingError:
raise HTTPException(status_code=500,
detail="Unable to retrieve scorekeeper information")
except DatabaseError:
raise HTTPException(status_code=500,
detail="Database error occurred while trying to "
"retrieve scorekeeper information")
| 22,711
|
def check_spelling(data, header_to_print=None):
"""Checks spelling via Yandex.Speller API"""
try:
p = Popen(['./yasp'], stdin=PIPE, stdout=PIPE, encoding='UTF-8')
p.stdin.write(data)
output = p.communicate()[0]
if output:
if header_to_print:
print(header_to_print)
print(output, end='')
print('-' * 20)
p.stdin.close()
except Exception as e:
        print('Cannot communicate with '
              'Yandex Speller API, skipped: %s' % e)
| 22,712
|
def modulePath():
"""
This will get us the program's directory, even if we are frozen
using py2exe
"""
try:
_ = sys.executable if weAreFrozen() else __file__
except NameError:
_ = inspect.getsourcefile(modulePath)
return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(_))))
| 22,713
|
def get_time(sec_scale):
"""time since epoch in milisecond
"""
if sec_scale == 'sec':
scale = 0
elif sec_scale == 'msec':
scale = 3
else:
raise
secs = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
return int(secs * pow(10, scale))
| 22,714
|
def get_json(url):
"""
Function that retrieves a json from a given url.
:return: the json that was received
"""
with urllib.request.urlopen(url) as response:
        data = response.read().decode('utf-8')
data = json.loads(data)
return data
| 22,715
|
def _defaultChangeProvider(variables,wf):
""" by default we just forword the message to the change provider """
return variables
| 22,716
|
def evaluate_surface_derivatives(surface, params, order=1):
"""
"""
if surface.rational:
control_points = np.array(surface.weighted_control_points)
else:
control_points = np.array(surface.control_points)
degree_u, degree_v = surface.degree
knot_vector_u, knot_vector_v = surface.knot_vector
count_u, count_v = surface.count
params_u = [p[0] for p in params]
params_v = [p[1] for p in params]
spans_u = find_spans(knot_vector_u, count_u, params_u)
bases_u = basis_functions_derivatives(degree_u, knot_vector_u, spans_u, params_u, order)
spans_v = find_spans(knot_vector_v, count_v, params_v)
bases_v = basis_functions_derivatives(degree_v, knot_vector_v, spans_v, params_v, order)
dv = min(degree_v, order)
derivatives = []
for span_u, basis_u, span_v, basis_v in zip(spans_u, bases_u, spans_v, bases_v):
b = control_points[span_u - degree_u:span_u + 1, span_v - degree_v:span_v + 1]
temp = np.dot(b.T, np.array(basis_u).T).T
dd = min(order, dv)
SKL = np.dot(np.array(basis_v[:dd + 1]), temp[:degree_v + 1]).transpose(1, 0, 2)
derivatives.append(SKL)
if not surface.rational:
return np.array(derivatives)
else:
# TODO: numpify this!
D = []
for SKLw in derivatives:
dimension = 4
SKL = [[[0.0 for _ in range(dimension)] for _ in range(order + 1)] for _ in range(order + 1)]
for k in range(0, order + 1):
for l in range(0, order + 1): # noqa E741
v = list(SKLw[k, l])[:]
for j in range(1, l + 1):
v[:] = [tmp - (binomial_coefficient(l, j) * SKLw[0][j][-1] * drv) for tmp, drv in zip(v, SKL[k][l - j])]
for i in range(1, k + 1):
v[:] = [tmp - (binomial_coefficient(k, i) * SKLw[i][0][-1] * drv) for tmp, drv in zip(v, SKL[k - i][l])]
v2 = [0.0 for _ in range(dimension - 1)]
for j in range(1, l + 1):
v2[:] = [tmp + (binomial_coefficient(l, j) * SKLw[i][j][-1] * drv) for tmp, drv in zip(v2, SKL[k - i][l - j])]
v[:] = [tmp - (binomial_coefficient(k, i) * tmp2) for tmp, tmp2 in zip(v, v2)]
SKL[k][l][:] = [tmp / SKLw[0][0][-1] for tmp in v[0:(dimension - 1)]]
D.append(SKL)
return np.array(D)
| 22,717
|
def GPPrediction(y_train, X_train, T_train, eqid_train, sid_train = None, lid_train = None,
X_new = None, T_new = None, eqid_new = None, sid_new = None, lid_new = None,
dc_0 = 0.,
Tid_list = None, Hyp_list = None, phi_0 = None, tau_0 = None,
sigma_s = None, sigma_e = None):
"""
Make ground motion predictions at new locations conditioned on the training data
Parameters
----------
y_train : np.array(n_train_pt)
Array with ground-motion observations associated with training data
X_train : np.array(n_train_pt, n_dim)
Design matrix for training data.
T_train : np.array(n_train_pt, 2x n_coor)
Coordinates matrix for training data.
eqid_train : np.array(n_train_pt)
Earthquake IDs for training data.
sid_train : np.array(n_train_pt), optional
Station IDs for training data. The default is None.
lid_train : np.array(n_train_pt), optional
Source IDs for training data. The default is None.
X_new : np.array(n_new_pt, n_dim), optional
        Design matrix for predictions. The default is None.
T_new : np.array(n_new_pt, 2 x n_coor), optional
Coordinate matrix for predictions. The default is None.
eqid_new : np.array(n_new_pt), optional
Earthquake IDs for predictions. The default is None.
sid_new : np.array(n_new_pt), optional
Station IDs for predictions. The default is None.
lid_new : np.array(n_new_pt), optional
Source IDs for predictions. The default is None.
dc_0 : float, optional
Mean offset. The default is zero.
Tid_list : n_dim list
        List to specify the coordinate pair for each dimension.
    Hyp_list : list, optional
        List of hyper-parameters for each dimension of the covariance function.
phi_0 : double
Within-event standard deviation.
tau_0 : double
Between-event standard deviation.
sigma_s : double, optional
Standard deviation for zero correlation site-to-site term. The default is None.
sigma_e : double, optional
Standard deviation for zero correlation source-to-source term. The default is None.
Returns
-------
np.array(n_new_pt)
median estimate of new predictions.
np.array(n_new_pt, n_new_pt)
epistemic uncertainty of new predictions.
"""
#import pdb; pdb.set_trace()
#remove mean offset from conditioning data
y_train = y_train - dc_0
#number of grid nodes
n_pt_train = X_train.shape[0]
n_pt_new = X_new.shape[0]
#initialize covariance matrices
cov_data = np.zeros([n_pt_train,n_pt_train])
cov_star = np.zeros([n_pt_new,n_pt_train])
cov_star2 = np.zeros([n_pt_new,n_pt_new])
#create covariance matrices
for k, (hyp, tid) in enumerate(zip(Hyp_list,Tid_list)):
#covariance between train data
cov_data += CreateCovMaternDimX(X_train[:,k], X_train[:,k],
T_train[tid], T_train[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 1e-6)
#covariance between train data and predictions
cov_star += CreateCovMaternDimX(X_new[:,k], X_train[:,k],
T_new[tid], T_train[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 0)
#covariance between prediction data
cov_star2 += CreateCovMaternDimX(X_new[:,k], X_new[:,k],
T_new[tid], T_new[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 1e-6)
#add site to site systematic effects if sigma_s is specified
if not (sigma_s is None):
assert(not(sid_train is None)), 'Error site id for training data not specified'
cov_data += CreateCovS2S(sid_train, sid_train, sigma_s, delta = 1e-6)
#add source to source systematic effects if phi_L2L is specified
if not (sigma_e is None):
assert(not(lid_train is None)), 'Error location id for training data not specified'
cov_data += CreateCovL2L(lid_train, lid_train, sigma_e, delta = 1e-6)
#add between and within event covariance matrices
cov_data += CreateCovWe(eqid_train, eqid_train, phi_0)
cov_data += CreateCovBe(eqid_train, eqid_train, tau_0, delta = 1e-6)
#consider site to site systematic effects in predictions if sigma_s is specified
if not ( (sigma_s is None) or (sid_new is None)):
cov_star2 += CreateCovS2S(sid_new, sid_new, sigma_s, delta = 1e-6)
cov_star += CreateCovS2S(sid_new, sid_train, sigma_s)
#consider site to site systematic effects in predictions if sigma_s is specified
if not ( (sigma_e is None) or (lid_new is None)):
cov_star2 += CreateCovL2L(lid_new, lid_new, sigma_e, delta = 1e-6)
cov_star += CreateCovL2L(lid_new, lid_train, sigma_e)
#consider earthquake aleatory terms if eqid_new is specified
if not (eqid_new is None):
cov_star2 += CreateCovBe(eqid_new, eqid_new, tau_0, delta = 1e-6)
cov_star += CreateCovBe(eqid_new, eqid_train, tau_0)
#posterior mean and variance at new locations
y_new_mu = cov_star.dot(linalg.solve(cov_data, y_train))
#add mean offset to new predictions
y_new_mu = y_new_mu + dc_0
y_new_cov = cov_star2 - cov_star.dot(linalg.solve(cov_data, cov_star.transpose()))
#posterior standard dev. at new locations
y_new_sig = np.sqrt(np.diag(y_new_cov))
return y_new_mu.flatten(), y_new_sig.flatten(), y_new_cov
| 22,718
|
def zdotu(x, y):
"""
This function computes the complex scalar product \M{x^T y} for the
vectors x and y, returning the result.
"""
return _gslwrap.gsl_blas_zdotu(x, y, 1j)
| 22,719
|
def music21_to_chord_duration(p, key):
"""
Takes in a Music21 score, and outputs three lists
    List for chords (as comma-separated pitch names with octaves)
    List for chord functions (roman numeral figures from romanNumeralFromChord)
List for durations
"""
p_chords = p.chordify()
p_chords_o = p_chords.flat.getElementsByClass('Chord')
chord_list = []
chord_function_list = []
duration_list = []
for ch in p_chords_o:
duration_list.append(ch.duration.quarterLength)
ch.closedPosition(forceOctave=4, inPlace=True)
rn = roman.romanNumeralFromChord(ch, key)
rp = rn.pitches
rp_names = ",".join([pi.name + pi.unicodeNameWithOctave[-1] for pi in rp])
chord_list.append(rp_names)
chord_function_list.append(rn.figure)
return chord_list, chord_function_list, duration_list
| 22,720
|
def city_country(city, country, population=''):
"""Generate a neatly formatted city/country name."""
full_name = city + ', ' + country
if population:
return full_name.title() + ' - population ' + str(population)
else:
return full_name.title()
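Two illustrative calls, with and without the optional population argument:
print(city_country('santiago', 'chile'))           # Santiago, Chile
print(city_country('santiago', 'chile', 5000000))  # Santiago, Chile - population 5000000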
| 22,721
|
def generate_headermap(line,startswith="Chr", sep="\t"):
"""
>>> line = "Chr\\tStart\\tEnd\\tRef\\tAlt\\tFunc.refGene\\tGene.refGene\\tGeneDetail.refGene\\tExonicFunc.refGene\\tAAChange.refGene\\tsnp138\\tsnp138NonFlagged\\tesp6500siv2_ea\\tcosmic70\\tclinvar_20150629\\tOtherinfo"
>>> generate_headermap(line)
{'Chr': 0, 'Start': 1, 'End': 2, 'Ref': 3, 'Alt': 4, 'Func.refGene': 5, 'Gene.refGene': 6, 'GeneDetail.refGene': 7, 'ExonicFunc.refGene': 8, 'AAChange.refGene': 9, 'snp138': 10, 'snp138NonFlagged': 11, 'esp6500siv2_ea': 12, 'cosmic70': 13, 'clinvar_20150629': 14, 'Otherinfo': 15}
"""
if not line.startswith(startswith):
raise Exception("Header line should start with \"{0}\"".format(startswith))
else:
if line.startswith("#"):
line = line[1:]
return dict([(v, i) for i,v in enumerate(line.rstrip().split(sep))])
| 22,722
|
def max(q):
"""Return the maximum of an array or maximum along an axis.
Parameters
----------
q : array_like
Input data
Returns
-------
array_like
Maximum of an array or maximum along an axis
"""
if isphysicalquantity(q):
return q.__class__(np.max(q.value), q.unit)
else:
return np.max(q)
| 22,723
|
def make_failure_log(conclusion_pred, premise_preds, conclusion, premises,
coq_output_lines=None):
"""
Produces a dictionary with the following structure:
{"unproved sub-goal" : "sub-goal_predicate",
"matching premises" : ["premise1", "premise2", ...],
"raw sub-goal" : "conclusion",
"raw premises" : ["raw premise1", "raw premise2", ...]}
Raw sub-goal and raw premises are the coq lines with the premise
internal name and its predicates. E.g.
H : premise (Acc x1)
Note that this function is not capable of returning all unproved
sub-goals in coq's stack. We only return the top unproved sub-goal.
"""
failure_log = OrderedDict()
conclusion_base = denormalize_token(conclusion_pred)
# failure_log["unproved sub-goal"] = conclusion_base
premises_base = [denormalize_token(p) for p in premise_preds]
# failure_log["matching premises"] = premises_base
# failure_log["raw sub-goal"] = conclusion
# failure_log["raw premises"] = premises
premise_preds = []
for p in premises:
try:
pred = p.split()[2]
except:
continue
if pred.startswith('_'):
premise_preds.append(denormalize_token(pred))
failure_log["all_premises"] = premise_preds
failure_log["other_sub-goals"] = get_subgoals_from_coq_output(
coq_output_lines, premises)
failure_log["other_sub-goals"].insert(0, {
'subgoal': conclusion_base,
'index': 1,
'raw_subgoal': conclusion,
'matching_premises' : premises_base,
'matching_raw_premises' : premises_base})
failure_log["type_error"] = has_type_error(coq_output_lines)
failure_log["open_formula"] = has_open_formula(coq_output_lines)
return failure_log
| 22,724
|
def request(url=None, json=None, parser=lambda x: x, encoding=None, **kwargs):
"""
:param url:
:param json:
    :param parser: when None, return the raw response r; otherwise return parser(r.json())
:param kwargs:
:return:
"""
    method = 'post' if json is not None else 'get'  # except for special cases
logger.info(f"Request Method: {method}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
}
r = requests.request(method, url, json=json, headers=headers)
r.encoding = encoding if encoding else r.apparent_encoding
if parser is None:
return r
return parser(r.json())
| 22,725
|
def dump_all(dashboard_dir):
"""dump dashboard to specific dir with fhs"""
if not os.path.exists(dashboard_dir):
print("dump: create dashboard dir %s" % dashboard_dir)
os.mkdir(dashboard_dir)
dbmeta = list_dashboards()
folders = set([i.get('folderUid') for i in dbmeta if 'folderUid' in i and i.get('type') != 'dash-folder'])
dashdbs = [(i.get('uid'), i.get('folderUid', '.')) for i in dbmeta if
i.get('type') != 'dash-folder']
for d in folders:
abs_path = os.path.join(dashboard_dir, d)
if os.path.isfile(abs_path):
os.remove(abs_path)
if not os.path.exists(abs_path):
print("dump: %s / dir created" % d)
os.mkdir(abs_path)
print("dump: create dir %s" % d)
for uid, folder in dashdbs:
dbpath = os.path.join(dashboard_dir, folder, uid + '.json')
if folder == "." or not folder:
dbpath = os.path.join(dashboard_dir, uid + '.json')
print("dump: %s / %s \t ---> %s" % (folder, uid, dbpath))
dump_dashboard_to_file(get_dashboard(uid), dbpath)
| 22,726
|
def log_density_igaussian(z, z_var):
"""Calculate log density of zero-mean isotropic gaussian distribution given z and z_var."""
assert z.ndimension() == 2
assert z_var > 0
z_dim = z.size(1)
return -(z_dim/2)*math.log(2*math.pi*z_var) + z.pow(2).sum(1).div(-2*z_var)
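A small sanity check, assuming PyTorch tensors (the .ndimension()/.pow() calls suggest torch) and that math is imported alongside the function: the log-density of a 2-D standard normal at the origin is -log(2*pi):
import math  # used inside log_density_igaussian
import torch

z = torch.zeros(1, 2)
print(log_density_igaussian(z, z_var=1.0))  # tensor([-1.8379]) ~= -log(2*pi)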
| 22,727
|
def denom(r,E,J,model):
"""solve the denominator"""
ur = model.potcurve(r)#model.potcurve[ (abs(r-model.rcurve)).argmin()]
return 2.0*(E-ur)*r*r - J*J;
| 22,728
|
def approximate_gaussians(confidence_array, mean_array, variance_array):
""" Approximate gaussians with given parameters with one gaussian.
Approximation is performed via minimization of Kullback-Leibler
divergence KL(sum_{j} w_j N_{mu_j, sigma_j} || N_{mu, sigma}).
Parameters
----------
confidence_array : ndarray(num_gaussians)
confidence values for gaussians.
mean_array : ndarray(num_gaussians, 3)
(z,y,x) mean values for input gaussians.
    variance_array : ndarray(num_gaussians, 3)
(z,y,x) variances for input gaussians.
Returns
-------
tuple(ndarray(3), ndarray(3))
mean and sigma for covering gaussian.
"""
delimiter = np.sum(confidence_array)
mu = np.sum(mean_array.T * confidence_array, axis=1) / delimiter
sigma = np.sqrt(np.sum((variance_array + (mean_array - mu) ** 2).T
* confidence_array, axis=1) / delimiter)
return mu, sigma
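A worked example, assuming NumPy: merging two equally-confident gaussians centred at (0,0,0) and (2,2,2) with unit variances gives a moment-matched gaussian at (1,1,1) with inflated spread:
import numpy as np

confidence = np.array([1.0, 1.0])
means = np.array([[0.0, 0.0, 0.0],
                  [2.0, 2.0, 2.0]])
variances = np.ones((2, 3))
mu, sigma = approximate_gaussians(confidence, means, variances)
print(mu)     # [1. 1. 1.]
print(sigma)  # [1.41421356 1.41421356 1.41421356]  == sqrt(1 + 1) per axis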
| 22,729
|
def return_loss(apply_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray],
steps: types.Transition):
"""Loss wrapper for ReturnMapper.
Args:
apply_fn: applies a transition model (o_t, a_t) -> (o_t+1, r), expects the
leading axis to index the batch and the second axis to index the
transition triplet (t-1, t, t+1).
steps: RLDS dictionary of transition triplets as prepared by
`rlds_loader.episode_to_timestep_batch`.
Returns:
A scalar loss value as jnp.ndarray.
"""
observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
steps.observation)
action_t = steps.action[:, dataset.CURRENT, ...]
n_step_return_t = steps.extras[dataset.N_STEP_RETURN][:, dataset.CURRENT, ...]
predicted_n_step_return_t = apply_fn(observation_t, action_t)
return mse(predicted_n_step_return_t, n_step_return_t)
| 22,730
|
def part_two(data):
"""Part two"""
array = ['a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
commands = data.split(',')
for _ in range(1000000000 % 30):
dance(array, commands)
return ''.join(map(str, array))
| 22,731
|
def ts_declare():
"""Makes an f5-telemetry-streaming declaration from the supplied metadata"""
if is_rest_worker('/mgmt/shared/telemetry/declare') and os.path.isfile(
TS_DECLARATION_FILE):
tsdf = open(TS_DECLARATION_FILE, 'r')
declaration = tsdf.read()
tsdf.close()
json.loads(declaration)
d_url = 'http://localhost:8100/mgmt/shared/telemetry/declare'
LOG.debug('POST f5-telemetry-streaming declaration')
response = requests.post(d_url, auth=('admin', ''), data=declaration)
# initial request
if response.status_code < 400:
return True
LOG.error('f5-telemetry-streaming declaration failed %s - %s',
response.status_code, response.text)
return False
LOG.error(
'f5-telemetry-streaming worker not installed or declaration missing')
return False
| 22,732
|
def readCSVPremadeGroups(filename, studentProperties=None):
"""studentProperties is a list of student properties in the order they appear in the CSV.
For example, if a CSV row (each group is a row) is as follows: "Rowan Wilson, rowan@harvard.edu, 1579348, Bob Tilano, bob@harvard.edu, 57387294"
Then the format is: fullname, email, huid, fullname, email, huid, ...
Thus, studentProperties = ['fullname', 'email', 'huid']
"""
csv = _readCSV(filename)
# Create studentProperties if needed
if studentProperties == None:
studentProperties = []
firstHeader = None
for header in csv['headers']:
header = _keepOnlyLetters(header).lower()
if firstHeader == header:
# Found beginning of repeating sequence
break
if firstHeader == None:
firstHeader = header
studentProperties.append(header)
# Pull groups from CSV data
groups = []
for row in csv['data']:
students = []
currStudent = None
for i in range(len(row)):
if len(row[i].strip()) == 0:
break
propIndex = i % len(studentProperties)
            if propIndex == 0:
                # Just starting a new student
                currStudent = {}
            # Record this property for the current student (including the first one)
            currStudent[studentProperties[propIndex]] = row[i]
if propIndex == len(studentProperties) - 1:
# Just finished adding properties to a student
students.append(currStudent)
if len(students) > 0:
groups.append(students)
return groups
| 22,733
|
def random_contrast(video, lower, upper, seed=None):
"""Adjust the contrast of an image or images by a random factor.
Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
picked in the interval `[lower, upper)`.
For producing deterministic results given a `seed` value, use
`tf.image.stateless_random_contrast`. Unlike using the `seed` param
with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
same results given the same seed independent of how many times the function is
called, and independent of global seed settings (e.g. tf.random.set_seed).
Args:
    video: An image or video tensor with 3 or more dimensions.
lower: float. Lower bound for the random contrast factor.
upper: float. Upper bound for the random contrast factor.
seed: A Python integer. Used to create a random seed. See
`tf.compat.v1.set_random_seed` for behavior.
Usage Example:
>>> x = [[[1.0, 2.0, 3.0],
... [4.0, 5.0, 6.0]],
... [[7.0, 8.0, 9.0],
... [10.0, 11.0, 12.0]]]
>>> tf.image.random_contrast(x, 0.2, 0.5)
<tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>
Returns:
The contrast-adjusted image(s).
Raises:
ValueError: if `upper <= lower` or if `lower < 0`.
"""
if upper <= lower:
raise ValueError("upper must be > lower.")
if lower < 0:
raise ValueError("lower must be non-negative.")
    contrast_factor = tf.random.uniform([], lower, upper, seed=seed)
return adjust_contrast(video, contrast_factor)
| 22,734
|
def load(df: DataFrame, config: Dict, logger) -> bool:
"""Write data in final destination
:param df: DataFrame to save.
:type df: DataFrame
:param config: job configuration
:type config: Dict
:param logger: Py4j Logger
:type logger: Py4j.Logger
:return: True
:rtype: bool
"""
df.write.save(path=config['output_path'], mode='overwrite')
return True
| 22,735
|
def phase_correct_zero(spec, phi):
"""
Correct the phases of a spectrum by phi radians
Parameters
----------
spec : float array of complex dtype
The spectrum to be corrected.
    phi : float or float array
        The phase correction (in radians) to apply.
Returns
-------
spec : float array
The phase corrected spectrum
Notes
-----
[Keeler2005] Keeler, J (2005). Understanding NMR Spectroscopy, 2nd
edition. Wiley. Page 88.
"""
c_factor = np.exp(-1j * phi)
# If it's an array, we need to reshape it and broadcast across the
# frequency bands in the spectrum. Otherwise, we assume it's a scalar and
# apply it to all the dimensions of the spec array:
if hasattr(phi, 'shape'):
c_factor = c_factor.reshape(c_factor.shape + (1,))
return spec * c_factor
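A quick numerical check, assuming NumPy: rotating a purely imaginary spectrum by pi/2 radians turns it into an (almost exactly) real one:
import numpy as np

spec = np.array([0 + 1j, 0 + 2j])
print(phase_correct_zero(spec, np.pi / 2))  # approximately [1.+0.j  2.+0.j]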
| 22,736
|
def upsample(s, n, phase=0):
"""Increase sampling rate by integer factor n with included offset phase.
"""
return np.roll(np.kron(s, np.r_[1, np.zeros(n-1)]), phase)
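An illustration of the zero-stuffing behaviour, assuming NumPy: each input sample is followed by n-1 zeros, and phase rolls the stuffed sequence to the right:
import numpy as np

s = np.array([1.0, 2.0, 3.0])
print(upsample(s, 3))            # [1. 0. 0. 2. 0. 0. 3. 0. 0.]
print(upsample(s, 3, phase=1))   # [0. 1. 0. 0. 2. 0. 0. 3. 0.]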
| 22,737
|
def close_connection(exception):
"""
Close DB connection
"""
db = getattr(g, "_database", None)
if db is not None:
db.close()
| 22,738
|
def _build_plugin_out(name, outdir, options, builder):
"""Build the --{lang}_out argument for a given plugin."""
arg = outdir
if options:
arg = ",".join(options) + ":" + arg
builder["args"] += ["--%s_out=%s" % (name, arg)]
| 22,739
|
def parse_idx_inp(idx_str):
""" parse idx string
"""
    idx_str = idx_str.strip()
    if idx_str.isdigit():
        idxs = [int(idx_str)]
    elif '-' in idx_str:
        [idx_begin, idx_end] = idx_str.split('-')
        idxs = list(range(int(idx_begin), int(idx_end)+1))
    else:
        raise ValueError('could not parse index string: {}'.format(idx_str))
    return idxs
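Both accepted input forms, a single index and an inclusive range:
print(parse_idx_inp('7'))      # [7]
print(parse_idx_inp(' 3-6 '))  # [3, 4, 5, 6]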
| 22,740
|
def toggl_request_get(url: str, params: dict = None) -> requests.Response:
"""Send a GET request to specified url using toggl headers and configured auth"""
headers = {"Content-Type": "application/json"}
auth = (CONFIG["toggl"]["api_token"], "api_token")
response = requests.get(url, headers=headers, auth=auth, params=params)
return response
| 22,741
|
def sample_point_cloud(source, target, sample_indices=[2]):
""" Resamples a source point cloud at the coordinates of a target points
Uses the nearest point in the target point cloud to the source point
Parameters
----------
source: array
Input point cloud
target: array
Target point cloud for sample locations
sample_indices: list
List of indices to sample from source. Defaults to 2 (z or height
dimension)
Returns
-------
An array of sampled points
"""
sample_indices = np.array(sample_indices)
tree = cKDTree(source[:, 0:2])
dist, idx = tree.query(target, n_jobs=-1)
output = np.hstack(
[
target,
source[idx[:, None], sample_indices].reshape(
(len(idx), len(sample_indices))
),
]
)
return output
| 22,742
|
def make_satellite_gsp_pv_map(batch: Batch, example_index: int, satellite_channel_index: int):
"""Make a animation of the satellite, gsp and the pv data"""
trace_times = []
times = batch.satellite.time[example_index]
pv = batch.pv
for time in times:
trace_times.append(
make_satellite_gsp_pv_map_one_time_value(
batch=batch,
example_index=example_index,
satellite_channel_index=satellite_channel_index,
time_value=time,
)
)
frames = []
for i, traces in enumerate(trace_times):
frames.append(go.Frame(data=traces, name=f"frame{i+1}"))
# make slider
labels = [pd.to_datetime(time.data) for time in times]
sliders = make_slider(labels=labels)
x = pv.x_coords[example_index][pv.x_coords[example_index] != 0].mean()
y = pv.y_coords[example_index][pv.y_coords[example_index] != 0].mean()
lat, lon = osgb_to_lat_lon(x=x, y=y)
fig = go.Figure(
data=trace_times[0],
layout=go.Layout(
title="Start Title",
),
frames=frames,
)
fig.update_layout(updatemenus=[make_buttons()])
fig.update_layout(
mapbox_style="carto-positron", mapbox_zoom=8, mapbox_center={"lat": lat, "lon": lon}
)
fig.update_layout(sliders=sliders)
return fig
| 22,743
|
def raw_to_engineering_product(product, idbm):
"""Apply parameter raw to engineering conversion for the entire product.
Parameters
----------
product : `BaseProduct`
The TM product as level 0
Returns
-------
`int`
        How many columns were calibrated.
"""
col_n = 0
idb_ranges = QTable(rows=[(version, range.start.as_float(), range.end.as_float())
for version, range in product.idb_versions.items()],
names=["version", "obt_start", "obt_end"])
idb_ranges.sort("obt_start")
idb_ranges['obt_start'][0] = SCETime.min_time().as_float()
for i in range(0, len(idb_ranges)-1):
idb_ranges['obt_end'][i] = idb_ranges['obt_start'][i+1]
idb_ranges['obt_end'][-1] = SCETime.max_time().as_float()
for col in product.data.colnames:
if (not (hasattr(product.data[col], "meta")
and "PCF_CURTX" in product.data[col].meta
and product.data[col].meta["PCF_CURTX"] is not None
and product.data[col].meta["NIXS"] is not None
and hasattr(product, "idb")
)):
continue
col_n += 1
c = 0
# clone the current column into a new column as the content might be replaced chunk wise
product.data[CCN] = product.data[col]
for idbversion, starttime, endtime in idb_ranges.iterrows():
idb = idbm.get_idb(idbversion)
idb_time_period = np.where((starttime <= product.data['time']) &
(product.data['time'] < endtime))[0]
if len(idb_time_period) < 1:
continue
c += len(idb_time_period)
calib_param = idb.get_params_for_calibration(
product.service_type,
product.service_subtype,
(product.ssid if hasattr(product, "ssid") else None),
product.data[col].meta["NIXS"],
product.data[col].meta["PCF_CURTX"])[0]
raw = Parameter(product.data[col].meta["NIXS"],
product.data[idb_time_period][col], None)
eng = apply_raw_to_engineering(raw, (calib_param, idb))
# cast the type of the column if needed
if product.data[CCN].dtype != eng.engineering.dtype:
product.data[CCN] = product.data[CCN].astype(eng.engineering.dtype)
# set the unit if needed
if hasattr(eng.engineering, "unit") and \
product.data[CCN].unit != eng.engineering.unit:
meta = product.data[col].meta
product.data[CCN].unit = eng.engineering.unit
# restore the meta info
setattr(product.data[CCN], "meta", meta)
# override the data into the new column
product.data[CCN][idb_time_period] = eng.engineering
# replace the old column with the converted
product.data[col] = product.data[CCN]
product.data[col].meta = product.data[CCN].meta
# delete the generic column for conversion
del product.data[CCN]
# delete the calibration key from meta as it is now processed
del product.data[col].meta["PCF_CURTX"]
if c != len(product.data):
logger.warning("Not all time bins got converted to engineering" +
"values due to bad idb periods." +
f"\n Converted bins: {c}\ntotal bins {len(product.data)}")
return col_n
| 22,744
|
def paginate(objects, page_num, per_page, max_paging_links):
"""
Return a paginated page for the given objects, giving it a custom
``visible_page_range`` attribute calculated from ``max_paging_links``.
"""
paginator = Paginator(objects, per_page)
try:
page_num = int(page_num)
except ValueError:
page_num = 1
try:
objects = paginator.page(page_num)
except (EmptyPage, InvalidPage):
objects = paginator.page(paginator.num_pages)
page_range = objects.paginator.page_range
if len(page_range) > max_paging_links:
        start = min(objects.paginator.num_pages - max_paging_links,
                    max(0, objects.number - (max_paging_links // 2) - 1))
page_range = page_range[start:start + max_paging_links]
objects.visible_page_range = page_range
return objects
| 22,745
|
def _get_crop_frame(image, max_wiggle, tx, ty):
"""
    Based on the max_wiggle, determines a cropping frame.
"""
pic_width, pic_height = image.size
wiggle_room_x = max_wiggle * .5 * pic_width
wiggle_room_y = max_wiggle * .5 * pic_height
cropped_width = pic_width - wiggle_room_x
cropped_height = pic_height - wiggle_room_y
left = int(tx * wiggle_room_x)
top = int(ty * wiggle_room_y)
right = left + cropped_width
bottom = top + cropped_height
return left, top, right, bottom
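A sketch with a synthetic Pillow image: 20% total wiggle room on a 1000x500 picture, with the crop window pushed fully towards the bottom-right corner (tx=ty=1.0):
from PIL import Image

image = Image.new('RGB', (1000, 500))
print(_get_crop_frame(image, max_wiggle=0.2, tx=1.0, ty=1.0))
# (100, 50, 1000.0, 500.0): a 900x450 window anchored at the lower-right.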
| 22,746
|
def __vigenere(s, key='virink', de=0):
"""维吉利亚密码"""
s = str(s).replace(" ", "").upper()
key = str(key).replace(" ", "").upper()
res = ''
i = 0
while i < len(s):
j = i % len(key)
k = U.index(key[j])
m = U.index(s[i])
if de:
if m < k:
m += 26
res += U[m - k]
else:
res += U[(m + k) % 26]
i += 1
return res
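A usage sketch; U is assumed to be the module-level uppercase alphabet the function indexes into (it is not shown in the snippet above):
import string

U = string.ascii_uppercase

ciphertext = __vigenere('attack at dawn', key='lemon')
print(ciphertext)                                  # LXFOPVEFRNHR
print(__vigenere(ciphertext, key='lemon', de=1))   # ATTACKATDAWN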
| 22,747
|
def mask_unit_group(unit_group: tf.Tensor, unit_group_length: tf.Tensor, mask_value=0) -> tf.Tensor:
""" Masks unit groups according to their length.
Args:
unit_group: A tensor of rank 3 with a sequence of unit feature vectors.
unit_group_length: The length of the unit group (assumes all unit feature vectors upfront).
mask_value: The mask value.
Returns:
A tensor of rank 3 where indices beyond unit_group_length are zero-masked.
"""
if unit_group_length is not None:
# get rid of last dimensions with size 1
if unit_group.shape.rank - unit_group_length.shape.rank < 2:
unit_group_length = tf.squeeze(unit_group_length, axis=-1) # B
# mask with mask_value
unit_group_mask = tf.sequence_mask(
tf.cast(unit_group_length, tf.int32), maxlen=unit_group.shape[1], dtype=unit_group.dtype) # B x T
unit_group_mask = tf.expand_dims(unit_group_mask, axis=-1)
unit_group *= unit_group_mask
if mask_value != 0:
mask_value = tf.convert_to_tensor(mask_value)
unit_group = tf.cast(unit_group, mask_value.dtype)
unit_group_mask = tf.cast(unit_group_mask, mask_value.dtype)
unit_group += (1 - unit_group_mask) * mask_value
return unit_group
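A minimal sketch assuming TensorFlow 2 eager execution: a batch of two unit groups (3 time steps, 2 features each) masked down to lengths [2, 1]:
import tensorflow as tf

unit_group = tf.ones([2, 3, 2])
unit_group_length = tf.constant([[2], [1]])
masked = mask_unit_group(unit_group, unit_group_length, mask_value=0)
# masked[0, 2, :] and masked[1, 1:, :] are now zero; all other entries stay 1.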
| 22,748
|
def fill_session_team(team_id, session_id, dbsession=DBSESSION):
"""
Use the FPL API to get list of players in an FPL squad with id=team_id,
then fill the session team with these players.
"""
# first reset the team
reset_session_team(session_id, dbsession)
# now query the API
players = fetcher.get_fpl_team_data(get_last_finished_gameweek(), team_id)
player_ids = [p["element"] for p in players]
for pid in player_ids:
add_session_player(pid, session_id, dbsession)
team_history = fetcher.get_fpl_team_history_data()["current"]
index = (
get_last_finished_gameweek() - 1
) # as gameweek starts counting from 1 but list index starts at 0
budget = team_history[index]["value"]
set_session_budget(budget, session_id)
return player_ids
| 22,749
|
def create_map(users_info):
"""
This function builds an HTML map with locations of user's friends on
Twitter.
"""
my_map = folium.Map(
location=[49.818396058511645, 24.02258071000576], zoom_start=10)
folium.TileLayer('cartodbdark_matter').add_to(my_map)
folium.TileLayer('stamentoner').add_to(my_map)
folium.TileLayer('openstreetmap').add_to(my_map)
fg_friends = folium.FeatureGroup(name='Twitter Friends')
for user in users_info:
nickname = user[0]
user_coord = user[1]
fg_friends.add_child(folium.Marker(location=user_coord,
popup=nickname,
icon=folium.Icon(color='darkred',
icon='heart')))
my_map.add_child(fg_friends)
my_map.add_child(folium.LayerControl())
return my_map.get_root().render()
| 22,750
|
def write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):
"""
Writes out a file containing the distance matrix
"""
# output_folder = 'core_contact_maps/dist_mat_txt_folder/'
numpy.set_printoptions(threshold=numpy.inf)
dist_mat_file = pdb.split('.')[0]
dist_mat_file = out_folder + dist_mat_file + '_full_mat_' + ag.getTitle() + tag + '.txt'
with open(dist_mat_file, 'w') as open_file:
for i in mat:
open_file.write(str(i) + '\n')
return
| 22,751
|
def l2sq(x):
"""Sum the matrix elements squared
"""
return (x**2).sum()
| 22,752
|
def normalize(arr, axis=None):
"""
Normalize a vector between 0 and 1.
Parameters
----------
arr : numpy.ndarray
Input array
axis : integer
Axis along which normalization is computed
Returns
-------
arr : numpy.ndarray
Normalized version of the input array
"""
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
arr = arr - np.min(arr, axis)
M = np.max(arr, axis)
if np.sum(np.abs(M)) > 0:
arr = arr / M
return arr
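A quick example of the global (axis=None) min-max scaling, assuming NumPy:
import numpy as np

arr = np.array([2.0, 4.0, 6.0, 10.0])
print(normalize(arr))  # [0.   0.25 0.5  1.  ]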
| 22,753
|
def successive_substitution(m, T, P, max_iter, M, Pc, Tc, omega, delta, Aij,
Bij, delta_groups, calc_delta, K, steps=0):
"""
Find K-factors by successive substitution
Iterate to find a converged set of K-factors defining the gas/liquid
partitioning of a mixture using successive substitution. We follow the
algorithms in McCain (1990) and Michelsen and Mollerup (2007).
Parameters
----------
m : ndarray, size (nc)
masses of each component present in the whole mixture (gas plus
liquid, kg)
T : float
temperature (K)
P : float
pressure (Pa)
max_iter : int
        maximum number of iterations to perform.  Set max_iter to np.inf if
        you want the algorithm to keep iterating until convergence, but
        beware that you may create an infinite loop.
M : ndarray, size (nc)
Molecular weights (kg/mol)
Pc : ndarray, size (nc)
Critical pressures (Pa)
Tc : ndarray, size (nc)
Critical temperatures (K)
omega : ndarray, size (nc)
Acentric factors (--)
delta : ndarray, size (nc, nc)
Binary interaction coefficients for the Peng-Robinson equation of
state.
Aij : ndarray, (15, 15)
Coefficients in matrix A_ij for the group contribution method for
delta_ij following Privat and Jaubert (2012)
Bij : ndarray, (15, 15)
Coefficients in matrix A_ij for the group contribution method for
delta_ij following Privat and Jaubert (2012)
delta_groups : ndarray, (nc, 15)
Specification of the fractional groups for each component of the
mixture for the group contribution method of Privat and Jaubert (2012)
for delta_ij
calc_delta : int
Flag specifying whether or not to compute delta_ij (1: True, -1:
False) using the group contribution method
K : ndarray, size (nc)
Initial guess for the partition coefficients. If K = None, this
function will use initial estimates from Wilson (see Michelsen and
Mollerup, 2007, page 259, equation 26)
steps : int (default = 0)
Number of previous iteration steps
Returns
-------
K : ndarray, size (nc)
Final value of the K-factors
beta : float
Fraction of gas or liquid (--)
xi : ndarray, size(2, nc)
Mole fraction of each component in the mixture. Row 1 gives the
values for the gas phase and Row 2 gives the values for the liquid
phase (--)
exit_flag : int
Flag indicating how the solution finished: 1: converged in the
allowable number of iterations, 0: did not converge and did not find
any indication that it might be single phase, and -1: did not
converge, but it looks like it might be single phase.
steps : int
        Total number of iteration steps so far
Notes
-----
The max_iter parameter controls how many steps of successive iteration
are performed. If set to None, the iteration will continue until the
tolerance criteria are reached.
"""
# Update the value of K using successive substitution
def update_K(K):
"""
Evaluate the update function for finding K-factor
Evaluates the new guess for K-factor following McCain (1990) p. 426,
equation (15-23) as explained on p. 430 in connection with equation
(15-31). This is the update equation for the successive substitution
method.
Parameters
----------
T, P, m_0, M, Pc, Tc, omega, delta = constant and inherited
from above
K : ndarray
The current guess for the K-factor (--)
Returns
-------
K_new : ndarray
New guess for K-factor
"""
# Get the mixture composition for the current K-factor
xi, beta = gas_liq_eq(m, M, K)
        # Get the gas and liquid fugacities for the current composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega, delta,
Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega, delta,
Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
# If the mass of any component in the mixture is zero, make sure the
# K-factor is also zero.
K_new[np.isnan(K_new)] = 0.
# Follow what is said by Michelsen & Mollerup, at page 259, just
# above equation 27:
if steps==0.:
moles = m / M
zi = moles / np.sum(moles)
if np.sum(zi*K_new) - 1. <= 0.: # Condition 4 page 252
xi[0,:] = K_new * zi / np.sum(K_new*zi)
xi[1,:] = zi
# Recompute fugacities of gas and liquid:
                # Get the gas and liquid fugacities for the current
# composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
K_new[np.isnan(K_new)] = 0.
elif (1.-np.sum(zi/K_new))>=0.: # % Condition 5 page 252
xi[0,:] = zi
xi[1,:] = (zi/K_new)/np.sum(zi/K_new)
# Recompute fugacities of gas and liquid:
                # Get the gas and liquid fugacities for the current
# composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
K_new[np.isnan(K_new)] = 0.
# Return an updated value for the K factors
return (K_new, beta)
# Set up the iteration parameters
tol = 1.49012e-8 # Suggested by McCain (1990)
err = 1.
# Iterate to find the final value of K factor using successive
# substitution
stop = False
while err > tol and steps < max_iter and not stop:
# Save the current value of K factor
K_old = K
# Update the estimate of K factor using the present fugacities
K, beta = update_K(K)
steps += 1
if steps > 3 and (beta == 0. or beta == 1.):
stop = True
# Compute the current error based on the squared relative error
# suggested by McCain (1990) and update the iteration counter
err = np.nansum((K - K_old)**2 / (K * K_old))
# Determine the exit condition
if stop:
        # Successive substitution thinks this is single-phase
flag = -1
elif steps < max_iter:
# This solution is converged
flag = 1
else:
# No decision has been reached
flag = 0
# Update the equilibrium and return the last value of K-factor
xi, beta = gas_liq_eq(m, M, K)
return (K, beta, xi, flag, steps)
| 22,754
|
def Match(context, pattern, arg=None):
"""Do a regular expression match against the argument"""
if not arg:
arg = context.node
arg = Conversions.StringValue(arg)
bool = re.match(pattern, arg) and boolean.true or boolean.false
return bool
| 22,755
|
def rr_rectangle(rbins, a, b):
""" RR_rect(r; a, b) """
return Frr_rectangle(rbins[1:], a, b) - Frr_rectangle(rbins[:-1], a, b)
| 22,756
|
def update_type(title, title_new=None, description=None, col_titles_new={}):
"""Method creates data type
Args:
title (str): current type title
title_new (str): new type title
description (str): type description
        col_titles_new (dict): new column titles (key - col id, value - col title)
Returns:
bool
"""
try:
db = DBO(_get_dsn())._dbo_driver
cnt = db.execute(
'SELECT count(*) FROM data_type WHERE title = \'{0}\''.format(title)).fetchone()[0]
if (cnt == 0):
raise Error('Type {0} does not exist'.format(title))
query = 'UPDATE data_type SET '
if (title_new != None):
query += 'title = \'{0}\', '.format(title_new)
if (description != None):
query += 'description = \'{0}\', '.format(description)
for key, value in col_titles_new.items():
query += 'col{0}_title = \'{1}\', '.format(key, value)
query = query[:-2]
query += ' WHERE title = \'{0}\''.format(title)
db.execute(query)
db.commit()
return True
except Error as ex:
print(ex)
db.rollback()
return False
| 22,757
|
def api(repos_path):
"""Glottolog instance from shared directory for read-only tests."""
return pyglottolog.Glottolog(str(repos_path))
| 22,758
|
def get_all_paginated_data(url, token):
"""
Get all data for the requesting user.
Parameters
----------
url : str
URL to the current data to get
token: str
User's OSF token
Returns
-------
Data dictionary of the data points gathered up until now.
"""
headers = {'Authorization': 'Bearer {}'.format(token)}
# Get initial data
response = requests.get(url, headers=headers)
if response.status_code == 200:
response_json = response.json()
elif response.status_code == 410:
raise PresQTResponseException("The requested resource is no longer available.", status.HTTP_410_GONE)
elif response.status_code == 404:
raise OSFNotFoundError("Resource not found.", status.HTTP_404_NOT_FOUND)
elif response.status_code == 403:
raise OSFForbiddenError(
"User does not have access to this resource with the token provided.", status.HTTP_403_FORBIDDEN)
data = response_json['data']
meta = response_json['links']['meta']
# Calculate pagination pages
if '?filter' in url or '?page' in url:
# We already have all the data we need for this request
return data
else:
page_total = get_page_total(meta['total'], meta['per_page'])
url_list = ['{}?page={}'.format(url, number) for number in range(2, page_total + 1)]
# Call all pagination pages asynchronously
from presqt.targets.osf.utilities.utils.async_functions import run_urls_async
children_data = run_urls_async(url_list, headers)
[data.extend(child['data']) for child in children_data]
return data
| 22,759
|
def alaw_decode(x_a, quantization_channels, input_int=True, A=87.6):
"""alaw_decode(x_a, quantization_channels, input_int=True)
input
-----
    x_a: np.array, a-law waveform
quantization_channels: int, Number of channels
input_int: Bool
      True: convert x_a (int) from int to float, before a-law decode
      False: directly decode x_a (float)
A: float, parameter for a-law, default 87.6
output
------
x: np.array, waveform
"""
num = quantization_channels - 1.0
if input_int:
x = x_a / num * 2 - 1.0
else:
x = x_a
sign = np.sign(x)
x_a_abs = np.abs(x)
x = x_a_abs * (1 + np.log(A))
flag = x >= 1
x[flag] = np.exp(x[flag] - 1)
x = sign * x / A
return x
| 22,760
|
def incoming(ui, repo, source="default", **opts):
"""show new changesets found in source
Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull had been requested at the time you issued this command.
For remote repository, using --bundle avoids downloading the
changesets twice if the incoming is followed by a pull.
See pull for valid source format details.
"""
limit = cmdutil.loglimit(opts)
source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
other = hg.repository(cmdutil.remoteui(repo, opts), source)
ui.status(_('comparing with %s\n') % url.hidepassword(source))
revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
if revs:
revs = [other.lookup(rev) for rev in revs]
common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
force=opts["force"])
if not incoming:
try:
os.unlink(opts["bundle"])
except:
pass
ui.status(_("no changes found\n"))
return 1
cleanup = None
try:
fname = opts["bundle"]
if fname or not other.local():
# create a bundle (uncompressed if other repo is not local)
if revs is None and other.capable('changegroupsubset'):
revs = rheads
if revs is None:
cg = other.changegroup(incoming, "incoming")
else:
cg = other.changegroupsubset(incoming, revs, 'incoming')
bundletype = other.local() and "HG10BZ" or "HG10UN"
fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
# keep written bundle?
if opts["bundle"]:
cleanup = None
if not other.local():
# use the created uncompressed bundlerepo
other = bundlerepo.bundlerepository(ui, repo.root, fname)
o = other.changelog.nodesbetween(incoming, revs)[0]
if opts.get('newest_first'):
o.reverse()
displayer = cmdutil.show_changeset(ui, other, opts)
count = 0
for n in o:
if limit is not None and count >= limit:
break
parents = [p for p in other.changelog.parents(n) if p != nullid]
if opts.get('no_merges') and len(parents) == 2:
continue
count += 1
displayer.show(other[n])
displayer.close()
finally:
if hasattr(other, 'close'):
other.close()
if cleanup:
os.unlink(cleanup)
| 22,761
|
def test_failure(database):
""" Test failure for PrimaryPlaceOfPerformanceCode for aggregate records (i.e., when RecordType = 1)
must be in countywide format (XX**###). """
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00**333", record_type="1")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="AB**33", record_type="1")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00*****", record_type="1")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3])
assert errors == 3
| 22,762
|
def test_jujuconfig_missing_file(tmp_path):
"""No config.yaml file at all."""
result = JujuConfig().run(tmp_path)
assert result == JujuConfig.Result.ok
| 22,763
|
def _ec_2d(X):
"""Function for computing the empirical Euler characteristic of a given
thresholded data array.
Input arguments:
================
    X : ndarray of floats
The thresholded image. Ones correspond to activated regions.
Output arguments:
=================
ec : float
The empirical Euler characteristic.
"""
# TODO: check for holes in the activated regions.
_, ec = label(X, neighbors=None, background=0, return_num=True,
connectivity=2)
return ec
| 22,764
|
def swap_year_for_time(df, inplace):
"""Internal implementation to swap 'year' domain to 'time' (as datetime)"""
if not df.time_col == "year":
raise ValueError("Time domain must be 'year' to use this method")
ret = df.copy() if not inplace else df
index = ret._data.index
order = [v if v != "year" else "time" for v in index.names]
if "subannual" in df.extra_cols:
        order.remove("subannual")
time_values = zip(*[index.get_level_values(c) for c in ["year", "subannual"]])
time = list(map(dateutil.parser.parse, [f"{y}-{s}" for y, s in time_values]))
index = index.droplevel(["year", "subannual"])
ret.extra_cols.remove("subannual")
else:
time = index.get_level_values("year")
index = index.droplevel(["year"])
# add new index column, assign data and other attributes
index = append_index_col(index, time, "time", order=order)
ret._data.index = index
ret.time_col = "time"
ret._set_attributes()
delattr(ret, "year")
if not inplace:
return ret
| 22,765
|
def get_orders(
db: Session,
skip: int = 0,
limit: int = 50,
moderator: str = None,
owner: str = None,
desc: bool = True,
) -> Optional[List[entities.Order]]:
"""
Get the registed orders using filters.
Args:
- db: the database session.
- skip: the number of filtered entities to skip.
- limit: the number of entities to limit the query.
    - moderator: the moderator name that created the order.
    - owner: the owner name that receives the order.
- desc: order by request_at datetime.
Returns:
- the list of orders or `None` if there are no orders to return
using the filter specified.
"""
order_by = (
entities.Order.requested_at.desc()
if desc
else entities.Order.requested_at.asc()
)
query = db.query(entities.Order).order_by(order_by)
if moderator:
query = query.filter_by(mod_display_name=moderator)
if owner:
query = query.filter_by(owner_display_name=owner)
return query.offset(skip).limit(limit).all()
| 22,766
|
def create_data_loader(img_dir, info_csv_path, batch_size):
"""Returns a data loader for the model."""
img_transform = transforms.Compose([transforms.Resize((120, 120), interpolation=Image.BICUBIC),
transforms.ToTensor()])
img_dataset = FashionDataset(img_dir, img_transform, info_csv_path)
data_loader = DataLoader(img_dataset, batch_size=batch_size, shuffle=True, num_workers=12, pin_memory=True)
return data_loader
| 22,767
|
def comp_number_phase_eq(self):
"""Compute the equivalent number of phase
Parameters
----------
self : LamSquirrelCage
A LamSquirrelCage object
Returns
-------
qb: float
Zs/p
"""
return self.slot.Zs / float(self.winding.p)
| 22,768
|
def exprOps(expr):
"""This operation estimation is not handling some simple optimizations that
should be done (i.e. y-x is treated as -1*x+y) and it is overestimating multiplications
in situations such as divisions. This is as a result of the simple method
of implementing this function given the rudimentary form of the expression
tree. It should only be overestimating the number of operations though,
so it is a viable way to see how much optimization is improving the
computational load of the generated Kalman filters"""
ops = OperationCounts()
if isinstance(expr, sympy.Symbol) or isinstance(expr, sympy.Number):
#print('--> {}'.format(expr))
ops.reads += 1
else:
func = expr.func
num = len(expr.args) - 1
#print('--> ({}, {})'.format(func, expr.args))
process = True
if func == sympy.Add:
ops.addsubs += num
elif func == sympy.Mul:
ops.mults += num
elif func == sympy.Pow:
if expr.args[1] == -1:
ops.divs += 1
process = False
elif expr.args[1] > 0:
ops.mults += int(expr.args[1].evalf()-1)
process = False
else:
print('Error: Unknown how to map expression {} to operation counts'.format(expr))
else:
print('Unknown function {}'.format(func))
if process:
for arg in expr.args:
o = exprOps(arg)
ops += o
return ops
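An illustrative run, assuming SymPy is available; OperationCounts is not shown in the snippet above, so a hypothetical stand-in with only the fields the function touches is sketched here:
import sympy

class OperationCounts:
    """Hypothetical stand-in for the bookkeeping class used by exprOps."""
    def __init__(self):
        self.reads = self.addsubs = self.mults = self.divs = 0
    def __iadd__(self, other):
        self.reads += other.reads
        self.addsubs += other.addsubs
        self.mults += other.mults
        self.divs += other.divs
        return self
    def __repr__(self):
        return 'reads=%d addsubs=%d mults=%d divs=%d' % (
            self.reads, self.addsubs, self.mults, self.divs)

x, y = sympy.symbols('x y')
print(exprOps(3*x + y**2))  # reads=2 addsubs=1 mults=2 divs=0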
| 22,769
|
def ellipsis_reformat(source: str) -> str:
"""
Move ellipses (``...``) for type stubs onto the end of the stub definition.
Before:
.. code-block:: python
def foo(value: str) -> int:
...
After:
.. code-block:: python
def foo(value: str) -> int: ...
:param source: The source to reformat.
:return: The reformatted source.
"""
if "..." not in source:
return source
return EllipsisRewriter(source).rewrite()
| 22,770
|
def build_wtk_filepath(region, year, resolution=None):
"""
A utility for building WIND Toolkit filepaths.
Args:
region (str): region in which the lat/lon point is located (see
`get_regions`)
year (int): year to be accessed (see `get_regions`)
resolution (:obj:`str`, optional): data resolution (see `get_regions`)
Returns:
str: The filepath for the requested resource.
"""
wtk = _load_wtk()
base_url = '/nrel/wtk/'
assert region in wtk, 'region not found: %s' % region
year_range = wtk[region]['year_range']
year_range = range(year_range[0], year_range[1]+1)
assert isinstance(year, int), '"year" must be an integer'
msg = 'year %s not available for region: %s' % (year, region)
assert year in year_range, msg
if resolution:
msg = 'resolution "%s" not available for region: %s' % (
resolution, region)
assert resolution in wtk[region]['resolutions'], msg
base = wtk[region].get('base')
if resolution == '5min':
url_region = '%s-%s/' % (region, resolution)
else:
url_region = region + '/'
if base:
file = '%s_%s.h5' % (base, year)
else:
file = 'wtk_%s_%s.h5' % (region, year)
return base_url + url_region + file
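A hedged usage sketch; the region name 'conus', its year range, and the '5min' resolution are assumptions about what _load_wtk() returns, not guaranteed entries.

# Hypothetical call; valid regions, years and resolutions come from the WTK metadata.
path = build_wtk_filepath('conus', 2012, resolution='5min')
# With no 'base' override the result follows the pattern:
#   /nrel/wtk/conus-5min/wtk_conus_2012.h5
print(path)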
| 22,771
|
def init_doc(args: dict) -> dict:
""" Initialize documentation variable
:param args: A dictionary containing relevant documentation fields
:return:
"""
doc = {}
try:
doc[ENDPOINT_PORT_KEY] = args[ENDPOINT_PORT_KEY]
except KeyError:
logging.warning("No port for documentation specified, default one will be used: "+str(DEFAULT_REST_PORT))
doc[ENDPOINT_PORT_KEY] = DEFAULT_REST_PORT
try:
doc[ENDPOINT_URL_KEY] = args[ENDPOINT_URL_KEY]
except KeyError:
logging.warning("No URL for documentation specified, default one will be used: " + DEFAULT_URL)
doc[ENDPOINT_URL_KEY] = DEFAULT_URL
try:
doc[MODULE_NAME_KEY] = args[MODULE_NAME_KEY]
except KeyError:
logging.warning("No module name for documentation specified, default one will be used: " + DEFAULT_MODULE_NAME)
doc[MODULE_NAME_KEY] = DEFAULT_MODULE_NAME
return doc
| 22,772
|
def decode_entities(string):
""" Decodes HTML entities in the given string ("<" => "<").
"""
# http://snippets.dzone.com/posts/show/4569
def replace_entity(match):
hash, hex, name = match.group(1), match.group(2), match.group(3)
if hash == "#" or name.isdigit():
if hex == "":
                return unichr(int(name)) # "&#38;" => "&"
if hex.lower() == "x":
return unichr(int("0x" + name, 16)) # "&" = > "&"
else:
            cp = name2codepoint.get(name) # "&amp;" => "&"
return unichr(cp) if cp else match.group() # "&foo;" => "&foo;"
if isinstance(string, basestring):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
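A couple of illustrative calls (Python 2, matching the unichr/basestring usage above); RE_UNICODE and name2codepoint are the module-level regex and lookup table the function assumes.

print(decode_entities("5 &lt; 6 &amp; 7 &gt; 4"))   # u"5 < 6 & 7 > 4"
print(decode_entities("&#169; 2020 &#x41;"))        # u"\xa9 2020 A"
print(decode_entities(42))                          # non-strings pass through: 42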
| 22,773
|
def test_get_or_create_auth_tokens(mocker, settings, user):
"""
get_or_create_auth_tokens will contact our plugin's API to get a refresh token for a user, or to create one
"""
settings.OPEN_DISCUSSIONS_REDDIT_URL = "http://fake"
refresh_token_url = urljoin(
settings.OPEN_DISCUSSIONS_REDDIT_URL, "/api/v1/generate_refresh_token"
)
get_session_stub = mocker.patch("channels.api._get_session", autospec=True)
refresh_token_value = "refresh_token"
access_token_value = "access_token"
get_session_stub.return_value.get.return_value.json.return_value = {
"refresh_token": refresh_token_value,
"access_token": access_token_value,
"expires_in": 123,
}
assert RedditAccessToken.objects.filter(user=user).count() == 0
assert RedditRefreshToken.objects.filter(user=user).count() == 0
refresh_token, access_token = api.get_or_create_auth_tokens(user)
assert refresh_token.token_value == refresh_token_value
assert access_token.token_value == access_token_value
get_session_stub.return_value.get.assert_called_once_with(
refresh_token_url, params={"username": user.username}
)
get_session_stub.return_value.get.return_value.json.assert_called_once_with()
assert RedditAccessToken.objects.filter(user=user).count() == 1
assert RedditRefreshToken.objects.filter(user=user).count() == 1
| 22,774
|
def convert2board(chrom, rows, cols):
"""
Converts the chromosome represented in a list into a 2D numpy array.
:param rows: number of rows associated with the board.
:param cols: number of columns associated with the board.
:param chrom: chromosome to be converted.
:return: 2D numpy array.
"""
# Initialise the variables to be used
idx = int(0) # Chromosome index
board = np.zeros((rows, cols), 'uint8')
board.fill(CELL_UNASSIGNED)
# Now loop through the board adding the shapes and checking validity.
# Start at top left corner, processing each row in turn.
for row in range(rows):
for col in range(cols):
# Retrieve the next shape
shape = chrom[idx]
# Skip the cell if it is already occupied.
if board[row][col] != CELL_UNASSIGNED:
continue
# Have we run out of shapes...
if shape == CELL_UNASSIGNED:
idx = idx + 1
if idx >= len(chrom):
return board
continue
# Attempt to place the shape on the board.
if shape == CELL_SPACE:
# Place the hole if valid.
if not ((col > 0 and board[row][col - 1] == CELL_SPACE) or
(row > 0 and board[row - 1][col] == CELL_SPACE)):
board[row][col] = CELL_SPACE
elif shape == CELL_HDOMINO:
# Are we ok to have a horizontal domino?
if col < cols - 1 and board[row][col + 1] == CELL_UNASSIGNED:
board[row][col] = CELL_HDOMINO
board[row][col + 1] = CELL_HDOMINO
else:
# shape == CELL_VDOMINO:
# Are we ok to have a vertical domino?
if row < rows - 1:
board[row][col] = CELL_VDOMINO
board[row + 1][col] = CELL_VDOMINO
# Move on to the next shape
idx = idx + 1
if idx >= len(chrom):
return board
return board
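A small, hedged example; the CELL_* constants are the module-level cell codes assumed by convert2board (their exact values are not shown here).

chrom = [CELL_HDOMINO, CELL_HDOMINO]
board = convert2board(chrom, rows=2, cols=2)
# The first gene fills row 0 with a horizontal domino and the second fills row 1,
# so every cell of the 2x2 board ends up marked CELL_HDOMINO.
print(board)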
| 22,775
|
def dots(it, label="", hide=None, every=1):
"""Progress iterator. Prints a dot for each item being iterated"""
count = 0
if not hide:
STREAM.write(label)
for i, item in enumerate(it):
if not hide:
if i % every == 0: # True every "every" updates
STREAM.write(DOTS_CHAR)
sys.stderr.flush()
count += 1
yield item
STREAM.write("\n")
STREAM.flush()
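A usage sketch; STREAM and DOTS_CHAR are the module-level output stream and progress character this generator relies on.

results = []
for item in dots(range(100), label="processing ", every=10):
    results.append(item * 2)   # a dot is printed once every 10 items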
| 22,776
|
def _assert_common_properties(dev,
notice_level,
msg_file,
num_cpu_threads=None):
"""Assert the properties common to all devices are correct."""
assert dev.notice_level == notice_level
assert dev.msg_file == msg_file
if num_cpu_threads is not None:
if hoomd.version.tbb_enabled:
assert dev.num_cpu_threads == num_cpu_threads
else:
assert dev.num_cpu_threads == 1
assert type(dev.communicator) == hoomd.communicator.Communicator
| 22,777
|
def filter_halo_pnum(data, Ncut=1000):
""" Returns indicies of halos with more than Ncut particles"""
npart = np.array(data['np'][0])
    ind = np.where(npart > Ncut)[0]
    print("# of halos:", len(ind))
return ind
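A hedged example of the expected input layout, assuming data['np'][0] holds the per-halo particle counts.

import numpy as np

data = {'np': [np.array([500, 2500, 1200, 80])]}
ind = filter_halo_pnum(data, Ncut=1000)
# Prints "# of halos: 2" and returns array([1, 2]) -- the halos above the cut.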
| 22,778
|
def _preprocess_sgm(line, is_sgm):
"""Preprocessing to strip tags in SGM files."""
if not is_sgm:
return line
# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
if line.startswith("<srcset") or line.startswith("</srcset"):
return ""
if line.startswith("<refset") or line.startswith("</refset"):
return ""
if line.startswith("<doc") or line.startswith("</doc"):
return ""
if line.startswith("<p>") or line.startswith("</p>"):
return ""
# Strip <seg> tags.
line = line.strip()
if line.startswith("<seg") and line.endswith("</seg>"):
i = line.index(">")
return line[i + 1:-6]
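A few illustrative calls showing the stripping behaviour; these follow directly from the branches above.

assert _preprocess_sgm("plain text", is_sgm=False) == "plain text"
assert _preprocess_sgm("<p>", is_sgm=True) == ""
assert _preprocess_sgm('<seg id="1"> Hello world. </seg>', is_sgm=True) == " Hello world. "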
| 22,779
|
def discover(using, index="*"):
"""
:param using: Elasticsearch client
:param index: Comma-separated list or wildcard expression of index names used to limit the request.
"""
indices = Indices()
for index_name, index_detail in using.indices.get(index=index).items():
indices[index_name] = Index(
client=using,
name=index_name,
mappings=index_detail["mappings"],
settings=index_detail["settings"],
aliases=index_detail["aliases"],
)
return indices
| 22,780
|
def quote_sql_value(cursor: Cursor, value: SQLType) -> str:
"""
Use the SQL `quote()` function to return the quoted version of `value`.
:returns: the quoted value
"""
if isinstance(value, (int, float, datetime)):
return str(value)
if value is None:
return "NULL"
if isinstance(value, (str, bytes)):
cursor.execute("SELECT quote(?);", (value,))
result = cursor.fetchall()[0][0]
assert isinstance(result, str)
return result
raise ValueError(f"Do not know how to quote value of type {type(value)}")
| 22,781
|
def create_admin_nova_client(context):
"""
Creates client that uses trove admin credentials
:return: a client for nova for the trove admin
"""
client = create_nova_client(context, password=CONF.nova_proxy_admin_pass)
return client
| 22,782
|
def page_cache(timeout=1800):
"""
page cache
param:
timeout:the deadline of cache default is 1800
"""
def _func(func):
def wrap(request, *a, **kw):
key = request.get_full_path()
#pass chinese
try:
                key = key.encode("utf-8")
            except Exception:
key = str(key)
data = None
try:
data = mclient.get(key)
if not data:
data = func(request, *a, **kw)
if data:
mclient.set(key, data, timeout)
return HttpResponse(data, content_type=request.META.get("CONTENT_TYPE", "text/plain"))
            except Exception as e:
                if data:
                    return HttpResponse(data, content_type=request.META.get("CONTENT_TYPE", "text/plain"))
else:
return HttpResponse("<objects><error>%s</error></objects>" % e,
content_type=request.META.get("CONTENT_TYPE", "text/plain"))
return wrap
    return _func
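A hypothetical usage sketch as a Django view decorator; mclient is the module-level memcached client that page_cache assumes.

@page_cache(timeout=600)
def artist_list(request):
    # Return the response body as a string; page_cache stores it and serves
    # it wrapped in an HttpResponse on later requests for the same path.
    return "<objects><artist>Example</artist></objects>"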
| 22,783
|
def get_available_games():
"""Get a list of games that are available to join."""
games = Game.objects.filter(started=False) #pylint: disable=no-member
if len(games) == 0:
options = [('', '- None -')]
else:
options = [('', '- Select -')]
for game in games:
options.append((game.name, game.name))
return options
| 22,784
|
def test_unstructure_attrs_lists(benchmark, converter_cls, unstructure_strat):
"""
Benchmark a large (30 attributes) attrs class containing lists of
primitives.
"""
class E(IntEnum):
ONE = 1
TWO = 2
@attr.define
class C:
a: List[int]
b: List[float]
c: List[str]
d: List[bytes]
e: List[E]
f: List[int]
g: List[float]
h: List[str]
i: List[bytes]
j: List[E]
k: List[int]
l: List[float]
m: List[str]
n: List[bytes]
o: List[E]
p: List[int]
q: List[float]
r: List[str]
s: List[bytes]
t: List[E]
u: List[int]
v: List[float]
w: List[str]
x: List[bytes]
y: List[E]
z: List[int]
aa: List[float]
ab: List[str]
ac: List[bytes]
ad: List[E]
c = converter_cls(unstruct_strat=unstructure_strat)
benchmark(
c.unstructure,
C(
[1] * 3,
[1.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.ONE] * 3,
[2] * 3,
[2.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.TWO] * 3,
[3] * 3,
[3.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.ONE] * 3,
[4] * 3,
[4.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.TWO] * 3,
[5] * 3,
[5.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.ONE] * 3,
[6] * 3,
[6.0] * 3,
["a small string"] * 3,
["test".encode()] * 3,
[E.TWO] * 3,
),
)
| 22,785
|
def _add_rays_single_cam(
camera_data: TensorDict,
*,
scene_from_frame: tf_geometry.Isometry,
) -> TensorDict:
"""Returns the camera, eventually with the rays added."""
if _has_precomputed_rays(camera_data):
return camera_data
else:
# Logic below for generating camera rays only applies to perspective
# cameras. It will produce incorrect camera rays for other types of
# cameras (e.g. those with distortions).
camera_type = camera_data['intrinsics']['type']
tf.debugging.assert_equal(camera_type, 'PERSPECTIVE')
# Pinhole camera model below does not know how to handle lens distortion.
# Ensure that no distortion exists here.
radial_distortion = camera_data['intrinsics']['distortion']['radial']
tf.debugging.assert_near(radial_distortion,
tf.zeros_like(radial_distortion))
tangential_distortion = (
camera_data['intrinsics']['distortion']['tangential'])
tf.debugging.assert_near(tangential_distortion,
tf.zeros_like(tangential_distortion))
h, w, _ = camera_data['color_image'].shape
# Compute camera pose w.r.t scene (camera to scene transform).
camera_from_frame = tf_geometry.Isometry(**camera_data['extrinsics'])
scene_from_camera = scene_from_frame * camera_from_frame.inverse()
# Get rays w.r.t scene passing through every pixel center of the camera.
camera_intrinsics = camera_data['intrinsics']
ray_origins, ray_directions = tf_geometry.rays_from_image_grid(
camera=tf_geometry.PinholeCamera(
K=camera_intrinsics['K'],
# Use static shape if available
image_width=w or camera_intrinsics['image_width'],
image_height=h or camera_intrinsics['image_height']),
world_from_camera=scene_from_camera,
)
camera_data['ray_origins'] = ray_origins
camera_data['ray_directions'] = ray_directions
return camera_data
| 22,786
|
def add_vendor_suboption(sender_type, code, data):
"""
    After adding a Vendor Specific Option you can decide to add suboptions to it. Please make sure
    the suboption is supported and, if necessary, add it yourself.
"""
dhcpmsg.add_vendor_suboption(int(code), data)
| 22,787
|
def test_print_albumdata_80(capsys, monkeypatch, albumdata):
"""Try to print albumdata to width 80 correctly."""
expected = """\
================================================================================
Test Artist Test album test
================================================================================
Track Track Artist Suggested filename
--------------------------------------------------------------------------------
Test track Test Artist /home/user/Music/Test Artist/Test [...]
--------------------------------------------------------------------------------
"""
monkeypatch.setattr('shutil.get_terminal_size',
lambda: (80, 24))
albumdata.Albumdata._print_albumdata(testdata)
out, err = capsys.readouterr()
assert out == expected
| 22,788
|
def read_file(filename=""):
"""reads filename with utf-8"""
with open(filename, encoding='utf-8') as f:
print(f.read(), end="")
| 22,789
|
def process_ps_stdout(stdout):
""" Process the stdout of the ps command """
return [i.split()[0] for i in filter(lambda x: x, stdout.decode("utf-8").split("\n")[1:])]
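For example, given typical ps output captured as bytes, the helper drops the header row and returns the first column (the PIDs).

stdout = b"  PID TTY          TIME CMD\n 1234 pts/0    00:00:00 bash\n 5678 pts/0    00:00:01 vim\n"
assert process_ps_stdout(stdout) == ["1234", "5678"]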
| 22,790
|
def _cleanup_archive_dir(tm_env):
"""Delete old files from archive directory if space exceeds the threshold.
"""
archives = glob.glob(os.path.join(tm_env.archives_dir, '*'))
infos = []
dir_size = 0
for archive in archives:
stat = os.stat(archive)
dir_size += stat.st_size
infos.append((stat.st_mtime, stat.st_size, archive))
if dir_size <= _ARCHIVE_LIMIT:
_LOGGER.info('Archive directory below threshold: %s', dir_size)
return
_LOGGER.info('Archive directory above threshold: %s gt %s',
dir_size, _ARCHIVE_LIMIT)
infos.sort()
while dir_size > _ARCHIVE_LIMIT:
ctime, size, archive = infos.pop(0)
dir_size -= size
_LOGGER.info('Unlink old archive %s: ctime: %s, size: %s',
archive, ctime, size)
fs.rm_safe(archive)
| 22,791
|
def chpasswd(path, oldpassword, newpassword):
"""Change password of a private key.
"""
    if 0 < len(newpassword) <= 4: return False
cmd = shlex.split('ssh-keygen -p')
child = pexpect.spawn(cmd[0], cmd[1:])
i = child.expect(['Enter file in which the key is', pexpect.EOF])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(path)
i = child.expect(['Enter old passphrase', 'Enter new passphrase', pexpect.EOF])
if i == 0:
child.sendline(oldpassword)
i = child.expect(['Enter new passphrase', 'Bad passphrase', pexpect.EOF])
if i != 0:
if child.isalive(): child.wait()
return False
elif i == 2:
if child.isalive(): child.wait()
return False
child.sendline(newpassword)
i = child.expect(['Enter same passphrase again', pexpect.EOF])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(newpassword)
child.expect(pexpect.EOF)
if child.isalive():
return child.wait() == 0
return True
| 22,792
|
def get_display_limits(VarInst, data=None):
"""Get limits to resize the display of Variables.
Function takes as argument a `VariableInstance` from a `Section` or
`Planform` and an optional :obj:`data` argument, which specifies how to
determine the limits to return.
Parameters
----------
VarInst : :obj:`~deltametrics.section.BaseSectionVariable` subclass
The `Variable` instance to visualize. May be any subclass of
:obj:`~deltametrics.section.BaseSectionVariable` or
:obj:`~deltametrics.plan.BasePlanformVariable`.
data : :obj:`str`, optional
The type of data to compute limits for. Typically this will be the
same value used with either :obj:`get_display_arrays` or
:obj:`get_display_lines`. Supported options are `'spacetime'`,
`'preserved'`, and `'stratigraphy'`.
Returns
-------
xmin, xmax, ymin, ymax : :obj:`float`
Values to use as limits on a plot. Use with, for example,
``ax.set_xlim((xmin, xmax))``.
"""
# # # SectionVariables # # #
if issubclass(type(VarInst), section.BaseSectionVariable):
# # DataSection # #
if isinstance(VarInst, section.DataSectionVariable):
data = data or VarInst._default_data
if data in VarInst._spacetime_names:
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z)
elif data in VarInst._preserved_names:
VarInst._check_knows_stratigraphy() # need to check explicitly
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z)
elif data in VarInst._stratigraphy_names:
VarInst._check_knows_stratigraphy() # need to check explicitly
_strata = np.copy(VarInst.strat_attr['strata'])
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(_strata), np.max(_strata) * 1.5
else:
raise ValueError('Bad data argument: %s' % str(data))
# # StratigraphySection # #
elif isinstance(VarInst, section.StratigraphySectionVariable):
data = data or VarInst._default_data
if data in VarInst._spacetime_names:
VarInst._check_knows_spacetime() # always False
elif data in VarInst._preserved_names:
VarInst._check_knows_spacetime() # always False
elif data in VarInst._stratigraphy_names:
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z) * 1.5
else:
raise ValueError('Bad data argument: %s' % str(data))
else:
raise TypeError
# # # PlanformVariables # # #
elif False: # issubclass(type(VarInst), plan.BasePlanformVariable):
raise NotImplementedError
else:
        raise TypeError('Invalid "VarInst" type: %s' % type(VarInst))
| 22,793
|
def plot_layer_consistency_example(eigval_col, eigvec_col, layernames, layeridx=[0,1,-1], titstr="GAN", figdir="", savelabel="", use_cuda=False):
"""
Note for scatter plot the aspect ratio is set fixed to one.
:param eigval_col:
:param eigvec_col:
:param nsamp:
:param titstr:
:param figdir:
:return:
"""
nsamp = len(layeridx)
# Hnums = len(eigval_col)
# eiglist = sorted(np.random.choice(Hnums, nsamp, replace=False)) # range(5)
print("Plot hessian of layers : ", [layernames[idx] for idx in layeridx])
fig = plt.figure(figsize=[10, 10], constrained_layout=False)
spec = fig.add_gridspec(ncols=nsamp, nrows=nsamp, left=0.075, right=0.975, top=0.9, bottom=0.05)
for axi, Li in enumerate(layeridx):
eigval_i, eigvect_i = eigval_col[Li], eigvec_col[Li]
for axj, Lj in enumerate(layeridx):
eigval_j, eigvect_j = eigval_col[Lj], eigvec_col[Lj]
inpr = eigvect_i.T @ eigvect_j
vHv_ij = np.diag((inpr @ np.diag(eigval_j)) @ inpr.T)
ax = fig.add_subplot(spec[axi, axj])
if axi == axj:
ax.hist(np.log10(eigval_j), 20)
else:
ax.scatter(np.log10(eigval_j), np.log10(vHv_ij), s=15, alpha=0.6)
ax.set_aspect(1, adjustable='datalim')
if axi == nsamp-1:
ax.set_xlabel("eigvals @ %s" % layernames[Lj])
if axj == 0:
ax.set_ylabel("vHv eigvec @ %s" % layernames[Li])
ST = plt.suptitle("Consistency of %s Hessian Across Layers\n"
"Cross scatter of EigenValues and vHv values for Hessian at %d Layers"%(titstr, nsamp),
fontsize=18)
# plt.subplots_adjust(left=0.175, right=0.95 )
RND = np.random.randint(1000)
plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.jpg" % (savelabel, RND)),
bbox_extra_artists=[ST]) #
plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.pdf" % (savelabel, RND)),
bbox_extra_artists=[ST]) #
return fig
| 22,794
|
def min_vertex_cover(left_v, right_v):
"""
Use the Hopcroft-Karp algorithm to find a maximum
matching or maximum independent set of a bipartite graph.
Next, find a minimum vertex cover by finding the
complement of a maximum independent set.
The function takes as input two dictionaries, one for the
left vertices and one for the right vertices. Each key in
the left dictionary is a left vertex with a value equal to
a list of the right vertices that are connected to the key
by an edge. The right dictionary is structured similarly.
The output is a dictionary with keys equal to the vertices
in a minimum vertex cover and values equal to lists of the
vertices connected to the key by an edge.
For example, using the following simple bipartite graph:
1000 2000
1001 2000
where vertices 1000 and 1001 each have one edge and 2000 has
two edges, the input would be:
left = {1000: [2000], 1001: [2000]}
right = {2000: [1000, 1001]}
    and the output, or minimum vertex cover, would be:
{2000: [1000, 1001]}
with vertex 2000 being the minimum vertex cover.
"""
data_hk = bipartiteMatch(left_v)
left_mis = data_hk[1]
right_mis = data_hk[2]
mvc = left_v.copy()
    mvc.update(right_v)  # merge the two dictionaries into one
for v in left_mis:
try:
del (mvc[v])
except KeyError:
pass
for v in right_mis:
try:
del (mvc[v])
except KeyError:
pass
return mvc
| 22,795
|
def neighbor_dist(x1, y1, x2, y2):
"""Return distance of nearest neighbor to x1, y1 in x2, y2"""
m1, m2, d12 = match_xy(x2, y2, x1, y1, neighbors=1)
return d12
| 22,796
|
def add_artist_subscription(auth, userid, artist_mbid):
"""
Add an artist to the list of subscribed artists.
:param tuple auth: authentication data (username, password)
:param str userid: user ID (must match auth data)
:param str artist_mbid: musicbrainz ID of the artist to add
:return: True on success
:raises: HTTPError
"""
url = '%s/artists/%s/%s' % (API_BASE_URL, userid, artist_mbid)
response = requests.put(url, auth=auth)
response.raise_for_status()
return True
| 22,797
|
def get_built_vocab(dataset: str) -> Vocab:
"""load vocab file for `dataset` to get Vocab based on selected client and data in current directory
Args:
dataset (str): string of dataset name to get vocab
Returns:
if there is no built vocab file for `dataset`, return None, else return Vocab
"""
vocab_file_path = Path(__file__).parent.resolve() / f'{dataset}_vocab.pickle'
if not vocab_file_path.exists():
print('There is no built vocab file for {} dataset, please run `main` or `build_vocab.sh` to build it firstly.'
.format(dataset))
return None
    with open(vocab_file_path, 'rb') as vocab_file:  # get vocab based on sample data
        vocab = pickle.load(vocab_file)
    return vocab
| 22,798
|
def binidx(num: int, width: Optional[int] = None) -> Iterable[int]:
""" Returns the indices of bits with the value `1`.
Parameters
----------
num : int
The number representing the binary state.
    width : int, optional
        Minimum number of binary digits used (zero-padded). If omitted, no padding is applied.
Returns
-------
binidx : list
"""
fill = width or 0
return list(sorted(i for i, char in enumerate(f"{num:0{fill}b}"[::-1]) if char == "1"))
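A couple of worked calls; note that the width argument only pads the binary representation with zeros, which does not change which indices are reported.

assert binidx(0b10110) == [1, 2, 4]
assert binidx(5, width=8) == [0, 2]
assert binidx(0) == []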
| 22,799
|