content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def prepares_return(data_filter, total_values):
    """
    Prepare the final array with the total values per client.

    :param data_filter: iterable of records, each with 'source' and 'valor' keys
    :param total_values: list of accumulator dicts with 'source' and 'total' keys
    :return: total_values (same list), each 'total' summed and rounded to 2 decimals
    """
    # Sum matching record values per source in one pass (O(n + m) instead of
    # the quadratic source-by-source scan), and round only once at the end so
    # intermediate rounding error does not accumulate.
    sums = {}
    for record in data_filter:
        sums[record['source']] = sums.get(record['source'], 0) + record['valor']
    for number in total_values:
        number['total'] = round(number['total'] + sums.get(number['source'], 0), 2)
    return total_values
def getbinlen(value):
    """Return the bit length of an integer (1 for zero).

    Uses int.bit_length(), which also terminates for negative inputs;
    the previous right-shift loop never reached zero for a negative value
    (arithmetic shift converges to -1) and looped forever.
    """
    if value == 0:
        return 1
    return value.bit_length()
import pandas
def binary_feature(df, feat_col, value, binary_feature_col_name=None, concat=False):
    """
    Given a dataframe, a feature column name and a value to check, return a
    series of binary responses: 1 where the feature column equals the value,
    0 otherwise.

    :param df: input DataFrame
    :param feat_col: name of the column to test
    :param value: value to compare against
    :param binary_feature_col_name: name for the resulting series; defaults
        to '<feat_col>_is_<value>'
    :param concat: if True, return df with the new column appended instead
        of the bare series
    :return: Series of 0/1 (or the concatenated DataFrame when concat=True)
    """
    # If binary_feature_col_name is none use a derived name instead
    if not binary_feature_col_name:
        binary_feature_col_name = feat_col+'_is_'+str(value)
    # Vectorized comparison instead of a row-wise apply: same result, one
    # pass in C rather than one Python call per row.
    binary_feature = (df[feat_col] == value).astype(int)
    binary_feature.name = binary_feature_col_name
    if concat:
        return pandas.concat([df, binary_feature], axis=1)
    return binary_feature
def calculate_mean(some_list):
    """
    Function to calculate the mean of a dataset.
    Takes the list as an input and outputs the mean.
    """
    # Multiplying by 1.0 forces float division (legacy Python 2 idiom kept
    # to preserve the exact original arithmetic).
    total = 1.0 * sum(some_list)
    return total / len(some_list)
def __compress(list_events):
    """
    Compress a list of events so that equal keys and values share a single
    instantiation across the whole stream.

    :param list_events: list of event dictionaries of the stream
    :return: the same list, with every event rebuilt using the shared
        key/value instantiations
    """
    interned = {}

    def _share(obj):
        # Return the first-seen instantiation of an equal object, storing
        # this one when no equal object has been seen before.
        return interned.setdefault(obj, obj)

    for idx, event in enumerate(list_events):
        # Rebuild the event with interned keys and values.
        list_events[idx] = {_share(k): _share(v) for k, v in event.items()}
    return list_events
def merge_media(forms, arg=None):
    """Merge media for a list of forms
    Usage: {{ form_list|merge_media }}
    * With no arg, returns all media from all forms with duplicates removed
    Usage: {{ form_list|merge_media:'media_type' }}
    * With an arg, returns only media of that type. Types 'css' and 'js' are common.
    See Django documentation for more information about form media.
    """
    if not forms:
        return ''
    # Fold every form's media into the first one; Media.__add__ removes
    # duplicates.
    combined = forms[0].media
    for form in forms[1:]:
        combined = combined + form.media
    return str(combined) if arg is None else str(combined[arg])
import re
def split_filenames(text):
    """Split comma- or newline-separated filenames and return them as a list,
    dropping empty entries.
    """
    stripped = (piece.strip() for piece in re.split(r'[\n,]', text))
    return [piece for piece in stripped if piece]
def _item_prop(name, default=None, setdefault=None, hint=False):
    """Create a property that proxies one key of the object's dictionary.

    Getter, setter and deleter are all provided; guarding against misuse
    (deleting hints, force-setting several href methods, ...) is left to
    the caller.

    :param default: value returned when the key is absent from the object.
        Ignored when setdefault is given.
    :param callable setdefault: factory whose result is stored and returned
        when the key is absent; takes priority over default. Being a
        callable lets each access create a fresh default object.
    :param bool hint: True if this attribute lives in the hints dictionary.
    :rtype: property
    """
    def _target(self):
        # The dictionary we read/write: either the hints dict or the object.
        if hint:
            return self.hints
        return self

    def _get(self):
        container = _target(self)
        if setdefault:
            return container.setdefault(name, setdefault())
        return container.get(name, default)

    def _set(self, value):
        _target(self)[name] = value

    def _del(self):
        _target(self).pop(name, None)

    return property(_get, _set, _del)
import json
def list_generator_op(parallelism: int) -> str:
    """Generate a JSON-encoded list [0, parallelism) for a parallel loop.

    A JSON payload is required for ParallelFor.
    """
    # list(range(...)) instead of the redundant identity comprehension.
    return json.dumps(list(range(parallelism)))
def counter_count(string):
    """Return a dict mapping each character of `string` to its count.

    Uses collections.Counter (one O(n) pass) instead of calling
    str.count once per character, which was O(n**2). The old docstring
    claimed Counter was used; now it actually is.
    """
    from collections import Counter
    return dict(Counter(string))
import torch
def accuracy(output_1, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output_1: (batch, n_classes) tensor of class scores
    :param target: (batch,) tensor of ground-truth class indices
    :param topk: tuple of k values to evaluate
    :return: list of 1-element tensors, precision@k in percent for each k

    The large block of commented-out experiment code (second model output,
    random mixing, `total_re` counter) was dead and has been removed.
    """
    with torch.no_grad():
        batch_size = target.size(0)
        maxk = max(topk)
        # Indices of the maxk highest-scoring classes per sample.
        _, pred = output_1.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, batch)
        # Broadcast-compare each top-k prediction row against the target.
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): slicing the transposed tensor
            # yields a non-contiguous tensor, on which view() raises.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
from typing import Dict
def _is_import_finished(log: Dict) -> bool:
    """Return True when the import has finished (failed or succeeded)."""
    # Anything not queued or running is considered terminal.
    still_active = ('QUEUED', 'RUNNING')
    return log['state'] not in still_active
from typing import Dict
def _query_dict_to_qs(dic: Dict[str, str]) -> str:
    """
    {'k1': 'v1', 'k2': 'v2'} -> ?k1=v1&k2=v2

    Returns '' for an empty/None mapping. Values are not URL-escaped.
    """
    if not dic:
        return ''
    pairs = [f'{key}={value}' for key, value in dic.items()]
    return '?' + '&'.join(pairs)
def extents_overlap(a_extent, b_extent):
    """Test if two extents overlap"""
    # Two extents overlap unless they are separated on either axis.
    separated = (a_extent.xmin > b_extent.xmax
                 or a_extent.xmax < b_extent.xmin
                 or a_extent.ymin > b_extent.ymax
                 or a_extent.ymax < b_extent.ymin)
    return not separated
def create_logdir(dataset, label, rd,
                  allow_zz, score_method, do_spectral_norm):
    """Build the directory path used to save training logs, weights, biases, etc."""
    model = f"alad_sn{do_spectral_norm}_dzz{allow_zz}"
    return (f"train_logs/{dataset}/{model}/dzzenabled{allow_zz}/"
            f"{score_method}/label{label}/rd{rd}")
def get_high_lows_lookback(high, low, lookback_days):
    """
    Get the highs and lows in a lookback window.

    Parameters
    ----------
    high : DataFrame
        High price for each ticker and date
    low : DataFrame
        Low price for each ticker and date
    lookback_days : int
        The number of days to look back

    Returns
    -------
    lookback_high : DataFrame
        Lookback high price for each ticker and date
    lookback_low : DataFrame
        Lookback low price for each ticker and date
    """
    rolling_high = high.rolling(window=lookback_days)
    rolling_low = low.rolling(window=lookback_days)
    # shift() pushes each rolling extremum forward one day so the present
    # day is excluded from its own lookback window.
    lookback_high = rolling_high.max().shift()
    lookback_low = rolling_low.min().shift()
    return lookback_high, lookback_low
import glob
import os
def get_latest_file(filetype, detector=None, disregard_known_files=False):
    """
    Return the most recently created file matching a glob pattern.

    This function looks into the calwebb_spec2_pytests directory for the
    given file.

    Args:
        filetype: string, name/type of file type, e.g. *.txt, *.html,
            full_run_map_DETECTOR.txt
        detector: string, name in header keyword DETECTOR
        disregard_known_files: boolean, if True the function will skip
            True_steps_suffix_map_DETECTOR.txt and full_run_map_DETECTOR.txt

    Returns:
        latest_filetypefile: string, name of the newest matching file,
        or "File not found." when nothing matches
    """
    candidates = glob.glob(filetype)
    if disregard_known_files:
        if detector is None:
            print("get_latest_file: DETECTOR not defined.")
            exit()
        known_names = ("True_steps_suffix_map_" + detector + ".txt",
                       "full_run_map_" + detector + ".txt")
        candidates = [name for name in candidates if name not in known_names]
    if not candidates:
        return "File not found."
    # Newest by creation time.
    return max(candidates, key=os.path.getctime)
def dG_to_flux_bounds(dG_min, dG_max, infinity=1000, abstol=1e-6):
    """ Convert standard Gibbs energy range to reaction flux bounds.

    Args:
        dG_min (float): minimum standard Gibbs energy
        dG_max (float): maximum standard Gibbs energy
        infinity (float): value to represent infinity (default: 1000)
        abstol (float): absolute tolerance to consider a value larger than zero (default: 1e-6)

    Returns:
        tuple: (lower bound, upper bound) for the reaction flux
    """
    if dG_min > abstol:
        # Strictly positive dG: flux can only run in reverse.
        return -infinity, 0
    if dG_max < -abstol:
        # Strictly negative dG: flux can only run forward.
        return 0, infinity
    # Sign undetermined within tolerance: reaction treated as reversible.
    return -infinity, infinity
def _get_weights(model, features):
    """
    If the model is a linear model, parse the weights to a list of strings.

    Parameters
    ----------
    model : estimator
        An sklearn linear_model object
    features : list of str
        The feature names, in order.

    Returns
    -------
    list of str or None
        The weights associated with each feature, or None when the model
        does not expose a compatible coef_/intercept_ pair.
    """
    try:
        weights = model.coef_
        intercept = model.intercept_
        # Only single-output models whose weight count matches the
        # feature list are supported.
        assert weights.shape[0] == 1
        assert weights.shape[1] == len(features)
        assert len(intercept) == 1
        weights = list(weights.flatten())
    except (AttributeError, AssertionError):
        return None
    # Pad each feature name to a common column width (+2 spaces).
    col_width = max(len(name) for name in features) + 2
    lines = ["Feature".ljust(col_width) + "Weight"]
    for weight, feature in zip(weights, features):
        lines.append(feature.ljust(col_width) + str(weight))
    lines.append("intercept".ljust(col_width) + str(intercept[0]))
    return lines
import socket
def is_port_available(port: int, udp: bool = False) -> bool:
    """Checks whether a specified port is available to be attached to.

    From `podman_compose <https://github.com/containers/podman-compose/blob/devel/podman_compose.py>`_.

    Args:
        port (int): The port to check.
        udp (bool): Also check udp

    Returns:
        bool: True if available, False otherwise.
    """
    def _connectable(sock_type):
        # connect_ex returns 0 when something already listens on the port.
        with socket.socket(socket.AF_INET, sock_type) as s:
            return s.connect_ex(('localhost', int(port))) == 0

    in_use = _connectable(socket.SOCK_STREAM)
    if udp:
        in_use = _connectable(socket.SOCK_DGRAM) or in_use
    return not in_use
from pathlib import Path
def mask_workdir(location: Path, stacktrace: str, placeholder="$BLUEPRINT_DIR"):
    """
    Replace the real working directory in a stack trace with a placeholder.
    """
    workdir = str(location)
    return stacktrace.replace(workdir, placeholder)
def remove_character_at(str, idx):
    """Removes the character from str at index idx, returning the remaining string
    str, int -> str
    >>> remove_character_at("boats", 2)
    'bots'
    """
    # Glue the prefix before idx to the suffix after it.
    before, after = str[:idx], str[idx + 1:]
    return before + after
def cal_cluster_centers(df, fuzzy_matrix, n_sample, c, m):
    """
    Compute fuzzy C-means cluster centers.

    :param df: feature set of the dataset (no label column)
    :param fuzzy_matrix: membership-degree matrix, one row per sample and
        one column per cluster
    :param n_sample: number of samples in df
    :param c: number of clusters
    :param m: fuzzifier (weighting exponent)
    :return: list of c cluster centers, each a list of feature values
    """
    # zip(*fuzzy_matrix) transposes the membership matrix: the result holds
    # one tuple per column, i.e. per cluster.
    fuzzy_mat_ravel = list(zip(*fuzzy_matrix))
    cluster_centers = []
    # One pass per cluster.
    for j in range(c):
        # Membership degrees of all samples for cluster j (one matrix column).
        fuzzy_one_dim_list = list(fuzzy_mat_ravel[j])
        # Memberships raised to the fuzzifier power m.
        m_fuzzy_one_dim_list = [p ** m for p in fuzzy_one_dim_list]
        # Denominator of the cluster-center formula: sum of weighted memberships.
        denominator = sum(m_fuzzy_one_dim_list)
        # Per-sample weighted feature vectors, accumulated for the numerator.
        numerator_list = []
        # Iterate over every sample to build the numerator.
        for i in range(n_sample):
            # One sample's feature vector.
            sample = list(df.iloc[i])
            # Sample scaled by its membership weight (to the power m) for this cluster.
            mul_sample_fuzzy = [m_fuzzy_one_dim_list[i] * val for val in sample]
            numerator_list.append(mul_sample_fuzzy)
        # Column-wise sum over the weighted samples gives the numerator.
        numerator = map(sum, list(zip(*numerator_list)))
        cluster_center = [val/denominator for val in numerator]
        cluster_centers.append(cluster_center)
    return cluster_centers
import os
def _find_entry_in_tree(repository, identifier, tree):
    """ Finds a nearest-match entry in the given tree. This simply means that
    the file extension can be optionally ignored.

    :param repository: git repository object providing get_object()
    :param identifier: path to look up, segments separated by os.sep
    :param tree: git tree object to start the search from
    :return: (git object, full path string) tuple, or None when no entry matches
    """
    def is_dir_match(segment, entry, segments):
        """ Used to decide whether or not the given segment matches the current
        tree entry being iterated through git.
        """
        if entry[1] != segment:
            return False
        if len(segments) < 2:
            return False
        # 16384 represents the git attributes for a directory
        if entry[0] != 16384:
            return False
        return True

    def is_file_match(segment, entry, segments):
        """ Used to decide whether the given segment is a file and is a match
        for our current search criteria.
        """
        # `!=` instead of the original `is not`: identity comparison of ints
        # relied on CPython's small-int caching and raises a SyntaxWarning
        # on Python 3.8+.
        if len(segments) != 1:
            return False
        if entry[1] == segment:
            return True
        # Accept "name" matching "name.ext": same stem, extension ignored.
        dot_position = entry[1].rfind('.')
        if dot_position > -1 \
                and entry[1].startswith(segment) \
                and len(segment) == dot_position:
            return True
        return False

    def recurse_git_tree(segments, tree, full_path=None):
        # None-default instead of a mutable `[]` default argument.
        if full_path is None:
            full_path = []
        for entry in tree.entries():
            if is_dir_match(segments[0], entry, segments):
                # Descend into the matching subdirectory.
                full_path.append(segments[0])
                return recurse_git_tree(segments[1:],
                                        repository.get_object(entry[2]),
                                        full_path)
            if is_file_match(segments[0], entry, segments):
                full_path.append(entry[1])
                return (repository.get_object(entry[2]),
                        os.sep.join(full_path))
        # No match in this tree: implicitly returns None, as before.

    return recurse_git_tree(identifier.split(os.sep), tree)
def remove_outlier(df_in, col_name, k=1.5):
    """Remove outliers from a column using the IQR criterion (default k=1.5).

    :param df_in: input DataFrame
    :param col_name: column whose values are tested against the IQR fences
    :param k: fence multiplier; 1.5 is the classic Tukey value
    :return: DataFrame restricted to rows with col_name inside
        [q1 - k*iqr, q3 + k*iqr] (bounds inclusive)
    """
    q1 = df_in[col_name].quantile(0.25)
    q3 = df_in[col_name].quantile(0.75)
    iqr = q3 - q1  # Interquartile range
    fence_low = q1 - k * iqr
    fence_high = q3 + k * iqr
    # inclusive="both" replaces the boolean flag removed in pandas 2.0.
    mask = df_in[col_name].between(fence_low, fence_high, inclusive="both")
    return df_in.loc[mask]
import argparse
def setup_parser():
    """Setup the command line argument parser"""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-c', '--config',
        help='Path to the configuration file',
    )
    return arg_parser
def round_builtin():
    """round: Round a number to a given precision."""
    short_pi = round(3.1415, 2)
    return "short pi {}".format(short_pi)
def get_multi_label_options(defaults=None):
    """Multi-label-related options
    """
    if defaults is None:
        defaults = {}

    def _store_option(dest, help_text):
        # Every multi-label option shares the 'store' action and pulls its
        # default from the defaults mapping under its dest name.
        return {'action': 'store',
                'dest': dest,
                'default': defaults.get(dest, None),
                'help': help_text}

    return {
        # Multi-label labels. If set, only the given labels are expanded.
        '--labels': _store_option(
            'labels',
            "Comma-separated list of the labels"
            " to be expanded from a multi-label field."),
        # Separator used when splitting labels in the objective field.
        '--label-separator': _store_option(
            'label_separator',
            "Separator used when splitting labels in the"
            " objective field."),
        # Fields to be treated as multi-label, by name or column number.
        '--multi-label-fields': _store_option(
            'multi_label_fields',
            "Comma-separated list of the fields"
            " to be expanded as being multi-label. Name"
            " or column number."),
        # Aggregation functions for the multi-label fields.
        '--label-aggregates': _store_option(
            'label_aggregates',
            "Comma-separated list of aggregation functions "
            "for the multi-label field labels."
            " Allowed aggregates: count, first and last")}
def coll_fn_extract(data):
    """
    Collate function for the DataLoader: validate, filter and pack a batch.

    Each item is a (source_sents, extracts) pair; items where either part
    is empty are dropped.

    :param data: iterable of (source_sents, extracts) pairs
    :return: list of the pairs whose two parts are both non-empty
    """
    def _is_good(item):
        """Make sure neither part of the pair is empty."""
        source_sents, extracts = item
        return bool(source_sents) and bool(extracts)

    batch = [item for item in data if _is_good(item)]
    assert all(map(_is_good, batch))
    return batch
import os
def mkname(name):
    """
    Returns a possible non-conflicting name able to be created in current directory.

    Returns a (new_name, previous_candidate) tuple; when the base name is
    free, both elements are the base name itself.
    """
    base = str(name)
    suffix = 0
    candidate = base
    previous = candidate
    # Append an increasing numeric suffix until the name is unused.
    while os.path.exists(candidate):
        previous = candidate
        suffix += 1
        candidate = base + str(suffix)
    return (candidate, previous)
def token2word_embeddings(data, pooling="max"):
    """Pool subword BERT embeddings into word embeddings.

    :param data: dict with "bert_embeddings" (bs, max_n_tokens, h_dim), plus
        "bert_indices" (for "first") or "bert_alignment" (for the others)
    :param pooling: one of "first", "max", "sum", "avg"
    :return: (bs, max_n_words, h_dim) tensor of word embeddings
    """
    assert pooling in ["first", "max", "sum", "avg"]
    # (bs, max_n_tokens, h_dim)
    embeddings = data["bert_embeddings"]
    if pooling == "first":
        # Pick the embedding of each word's first subword token.
        idx = data["bert_indices"].long().to(embeddings.device)
        idx = idx.unsqueeze(-1).repeat(1, 1, embeddings.size(-1))
        return embeddings.gather(1, idx)
    # (bs, max_n_words, max_n_tokens) word/token alignment mask
    mask = data["bert_alignment"].to(embeddings.device)
    # Broadcast token embeddings across words:
    # (bs, max_n_words, max_n_tokens, h_dim)
    expanded = embeddings.unsqueeze(1).repeat(1, mask.size(1), 1, 1)
    holes = (mask == 0).unsqueeze(-1)
    if pooling == "max":
        # Mask out non-aligned tokens with a very negative value before max.
        expanded.masked_fill_(holes, -1e30)
        return expanded.max(2)[0]
    # Zero out non-aligned tokens for additive pooling.
    expanded.masked_fill_(holes, 0)
    if pooling == "sum":
        return expanded.sum(2)
    # "avg": mean over the full token axis (masked positions count as zeros).
    return expanded.mean(2)
import math
def calculate_number_of_bins_half(data):
    """The number of bins will be the half of the data size (rounded up)."""
    # (n + 1) // 2 is the integer form of ceil(n / 2).
    return (len(data) + 1) // 2
def get_scope(field):
    """For a single field get the scope variable
    Return a (name, scope) tuple; scope defaults to '' when absent."""
    scope = field['field'].get('scope', '')
    return (field['name'], scope)
import random
def generate_quality():
    """Generate Quality Code: one of A01-A04, chosen at random."""
    return random.choice(("A01", "A02", "A03", "A04"))
import numpy
def error_estimation_simplex(vertex_vector_h, vertex_chi_sq_h, func):
    """
    Error estimation for simplex-minimization results.

    Calculations according to
    ANALYTICAL CHEMISTRY, VOL. 60, NO. 8, APRIL 15, 1988

    notations
    ---------
    theta_i = vertex_vector[i, :]
    chi_sq_i = vertex_chi_sq[i]

    :param vertex_vector_h: (k, k-1) array; row i is parameter vector theta_i
        of simplex vertex i (row 0 is the reference vertex theta_0)
    :param vertex_chi_sq_h: chi-squared values at the input vertices
        (recomputed below via func; the argument itself is not used further)
    :param func: callable mapping a parameter vector to its chi-squared value
    :return: (m_error, |Q B^-1 a|) where m_error = 2*chi_sq_0 * Q B^-1 Q^T is
        the (k-1, k-1) error matrix
    """
    # temporary solution: rebuild an axis-aligned simplex around theta_0
    # before applying the paper's quadratic-surface formulas.
    k, hh = vertex_vector_h.shape  # hh = k-1
    theta_0 = vertex_vector_h[0, :]
    m_q = numpy.zeros((k-1, k-1))
    vertex_vector = numpy.zeros(vertex_vector_h.shape, dtype=float)
    vertex_vector[0, :] = theta_0
    # Largest per-parameter distance of any vertex from theta_0.
    max_radius = numpy.zeros(k-1, dtype=float)
    for i in range(1, k):
        theta_i = vertex_vector_h[i, :]
        rand_radius = numpy.abs(theta_i-theta_0)
        max_radius = numpy.max(numpy.vstack([max_radius, rand_radius]), axis=0)
    # Place vertex i at theta_0 shifted by max_radius along axis i-1 only.
    for i in range(1, k):
        radius_h = numpy.zeros(k-1, dtype=float)
        radius_h[i-1] = max_radius[i-1]
        vertex_vector[i, :] = theta_0+radius_h
    # Evaluate chi-squared at the ORIGINAL vertices.
    # NOTE(review): this uses vertex_vector_h, not the rebuilt vertex_vector,
    # while the cross terms below use the rebuilt vertices — confirm intended.
    l_chi_sq = []
    for i in range(0, k):
        theta_i = vertex_vector_h[i, :]
        chi_sq = func(theta_i)
        l_chi_sq.append(chi_sq)
    vertex_chi_sq = numpy.array(l_chi_sq, dtype=float)
    chi_sq_0 = vertex_chi_sq[0]
    # a (vector), B (matrix) and Q (vertex-offset matrix) from the paper.
    v_a = numpy.zeros(k-1)
    m_b = numpy.zeros((k-1, k-1))
    m_q = numpy.zeros((k-1, k-1))
    m_chi_sq_0i = numpy.zeros(k-1)
    # Step 1: chi-squared at midpoints between theta_0 and each vertex.
    for i in range(1, k):
        theta_i = vertex_vector[i, :]
        theta_0i = 0.5*(theta_0+theta_i)
        chi_sq_0i = func(theta_0i)
        m_chi_sq_0i[i-1] = chi_sq_0i
        m_q[i-1, :] = theta_i-theta_0
    # Step 2: assemble a and the symmetric B from vertex/midpoint values.
    for i in range(1, k):
        chi_sq_i = vertex_chi_sq[i]
        theta_i = vertex_vector[i, :]
        chi_sq_0i = m_chi_sq_0i[i-1]
        a_i = 4.*chi_sq_0i - chi_sq_i - 3.*chi_sq_0
        v_a[i-1] = a_i
        b_ii = 2.*(chi_sq_i + chi_sq_0 - 2.*chi_sq_0i)
        m_b[i-1, i-1] = b_ii
        for j in range(i+1, k):
            chi_sq_0j = m_chi_sq_0i[j-1]
            theta_j = vertex_vector[j, :]
            theta_ij = 0.5*(theta_i+theta_j)
            chi_sq_ij = func(theta_ij)
            b_ij = 2.*(chi_sq_ij + chi_sq_0 - chi_sq_0i - chi_sq_0j)
            m_b[i-1, j-1] = b_ij
            m_b[j-1, i-1] = b_ij
    # Step 3: error matrix m_error = 2*chi_sq_0 * Q B^-1 Q^T.
    m_ib = numpy.linalg.inv(m_b)
    m_qib = numpy.matmul(m_q, m_ib)
    v_qiba = numpy.matmul(m_qib, v_a)
    # theta_min = theta_0 - v_qiba (estimated minimum, not returned)
    m_qibqt = numpy.matmul(m_qib, m_q.transpose())
    m_error = 2.*chi_sq_0*m_qibqt
    return m_error, numpy.abs(v_qiba)
import torch
def quantile_features(density, q_vals):
    """
    Input
    - density: tensor of shape [n_samples]
    - q_vals: list of numbers between 0 and 1 with the quantiles to use
    Output
    - quantiles: tensor of shape [len(q_vals)]
    """
    # Match the density's dtype so torch.quantile accepts the probabilities.
    probs = torch.tensor(q_vals, dtype=density.dtype)
    return torch.quantile(density, probs)
def And(s1, s2):
    """ And(s1, s2) returns a new selector that selects a node only if BOTH
    s1, and s2 select the node."""
    def _both(node):
        # Short-circuits exactly like the original `and` expression.
        return s1(node) and s2(node)
    return _both
import toml
def load_config_data():
    """ Loads all config data from the duck_bot_config.toml file. """
    # Reads the TOML file from the current working directory; raises
    # FileNotFoundError when duck_bot_config.toml is absent.
    return toml.load("duck_bot_config.toml")
def get_cell(caves, row, col):
    """Get (row, col) cell in caves."""
    cave_row = caves[row]
    return cave_row[col]
from typing import Callable
from typing import Any
from typing import Iterable
def first_order_frequencies(method: Callable[[list[Any], list[float], int], Iterable[Any]],
                            iterations: int,
                            population_size: int,
                            sample_size: int,
                            weights: list[float]) -> dict[Any, int]:
    """
    Calculates the first order inclusion frequencies for the parameters given and returns it as a dictionary.
    """
    # Validate the sampling parameters up front.
    if iterations < 1:
        raise ValueError("iterations cannot be less than 1")
    if sample_size < 1:
        raise ValueError("sample_size cannot be less than 1")
    if sample_size > population_size:
        raise ValueError("sample_size cannot be higher than population_size")
    if len(weights) != population_size:
        raise ValueError("the weights must be as many as the population elements")
    counts: dict[Any, int] = {}
    for _ in range(iterations):
        # A fresh population list per iteration, in case `method` mutates it.
        drawn = method(list(range(population_size)), weights, sample_size)
        for element in drawn:
            counts[element] = counts.get(element, 0) + 1
    return counts
def taux_gc(seq):
    """
    Return the percentage of "GC" bases in the DNA sequence.
    """
    gc_count = seq.count('G') + seq.count('C')
    return (gc_count / len(seq)) * 100
def check_within_range(dframe, lower_bound, upper_bound):
    """
    Check that every value in every column of a dataframe lies within
    [lower_bound, upper_bound] (bounds inclusive).

    :return: True when all values are in range, False otherwise
    """
    for col in dframe.columns:
        # between() yields booleans, so sum() counts the True entries.
        # (The previous .count() counted non-NA entries instead, which made
        # the check pass unconditionally; inclusive="both" replaces the
        # boolean flag removed in pandas 2.0.)
        in_range = dframe[col].between(lower_bound, upper_bound,
                                       inclusive="both").sum()
        if in_range != len(dframe[col]):
            return False
    return True
def namebunch(abunch, aname):
    """Give the bunch object a name, if it has a Name field.

    :param abunch: object exposing a Name attribute
    :param aname: new name to assign when Name is currently set
    :return: abunch, possibly renamed
    """
    # `is not None` instead of `== None`: identity test is the Python idiom
    # and is immune to objects with unusual __eq__ implementations.
    if abunch.Name is not None:
        abunch.Name = aname
    return abunch
def get_ranking13():
    """
    Return the ranking with ID 13 as (alternative, score) pairs.
    """
    alternatives = ["a5", "a6", "a4", "a3", "a2", "a1"]
    scores = [0.868655, 0.846338, 0.812076, 0.789327, 0.718801, 0.300742]
    return list(zip(alternatives, scores))
def convert_bytes_to_bits(byte_value):
    """ Convert a byte count to the equivalent number of bits """
    bits_per_byte = 8
    return byte_value * bits_per_byte
def for_object(permissions, obj):
    """
    Only useful in the permission handling. This filter binds a new object to
    the permission handler to check for object-level permissions.
    """
    if permissions == '':
        # An earlier permission check already failed, so don't bother
        # binding a new object — propagate the empty marker as-is.
        return permissions
    return permissions.bind_object(obj)
def mode(nums):
    """Return most-common number in list.
    For this function, there will always be a single-most-common value;
    you do not need to worry about handling cases where more than one item
    occurs the same number of times. The input must be non-empty.
    >>> mode([1, 2, 1])
    1
    >>> mode([2, 2, 3, 3, 2])
    2
    """
    from collections import Counter
    # Counter counts in one pass; most_common(1) returns [(value, count)].
    # Ties keep first-insertion order, matching the old manual scan.
    return Counter(nums).most_common(1)[0][0]
def _callify(meth):
    """Return meth if it is callable; for a string, return a wrapper that
    invokes the form's method of that name. Any other type yields None,
    as before."""
    if callable(meth):
        return meth
    if isinstance(meth, str):
        def _invoke(form, *args):
            return getattr(form, meth)(*args)
        return _invoke
import numpy
def get_freq_array(bandwidth, n_chans):
    """
    Create an array of frequencies for the channels of a backend

    @param bandwidth : bandwidth
    @type  bandwidth : float

    @param n_chans : number of channels
    @type  n_chans : int

    @return: frequency of each channel in same units as bandwidth
    """
    channel_indices = numpy.arange(n_chans)
    # Evenly spaced channel centres starting at 0 (bandwidth excluded).
    return channel_indices * float(bandwidth) / n_chans
import random
def _drawRandom(nbToDraw, maxValue, exclusion=None):
"""Draws random numbers from 0 to maxValue.
Args:
nbToDraw (int): number of numbers to draw
maxValue (int): max value for the numbers to draw
exclusion (set): numbers to exclude
"""
numbers = set()
while len(numbers) < nbToDraw:
choice = random.randrange(0, maxValue)
if not exclusion or choice not in exclusion:
numbers.add(choice)
return numbers | 24e44dc52cce7722bb1074b747457fc160912664 | 30,753 |
import copy
def _set_logger_name(config_in, package_name):
"""Replace config yml's logger package-name with the real package name"""
config = copy.deepcopy(config_in)
config['loggers'][package_name] = config['loggers'].pop('package-name')
return config | 12b7e33a6655fffd472d19545779d4ee325895cd | 30,757 |
def getPubSubAPSConfiguration(notifierID, config):
    """
    Returns the Apple push notification settings specific to the pushKey.

    *notifierID* is expected to be a (protocol, id) pair; anything that does
    not unpack yields None because APS config cannot be looked up without a
    protocol. Returns None as well when APNS is disabled in *config*.
    """
    try:
        protocol, ignored = notifierID
    except ValueError:
        # id has no protocol, so we can't look up APS config
        return None

    # If we are directly talking to apple push, advertise those settings
    applePushSettings = config.Notifications.Services.APNS
    if applePushSettings.Enabled:
        settings = {}
        settings["APSBundleID"] = applePushSettings[protocol]["Topic"]
        # pick the externally reachable scheme/port for the subscription URL
        if config.EnableSSL or config.BehindTLSProxy:
            url = "https://%s:%s/%s" % (
                config.ServerHostName, config.SSLPort,
                applePushSettings.SubscriptionURL)
        else:
            url = "http://%s:%s/%s" % (
                config.ServerHostName, config.HTTPPort,
                applePushSettings.SubscriptionURL)
        settings["SubscriptionURL"] = url
        settings["SubscriptionRefreshIntervalSeconds"] = applePushSettings.SubscriptionRefreshIntervalSeconds
        settings["APSEnvironment"] = applePushSettings.Environment
        return settings
    return None
# | bdadd793af1cb17f18401ab1938ac7a7517b67b5 | 30,760
return None | bdadd793af1cb17f18401ab1938ac7a7517b67b5 | 30,760 |
def get_bound(atom, bound=None):
    """
    Return *bound* if given, otherwise the atom's own ``bound`` attribute.

    Raises:
        ValueError: when neither an explicit *bound* nor ``atom.bound`` is set.
    """
    if bound is None:
        bound = atom.bound
    if bound is None:
        raise ValueError('either atom must be in bound '
                         'mode or a keyword "bound" '
                         'argument must be supplied')
    return bound
# | f9223945011fbc056db170a943cf33fb09662920 | 30,761
def finding_gitlab_forks(fork_user):
    """
    fork_user: iterable of fork repository URLs (historically a repository to
    user_count dictionary map — iteration uses the keys/URLs only)
    purpose: count how many of the forks are hosted on GitLab

    Returns (total_forks, gitlab_forks, gitlab_url) where *gitlab_url* lists
    the GitLab-hosted fork URLs in input order.
    """
    total_forks = 0
    gitlab_url = []
    for fu in fork_user:
        total_forks += 1
        if 'https://gitlab.com/' in fu:
            gitlab_url.append(fu)
    return total_forks, len(gitlab_url), gitlab_url
# | 192cbc4f4dda4d086ddeb20243f9b3a605edad96 | 30,762
import sys
import unicodedata
def normalize_path(path):
    """Normalize paths for macOS (do nothing on other platforms).

    HFS+ converts paths to a canonical (NFD) form, so users shouldn't be
    required to enter an exact byte match there; Windows and Unix
    filesystems allow different forms, so an exact match is kept.
    """
    return unicodedata.normalize('NFD', path) if sys.platform == 'darwin' else path
# | df3295c47e30b54edb2d74959d8dbaa114aa9773 | 30,763
def argmax(d):
    """Return the key of *d* holding the largest value.

    d (dict): non-empty mapping of keys to comparable values.
    On ties the last matching key (in iteration order) wins, matching the
    historical behaviour.
    """
    peak = max(d.values())
    winner = -1
    for key, value in d.items():
        if value == peak:
            winner = key
    return winner
# | a47869475a3369f2f6a6b623533c0875b8062a50 | 30,764
import typing
import math
def normalized(x: float, y: float) -> typing.Tuple[float, float]:
    """
    Return a unit-length version of the vector (x, y).

    The zero vector maps to (0, 0) rather than dividing by zero.
    """
    length = math.hypot(x, y)
    if length == 0:
        return 0, 0
    return x / length, y / length
# | fe6079a0e1335977fa831b64a340f5e7747d763f | 30,765
import math
def solve_tangent_angle(distance, radius):
    """
    Helper function to calculate the angle between the centre of a circle
    and the tangent point, as seen from a point a certain distance from
    the circle.

    :Parameters:
    distance: float
        Distance of point from centre of circle.
    radius: float
        Radius of circle

    :Returns:
    tangent_angle: float
        The tangent angle in radians, or None if there is no solution
        (the point lies inside the circle).
    """
    sine = float(radius) / float(distance)
    if abs(sine) > 1.0:
        return None
    return math.asin(sine)
# | 6db4a340f50d0fd426dbae3e2248624cc3c50563 | 30,766
def fullname(o):
    """Return the fully qualified class name of *o*.

    Builtins are reported without a module prefix; when *o* is itself a
    class (its class name resolves to 'type'), the qualified name of its
    base class is returned instead.
    """
    module = o.__class__.__module__
    if module is None or module == str.__class__.__module__:
        cls_name = o.__class__.__name__
    else:
        cls_name = module + '.' + o.__class__.__name__
    if cls_name == 'type':
        cls_name = o.__base__.__module__ + '.' + o.__base__.__name__
    return cls_name
# | 1b4672add4a4ea8e5e7e9e539c596847e4d04bde | 30,767
from typing import OrderedDict
def collect(iterable, key=None, value=None):
    """Group elements of *iterable* by key, preserving first-seen key order.

    Returns an OrderedDict mapping key(element) -> [value(element), ...];
    *key* and *value* default to the identity function.
    """
    if key is None:
        key = lambda element: element
    if value is None:
        value = lambda element: element
    groups = OrderedDict()
    for element in iterable:
        groups.setdefault(key(element), []).append(value(element))
    return groups
# | 94bd28567ed1ef4069fb617e837c4ba02ff24ae4 | 30,768
def get_word_list_from_data(text_df):
    """
    Collect every space-separated token of the dataset's 'text' column
    into a single flat list (one entry per occurrence, in row order).
    """
    word_list = []
    # iterrows() yields (row_index, row_content) pairs
    for _, row in text_df.iterrows():
        word_list.extend(row['text'].split(' '))
    return word_list
# | d66a5db21b0cd6e899f562ccb77b28c64ccad69d | 30,769
def round_f(d):
    """
    Apply precision to *d* ("取精度").

    NOTE(review): currently an identity pass-through — no rounding is
    performed; confirm the intended precision before changing behaviour.
    """
    return d
# | d4622be3edc72f066fb5f3ba8a992fd70278ef68 | 30,770
import uuid
def _obtain_signed_blob_storage_urls(self, workspace_id, id_count=1, blob_path=None):
    """Obtain signed blob storage urls for *id_count* freshly generated blob ids.

    Returns:
        [dict]: blob storage urls (decoded JSON response)
        [dict]: the {"ids": [...]} set that was requested
    """
    blob_url = f'{self.HOME}/{self.API_1}/project/{workspace_id}/signed_blob_url'
    # ids are random UUIDs, optionally nested under blob_path
    if blob_path:
        id_set = {"ids": [f'{blob_path}/{str(uuid.uuid4())}' for i in range(id_count)]}
    else:
        id_set = {"ids": [str(uuid.uuid4()) for i in range(id_count)]}
    response = self._auth_post(blob_url, body=None, json=id_set, return_response=True)
    urls = response.json()
    return urls, id_set
# | e6fa3e492930162ff7963ce0a8aedc2d91bd3583 | 30,773
import re
def gettime_s(text):
    """
    Parse *text* and return a duration in seconds (float), or None when no
    time component is found.

    The text is of the format 0h : 0.min:0.0s:0 ms:0us:0 ns. Spaces are not
    taken into account and any of the specifiers can be omitted; units not
    in the table below (e.g. plain seconds) scale by 1.
    """
    matches = re.findall(r'([+-]?\d+\.?\d*) ?([mμnsinh]+)', text)
    if not matches:
        return None
    # seconds per recognised unit
    factors = {'ns': 1e-9, u'\u03BCs': 1e-6, 'ms': 1e-3, 'min': 60, 'h': 3600}
    total = 0.
    for amount, unit in matches:
        total += float(amount) * factors.get(unit, 1)
    return total
# | 49f315b9f92dc04eea450f7d8b93a7f9bd08da14 | 30,774
def convert_openlayers_roi_to_numpy_image_roi(roi: list, image_height: int) -> list:
    """Flip an [x, y, width, height] ROI from a bottom-left (OpenLayers)
    origin to a top-left (numpy image) origin.

    Args:
        roi (list): roi in format [x, y, width, height]
        image_height (int): height of the original image from which the roi is cropped

    Returns:
        list: roi with y measured from the top edge
    """
    x, y, width, height = roi
    return [x, image_height - y - height, width, height]
# | 6fe3247b0b1dcc7a9f9da23cbde1e42d71199d88 | 30,775
def _adjust_map_extent(extent, relocate=True, scale_ratio=1):
"""
Adjust the extent (left, right, bottom, top) to a new staring point and
new unit. extent values will be divided by the scale_ratio
Example:
if scale_ratio = 1000, and the original extent unit is meter, then the
unit is converted to km, and the extent is divided by 1000
"""
if relocate:
left = 0
right = (extent[1]-extent[0])/scale_ratio
bottom = 0
top = (extent[3]-extent[2])/scale_ratio
else:
left = extent[0]/scale_ratio
right = extent[1]/scale_ratio
bottom = extent[2]/scale_ratio
top = extent[3]/scale_ratio
return (left, right, bottom, top) | ee1d6c4195daab7cc8473b05f334357d25b5b7b5 | 30,776 |
def parseArgs(args, aliases={}):
    """Consume leading options (-x / --x[=val]) from *args* and return them
    as a dict of {name: value-or-None}.

    *args* is mutated: parsed options (and a terminating '--') are popped
    off the front; parsing stops at the first non-option.

    NOTE(review): *aliases* is currently unused — short options are NOT
    expanded to long names as the historical docstring suggested; confirm
    with callers before relying on alias expansion.
    """
    parsed = {}
    while args and args[0].startswith('-'):
        opt = args.pop(0)
        if opt == '--':
            break  # explicit end-of-options marker: ignore and exit
        # treat options with one dash the same as those with two dashes
        opt = opt[2:] if opt.startswith('--') else opt[1:]
        name, eq, val = opt.partition('=')
        parsed[name] = val if eq else None
    return parsed
# | f749a9f59834922342efe00be200230fc820bf54 | 30,777
def pkcs_unpad_encryption(bytestring, block_length):
    """Strip PKCS#1 v1.5 (type 2) padding from a block and return the message.

    The input is left-padded with zero bytes up to *block_length*, must then
    start with 00 02, and the message follows the first 00 separator after
    the padding bytes.

    Raises:
        ValueError: when the block does not start with 00 02.
    """
    block = bytestring.rjust(block_length, b"\x00")
    if block[:2] != b"\x00\x02":
        raise ValueError(
            f"Bytestring isn't PKCS1.5 formatted: {block}"
        )
    separator = block.index(b"\x00", 2)
    return block[separator + 1 :]
# | 2753c4f30df006a88f31aa55b69d95326da8203f | 30,779
def fbexp(db, dp, rhog, rhos, umf, us):
    """
    Bed expansion factor for calculating expanded bed height of a bubbling
    fluidized bed reactor. See equations 14.7 and 14.8 in Souza-Santos [1]_.

    Parameters
    ----------
    db : float
        Diameter of the bed [m]
    dp : float
        Diameter of the bed particle [m]
    rhog : float
        Density of gas [kg/m^3]
    rhos : float
        Density of bed particle [kg/m^3]
    umf : float
        Minimum fluidization velocity [m/s]
    us : float
        Superficial gas velocity [m/s]

    Returns
    -------
    fbx : float
        Bed expansion factor [-]

    Example
    -------
    >>> umf = 0.1157
    ... us = 3.0*umf
    ... fbexp(0.05232, 0.0004, 0.4413, 2500, 0.1157, us)
    1.4864

    References
    ----------
    .. [1] Marcio de Souza-Santos. Solid Fuels Combustion and Gasification:
       Modeling, Simulation, and Equipment Operations. CRC Press, Taylor and
       Francis Group, 2nd edition, 2010.
    """
    if db < 0.0635:
        # small bed diameter (db < 0.0635 m), Eq 14.7
        tm1 = 1.032 * ((us - umf)**0.57) * (rhog**0.083)
        tm2 = (rhos**0.166) * (umf**0.063) * (db**0.445)
        fbx = 1 + (tm1 / tm2)
    else:
        # large bed diameter (db >= 0.0635 m), Eq 14.8
        tm1 = 14.314 * ((us - umf)**0.738) * (dp**1.006) * (rhos**0.376)
        tm2 = (rhog**0.126) * (umf**0.937)
        fbx = 1 + (tm1 / tm2)
    return fbx
# | c78b94639f1d6835ee490636e85d49f04b09ebe1 | 30,780
import getpass
import os
def get_user_name():
    """
    Returns the user name associated with this process, or raises an
    exception if it fails to determine the user name.

    Tries getpass first; falls back to the basename of the expanded home
    directory. Broad excepts are deliberate — both probes are best-effort.
    """
    try:
        return getpass.getuser()
    except Exception:
        pass
    try:
        usr = os.path.expanduser('~')
        if usr != '~':  # expansion actually resolved to a path
            return os.path.basename(usr)
    except Exception:
        pass
    raise Exception("could not determine the user name of this process")
# | 3a9e66bc4026d7ec41dd6365593e23e234e461ae | 30,781
from typing import List
def write_floats_10e(vals: List[float]) -> List[str]:
    """Write a series of Nastran-formatted 10.3E float fields.

    Positive and negative zero both collapse to the short form ' 0.0'.
    """
    fields = []
    for v in vals:
        field = '%10.3E' % v
        if field in (' 0.000E+00', '-0.000E+00'):
            field = ' 0.0'
        fields.append(field)
    return fields
# | 7e2f9b1a9e4560d3d9194c18601d22a57ed0811e | 30,782
def dup_integrate(f, m, K):
    """
    Computes the ``m``-th indefinite integral of ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)

    >>> R.dup_integrate(x**2 + 2*x, 1)
    1/3*x**3 + x**2
    >>> R.dup_integrate(x**2 + 2*x, 2)
    1/12*x**4 + 1/3*x**3
    """
    if m <= 0 or not f:
        return f
    g = [K.zero]*m
    for i, c in enumerate(reversed(f)):
        # n = (i+1)(i+2)...(i+m): the falling-factorial divisor produced by
        # integrating the x**i term m times
        n = i + 1
        for j in range(1, m):
            n *= i + j + 1
        g.insert(0, K.exquo(c, K(n)))
    return g
# | 0f1981d699c4c80b61d4f0aececa1ccc4601712b | 30,783
from typing import Dict
import torch
def inputs_to_cuda(inputs: Dict[str, torch.Tensor]):
    """
    Move tensor values of *inputs* to CUDA, in place.

    Non-tensor values are left untouched; when CUDA is unavailable the dict
    is returned unchanged.

    Args:
        inputs (dict[str, torch.Tensor]): Inputs dict

    Returns:
        dict[str, torch.Tensor]: the same dict (mutated when CUDA is available)
    """
    if not torch.cuda.is_available():
        return inputs
    for key, value in inputs.items():
        if isinstance(value, torch.Tensor):
            inputs[key] = value.cuda()
    return inputs
# | 1c67e915463ea04b2df03f3697a2eb83dedb07a2 | 30,784
def pruneNullRows(df):
    """
    Return *df* without rows that are entirely null.

    :param pd.DataFrame df:
    :return pd.DataFrame: a new frame; *df* itself is NOT modified
        (the historical docstring's "done in place" claim was wrong —
        dropna without inplace=True returns a copy).
    """
    return df.dropna(axis=0, how='all')
# | af0a34bed71f937d6ff970f521d5f82720fffdc9 | 30,785
def load_ed25519_vectors(vector_data):
    """
    Parse djb's ed25519 test vectors, one colon-delimited line each:

        0: secret key (32 bytes) + public key (32 bytes)
        1: public key (32 bytes)
        2: message (0+ bytes)
        3: signature + message (64+ bytes)

    Only the leading secret-key (64 hex chars) and signature (128 hex chars)
    portions of fields 0 and 3 are kept.
    """
    vectors = []
    for line in vector_data:
        secret_key, public_key, message, signature, _ = line.split(':')
        vectors.append({
            "secret_key": secret_key[0:64],
            "public_key": public_key,
            "message": message,
            "signature": signature[0:128],
        })
    return vectors
# | 618ea06c408d131664bbfe0b4350fee5e6a3edd0 | 30,786
from bs4 import BeautifulSoup
def parse_html(html: str) -> BeautifulSoup:
    """Parse the HTML string with Beautiful Soup's builtin html.parser."""
    return BeautifulSoup(html, features="html.parser")
# | 8e10667747f24b9f9790b2b512bc9d5635ec7cd9 | 30,787
import hashlib
def convert_email(email):
    """Return the MD5 hex digest of the trimmed, lower-cased email address.

    Note: lower() runs on the UTF-8 encoded bytes, so only ASCII letters are
    lowered — kept as-is for hash stability with existing data.
    """
    normalized = email.strip().encode('utf-8').lower()
    return hashlib.md5(normalized).hexdigest()
# | a556147ffb9111b6001c4d76f6cd82c3442e115e | 30,788
def get_n_lines(fin: str, size: int = 65536) -> int:
    """Given a filename, return how many lines (i.e. line endings) it has.

    :param fin: input file path
    :param size: size in bytes to use as chunks
    :return: number of newline characters in *fin*
    """
    # borrowed from https://stackoverflow.com/a/9631635/1150683
    def blocks(fh):
        while True:
            chunk = fh.read(size)
            if not chunk:
                break
            yield chunk

    with open(str(fin), encoding="utf-8") as fhin:
        return sum(chunk.count("\n") for chunk in blocks(fhin))
# | 0259c71681a9779e3df311ff03010262ded8f058 | 30,790
def uniformize_points(p1, p2, p3, p4):
    """
    Order four 2-D points as top-left, top-right, bottom-left, bottom-right.

    Points are ranked by x+y (top-left smallest, bottom-right largest); the
    middle two are swapped if needed so the point with the larger x ends up
    second (top-right). A point is a list/tuple of two values.
    """
    pts = sorted([p1, p2, p3, p4], key=lambda p: p[0] + p[1])
    if pts[1][0] < pts[2][0]:
        pts[1], pts[2] = pts[2], pts[1]
    return pts
# | d51dd6a3c1524b2efc1df49bd5abdcf9ab0ef7ab | 30,792
def correct_text(key, text):
    """
    Correct common OCR mistakes in *text* depending on which field it is.

    :param key: field identifier ('title1', 'title2', 'column_name', ...)
    :param text: raw OCR text
    :return: corrected text (unknown keys pass through unchanged)
    """
    if key == 'title1':
        return text.replace('<>', '').replace('母', '').replace('团', '')
    elif key == 'title2':
        # a leading 'Z' is often OCR'd as '7'; this branch was dead code
        # before because it compared the character against the int 7
        if text and text[0] == '7':
            text = text.replace('7', 'Z')
        return text.replace('|', '').replace('乙轴', 'Z轴')
    elif key == 'column_name':
        return text.replace('+TOL TOL', '+TOL -TOL').replace('特征NOMINAL', '特征 NOMINAL')
    else:
        return text
# | a7a28b7d4f9e57e960c59428523dd3006e9050f3 | 30,793
import os
def get_download_filename( rule_response ):
    """
    Return the download file name for a rule response: the explicit 'name'
    field when present, otherwise the basename of the 'download' path.

    Parameters
    ----------
    rule_response : dict

    Returns
    -------
    str
    """
    if 'name' in rule_response:
        return rule_response.get( 'name' )
    download_path = rule_response.get( 'download' )
    return os.path.basename( download_path )
# | 35b449e02cca22275c55225d2550060ac289d0c5 | 30,794
import re
def exact_match(single_line, single_line_compare):
    """
    Return True when *single_line* occurs literally within the compare
    parameter.

    The needle is regex-escaped, so regex metacharacters match literally.
    """
    pattern = re.escape(single_line)
    return re.search(pattern, single_line_compare) is not None
# | f4a654ffeca7f4e8fbc96f6097e11a9c7d9ee34b | 30,795
def auth_token(pytestconfig):
    """Fixture helper: return the API token passed on the command line via --token."""
    return pytestconfig.getoption("token")
# | 419a0e617f242ac9b657b7b397e8b06e447a7efe | 30,796
import struct
def msg_request_pack(index, begin, length):
    """Build a BitTorrent 'request' message: <len=0013><id=6><index><begin><length>.

    b'\x00\x00\x00\r\x06\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00@\x00'
    0, 32768, 16384 (5 byte prefix_len + msg_id prepended - not sure if it should be handled here)
    """
    # ">IBIII" = big-endian length prefix (13), message id (6), then the
    # three 32-bit payload fields — identical bytes to the former
    # bytes([...]) + three struct.pack(">L") concatenation
    return struct.pack(">IBIII", 13, 6, index, begin, length)
# | 6346f2b3c9aee678ab5017fd00ad11dba32d3794 | 30,797
def conv_params(sz_in: int, sz_out: int):
    """Solve for (filter_size, stride, padding) satisfying

        sz_out = (sz_in - filter_size + 2*padding) / stride + 1

    Candidates are tried in a fixed order (filter, then stride, then
    padding) so the result is deterministic; raises if no solution exists.
    """
    filter_sizes, strides, paddings = [3, 2, 4, 5], [2, 1, 3], [1, 2, 3]
    for f in filter_sizes:
        for s in strides:
            for p in paddings:
                if (sz_in - f + 2 * p) / s + 1 == sz_out:
                    return (f, s, p)
    raise Exception("Unable to find valid parameters for {0} to {1} convolution".format(sz_in, sz_out))
# | 86c1a2437231d2fb6515a6581719d3568cdee813 | 30,799
def get_model_io_names(model):
    """Gets names of the input and output nodes of the model.

    Args:
        model (keras Model): model to parse

    Returns:
        inputs (list): names of all the input nodes
        outputs (list): names of all the output nodes
    """
    def base_name(tensor):
        # strip the ':0' tensor suffix, then any scope path after the node name
        return tensor.name.split(':')[0].split('/')[0]

    inputs = [base_name(t) for t in model.inputs]
    outputs = [base_name(t) for t in model.outputs]
    return inputs, outputs
# | b8bc93bd2bf01597b16eaee5bc0f1a210e185dbe | 30,800
def find_package_data():
    """Find abiflows package_data.

    This is not enough for these things to appear in an sdist; the MANIFEST
    also needs updating for that to work.
    """
    return {'abiflows.fireworks.tasks': ['n1000multiples_primes.json']}
# | 06549d7b20adce373bdf06dfc82684ec281eb15f | 30,802
def pascal(n):
    """
    pascal: Method for computing the row in pascal's triangle that
    corresponds to the number of bits. This gives us the number of layers
    of the landscape and the number of mutants per row.

    Parameters
    ----------
    n : int
        row of pascal's triangle to compute

    Returns
    -------
    line : list of ints
        row n of pascal's triangle
    """
    line = [1]
    for k in range(n):
        # multiplicative formula; the division is always exact, and '//'
        # keeps the entries ints (true division produced floats despite the
        # documented "list of ints" return)
        line.append(line[k] * (n - k) // (k + 1))
    return line
# | 66c6a9946a730186feda3fee04c75dd32d4312ad | 30,805
def get_value(input_data, field_name, required=False):
    """
    Return an unencoded value from an MMTF data structure.

    :param input_data: decoded MMTF dict
    :param field_name: key to look up
    :param required: when True, a missing field raises instead of returning None
    :return: the field's value, or None when absent and not required
    """
    if field_name in input_data:
        return input_data[field_name]
    if required:
        raise Exception('ERROR: Invalid MMTF File, field: {} is missing!'.format(field_name))
    return None
# | 3e4ec623528f279a61b5ad9897935a5fda8af2d1 | 30,806
def seconds_to_string(seconds):
    """
    Format a time given in seconds as HH:MM:SS. Used for the
    'leg time/cum. time' columns of the table view.
    """
    hours, remainder = divmod(int(seconds), 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"
# | 23db1370e887a9dad3d6dbd40bc2f25c244f1f77 | 30,807
def bateria(pcell_nom, vdc_sist, vdc_bc, nbat_p):
    """
    Battery-bank sizing.

    :param pcell_nom: nominal power of one battery cell
    :param vdc_sist: system DC voltage (usually 12, 24 or 48 V for large systems)
    :param vdc_bc: voltage of a single battery
    :param nbat_p: number of parallel battery strings
    :return: (nominal bank power rounded to 2 decimals, total battery count)
    """
    nbat_s = vdc_sist / vdc_bc  # batteries in series per string
    pbat_nom = round(nbat_p * nbat_s * pcell_nom, 2)
    return pbat_nom, nbat_s * nbat_p
# | 54f8c2ad935c3cbda00dd57c4357d39d4c8b4757 | 30,808
def get_slice(img, indices):
    """
    Return a 3-D sub-volume of *img* given [x0, x1, y0, y1, z0, z1]
    (each pair is a half-open slice along one axis).
    """
    return img[indices[0]:indices[1], indices[2]:indices[3], indices[4]: indices[5]]
# | bb2c602d56cc874e0e82c32aacc3b8d5b36d4e35 | 30,810
def py2tcl(pystr):
    """
    Convert an openseespy script string toward tcl form: parentheses become
    spaces; 'opy.' prefixes, commas and quotes are removed.

    Returns
    -------
    str
    """
    # order matters: parentheses first, then the module prefix and separators
    substitutions = [('(', ' '), (')', ' '), ('opy.', ''),
                     (',', ''), ("'", ''), ('"', '')]
    converted = pystr
    for old, new in substitutions:
        converted = converted.replace(old, new)
    return converted
# | 9e5e35d78a03a1c36eb599c5036419fb4e34649e | 30,811
def get_min_max(arr):
    """
    Return a tuple (min, max) out of a list of unsorted integers.

    Args:
        arr(list): list of integers (may be empty)
    Returns:
        (int, int): min and max, or (None, None) for an empty list
    """
    if not arr:
        return None, None
    return min(arr), max(arr)
# | d7cd2304092c766bfd0ffcb2235e7ad0c6428e61 | 30,814
def size_for(s):
    """
    Convert a byte-amount string to an int number of bytes.

    The string is either a plain integer, or an integer ending in a
    specifier: k = kilo (1000), m = mega (1000000), g = giga (1000000000).
    For example

        size_for('1000') == 1000
        size_for('10m') == 10000000
        size_for('1000k') == size_for('1m')

    Unknown suffixes scale by 1 (historic behaviour, preserved).
    """
    s = s.strip()
    try:
        return int(s)
    except ValueError:
        pass
    multipliers = {'k': 1000, 'm': 1000000, 'g': 1000000000}
    return int(s[:-1]) * multipliers.get(s[-1], 1)
# | 3e88f0555f0ab1b06432d87c5ebca7f33b24d1c7 | 30,816
def _estimateNormedOutsideTemperature(data):
    """ When the normed outside temperature for a given region isn't known,
    this method will estimate it as follows:
        - the two-day mean temperatures are calculated for
            . the reference year
            . the extreme winter year
        - the normed outside temperature is the weighted min of both mean
          curves
            . the reference min is weighted with 0.35
            . the winter extreme min is weighted with 0.65
    A higher weighting of extreme winter data is used to model the tendency
    to oversize the heat systems.

    Arguments:
        data {pd DataFrame} -- Preprocessed TRY data for reference and
            extreme years; assumes a 'doy' column and
            ('reference'/'winter_extreme', 'T') columns — TODO confirm
            against the preprocessing step.

    Returns:
        float - Estimation of normed outside temperature for the given
            reference weather data
    """
    grouped_data = data.groupby('doy')
    meanT_ref = grouped_data.mean().reference['T']
    meanT_win = grouped_data.mean().winter_extreme['T']
    # rolling two-day average over consecutive days
    twoDayMeanT_ref = 0.5 * (meanT_ref[1:].values + meanT_ref[:-1].values)
    twoDayMeanT_win = 0.5 * (meanT_win[1:].values + meanT_win[:-1].values)
    return 0.35 * twoDayMeanT_ref.min() + 0.65 * twoDayMeanT_win.min()
# | ead03331a33b1c1492ccbff3cf53466686de484c | 30,817
import warnings
def igraph_info(g, census=False, fast=False):
"""Return summary dict of igraph properties"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
out = dict()
out["vertices"] = g.vcount()
out["edges"] = g.ecount()
out["density"] = 2*g.ecount()/(g.vcount()*(g.vcount()-1))
out["simple"] = g.is_simple()
out["directed"] = g.is_directed()
components = g.components(mode=1).sizes()
out["weak-components"] = len(components)
out["largest-weak"] = max(components)
if out["directed"]:
components = g.components(mode=2).sizes()
out["strong-components"] = len(components)
out["largest-strong"] = max(components)
if census:
out["dyads"] = g.dyad_census() if g.is_directed() else ()
out["triads"] = g.triad_census() if g.is_directed() else ()
if not fast:
out["diameter"] = g.diameter()
out["global clustering"] = g.transitivity_undirected()
out["local clustering"] = g.transitivity_avglocal_undirected()
return out | cc458be0dc77a6a2b107d7672832d20233842b8d | 30,819 |
def _is_whitenoise_installed() -> bool:
"""
Helper function to check if `whitenoise` is installed.
"""
try:
return True
except ModuleNotFoundError:
pass
return False | 3732d32de4fae1d9f65baeb481c9eb6a6dcdd7bd | 30,820 |
import os
def folder_search(input_folder, file_name):
    """
    Scan a folder and return absolute paths to entries that literally match
    *file_name* inside the folder itself or any of its immediate subfolders.
    *Previously named Foldersearch*

    Warning:
        The matching is literal; prefer re_folder_search for flexible
        regular-expression scans.

    Args:
        input_folder (str): Root path from which to search (recursion depth: 1).
        file_name (str): Name of a folder or file to look for.

    Returns:
        list: absolute paths that matched.
    """
    candidates = os.listdir(input_folder)
    candidates.append(".")  # also check input_folder itself
    matches = []
    for subdir in candidates:
        # stray debug print(Subdir) removed
        target = os.path.join(input_folder, subdir, file_name)
        if os.path.exists(target):
            matches.append(os.path.abspath(target))
    return matches
# | 94fc3e47c3b434174e298917b63a197d9c7c7a09 | 30,821
def sanitize_version_number(version):
    """Remove common non-numerical markers from version numbers obtained
    from git tags: a leading '.' gets a '-1' prefix and '_rc' becomes '.'
    so the result parses as dotted numbers."""
    if version.startswith('.'):
        version = '-1' + version
    return version.replace('_rc', '.')
# | 4627ce6ad06046b575da3a272e8d8acc41183000 | 30,822
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.