content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def select_color_marker(i):
    """Return a matplotlib format string (color + line style) for index *i*."""
    palette = ['b', 'g', 'r', 'c', 'y', 'k']
    dashes = ['-', '--', '-.', ':']
    # Cycle through colors first; advance the line style once per full color cycle.
    return palette[i % len(palette)] + dashes[(i // len(palette)) % len(dashes)]
import argparse
def setup_train_args():
    """
    Set up and parse the command-line arguments for training the GPT-2
    dialogue model (paths, hardware selection and optimisation settings).
    """
    parser = argparse.ArgumentParser()
    # Hardware selection.
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='设置使用哪些显卡')
    parser.add_argument('--no_cuda', default=False,
                        action='store_true', help='不使用GPU进行训练')
    # Model and vocabulary configuration.
    parser.add_argument('--model_config', default='config/config.json', type=str, required=False,
                        help='选择模型参数')
    parser.add_argument('--vocab_path', default='vocab/', type=str, required=False, help='选择词库')
    # Training data locations (raw and pre-tokenized).
    parser.add_argument('--train_raw_path',
                        default='data/convai2/train/data.txt',
                        type=str, required=False, help='原始训练语料')
    parser.add_argument('--train_tokenized_path', default='data/convai2/train/tokenized.txt', type=str,
                        required=False,
                        help='将原始训练语料tokenize之后的数据的存放位置')
    parser.add_argument('--log_path', default='dialogue_model/convai2/training.log', type=str, required=False, help='训练日志存放位置')
    parser.add_argument('--raw', default=False, action='store_true', help='是否对原始训练语料做tokenize。若尚未对原始训练语料进行tokenize,则指定该参数')
    # Optimisation hyper-parameters.
    parser.add_argument('--epochs', default=12, type=int, required=False, help='训练的轮次')
    parser.add_argument('--batch_size', default=8, type=int, required=False, help='训练batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='学习率')
    parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='warm up步数')
    parser.add_argument('--log_step', default=100, type=int, required=False, help='多少步汇报一次loss')
    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='梯度积累')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    # Output and logging destinations.
    parser.add_argument('--dialogue_model_output_path', default='dialogue_model/convai2/', type=str, required=False,
                        help='对话模型输出路径')
    parser.add_argument('--pretrained_model', default='model_param/', type=str, required=False, help='预训练的GPT2模型的路径')
    parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard路径')
    parser.add_argument('--seed', type=int, default=None, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--num_workers', type=int, default=1, help="dataloader加载数据时使用的线程数量")
    # MMI (maximum mutual information) model options for DialoGPT.
    parser.add_argument('--train_mmi', action='store_true', help="若指定该参数,则训练DialoGPT的MMI模型")
    parser.add_argument('--train_mmi_tokenized_path', default='data/train_mmi_tokenized.txt', type=str,
                        required=False,
                        help='将原始训练语料的每段对话翻转,然后进行tokenize之后的数据的存放位置,用于训练MMI模型')
    parser.add_argument('--mmi_model_output_path', default='mmi_model', type=str, required=False, help='MMI模型保存路径')
    # parser.add_argument('--max_len', type=int, default=60, help='每个utterance的最大长度,超过指定长度则进行截断')
    # parser.add_argument('--max_history_len', type=int, default=5, help="dialogue history的最大长度")
    return parser.parse_args()
import pickle
def get_mct_frame(site):
    """Serve up a dataframe of micro-CT SSA data for a given site.

    Parameters: site -- key identifying the site within the pickled dict.
    Returns: dataframe de-duplicated on and indexed by 'height_ave (cm)'.
    """
    # Load a dictionary of dataframes stored as a pickle file
    # The pickle file itself is generated by notebook: CheckOutCT.ipynb
    # NOTE(review): the file handle is never closed — consider a `with` block.
    frames = pickle.load(open('../data/microCT/processed_mCT.p', 'rb'))
    # Select your dataframe from the dictionary by using the site code as the key
    mct_df = frames[site]
    # Drop duplicate samples that have the same average height
    mct_df = mct_df.drop_duplicates(subset='height_ave (cm)',keep="first")
    mct_df.set_index('height_ave (cm)', inplace=True)
    return mct_df
def yield_node_names(nodes):
    """Yield ``op.name`` when a node has one, otherwise ``str(node)``.

    Fix: the original returned the node object itself as the fallback,
    contradicting the documented ``str(node)`` contract.
    """
    # getattr's default is evaluated eagerly; str() is cheap, so this is fine.
    return (getattr(n, "name", str(n)) for n in nodes)
def get_memory_limit(component_limit, overall_limit):
    """
    Return the smaller of the two limits, ignoring ones that are None.
    Returns None when neither limit is set.
    """
    candidates = [x for x in (component_limit, overall_limit) if x is not None]
    if not candidates:
        return None
    return min(candidates)
def _isQuoted(string, substring, idx):
"""returns True if position i of string is in a quoted region"""
bfr = string[:idx]
aft = string[idx + len(substring):]
if (bfr.count('"') % 2 or aft.count('"') % 2 or
bfr.count("'") % 2 or aft.count("'") % 2):
return True
else:
return False | 2adbc0ca6e61440c5f3e3070d4cfe5cab305d4ec | 39,204 |
def is_tag(t):
    """Return True when *t* (after stripping whitespace) opens a `{%` tag."""
    stripped = t.strip()
    return stripped.startswith('{%')
import argparse
def parse_args(args):
    """
    Parse the KoalaBot command-line arguments with argparse.

    :param args: sys.argv[1:]
    :return: argparse.Namespace with recognised options (unknown ones ignored)
    """
    parser = argparse.ArgumentParser(description='Start the KoalaBot Discord bot')
    parser.add_argument('--config', help="Config & database directory")
    known, _unknown = parser.parse_known_args(args)
    return known
import locale
def amountToCurrency(amount):
    """
    Format *amount* using the user's currency locale, or fall back to `$`.
    """
    try:
        formatted = locale.currency(amount, grouping=True)
    except ValueError:
        # Locale without currency support (e.g. the default "C" locale).
        formatted = '$' + str(round(amount, 2))
    return formatted
def strip_library(name):
    """Strip a leading ``library/`` prefix from a fully qualified name.

    >>> strip_library("fuchsia.device/MAX_DEVICE_NAME_LEN")
    'MAX_DEVICE_NAME_LEN'
    >>> strip_library("SomethingGreat")
    'SomethingGreat'
    """
    # rsplit keeps everything after the last '/'; no '/' leaves name untouched.
    return name.rsplit('/', 1)[-1]
def str_to_list(str, def_val=None):
    """
    Best-effort conversion of a value to a list.

    Strings are split on commas; a non-string value is wrapped in a
    single-element list; None falls back to *def_val* (default: empty list).

    Fixes: the original's try/finally leaked AttributeError for non-string
    input (contradicting its best-effort intent) and contained an
    unreachable ``ret_val is None`` branch.

    Note: the parameter keeps its original name ``str`` for backward
    compatibility even though it shadows the builtin.
    """
    if def_val is None:
        def_val = []
    try:
        return str.split(",")
    except AttributeError:
        # Value has no .split(): wrap it, or return the default for None.
        return def_val if str is None else [str]
def parse_cdr_annotations_pubtator(entity_type, subset):
    """Get each annotation in the BC5CDR corpus with documents in PubTator format.

    Requires:
        entity_type: is str, either "Chemical" or "Disease"
        subset: is str, either "train", "dev", "test" or "all"
    Ensures:
        annotations: dict mapping document id (str) to the list of
        (mesh_id, annotation_text) tuples found in that document.

    Fixes: removed the redundant close() inside the `with` block and the
    if/elif filename selection (table-driven now); appends via setdefault.
    """
    corpus_dir = "BioCreative-V-CDR-Corpus/CDR_Data/CDR.Corpus.v010516/"
    subset_files = {
        "train": ["CDR_TrainingSet.PubTator.txt"],
        "dev": ["CDR_DevelopmentSet.PubTator.txt"],
        "test": ["CDR_TestSet.PubTator.txt"],
    }
    subset_files["all"] = (subset_files["train"] + subset_files["dev"]
                           + subset_files["test"])
    annotations = {}
    # Unknown subset -> no files -> empty dict, matching the original behavior.
    for filename in subset_files.get(subset, []):
        with open(corpus_dir + filename, 'r') as corpus_file:
            # Stream line by line instead of materialising readlines().
            for line in corpus_file:
                line_data = line.split("\t")
                # Annotation lines have 6 tab-separated fields; field 4 holds the type.
                if len(line_data) == 6 and line_data[4] == entity_type:
                    document_id = line_data[0]
                    mesh_id = line_data[5].strip("\n")
                    annotation = (mesh_id, line_data[3])
                    annotations.setdefault(document_id, []).append(annotation)
    return annotations
def nan_to_str(x) -> str:
    """Map any non-string value (e.g. np.nan) to the placeholder 'Not Spell'."""
    return x if isinstance(x, str) else 'Not Spell'
import copy
def deeplist(x: list) -> list:
    """
    Return a deep copy of *x*.

    Needed because list() alone only makes a shallow copy; nested lists
    would still be shared with the original.

    :param x: List to copy
    :return: Deep copy produced by copy.deepcopy().
    """
    return copy.deepcopy(x)
import time
def QA_util_date_stamp(date):
    """
    explanation:
        Convert a date/datetime-like string to a float POSIX timestamp.
    params:
        * date->
            meaning: date or datetime; only the first 10 chars ('YYYY-MM-DD') are used
            type: str (or anything whose str() starts with that format)
    return:
        float timestamp of local midnight on that date
    """
    day_part = str(date)[:10]
    parsed = time.strptime(day_part, '%Y-%m-%d')
    # mktime interprets the struct_time in the local timezone.
    return time.mktime(parsed)
def get_layer_index(model, name):
    """Return the index of the layer called *name*, or None when absent."""
    matches = (i for i, layer in enumerate(model.layers) if layer.name == name)
    return next(matches, None)
import os
import json
def get_abi_json():
    """
    Load and return the FITCOIN ERC20 token ABI from erc20/abi.json
    (resolved relative to this module).
    :return: parsed JSON object
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, 'erc20', 'abi.json')) as handle:
        return json.load(handle)
import gzip
def encode_gzip(data, compresslevel=6):
    """Gzip-compress *data* (bytes) at the given compression level."""
    compressed = gzip.compress(data, compresslevel=compresslevel)
    return compressed
def isImage(url):
    """Check if url is related to an image
    Args:
        url (string): URL or path to test
    Returns:
        boolean: return true if url ends with a known image extension
    """
    # str.endswith accepts a tuple of suffixes, replacing the chained `or`s.
    return url.endswith((".png", ".jpg", ".jpeg", ".svg"))
def _get_separator(num, sep_title, sep_character, sep_length):
"""Get a row separator for row *num*."""
left_divider_length = right_divider_length = sep_length
if isinstance(sep_length, tuple):
left_divider_length, right_divider_length = sep_length
left_divider = sep_character * left_divider_length
right_divider = sep_character * right_divider_length
title = sep_title.format(n=num + 1)
return "{left_divider}[ {title} ]{right_divider}\n".format(
left_divider=left_divider, right_divider=right_divider, title=title
) | 0e10658e11580264a7722f59390a9dfcfaf0a71b | 39,229 |
def parse_pages(pages):
    """
    Split a page-range string into its start and end pages.

    :param pages: e.g. "12-15" or "12"
    :return: [start, end]; end is '' when no '-' separator is present
    """
    # partition splits at the first '-'; with no '-' it yields (pages, '', '').
    start, _sep, end = pages.partition('-')
    return [start, end]
import os
def create_adapter_code_dict():
    """Turn TSV-file to dict.

    Read the tab separated file docs/adaptercodes.txt (next to this module)
    containing numbered seqI7 and seqI5 adapters.
    Return dict with sequence as key and number (code) as value.
    """
    basedir = os.path.dirname(os.path.abspath(__file__))
    adapterdict = dict()
    with open(os.path.join(basedir, 'docs/adaptercodes.txt')) as f:
        for line in f:
            # Skip blank / whitespace-only lines.
            if not line.split():
                continue
            else:
                code, sequence = line.strip().split('\t')
                adapterdict[sequence] = code
    return adapterdict
import json
def jchars(*args):
    """
    .. function:: jchars(text) -> character jpack

    Split each input text into its composing characters and return them
    all as one compact JSON array (a "jpack").

    Examples:
    >>> jchars('this is a text')
    '["t","h","i","s"," ","i","s"," ","a"," ","t","e","x","t"]'
    >>> jchars('another', 'text')
    '["a","n","o","t","h","e","r","t","e","x","t"]'
    """
    chars = [ch for text in args for ch in text]
    return json.dumps(chars, separators=(',', ':'), ensure_ascii=False)
def asymmetric_extend(q1, q2, extend_fn, backward=False):
    """Directional wrapper around extend_fn.

    Forward: extend_fn(q1, q2).  Backward: extend_fn(q2, q1), with the
    resulting sequence reversed.
    """
    if not backward:
        return extend_fn(q1, q2)
    return reversed(list(extend_fn(q2, q1)))
def last_chars(sort_table):
    """Concatenate the final character of every row (the BWT last column)."""
    return ''.join(row[-1] for row in sort_table)
from datetime import datetime
def data_extenso() -> str:
"""
transforma a data de numeral para extenso
:return: str
"""
mes = 'janeiro', 'fevereiro', 'marco', 'abril', 'maio', 'junho',\
'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro'
return f'Colatina-ES, {datetime.today().day} de {mes[datetime.today().month -1]} de {datetime.today().year}' | 7937429571a5a512cc3669152d9761aadad46b40 | 39,237 |
def _ll_subvoxel_overlap(xs, x1, x2):
"""For an interval [x1, x2], return the index of the lower limit of the
overlapping subvoxels whose borders are defined by the elements of xs."""
xmin = min(x1, x2)
if xmin <= xs[0]:
return 0
elif xmin >= xs[-1]:
ll = len(xs) - 1
return ll
else:
for i, x in enumerate(xs):
if x > xmin:
ll = i - 1
return ll
ll = 0
return ll | 5563642767d626f9bc516b90930432b2d5692442 | 39,238 |
from typing import Mapping
from typing import Sequence
from typing import Any
def to_key_value(data: Mapping) -> Sequence[Mapping[str, Any]]:
    """
    Convert a mapping into the visualizer form [{"key": name, "value": v}, ...].

    :param data: data to be converted
    :return: converted data
    """
    return [{"key": k, "value": v} for k, v in data.items()]
def _end_format(header, ender, dat_str):
""" Write a block with an end
"""
return (
header + '\n' +
dat_str + '\n' +
ender
) | 3676a7fe25e41f021fea4bc2e5debc99b9b3c771 | 39,240 |
import json
def _read_notebook_data_dict(notebook_path: str) -> dict:
"""
Read a dictionary of notebook data.
Parameters
----------
notebook_path : str
Path of target notebook.
Returns
-------
notebook_data_dict : dict
A dictionary of notebook data.
"""
with open(notebook_path, 'r') as f:
notebook_data_str: str = f.read()
notebook_data_dict: dict = json.loads(notebook_data_str)
return notebook_data_dict | c74fabb3ad1ff7d0e5d002791b1aef08a353199a | 39,241 |
import itertools
def _binary_count(n):
"""Count `n` binary digits from [0...0] to [1...1]."""
return list(itertools.product([0, 1], repeat=n)) | 0b21fc49763a7c09bd1ac84c4c823a0239a31db9 | 39,246 |
def bdev_compress_get_orphans(client, name=None):
    """Get a list of comp bdevs that do not have a pmem file (aka orphaned).

    Args:
        name: comp bdev name to query (optional; if omitted, query all comp bdevs)
    Returns:
        List of comp bdev names.
    """
    params = {'name': name} if name else {}
    return client.call('bdev_compress_get_orphans', params)
import json
def message_to_json(message):
    """
    Normalise a message string into a JSON string so that all REST
    responses share a consistent JSON format.

    Valid JSON passes through re-serialised; anything else is wrapped
    as {"msg": <message>}.

    Fix: removed the misspelled, dead ``mesage_dict`` initialisation.
    """
    try:
        message_dict = json.loads(message)
    except ValueError:
        # Not valid JSON: wrap the raw message.
        message_dict = {"msg": message}
    return json.dumps(message_dict)
import networkx
def graph_search(nx_graph, target_species):
    """Search nodal graph and generate list of species to remove
    Parameters
    ----------
    nx_graph : obj
        networkx graph object of solution
    target_species : list
        List of target species to search from
    Returns
    -------
    essential_nodes : list
        Names of essential species reachable from the targets, de-duplicated,
        in first-visit order

    Fixes: unified the duplicated single-/multi-target branches, replaced
    the O(n) list membership test with a set, and made the empty target
    list return [] instead of raising IndexError.
    """
    essential_nodes = []
    seen = set()
    for target in target_species:
        for sp in networkx.dfs_preorder_nodes(nx_graph, target):
            # Preserve first-visit order while de-duplicating across targets.
            if sp not in seen:
                seen.add(sp)
                essential_nodes.append(sp)
    return essential_nodes
def recursive_replace(steps: int, to_expand: str, rules: dict) -> str:
    """ Replace the given string with a new replacement string, according to the rules.
    Args:
        steps (int): How many iterations. Decremented with each recursion. Recursion ends when steps reaches 1.
        to_expand (str): The str to be expanded at this recursion depth.
        rules (dict): Map of XY: Z, such that pair XY becomes XZY """
    res = to_expand # E.g. NNCB first iteration, NCN on second iteration, NBC on third...
    chars_inserted = 0 # This grows as we insert horizontally, to allow indexing
    for i in range(len(to_expand)-1): # sliding window of 2 chars; stop at len-1
        pair = to_expand[i:i+2] # E.g. CB
        if pair in rules: # if this pair has a valid replacement str
            replacement = pair[0] + rules[pair] + pair[1] # E.g. CH -> CBH
            # Index into `res`, shifted right by everything inserted so far.
            insertion_point = i + chars_inserted
            if steps > 1: # Deeper recursions to go
                # Now recurse into the pair we've just expanded
                replacement = recursive_replace(steps-1, replacement, rules)
            res = res[:insertion_point] + replacement + res[insertion_point+2:]
            # Because replacement is recursive, XY could be replaced by a long str
            chars_inserted += len(replacement)-len(pair)
    return res
def set_override_certificate_errors(override: bool) -> dict:
    """Enable/disable overriding certificate errors. If enabled, all certificate
    error events need to be handled by the DevTools client and should be
    answered with `handleCertificateError` commands.

    Parameters
    ----------
    override: bool
        If true, certificate errors will be overridden.
    """
    params = {"override": override}
    return {"method": "Security.setOverrideCertificateErrors", "params": params}
def nonzeros(u):
    """Return number of non-zero items in list `u`."""
    # Generator avoids materialising an intermediate list just to count it.
    return sum(1 for val in u if val != 0)
def match_fields(exp_fields, fields):
    """
    Check field names and values match the expected ones.
    - exp_fields:
        A list of dictionaries with field name/value pairs.
    - fields:
        SPARKL event fields as returned by the listener, e.g.
        [{'attr': {'name': 'n', 'value': 3}},
         {'attr': {'name': 'div', 'value': 2}}]
    """
    # Normalise each event field to {name: value}; FLAG fields carry no
    # 'value' key and map to {name: None}.
    normalised = []
    for field in fields:
        attr = field['attr']
        normalised.append({attr['name']: attr.get('value')})
    return exp_fields == normalised
def address_domain(address):
    """
    Return the domain part (after the last '@') of an email address.
    """
    # rpartition yields the whole string when no '@' is present, like split[-1].
    return address.rpartition('@')[2]
def makeFooter(namestub, path, verbose):
    """Build and return the footer string for the generated script.

    NOTE(review): `namestub` and `verbose` are currently unused — confirm intent.
    The backslash-newlines below are escapes *inside* the string literal, so
    the emitted define-layer! expression is a single line (plus trailing \\n).
    """
    # %s is substituted with `path` for the filename entry.
    linestring=str("(define-layer! -1 \
    (cons 'filename \"%s/\")\
    (cons 'visible #f)\
    (cons 'color #(0 0 0)))\n" % path )
    linestring+=str("(set-render-type! 3)\n")
    return linestring
import os
def tar(path):
    """
    Build a 'tar' command line that archives everything under *path*,
    writing output to stdout.

    :param path: All contents under path are 'tar'ed; must exist.
    :raises ValueError: when path does not exist.
    """
    if not os.path.exists(path):
        raise ValueError("Invalid argument: 'path' doesn't exist")
    trimmed = path.rstrip(os.sep)
    parent, base = os.path.split(trimmed)
    return "tar -C {} {}".format(parent, base)
def merge_xml(xmls, output_file):
    """
    Build the shell command that merges several xml files.

    Parameters:
    -----------
    xmls: list
        List of paths of the xml files (at least two)
    output_file: str
        Path of the merged xml
    """
    if len(xmls) < 2:
        raise Exception("Need two or more xml files to merge")
    joined = " ".join(xmls)
    # Command is returned (not executed) — the os.system call stays disabled.
    return f"scxmlmerge {joined} > {output_file}"
def archivo_valido(archivo, archivo_dict):
    """Confirm the CSV file exists and has written notes.

    Receives the CSV file (currently unused) and its parsed dict.
    Returns a boolean; prints a Spanish warning when the dict is empty.
    """
    es_vacio = archivo_dict == {}
    if es_vacio:
        # User-facing message intentionally kept in Spanish.
        print("El archivo no existe o está vacío")
    return not es_vacio
def isNewPhase(ds1, ds2):
    """
    Check whether two dynamic states differ in their effector activations.
    :param ds1:
    :param ds2:
    :return: True when any effector activation differs (i.e. a new phase),
             False when the contact sets are identical
    """
    assert ds1.effNum() == ds2.effNum(), "The two dynamic states do not comes from the same model."
    return any(ds1.effActivation(k) != ds2.effActivation(k)
               for k in range(ds1.effNum()))
def add_orphan_settings_to_tool_dependencies( tool_dependencies, orphan_tool_dependencies ):
    """Inspect all received tool dependencies and label those that are orphans within the repository.

    Returns a new dict whose entries carry an ``is_orphan`` boolean flag:
    'set_environment' entries are flagged per item, every other (package)
    entry is flagged as a whole.
    """
    orphan_env_dependencies = orphan_tool_dependencies.get( 'set_environment', None )
    new_tool_dependencies = {}
    if tool_dependencies:
        for td_key, requirements_dict in tool_dependencies.items():
            if td_key in [ 'set_environment' ]:
                # "set_environment": [{"name": "R_SCRIPT_PATH", "type": "set_environment"}]
                if orphan_env_dependencies:
                    new_set_environment_dict_list = []
                    # Flag each environment variable entry individually.
                    for set_environment_dict in requirements_dict:
                        if set_environment_dict in orphan_env_dependencies:
                            set_environment_dict[ 'is_orphan' ] = True
                        else:
                            set_environment_dict[ 'is_orphan' ] = False
                        new_set_environment_dict_list.append( set_environment_dict )
                    new_tool_dependencies[ td_key ] = new_set_environment_dict_list
                else:
                    # No known orphan env dependencies: keep the list untouched.
                    new_tool_dependencies[ td_key ] = requirements_dict
            else:
                # {"R/2.15.1": {"name": "R", "readme": "some string", "type": "package", "version": "2.15.1"}
                # NOTE(review): flags are set on the original dicts in place, not on copies.
                if td_key in orphan_tool_dependencies:
                    requirements_dict[ 'is_orphan' ] = True
                else:
                    requirements_dict[ 'is_orphan' ] = False
                new_tool_dependencies[ td_key ] = requirements_dict
    return new_tool_dependencies
def create_events_model(areas, virus_states):
    """Create events for the model, one per (area, virus variant) pair.

    Parameters
    ----------
    areas : list of strings
        Names of the areas.
    virus_states : list of strings
        Names of all virus variants.

    Returns
    -------
    events: dict
        Maps event names to dicts containing the trigger formula,
        assignee id and assignment formula.
    """
    return {
        f"event_{area}_{virus}": {
            "trigger_formula": f"geq(time, {virus}_{area}_appears_time)",
            "assignee_id": f"infectious_{area}_vac0_{virus}",
            "assign_formula": f"{virus}_{area}_appears_quantity",
        }
        for area in areas
        for virus in virus_states
    }
def _escape_cmd_arg(arg):
"""quote/escape and argument for a command line call so that it can
be safely used even if it has special charaters"""
arg = str(arg)
if ' ' in arg or '"' in arg:
return '"' + arg.replace('"', '""') + '"'
return arg | 502b0d3435cbed4e9601a407946479500d56455f | 39,267 |
import os
def _rrd_exists(self):
"""
.. versionadded:: 0.2
:returns: True if the RRD file already exists, False otherwise
You can also use a RRD object directly for comparision in
boolean expression, to check whether the RRD file exists
or not. Thus ``MyRRD("my.rrd").exists() == MyRRD("my.rrd")``.
"""
return os.path.isfile(self.filename) | a928f75a660bb267ccd18fa756ab36f066b0461b | 39,268 |
def _generate_math_operator(operator_name):
"""Helper function for _Vector operators."""
def operator(self, other):
return type(self)(self._operator(operator_name, other))
return operator | aa722432092092e6599c4c9d08ca0803a5c6e4ae | 39,269 |
def pad(text, bits=32):
    """
    Pad *text* so its length is a multiple of the block size; each pad
    character encodes the pad length (PKCS#7-style).

    :param      text | <str>
                bits | <int>

    :return     <str>
    """
    pad_len = bits - len(text) % bits
    return text + chr(pad_len) * pad_len
def build_hexagonal_position(index_matrix):
    """Compute the hexagonal-grid positions of the pixels in the index matrix.

    Args:
        index_matrix (tensor): pixel indices in the axial addressing system;
            entries equal to -1 mark unused positions and are skipped.

    Returns:
        list of [x, y] positions.
    """
    rows = index_matrix.shape[0]
    cols = index_matrix.shape[1]
    positions = []
    for r in range(rows):
        for c in range(cols):
            if index_matrix[r, c] == -1:
                continue
            # Each row is shifted left by half a pixel per row index.
            positions.append([c - r / 2, -r])
    return positions
def setup_dims(dataset, output_global_dim=None):
    """Extract feature dimensions from the first sample of *dataset*.

    Returns [input_node, input_edge, input_global,
             output_node, output_edge, output_global] dimensions.
    A 'globals' entry that is None maps to dimension 0; an explicit
    *output_global_dim* overrides the value read from the sample.
    """
    sample_in = dataset[0]['graph_input']
    sample_out = dataset[0]['graph_target']

    def _last_dim(entry):
        # Length of the last axis, or 0 when the entry is absent (None).
        return entry.shape[-1] if entry is not None else 0

    if output_global_dim is None:
        output_global_dim = _last_dim(sample_out['globals'])
    return [
        sample_in['nodes'].shape[-1],
        sample_in['edges'].shape[-1],
        _last_dim(sample_in['globals']),
        sample_out['nodes'].shape[-1],
        sample_out['edges'].shape[-1],
        output_global_dim,
    ]
import subprocess
import sys
def hoa_to_dot(hoa):
    """
    Converts an HOA automaton into its DOT representation.
    Works only for nondeterministic automata.

    Shells out to Spot's ``autfilt`` tool, which must be on PATH;
    raises subprocess.CalledProcessError on a non-zero exit status.
    """
    autfilt = subprocess.Popen(['autfilt', '--dot'],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = autfilt.communicate(hoa.encode('utf-8'))
    # autfilt may emit warnings on stderr even on success; surface them and continue.
    if stderr:
        print("Calling 'autfilt' for the conversion to DOT produced the message:\n"
              + stderr.decode('utf-8'), file=sys.stderr)
    ret = autfilt.wait()
    if ret:
        raise subprocess.CalledProcessError(ret, 'autfilt')
    return stdout.decode('utf-8')
def is_valid(box):
    """Check that a bounding box (..., [x1, y1, x2, y2]) has positive extent."""
    width_ok = box[..., 2] > box[..., 0]
    height_ok = box[..., 3] > box[..., 1]
    # NOTE(review): `and` only works for scalar comparisons — presumably callers
    # pass a single box, not a batch; confirm.
    return width_ok and height_ok
from typing import OrderedDict
def parse_authors(file):
    """Parse an authors TSV file.

    Each line: author<TAB>comma-separated-affiliations[<TAB>metadata...].
    Returns (authors, metadata) OrderedDicts keyed by author name.
    """
    authors = OrderedDict()
    metadata = OrderedDict()
    for raw_line in file:
        fields = raw_line.rstrip().split("\t")
        author, affil_str = fields[0:2]
        authors[author] = affil_str.split(",")
        # Anything after the second column is free-form metadata.
        metadata[author] = fields[2:]
    return authors, metadata
def mask_last_dim(tensor, binary_mask):
    """Select tensor[..., 0] where the mask is 1 and tensor[..., 1] where it is 0."""
    first = tensor[..., 0]
    second = tensor[..., 1]
    return first * binary_mask + second * (1 - binary_mask)
def minimum(ints):
    """
    Return the minimum of a cons-style nested-tuple list of integers,
    or None when the list is empty (i.e. ()).
    """
    if ints == ():
        return None
    head, tail = ints
    rest_min = minimum(tail)
    # head wins against an empty tail or any larger-or-equal tail minimum.
    if rest_min is None or head <= rest_min:
        return head
    return rest_min
def clean_name(name: str):
    """Return the first line of *name* with surrounding whitespace removed."""
    first_line = name.splitlines()[0]
    return first_line.strip()
def lib_version_satisfied(current_ver: str, mini_ver_limited: str,
                          newest_ver_limited: str = ""):
    """
    Check python lib version whether is satisfied.

    Notes:
        Version number must be format of x.x.x, e.g. 1.1.0.
    Args:
        current_ver (str): Current lib version.
        mini_ver_limited (str): Mini lib version (inclusive lower bound).
        newest_ver_limited (str): Newest lib version (inclusive upper bound;
            empty string means unbounded above).
    Returns:
        bool, true or false.
    Raises:
        ValueError: when any version is not of the form x.x.x.

    Fix: versions are now compared numerically component-by-component.
    The original compared raw strings, so e.g. "10.0.0" < "9.0.0".
    """
    required_version_number_len = 3

    def _parse(ver: str):
        # Split into exactly three numeric components.
        parts = ver.split(".")
        if len(parts) != required_version_number_len:
            raise ValueError("Version number must be format of x.x.x.")
        try:
            return tuple(int(p) for p in parts)
        except ValueError:
            raise ValueError("Version number must be format of x.x.x.")

    current = _parse(current_ver)
    if current < _parse(mini_ver_limited):
        return False
    if newest_ver_limited and current > _parse(newest_ver_limited):
        return False
    return True
import os
def get_free_disk_space(p):
    """
    Return the number of free bytes on the drive that ``p`` is on.
    POSIX only: relies on os.statvfs.
    """
    stats = os.statvfs(p)
    return stats.f_bavail * stats.f_frsize
def datetime_to_list(date):
    """
    Convert a datetime object into [year, month, day, hour, minute, second].

    Arguments
    ---------
    date: datetime object
    """
    fields = ('year', 'month', 'day', 'hour', 'minute', 'second')
    return [getattr(date, f) for f in fields]
def get_profitrate_log(fpath):
    """Extract profit rates ("利益率") from a log file.

    Parameters: fpath -- path to a UTF-8 log whose relevant lines look like
    "<key>:INFO:利益率=<value>".
    Returns: dict mapping the key part to the profit-rate value string.
    """
    result_dict = {}
    with open(fpath, 'r', encoding='utf_8') as f:
        lines = f.readlines()
        for line in lines:
            # NOTE(review): assumes every line contains ':INFO:' — a line
            # without it raises IndexError below; confirm log format.
            parse_line = line.split(':INFO:')
            key = parse_line[0]
            value = parse_line[1].replace('利益率=', '').replace('\n', '')
            # Keep only lines that actually mention the profit rate.
            if parse_line[1].find("利益率") >= 0:
                result_dict[key] = value
    return result_dict
def ignore_formatter(error):
    """Formatter that emits the empty string, whatever the error is."""
    del error  # intentionally unused
    return ''
from typing import OrderedDict
def data_info_factory(names, funcs):
    """
    Factory to create a function that can be used as an ``option``
    for outputting data object summary information.

    Parameters
    ----------
    names : list
        List of information attribute names
    funcs : list
        List of functions computing the corresponding attribute; a str
        entry is treated as a method name to call on the data object.

    Returns
    -------
    func : function
        Maps a data object to an OrderedDict of name -> str(value);
        any computation that raises renders as '--'.
    """
    def info_option(dat):
        results = []
        for attr_name, fn in zip(names, funcs):
            try:
                value = getattr(dat, fn)() if isinstance(fn, str) else fn(dat)
            except Exception:
                # Any failure renders as a placeholder rather than raising.
                results.append('--')
            else:
                results.append(str(value))
        return OrderedDict(zip(names, results))
    return info_option
import six
def _encode_metadata(metadata):
    """
    UTF8 encode any unicode keys or values in given metadata dict.
    :param metadata: a dict
    """
    def _enc(item):
        # six.text_type is the unicode string type on both Py2 and Py3.
        return item.encode('utf8') if isinstance(item, six.text_type) else item
    return {_enc(k): _enc(v) for k, v in metadata.items()}
import os
import pickle
def load_char_mapping():
    """Load and return the pickled character-mapping object
    from processed/unq_chars.pkl."""
    mapping_path = os.path.join("processed", "unq_chars.pkl")
    with open(mapping_path, "rb") as handle:
        return pickle.load(handle)
import argparse
def chkint(number):
    """
    Sanity check integer to use as a depth level for creating test directories.
    Accepts 1..15 inclusive; raises argparse.ArgumentTypeError otherwise.
    """
    try:
        value = int(number)
    except ValueError:
        raise argparse.ArgumentTypeError("{} is not a valid integer.".format(number))
    if value < 1:
        raise argparse.ArgumentTypeError("{} is less than 1!".format(value))
    if value > 15:
        raise argparse.ArgumentTypeError("{} is too large... 15 MAX!".format(value))
    return value
import torch
def get_means(tensors_list):
    """
    Calculate the mean over dim 0 for each tensor in the list. In our case
    the list typically contains a tensor per class, such as per-class z values.

    Parameters:
        tensors_list (list): List of Tensors (non-tensor entries allowed)
    Returns:
        list: mean vector per tensor; [] for non-tensor entries
    """
    return [torch.mean(entry, dim=0) if isinstance(entry, torch.Tensor) else []
            for entry in tensors_list]
def deltatime_format(a, b):
    """ Compute and format the time elapsed between two points in time.
    Args:
        a Earlier point-in-time
        b Later point-in-time
    Returns:
        Elapsed time integer (in s),
        Formatted elapsed time string (human-readable way)
    """
    elapsed = b - a
    # Successive divmods peel off seconds, minutes and hours.
    mins, secs = divmod(elapsed, 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    return elapsed, f"{days} day(s), {hours} hour(s), {mins} min(s), {secs} sec(s)"
import os
def gen_clf_output(output, classifier_type, depth):
    """
    Generate the path of the output classifier pickle from the output CSV
    path, the classifier type and the max depth of the tree.
    """
    directory = os.path.dirname(output)
    basename = os.path.basename(output)
    pkl_name = basename.replace('.csv', f'.{classifier_type}.tree_max_depth_{depth}.pkl')
    return os.path.join(directory, pkl_name)
def pks_from_iterable(iterable, unique_output=False):
    """
    Return pks list based on iterable
    :param iterable: list of django model objects OR django queryset
    :param unique_output: if True returned list will be unique
        (first occurrence kept, input order preserved)
    :return: list of int
    :raises TypeError: if any element is neither a model object with an
        integer pk nor something convertible to int
    """
    pks = []
    for obj in iterable:
        try:
            # model objects expose .pk; plain ints/strings fall through as-is
            pks.append(int(getattr(obj, 'pk', obj)))
        except (TypeError, ValueError):
            raise TypeError("Iterable %s is not any of Queryset, list with django Model objects or ints" % iterable)
    # dict.fromkeys deduplicates while preserving first-seen order,
    # unlike list(set(...)) whose ordering is unspecified
    return list(dict.fromkeys(pks)) if unique_output else pks
import hashlib
def sha1_hash_from_text(text: str) -> str:
    """Compute the SHA-1 hex digest of a text string.

    Parameters
    ----------
    text: str
        The text to be hashed

    Returns
    -------
    str
        the hash of the text
    """
    hasher = hashlib.sha1()
    hasher.update(text.encode())
    return hasher.hexdigest()
import tempfile
import os
import shutil
def model_to_bytes(model):
    """
    Serialize the Keras model to HDF5 and return the file contents as bytes.
    A temp directory holds the intermediate file and is always cleaned up.

    :param model: object exposing a ``save(path)`` method (e.g. a Keras model)
    :return: bytes of the serialized model
    """
    temp_dir = tempfile.mkdtemp()
    try:
        temp_path = os.path.join(temp_dir, "model.h5")
        model.save(temp_path)
        with open(temp_path, 'rb') as fh:
            return fh.read()
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
import click
def os_options(f):
    """Aggregate multiple common options into one.
    This decorator should be used by CLI commands that need an
    Openstack client.

    Every option reads its default from the conventional OS_* environment
    variable, so credentials can be supplied via the environment instead
    of the command line; click passes the collected values to the
    decorated command as parameters.
    """
    # --- Identity / credentials and project (tenant) scoping ---
    f = click.option('--os-username', help='Openstack Username', required=True,
                     envvar='OS_USERNAME')(f)
    f = click.option('--os-password', help='Openstack Password', required=True,
                     envvar='OS_PASSWORD')(f)
    # NOTE(review): --os-project-id reads env OS_TENANT_NAME (not
    # OS_PROJECT_ID) — looks intentional for legacy setups, but confirm.
    f = click.option('--os-project-id', help='Openstack Project ID',
                     required=True, envvar='OS_TENANT_NAME')(f)
    f = click.option('--os-auth-url', help='Keystone auth URL',
                     envvar='OS_AUTH_URL')(f)
    f = click.option('--os-region-name', help='Keystone region name',
                     default='RegionOne', envvar='OS_REGION_NAME')(f)
    f = click.option('--os-service-type',
                     help='Defaults to env[OS_NETWORK_SERVICE_TYPE] '
                          'or network.',
                     default='network', envvar='OS_NETWORK_SERVICE_TYPE')(f)
    f = click.option('--os-endpoint-type', envvar='OS_ENDPOINT_TYPE',
                     default='public',
                     help='Defaults to env[OS_ENDPOINT_TYPE] or public.')(f)
    f = click.option('--os-tenant-name', envvar='OS_TENANT_NAME',
                     help='Authentication tenant name, defaults to'
                          'env[OS_TENANT_NAME].')(f)
    f = click.option('--os-project-name', envvar='OS_PROJECT_NAME',
                     help='Another way to specify tenant name. '
                          'This option is mutually exclusive with '
                          '--os-tenant-name. '
                          'Defaults to env[OS_PROJECT_NAME].')(f)
    f = click.option('--os-tenant-id', envvar='OS_TENANT_ID', default='',
                     help='Authentication tenant ID, defaults to '
                          'env[OS_TENANT_ID].')(f)
    # --- TLS / transport and token-based auth ---
    f = click.option('--insecure', default=False,
                     envvar='NEUTRONCLIENT_INSECURE',
                     help="Explicitly allow neutronclient to perform "
                          "\"insecure\" SSL (https) requests. The server's "
                          "certificate will not be verified against any "
                          "certificate authorities. This option should be "
                          "used with caution.")(f)
    f = click.option('--os-token', envvar='OS_TOKEN', default='',
                     help='Authentication token, defaults to '
                          'env[OS_TOKEN].')(f)
    f = click.option('--os-url', envvar='OS_URL', default='',
                     help='Defaults to env[OS_URL].')(f)
    f = click.option('--os-key', envvar='OS_KEY', default='',
                     help="Path of client key to use in SSL "
                          "connection. This option is not necessary "
                          "if your key is prepended to your certificate "
                          "file. Defaults to env[OS_KEY].")(f)
    # --- Keystone v3 domain scoping ---
    f = click.option('--os-project-domain-id',
                     envvar='OS_PROJECT_DOMAIN_ID', default='',
                     help='Defaults to env[OS_PROJECT_DOMAIN_ID].')(f)
    f = click.option('--os-project-domain-name',
                     envvar='OS_PROJECT_DOMAIN_NAME', default='',
                     help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')(f)
    f = click.option('--os-cert', envvar='OS_CERT', default='',
                     help="Path of certificate file to use in SSL "
                          "connection. This file can optionally be "
                          "prepended with the private key. Defaults "
                          "to env[OS_CERT].")(f)
    f = click.option('--os-cacert', envvar='OS_CACERT',
                     help="Specify a CA bundle file to use in "
                          "verifying a TLS (https) server certificate. "
                          "Defaults to env[OS_CACERT].")(f)
    f = click.option('--os-user-domain-name', envvar='OS_USER_DOMAIN_NAME',
                     default='',
                     help='OpenStack user domain name. '
                          'Defaults to env[OS_USER_DOMAIN_NAME].')(f)
    f = click.option('--os-user-domain-id', envvar='OS_USER_DOMAIN_ID',
                     default='',
                     help='OpenStack user domain ID. '
                          'Defaults to env[OS_USER_DOMAIN_ID].')(f)
    f = click.option('--os-user-id', envvar='OS_USER_ID', default='',
                     help='Authentication user ID (Env: OS_USER_ID)')(f)
    # --- Misc client behaviour ---
    f = click.option('--http-timeout', envvar='OS_NETWORK_TIMEOUT',
                     default=None, type=click.FLOAT,
                     help='Timeout in seconds to wait for an HTTP response. '
                          'Defaults to env[OS_NETWORK_TIMEOUT] or None if not '
                          'specified.')(f)
    f = click.option('--os-cloud', envvar='OS_CLOUD', default=None,
                     help='Defaults to env[OS_CLOUD].')(f)
    return f
import re
def match_skills(skills, ad):
    """
    Find skill mentions in a text through regex.

    :param skills: Set of skill strings to look for
    :param ad: String to search in
    :return: Set of (skill, match start, match end) tuples
    """
    found = set()
    prefix = r"(^|\s|/|-)"
    suffix = r"(or|ar|er|r|n|en|et)?(\s|/|:|\.|-|,|'|$)"
    for skill in skills:
        pattern = prefix + re.escape(skill) + suffix
        found.update(
            (skill, match.start(), match.end())
            for match in re.finditer(pattern, ad)
        )
    return found
def is_ob_site_html(html):
    """Check if some HTML looks like it is from the overcomingbias site.

    Parameters
    ----------
    html : bs4.BeautifulSoup
        An HTML page, possibly from the overcomingbias site.

    Returns
    -------
    is_ob_site_html : bool
        True if the input HTML "looks like" it is from the
        overcomingbias site, and False otherwise.
    """
    site_title = html.find(id="site-title")
    if site_title is None:
        return False
    return "www.overcomingbias.com" in site_title.a["href"]
import os
def get_pip_script_name() -> str:
    """Return expected pip script name for os pipwatch is currently running on."""
    # Windows executables carry an .exe suffix; everywhere else plain "pip".
    return "pip.exe" if os.name == "nt" else "pip"
def authorized_owners(group):
    """Return the list of authorized owners of *group*.

    Returns an empty list when group is None (AttributeError) or when
    get_related_owners returns None (TypeError).
    """
    owners = []
    try:
        for related in group.get_related_owners(include_group=True):
            owners.append(related.owner)
    except (AttributeError, TypeError):
        # group is None, or group.get_related_owners returned None
        pass
    return owners
def _2DprintInxRow(inxRow, lSpacesIndR):
"""
Function prints one index of a row of a 2D array
Input:
- 1 **inxCol** (*int*) Index of the row to be printed
- 2 **lSpacesIndR** (*list*) A list with spaces which should be added
to indices of rows
Output:
- 1 **strArray** (*string*) The string with printed requested index of
a row
"""
# Print index of the row
strRowInx = ('%d:') % inxRow
# Pick up a correct space which is added before the index
strSpaceBefore = lSpacesIndR[len(strRowInx) - 1]
# Connect the above together
strArray = strSpaceBefore + strRowInx
strArray = strArray + ' '
return strArray | f108007a9fcb25a6aa32e22297a654d6d262e247 | 39,312 |
def _DepsToLines(deps):
"""Converts |deps| dict to list of lines for output."""
if not deps:
return []
s = ['deps = {']
for _, dep in sorted(deps.iteritems()):
s.extend(dep.ToLines())
s.extend(['}', ''])
return s | c413197b7cdb5585c3df1f93168613d507f9e0ce | 39,313 |
def calculate_deepest_drawdown(df):
    """
    3. Compute the deepest drawdown of the strategy over the trading period.

    :param df: - DataFrame with a '<PERFORMANCE>' column
    :return: - the deepest (minimum) drawdown of the strategy
    """
    # Drop rows containing NaN first (whole-row, matching original behavior),
    # then take the smallest performance value.
    clean = df.dropna()
    return min(clean['<PERFORMANCE>'].values)
import math
def SSphere(r):
    """
    Surface area of a sphere of radius r: 4 * pi * r^2.
    """
    return 4. * math.pi * r ** 2
def create(hdf5, name, dtype, shape=(None,), compression=None,
           fillvalue=0, attrs=None):
    """
    Create a dataset in an HDF5 file.

    :param hdf5: a h5py.File object
    :param name: an hdf5 key string
    :param dtype: dtype of the dataset (usually composite)
    :param shape: shape of the dataset; a leading None makes it extendable
    :param compression: None or 'gzip' are recommended (currently unused)
    :param fillvalue: fill value for fixed-shape datasets
    :param attrs: dictionary of attributes of the dataset
    :returns: a HDF5 dataset
    """
    extendable = shape[0] is None
    if extendable:
        # start empty along the first axis; maxshape allows growth
        dset = hdf5.create_dataset(
            name, (0,) + shape[1:], dtype, chunks=True, maxshape=shape)
    else:
        dset = hdf5.create_dataset(name, shape, dtype, fillvalue=fillvalue)
    for key, value in (attrs or {}).items():
        dset.attrs[key] = value
    return dset
def sep_join(sep, sequence, begin="", end=""):
    """
    Join the elements of *sequence* with *sep*, with optional affixes.

    If begin/end is literally True, the separator itself is used as the
    prefix/suffix; otherwise the given text is used as head/tail.

    :param sep: separator string
    :param sequence: string sequence
    :param begin: head of txt (or True to use the separator)
    :param end: tail of txt (or True to use the separator)
    :return: joined string
    """
    head = sep if begin is True else begin
    tail = sep if end is True else end
    return "%s%s%s" % (head, sep.join(sequence), tail)
def retrieve_positions(position_file):
    """
    This function returns a list of lists of strings in the right format
    representing the positions that will be read out:
    [spatialfrequency, xi, xf, y].

    Args:
        position_file (str): The path of the position file.
    Returns:
        positions (list of list of str): Positions that will be read.
    """
    # Context manager guarantees the file handle is closed (the original
    # open(...).readlines() leaked it).
    with open(position_file, 'r') as fh:
        return [line.split() for line in fh]
def children_matching(node, fn):
    """
    Yield the child element nodes of *node* that satisfy predicate *fn*.
    """
    for child in node.childNodes:
        if child.nodeType == child.ELEMENT_NODE and fn(child):
            yield child
def pow__mod_c(a, k, c):
    """Compute a^k (mod c) by iterative square-and-multiply.

    We assume a, k >= 0 and c > 1 are integers."""
    result = 1
    base = a % c
    while k > 0:
        if k & 1:
            result = (result * base) % c
        base = (base * base) % c
        k >>= 1
    return result
from typing import List
from typing import Tuple
def get_triple(numbers: List[int], target: int) -> Tuple[int, int, int]:
    """Get a triple from the list that adds up to the target number.

    Elements may be reused (the same value can appear more than once in
    the returned triple), matching the original nested-loop semantics.

    :raises ValueError: if no such triple exists.
    """
    # O(n^2): for each (x, y) pair look the third value up in a set,
    # instead of the original O(n^3) triple loop.  This also fixes the
    # original's `x + y < target` prune, which silently missed triples
    # whose third element is zero or negative.
    values = set(numbers)
    for x in numbers:
        for y in numbers:
            z = target - x - y
            if z in values:
                return x, y, z
    raise ValueError(f"No triple could be found in the list that adds up to {target}")
import os
def get_destination_path(current_path: str) -> str:
    """Get the destination directory path of the copied test files."""
    destination = os.path.join(current_path, "destination")
    return destination
import torch
def group_hidden_by_segs(h, seg_ids, max_len):
    """
    Mean-pool hidden states over segments.

    :param h: [B, T, H] hidden states
    :param seg_ids: [B, T] integer segment ids; slot 0 is accumulated
        into a dummy bucket that is sliced off below, so real segments
        are presumably numbered 1..max_len — TODO confirm with callers.
    :param max_len: number of real segments (T_ph)
    :return: (h_ph, counts) where h_ph is [B, max_len, H] with the mean
        of h over each segment (zero for empty segments thanks to the
        clamp) and counts is [B, max_len] with elements per segment.
    """
    B, T, H = h.shape
    # scatter-add each timestep's hidden vector into its segment's bucket
    h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
    all_ones = h.new_ones(h.shape[:2])
    # count how many timesteps fall into each segment
    cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
    # drop the dummy bucket for segment id 0
    h_gby_segs = h_gby_segs[:, 1:]
    cnt_gby_segs = cnt_gby_segs[:, 1:]
    # sum -> mean; clamp(min=1) avoids division by zero for empty segments
    h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
    return h_gby_segs, cnt_gby_segs
import math
def _log2(n):
"""Returns the log base 2 of an integer."""
return math.log(n)/math.log(2) | 76921c835801e489d648482eadb5f1b52525a21b | 39,327 |
def get_bottom_right(left, right):
    """
    Get bottom right on the screen: pull the right point back toward the
    left point by one eighth of the span in each axis.
    """
    dx = (right.x - left.x) / 8
    dy = (right.y - left.y) / 8
    return (right.x - dx, right.y - dy)
def mask(seq, keep_start, keep_end):
    """Mask the sequence leaving only [keep_start, keep_end) unmasked"""
    prefix = 'N' * keep_start
    suffix = 'N' * (len(seq) - keep_end)
    return prefix + seq[keep_start:keep_end] + suffix
def _getBuildObject(buildTypesModule, buildType):
"""Get the build object for the given build type string."""
if buildType.startswith('acceptance'):
build = buildTypesModule.GetBuildType('default' + buildType[10:])
elif buildType.startswith('longacceptance'):
build = buildTypesModule.GetBuildType('default' + buildType[14:])
elif buildType == 'failing':
build = buildTypesModule.GetBuildType('default')
else:
build = buildTypesModule.GetBuildType(buildType)
return build | 5a46385fb6c1063ed89257731eb36069baaa9dd0 | 39,332 |
import requests
def read_changes(jenkins_url, build_url, build_id):
    """Read changes for the cause why the e2e job has been started.

    Returns a (cause, changes) pair built from the Jenkins build's JSON
    API payload; changes is "No changes detected" when the build's
    changeSet has no items.
    """
    api_query = "{jenkins}/{job}{build}/api/json".format(jenkins=jenkins_url,
                                                         job=build_url, build=build_id)
    payload = requests.get(api_query).json()
    cause = payload["actions"][0]["causes"][0]["shortDescription"]
    items = payload["changeSet"]["items"]
    if not items:
        return cause, "No changes detected"
    changes = "".join(item["date"] + ": " + item["authorEmail"] + ": " +
                      item["comment"]
                      for item in items)
    return cause, changes
import numpy
def training(inputs, minvar=0.1):
    """Trains a naive-bayes classifier using inputs.

    Returns per-feature means and variances; variances are floored at
    *minvar* to avoid degenerate (near-zero) spread.
    """
    means = numpy.mean(inputs, axis=0)
    variances = numpy.maximum(minvar, numpy.var(inputs, axis=0))
    return means, variances
import locale
def make_currency_format(input_list):
    """Convert every int/float value in a list to its locale currency string.

    Non-numeric items are passed through unchanged.  Note that bool
    values are not converted either, because the check uses type()
    rather than isinstance().
    """
    formatted = []
    for value in input_list:
        if type(value) in (int, float):
            formatted.append(locale.currency(value, grouping=True))
        else:
            formatted.append(value)
    return formatted
def player_turn_to_board_location(move):
    """Convert the players move (1-9) to a (row, column) board location."""
    # shift to zero-based, then split into row and column on a 3-wide board
    return divmod(move - 1, 3)
def semihardneg_triplet_loss_from_S(S, margin):
    """
    Input: Similarity matrix S
    Output: The one-way triplet loss from rows of S to columns of S. Impostors are taken
    to be the most similar point to the anchor that is still less similar to the anchor
    than the positive example.
    You would need to run this function twice, once with S and once with S.t(),
    in order to compute the triplet loss in both directions.
    """
    # S must be a square 2-D matrix; its diagonal holds the
    # (anchor, positive) similarities.
    assert(S.dim() == 2)
    assert(S.size(0) == S.size(1))
    # NOTE(review): N is unused below.
    N = S.size(0)
    positive_scores = S.diag()
    # mask[i, j] = 1 where S[i, j] is strictly below row i's positive score;
    # detach() keeps the mask selection out of the autograd graph.
    mask = ((S - S.diag().view(-1,1)) < 0).float().detach()
    # per row: the highest-scoring entry that is still below the positive
    # score — the "semi-hard" impostor.
    imposter_scores = (S * mask).max(dim=1).values
    # hinge: penalize impostors within `margin` of the positive score.
    loss = (imposter_scores - positive_scores + margin).clamp(min=0).mean()
    return loss
def left(n):
    """Is node n a left descendant of the root in a zero-indexed boolean tree?"""
    # The parent of node n is (n - 1) // 2; climb until just below the
    # root, where node 1 is the left child and node 2 the right child.
    if n > 2:
        return left((n - 1) // 2)
    return n == 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.