content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def create_parser(args):
    """Build the CLI parser for the cyclomatic-complexity gate and parse *args*.

    :param args: sequence of command-line argument strings (as from sys.argv[1:])
    :return: the parsed ``argparse.Namespace`` (``cyclo`` attribute, int or None)
    """
    parser = argparse.ArgumentParser(description='cyclomatic complexity gate Parser')
    parser.add_argument('--cyclo', metavar='--c', type=int, help='cyclo benchmark')
    return parser.parse_args(args)
def get_month_day_range(date):
    """Return the first and last date of the month containing *date*.

    Month with 31 days:
    >>> get_month_day_range(datetime.date(2011, 7, 27))
    (datetime.date(2011, 7, 1), datetime.date(2011, 7, 31))
    Month with 28 days:
    >>> get_month_day_range(datetime.date(2011, 2, 15))
    (datetime.date(2011, 2, 1), datetime.date(2011, 2, 28))
    """
    # monthrange gives (weekday of day 1, number of days in month)
    _, days_in_month = calendar.monthrange(date.year, date.month)
    return date.replace(day=1), date.replace(day=days_in_month)
def measure_of_risk_callback(app):
    """
    Attaches the callback function for the component
    rendered in the measure_of_risk function
    Args:
        app = the dash app
    Returns:
        None
    """
    component_id = 'measure-risk'
    # NOTE(review): the output id concatenates to 'measure-riskout' (no
    # separator) — confirm the layout uses exactly that id rather than
    # 'measure-risk-out'.
    @app.callback(
        Output(component_id + 'out', 'children'),
        [Input(component_id, 'value')])
    def _callback(value):
        # Persist the selected value into the module-level `options` mapping;
        # the callback renders nothing (empty children).
        options['Measure of risk'] = value
        return ''
def dot(
    a: Union[float, ArrayLike],
    b: Union[float, ArrayLike],
    *,
    dims: Optional[Tuple[int, int]] = None
) -> Union[float, Array]:
    """
    Get dot product of simple numbers, vectors, and matrices.
    Matrices will be detected and the appropriate logic applied
    unless `dims` is provided. `dims` should simply describe the
    number of dimensions of `a` and `b`: (2, 1) for a 2D and 1D array.
    Providing `dims` will sidestep analyzing the matrix for a more
    performant operation. Anything dimensions above 2 will be treated
    as an ND x MD scenario and the actual dimensions will be extracted
    regardless due to necessity.
    """
    if dims is None or dims[0] > 2 or dims[1] > 2:
        # Dimensions unknown (or known to exceed 2): inspect both operands.
        shape_a = shape(a)
        shape_b = shape(b)
        dims_a = len(shape_a)
        dims_b = len(shape_b)
        # Handle matrices of N-D and M-D size
        # NOTE(review): due to precedence this condition parses as
        # `(dims_a and dims_b and dims_a > 2) or (dims_b > 2)`; confirm the
        # intent was `dims_a > 2 or dims_b > 2` guarded by both being nonzero.
        if dims_a and dims_b and dims_a > 2 or dims_b > 2:
            if dims_a == 1:
                # Dot product of vector and a M-D matrix
                cols1 = list(_extract_dims(cast(MatrixLike, b), dims_b - 2))
                # Result shape: `b` with its second-to-last axis contracted away.
                shape_c = shape_b[:-2] + shape_b[-1:]
                return cast(
                    Matrix,
                    reshape(
                        [[_vector_dot(cast(VectorLike, a), cast(VectorLike, c)) for c in col] for col in cols1],
                        shape_c
                    )
                )
            else:
                # Dot product of N-D and M-D matrices
                # Resultant size: `dot(xy, yz) = xz` or `dot(nxy, myz) = nxmz`
                cols2 = list(_extract_dims(cast(ArrayLike, b), dims_b - 2)) if dims_b > 1 else cast(ArrayLike, [[b]])
                rows = list(_extract_dims(cast(ArrayLike, a), dims_a - 1))
                # Contract each row of `a` against each column slice of `b`.
                m2 = [
                    [[sum(cast(List[float], multiply(row, c))) for c in cast(VectorLike, col)] for col in cols2]
                    for row in rows
                ]
                # Result shape: all but the last axis of `a`, then `b` minus
                # its second-to-last axis (unless `b` was a vector).
                shape_c = shape_a[:-1]
                if dims_b != 1:
                    shape_c += shape_b[:-2] + shape_b[-1:]
                return cast(Matrix, reshape(cast(Array, m2), shape_c))
    else:
        # Caller supplied trusted dimensions, both <= 2.
        dims_a, dims_b = dims
    # Optimize to handle arrays <= 2-D
    if dims_a == 1:
        if dims_b == 1:
            # Dot product of two vectors
            return _vector_dot(cast(VectorLike, a), cast(VectorLike, b))
        elif dims_b == 2:
            # Dot product of vector and a matrix
            return cast(Vector, [_vector_dot(cast(VectorLike, a), col) for col in zipl(*cast(MatrixLike, b))])
    elif dims_a == 2:
        if dims_b == 1:
            # Dot product of matrix and a vector
            return cast(Vector, [_vector_dot(row, cast(VectorLike, b)) for row in cast(MatrixLike, a)])
        elif dims_b == 2:
            # Dot product of two matrices
            return cast(
                Matrix,
                [[_vector_dot(row, col) for col in zipl(*cast(MatrixLike, b))] for row in cast(MatrixLike, a)]
            )
    # Trying to dot a number with a vector or a matrix, so just multiply
    return multiply(a, b, dims=(dims_a, dims_b))
def read_float64(field: str) -> np.float64:
    """Parse *field* as a float64; an empty string maps to NaN."""
    if field == "":
        return np.nan
    return np.float64(field)
def write_hypergraph(hgr, colored = False):
    """
    Return a string specifying the given hypergraph in DOT Language.
    @type hgr: hypergraph
    @param hgr: Hypergraph.
    @type colored: boolean
    @param colored: Whether hyperedges should be colored.
    @rtype: string
    @return: String specifying the hypergraph in DOT Language.
    """
    dotG = pydot.Dot()
    if not 'name' in dir(hgr):
        dotG.set_name('hypergraph')
    else:
        dotG.set_name(hgr.name)
    # Map hyperedge -> color, cycling through the module-level `colors`
    # palette when coloring is requested.
    colortable = {}
    colorcount = 0
    # Add all of the nodes first
    for node in hgr.nodes():
        newNode = pydot.Node(str(node), hyper_node_type = 'hypernode')
        dotG.add_node(newNode)
    # Each hyperedge becomes an extra DOT node (a colored point when
    # `colored`), connected to every node it links.
    for hyperedge in hgr.hyperedges():
        if (colored):
            colortable[hyperedge] = colors[colorcount % len(colors)]
            colorcount += 1
            newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge', \
                                                 color = str(colortable[hyperedge]), \
                                                 shape = 'point')
        else:
            newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge')
        dotG.add_node(newNode)
        for link in hgr.links(hyperedge):
            newEdge = pydot.Edge(str(hyperedge), str(link))
            dotG.add_edge(newEdge)
    return dotG.to_string()
def sort_gtf(gtf_path, out_path):
    """Sorts a GTF file based on its chromosome, start position, line number.
    :param gtf_path: path to GTF file
    :type gtf_path: str
    :param out_path: path the sorted GTF file is written to
    :type out_path: str
    :return: path to sorted GTF file, set of chromosomes in GTF file
    :rtype: tuple
    """
    logger.info('Sorting {}'.format(gtf_path))
    # Sorting itself is delegated to the project's GTF class.
    gtf = GTF(gtf_path)
    return gtf.sort(out_path)
def leia_dinheiro(msg):
    """
    Prompt the user repeatedly until a valid numeric (price) value is typed.
    :param msg: prompt message shown to the user
    :return: the typed value converted to float
    """
    while True:
        num = input(msg).strip().replace(',', '.')  # replace commas with dots
        if num.replace('.', '').isdigit():  # drop the dots before the digit check
            # NOTE(review): a string such as "1.2.3" passes this check but
            # makes float() raise ValueError — consider try/except float().
            num = float(num)
            break
        else:
            print(f'\033[1;31mERRO! \"{num}\" não é um preço válido.\033[m')
    return num
def format_analyse(parsed_tokens, to_1d_flag=False):
    """
    Input
        parsed_tokens  # list(list(str)) : tokenized [variable name, index...] list, one per variable occurrence
        to_1d_flag     # bool : when True, 2-D variables are flattened to 1-D
    Output
        root, dic  # FormatNode, OrderedDict<str:VariableInformation> : the format-info node tree and a dict holding each variable's information
    """
    appearances = {}
    dic = OrderedDict()
    pos = 0
    # Record each variable's appearance positions and the min/max value seen
    # for every one of its index dimensions.
    for token in parsed_tokens:
        idxs = token[1:]
        varname = token[0]
        if varname not in dic:
            dic[varname] = VariableInformation(varname, len(idxs))
            appearances[varname] = []
        appearances[varname].append(pos)
        for i, idx in enumerate(idxs):
            # (sic) `reflesh_*` is the project API's spelling for min/max updates.
            dic[varname].indexes[i].reflesh_min(idx)
            dic[varname].indexes[i].reflesh_max(idx)
        pos += 1
    # Build the format-node tree.
    processed = set()
    root = FormatNode(pointers=[])
    for i in range(len(parsed_tokens)):
        varname = parsed_tokens[i][0]
        if varname in processed:
            continue
        dim = len(dic[varname].indexes)
        # Optionally flatten 2-D variables down to a single index dimension.
        if dim == 2 and to_1d_flag:
            dic[varname].indexes = dic[varname].indexes[:-1]
            dim = 1
        if dim == 0:
            # Scalar: attach directly under the root.
            root.pointers.append(FormatNode(varname))
            processed.add(varname)
        elif dim == 1:
            if len(appearances[varname]) >= 2:
                # assume it's a arithmetic sequence
                span = appearances[varname][1] - appearances[varname][0]
            elif len(appearances[varname]) == 1:
                # or mono
                span = 1
            # Variables interleaved within one period share the same loop node.
            zipped_varnames = [token[0] for token in parsed_tokens[i:i + span]]
            for vname in zipped_varnames:
                processed.add(vname)
            root.pointers.append(
                FormatNode(pointers=[FormatNode(varname=vname) for vname in zipped_varnames],
                           index=dic[varname].indexes[0]
                           )
            )
        elif dim == 2:
            processed.add(varname)
            # Nested loops: the outer index wraps an inner node for the
            # second index dimension.
            inner_node = FormatNode(pointers=[FormatNode(
                varname=varname)], index=dic[varname].indexes[1])
            root.pointers.append(FormatNode(
                pointers=[inner_node], index=dic[varname].indexes[0]))
        else:
            raise NotImplementedError
    return root, dic
def KAMA(df: pd.DataFrame, window: int = 10, pow1: int = 2, pow2: int = 30) -> pd.DataFrame:
    """
    Kaufman's Adaptive Moving Average (KAMA) is an indicator that
    indicates both the volatility and trend of the market.

    :param df: input dataframe; only its 'close' column is consumed.
    :param window: lookback window forwarded to the `kama` helper.
    :param pow1: smoothing constant (presumably the fast period — see `kama`).
    :param pow2: smoothing constant (presumably the slow period — see `kama`).
    :return: a copy of `df` with the KAMA series added as a 'signal' column.
    """
    df_with_signal = df.copy()
    df_with_signal["signal"] = kama(df["close"], window, pow1, pow2)
    return df_with_signal
def cast_to_str(obj):
    """Return a string representation of a Seq or SeqRecord.

    Args:
        obj (str, Seq, SeqRecord): Biopython Seq or SeqRecord
    Returns:
        str: String representation of the sequence
    Raises:
        ValueError: if *obj* is none of the supported types
    """
    if isinstance(obj, str):
        return obj
    if isinstance(obj, Seq):
        return str(obj)
    if isinstance(obj, SeqRecord):
        # A SeqRecord wraps its sequence in the .seq attribute.
        return str(obj.seq)
    raise ValueError('Must provide a string, Seq, or SeqRecord object.')
def _octet_bits(o):
    """
    Get the bits of an octet.
    :param o: The octet (an int in [0, 255]).
    :return: The bits as a list in LSB-to-MSB order.
    :rtype: list
    """
    if not isinstance(o, int):
        raise TypeError("o should be an int")
    if not (0 <= o <= 255):
        raise ValueError("o should be between 0 and 255 inclusive")
    # Shift each bit position down to bit 0 and mask it out.
    return [(o >> position) & 1 for position in range(8)]
def _collect_calculation_data(calc):
    """
    Recursively collects calculations from the tree, starting at given
    calculation.

    :param calc: an AiiDA calculation node (Job, Inline, or Work calculation).
    :return: list of dicts, one per calculation, each with 'uuid' and 'files'
        keys (plus 'env'/'stdout'/'stderr' for job calculations); dependencies
        appear before the calculations that consume them.
    """
    from aiida.common.links import LinkType
    from aiida.orm.data import Data
    from aiida.orm.calculation import Calculation
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.orm.calculation.inline import InlineCalculation
    import hashlib
    import os
    calcs_now = []
    # Depth-first: first collect every calculation that created one of this
    # calculation's input data nodes.
    for d in calc.get_inputs(node_type=Data, link_type=LinkType.INPUT):
        for c in d.get_inputs(node_type=Calculation, link_type=LinkType.CREATE):
            calcs = _collect_calculation_data(c)
            calcs_now.extend(calcs)
    files_in = []
    files_out = []
    this_calc = {
        'uuid' : calc.uuid,
        'files': [],
    }
    if isinstance(calc, JobCalculation):
        retrieved_abspath = calc.get_retrieved_node().get_abs_path()
        files_in = _collect_files(calc._raw_input_folder.abspath)
        files_out = _collect_files(os.path.join(retrieved_abspath, 'path'))
        this_calc['env'] = calc.get_environment_variables()
        stdout_name = '{}.out'.format(aiida_executable_name)
        # NOTE(review): this tests membership against the two *list objects*,
        # not the filenames inside them, so the collision-avoidance loop can
        # never trigger; the intent was probably to compare against
        # `[f['name'] for f in files_in + files_out]`. Same for stderr below.
        while stdout_name in [files_in,files_out]:
            stdout_name = '_{}'.format(stdout_name)
        stderr_name = '{}.err'.format(aiida_executable_name)
        while stderr_name in [files_in,files_out]:
            stderr_name = '_{}'.format(stderr_name)
        if calc.get_scheduler_output() is not None:
            # NOTE(review): hashlib requires bytes on Python 3; these calls
            # assume the scheduler output is bytes (Python 2 heritage) — confirm.
            files_out.append({
                'name'    : stdout_name,
                'contents': calc.get_scheduler_output(),
                'md5'     : hashlib.md5(calc.get_scheduler_output()).hexdigest(),
                'sha1'    : hashlib.sha1(calc.get_scheduler_output()).hexdigest(),
                'role'    : 'stdout',
                'type'    : 'file',
            })
            this_calc['stdout'] = stdout_name
        if calc.get_scheduler_error() is not None:
            files_out.append({
                'name'    : stderr_name,
                'contents': calc.get_scheduler_error(),
                'md5'     : hashlib.md5(calc.get_scheduler_error()).hexdigest(),
                'sha1'    : hashlib.sha1(calc.get_scheduler_error()).hexdigest(),
                'role'    : 'stderr',
                'type'    : 'file',
            })
            this_calc['stderr'] = stderr_name
    elif isinstance(calc, InlineCalculation):
        # Calculation is InlineCalculation: synthesize a standalone python
        # script plus a shell wrapper so the step can be re-run outside AiiDA.
        python_script = _inline_to_standalone_script(calc)
        files_in.append({
            'name'    : inline_executable_name,
            'contents': python_script,
            'md5'     : hashlib.md5(python_script).hexdigest(),
            'sha1'    : hashlib.sha1(python_script).hexdigest(),
            'type'    : 'file',
        })
        shell_script = '#!/bin/bash\n\nverdi run {}\n'.format(inline_executable_name)
        files_in.append({
            'name'    : aiida_executable_name,
            'contents': shell_script,
            'md5'     : hashlib.md5(shell_script).hexdigest(),
            'sha1'    : hashlib.sha1(shell_script).hexdigest(),
            'type'    : 'file',
        })
    elif isinstance(calc, WorkCalculation):
        # We do not know how to recreate a WorkCalculation so we pass
        pass
    else:
        raise ValueError('calculation is of an unexpected type {}'.format(type(calc)))
    # Tag inputs: the executable is the 'script', everything else is 'input'.
    for f in files_in:
        if os.path.basename(f['name']) == aiida_executable_name:
            f['role'] = 'script'
        else:
            f['role'] = 'input'
        this_calc['files'].append(f)
    # Tag outputs, skipping the scheduler's own stdout/stderr files.
    for f in files_out:
        if os.path.basename(f['name']) != calc._SCHED_OUTPUT_FILE and \
           os.path.basename(f['name']) != calc._SCHED_ERROR_FILE:
            if 'role' not in f.keys():
                f['role'] = 'output'
            this_calc['files'].append(f)
    calcs_now.append(this_calc)
    return calcs_now
def clf():
    """
    Clear the current figure.
    """
    plt.clf()
    # Redraw the now-empty canvas without blocking the caller.
    plt.show(block=False)
def pickle_squeeze(data, pkl_file, fmt='lzma'):
    """ looks like compress_pickle may be the way to go

    Pickle `data` to `pkl_file` uncompressed, and additionally write gzip
    and/or bz2 compressed copies next to it depending on `fmt`.

    :param data: any picklable object.
    :param pkl_file: destination path; a str is converted via the project's
        `path` helper (presumably to a path-like with .open/.as_posix).
    :param fmt: format name or list of names ('gz'/'gzip', 'bz'/'bz2');
        the 'lzma' branch is currently disabled (commented out below).
    """
    if isinstance(pkl_file, str): pkl_file = path(pkl_file)
    # Normalize fmt into a lower-cased list of format names.
    if type(fmt) not in (list, tuple): fmt = [fmt]
    if len(fmt): fmt = [_s.lower() for _s in fmt]
    with pkl_file.open('wb') as hfile:
        logger.debug(f'Storing pickle: {pkl_file}')
        pickle.dump(data, hfile, -1)  # -1 selects the highest pickle protocol
    pkl_fname = pkl_file.as_posix()
    if 'gz' in fmt or 'gzip' in fmt:
        zip_file = pkl_fname + '.gz'
        logger.debug(f'Zipping and storing pickle: {zip_file}')
        with gzip.open(zip_file, 'wb') as hfile:
            pickle.dump(data, hfile, -1)
    if 'bz' in fmt or 'bz2' in fmt:
        zip_file = pkl_fname + '.pbz2'
        logger.debug(f'Zipping and storing pickle: {zip_file}')
        with bz2.BZ2File(zip_file, 'wb') as hfile:
            pickle.dump(data, hfile, -1)
    # if 'lzma' in fmt:
    #     zip_file = pkl_fname + '.xz'
    #     logger.debug(f'Zipping and storing pickle: {zip_file}')
    #     with lzma.open(zip_file, 'wb') as hfile:
    #         pickle.dump(data, hfile, -1)
    # with open('no_compression.pickle', 'rb') as f:
    #     pdata = f.read()
    # with open('brotli_test.bt', 'wb') as b:
    #     b.write(brotli.compress(pdata))
def to_graph(l):
    """
    Credit: Jochen Ritzel
    https://stackoverflow.com/questions/4842613/merge-lists-that-share-common-elements

    :param l: iterable of sublists; each sublist's elements become nodes and
        are connected via edges produced by `to_edges` (defined elsewhere).
    :return: a networkx.Graph whose connected components merge all sublists
        sharing at least one element.
    """
    G = networkx.Graph()
    for part in l:
        # each sublist is a bunch of nodes
        G.add_nodes_from(part)
        # it also implies a number of edges:
        G.add_edges_from(to_edges(part))
    return G
def convert_mcmc_labels(param_keys, unit_labels=False):
    """Returns sequence of formatted MCMC parameter labels.

    Keys containing 'qb'/'mdot'/'Mdot' get dedicated LaTeX labels built from
    the key's trailing character; anything else is delegated to
    `full_label` (when unit_labels=True) or `quantity_label`.
    """
    labels = []
    for key in param_keys:
        suffix = key[-1]
        if 'qb' in key:
            labels.append(r'$Q_\mathrm{b,' + suffix + '}$')
        elif 'mdot' in key:
            labels.append(rf'$\dot{{m}}_{suffix}$')
        elif 'Mdot' in key:
            labels.append(rf'$\dot{{M}}_{suffix}$')
        elif unit_labels:
            labels.append(full_label(key))
        else:
            labels.append(quantity_label(key))
    return labels
def _set_grid():
    """It sets the grid from the data to be plotted.

    Reads the module-level plot state `h` (rows, cols, markers, colors) and
    fills `h.grid` with styled blank cells before drawing line and points.
    """
    space=set_color(" ", h.point_color)
    h.grid=[[space for c in range(h.cols)] for r in range(h.rows)]
    if h.line:
        # Draw the connecting line first so the points drawn below overwrite it.
        _add_to_grid(*_get_line(), h.line_marker, h.line_color)
    if h.point:
        _add_to_grid(h.x, h.y, h.point_marker, h.point_color)
def superuser_exempt(filter_authorization_verification):
    """Decorator to exempt any superuser from filtering, authorization, or verification functions."""
    def wrapper(request, arg):
        if not request.user.is_superuser:
            # Regular users go through the wrapped check/filter unchanged.
            return filter_authorization_verification(request, arg)
        # Superusers pass through: querysets unfiltered, checks approved.
        return arg if isinstance(arg, QuerySet) else True
    return wrapper
def relName(path, cwd=None, root=None):
    """Return pathname relative to `cwd`.

    If possible, returns a relative pathname for path. The rules are:

    1. If the file is in or below `cwd` then a simple relative name is
       returned. For example: 'dir/fred.c'.
    2. If both the file and `cwd` are in or below `root` then a relative
       path is also generated, but it will contain double dots. For
       example: '../../dir/fred.c'.
    3. If neither (1) nor (2) applies then the absolute path is returned.

    :Param cwd:
        Used as the current directory. It defaults to ``os.getcwd()``.
    :Param root:
        Defines the root directory, which determines whether a relative
        pathname can be returned. It defaults to ``projectRoot``.
    """
    relRoot = os.path.normpath((root or projectRoot)) + os.sep
    cwd = os.path.abspath((cwd or os.getcwd())) + os.sep
    if path == cwd or path == cwd[:-1]:
        return "."
    if path.startswith(cwd):
        # The relative name is below the CWD, so we simply strip off the
        # leading parts.
        return path[len(cwd):]
    if path.startswith(relRoot) and cwd.startswith(relRoot):
        # The path is below the nominal root but parallel to the CWD. We need
        # to add one '../' per directory level between `cwd` and the root.
        relToRootPath = path[len(relRoot):]
        relToRootCWD = cwd[len(relRoot):-1]
        count = 0
        # The counter guards against a pathological split() that never
        # terminates. BUG FIX: the original never incremented `count`, which
        # made both the guard and the assert below dead code.
        while count < 1000 and relToRootCWD and relToRootCWD != os.sep:
            relToRootCWD, b = os.path.split(relToRootCWD)
            relToRootPath = ".." + os.sep + relToRootPath
            count += 1
        assert count < 1000
        return relToRootPath
    return path
def paf_to_lastz(job, paf_file, sort_secondaries=True):
    """
    Makes lastz output using paftools.js. Also splits the input paf_file into two files
    in the output, one for the primary and the other for secondary.
    sort_secondaries bool, if true, will cause fxn to return two files instead of one.

    :param job: the Toil job, used for file-store access.
    :param paf_file: file-store id of the input PAF file.
    :return: one file-store id (sort_secondaries=False) or the
        [primary, secondary] list of file-store ids.
    """
    primary = list()
    primary_mapqs = list()
    secondary = list()
    secondary_mapqs = list()
    if not sort_secondaries:
        print("putting all mappings into primary")
        with open(job.fileStore.readGlobalFile(paf_file)) as inf:
            for line in inf:
                primary.append(line)
                primary_mapqs.append(line.split()[11])  # PAF column 12 is the MAPQ
    else:
        # print("in paf_to_Lastz - looking for the cg tag.")
        with open(job.fileStore.readGlobalFile(paf_file)) as inf:
            for line in inf:
                # tp:A:P (primary) and tp:A:I (inversion) lines go to primary.
                if "tp:A:P" in line or "tp:A:I" in line:
                    #then the line is a primary output file.
                    primary.append(line)
                    primary_mapqs.append(line.split()[11])
                else:
                    #then the line is a secondary output file.
                    secondary.append(line)
                    secondary_mapqs.append(line.split()[11])
    # write output to files; convert to lastz:
    lines = [primary, secondary]
    mapqs = [primary_mapqs, secondary_mapqs]
    sort_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
    paftool_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
    fixed_paftool_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
    # NOTE(review): the global files are registered empty here and later
    # written through the local path returned by readGlobalFile — confirm
    # that actually updates the global copy under Toil's caching semantics.
    out_files = [job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile()) for i in range(len(lines))]
    print("lines in primary:", len(lines[0]))
    print("len(lines in secondary:", len(lines[1]))
    stderr_debug = job.fileStore.getLocalTempFile()
    for i in range(len(lines)):
        with open(sort_files[i], "w") as sortf:
            # convert list to file for paftools input
            sortf.writelines(lines[i])
        cactus_call(parameters=["paftools.js", "view", "-f", "lastz-cigar", sort_files[i]], outfile=paftool_files[i])
        fix_negative_strand_mappings(paftool_files[i], fixed_paftool_files[i])
        add_original_mapqs( mapqs[i], fixed_paftool_files[i], job.fileStore.readGlobalFile(out_files[i]))
    # check that the lines going into paftools.js are in same order as lines going out.
    with open(job.fileStore.readGlobalFile(out_files[0])) as inf:
        i = 0
        for line in inf:
            #comparing primary from paf to final lastz output.
            paf_parsed = lines[0][i].split()
            lastz_parsed = line.split()
            # Sanity check: the query start/end column must match between the
            # PAF input and the converted lastz cigar, per strand.
            if (lastz_parsed[3] == "+" and paf_parsed[2] != lastz_parsed[1]) or (lastz_parsed[3] == "-" and paf_parsed[2] != lastz_parsed[2]):
                raise ValueError("Lines differ between paf and paftools.js lastz output! Paftools.js may be acting in an unexpected manner. paf line: " + lines[0][i] + " lastz line " + line)
            i += 1
    if not sort_secondaries:
        return out_files[0]
    else:
        return out_files
def test_get_bond_by_host_account_id(populated_table):
    """
    Get all bonds for a given host account id.

    Relies on the `populated_table` fixture seeding host H0002 with exactly
    the two bonds asserted below.
    """
    bonds = crud.get_bonds_by_host_account_id(host_account_id="H0002")
    assert len(bonds) == 2
    assert 'H0002-S0002' in [bond.bond_id for bond in bonds]
    assert 'H0002-S0003' in [bond.bond_id for bond in bonds]
def u1(lambd, q):
    """Openqasm u1 gate.

    :param lambd: rotation angle.
    :param q: target qubit index.
    :return: a Circuit containing a single RZ(lambd) on qubit `q`
        (equivalent to u1 up to a global phase).
    """
    from mindquantum import Circuit
    return Circuit().rz(lambd, q)
def get_simple_object(key='slug', model=None, self=None):
    """
    get_simple_object() => Retrieve object instance.
    params => key, model, self
    return => object (instance)

    The lookup value is read from ``self.kwargs`` using ``key`` ('id' or,
    for any other value, 'slug'). Raises Http404 when no object matches or
    when anything else goes wrong; when several objects match, the first
    one is returned.
    """
    # Collapse the duplicated id/slug branches into a single keyword lookup.
    lookup_field = 'id' if key == 'id' else 'slug'
    try:
        instance = model.objects.get(**{lookup_field: self.kwargs[lookup_field]})
    except model.DoesNotExist:
        raise Http404('Not found!!!')
    except model.MultipleObjectsReturned:
        # Ambiguous lookup: fall back to the first matching object.
        instance = model.objects.filter(**{lookup_field: self.kwargs[lookup_field]}).first()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any other failure still maps to a 404.
        raise Http404("Something went wrong !!!")
    return instance
def test_windows_powershell(windows):
    """Test powershell execution

    `windows` is presumably a fixture providing a connected platform session.
    """
    # Run a real powershell snippet
    r = windows.platform.powershell("$PSVersionTable.PSVersion")
    assert len(r) == 1
    assert isinstance(r[0], dict)
    # Ensure we get an exception
    with pytest.raises(PowershellError):
        windows.platform.powershell("CommandletDoes-NotExist")
    # Run from a file descriptor
    filp = io.BytesIO(b"""$PSVersionTable.PSVersion""")
    r = windows.platform.powershell(filp)
    assert len(r) == 1
    assert isinstance(r[0], dict)
def test_parse_sstat_nodes_1():
    """Parse nodes from sstat called with args -i -a -p -n
    PrologFlags=Alloc
    """
    # One pipe-separated sstat record: jobid|node|pid|
    output = "22942.0|prod76-0006|354345|"
    nodes = ["prod76-0006"]
    parsed_nodes = slurmParser.parse_sstat_nodes(output, "22942.0")
    assert nodes == parsed_nodes
def test_iterate_k562_data_loader():
    """ Iterate throught the dataloader backed by
    the dataset, make sure the number of points seen
    is in the neighbourhood of what's correct.
    """
    valid_dataset, valid_loader = setup_k562_dataset_and_loader(transform=True)
    torch.manual_seed(0)  # make any shuffling/transforms deterministic
    num_epochs = 1
    data_seen = 0
    # All batches except possibly the last are full, hence the lower bound.
    lower_bound = k562_batch_size * (k562_num_peak_batches - 1)
    for epoch in range(num_epochs):
        for batch_idx, (x, y) in enumerate(valid_loader):
            x, y = Variable(x), Variable(y)
            # NOTE(review): `num_batches` is not defined in this function —
            # presumably a module-level constant; confirm it is consistent
            # with k562_num_peak_batches.
            ok_(batch_idx <= num_batches)
            data_seen += x.size()[0]
            if batch_idx % 10 == 0:
                print('Epoch:', epoch+1, end='')
                print(' | Batch index:', batch_idx, end='')
                print(' | Batch size:', y.size()[0])
    ok_(lower_bound <= data_seen <= k562_valid_peak_examples)
    valid_dataset.close()
def create_app():
    """Create and configure an instance of the Flask application.

    Returns the configured app (application-factory pattern).
    """
    app = Flask(__name__)
    # vvvvv use sqlite database vvvvv
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    # vvvvv have DB initialize the app
    DB.init_app(app)
    @app.route('/')
    def root():
        return 'Welcome to TwitOff!'
    return app
def MPITest(commsize):
    """
    A decorator that repeatedly calls the wrapped function,
    with communicators of varying sizes.
    This converts the test to a generator test; therefore the
    underlying test shall not be a generator test.
    Parameters
    ----------
    commsize: scalar or tuple
        Sizes of communicator to use
    Usage
    -----
    @MPITest(commsize=[1, 2, 3])
    def test_stuff(comm):
        pass
    """
    from mpi4py import MPI
    if not isinstance(commsize, (tuple, list)):
        commsize = (commsize,)
    sizes = sorted(list(commsize))
    def dec(func):
        @pytest.mark.parametrize("size", sizes)
        def wrapped(size, *args):
            # Guard against ranks running different tests, which would make
            # the collective calls below deadlock.
            func_names = MPI.COMM_WORLD.allgather(func.__name__)
            if not all(func_names[0] == i for i in func_names):
                raise RuntimeError("function calls mismatched", func_names)
            try:
                comm, color = create_comm(size)
            except WorldTooSmall:
                return pytest.skip("Test skipped because world is too small. Include the test with mpirun -n %d" % (size))
            try:
                if color == 0:
                    # Ranks inside the sub-communicator run the test body.
                    rt = func(*args, comm=comm)
                if color == 1:
                    # Surplus ranks idle, but still hit the barrier below.
                    rt = None
                    #pytest.skip("rank %d not needed for comm of size %d" %(MPI.COMM_WORLD.rank, size))
            finally:
                MPI.COMM_WORLD.barrier()
            return rt
        wrapped.__name__ = func.__name__
        return wrapped
    return dec
def getClassicalPitchNames(pitches):
    """
    takes a list of pitches and returns a list of classical pitch class names

    Each pitch is first passed through normalizePitch(x, 12) — presumably a
    pitch-class reduction modulo 12 — before name lookup.
    """
    return getPitchNames([normalizePitch(x,12) for x in pitches],getClassicalPitchNameCandidates)
def toPyUIList(res):
    # type: (str) -> List[pymel.core.uitypes.PyUI]
    """
    Wrap each element of *res* as a PyUI object.

    Parameters
    ----------
    res : str

    Returns
    -------
    List[pymel.core.uitypes.PyUI]
        Empty list when *res* is None.
    """
    if res is None:
        return []
    # Import deferred so a None result never touches pymel.
    import pymel.core.uitypes
    make_ui = pymel.core.uitypes.PyUI
    return [make_ui(item) for item in res]
def test_initial_routing():
    """
    Test the action of fr.route_flow() on the grid.
    """
    # `fr`, `mg`, `r_old`, and `A_old` are module-level fixtures defined
    # elsewhere in this test module.
    fr.route_flow()
    assert_array_equal(mg.at_node['flow__receiver_node'], r_old)
    assert_array_almost_equal(mg.at_node['drainage_area'], A_old)
def best_B(Ag):
    """ Given an antigenic determinant Ag this function returns the binding
    value of the best possible binder. """
    # For each residue, the best contribution is the minimal entry of its
    # (1-indexed) TD20 row; the total is their sum.
    return sum(np.min(cf.TD20[int(residue) - 1]) for residue in Ag)
def forward_gradients_v2(ys, xs, grad_xs=None, gate_gradients=False):
    """Forward-mode pushforward analogous to the pullback defined by tf.gradients.
    With tf.gradients, grad_ys is the vector being pulled back, and here d_xs is
    the vector being pushed forward."""
    # Double-backprop trick: differentiate w.r.t. a dummy cotangent `v`, then
    # differentiate the resulting pullback w.r.t. `v` seeded with grad_xs to
    # obtain the Jacobian-vector product.
    if type(ys) == list:
        v = [tf.ones_like(yy) for yy in ys]
    else:
        v = tf.ones_like(ys)  # dummy variable
    g = tf.gradients(ys, xs, grad_ys=v)
    # NOTE(review): `gate_gradients` is accepted but never used — confirm
    # whether it was meant to be forwarded to tf.gradients.
    return tf.gradients(g, v, grad_ys=grad_xs)
def __limit_less(lim1, lim2):
    """Helper function for comparing two rlimit values, handling "unlimited" correctly.

    Params:
        lim1 (integer): first rlimit
        lim2 (integer): second rlimit
    Returns:
        true if lim1 <= lim2
    """
    unlimited = resource.RLIM_INFINITY
    if lim2 == unlimited:
        # Everything fits below "unlimited".
        return True
    # lim2 is finite: lim1 must be finite too, and numerically no larger.
    return lim1 != unlimited and lim1 <= lim2
def error(bot, update, error):
    """Log Errors caused by Updates.

    :param bot: the bot instance (unused here).
    :param update: the update that triggered the error.
    :param error: the raised error (note: shadows this function's own name).
    """
    logger.error('Update {} caused error {}'.format(update, error), extra={"tag": "err"})
def getswarmlocations() -> List[str]:
    """
    Load the list of locations where a swarm can happen.

    :return: sorted, de-duplicated, lower-cased location names read from
        commands/data/swarmlocations.csv
    """
    # `with` ensures the handle is closed (the original leaked it).
    with open("commands/data/swarmlocations.csv") as handle:
        locations = handle.read().split(",")
    # Lower-case, de-duplicate, and sort in one pass.
    return sorted({location.lower() for location in locations})
def Request(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
) -> Any:
    """
    Used to provide extra information about a field.

    :param default: since this is replacing the field’s default, its first argument is used
        to set the default, use ellipsis (``...``) to indicate the field is required
    :param default_factory: callable that will be called when a default value is needed for this field
        If both `default` and `default_factory` are set, an error is raised.
    :param alias: the public name of the field
    """
    has_default = default is not Undefined
    if has_default and default_factory is not None:
        raise ValueError("cannot specify both default and default_factory")
    return RequestInfo(default, default_factory=default_factory, alias=alias)
def get_jobs(when=None, only_scheduled=False):
    """
    Returns a dictionary mapping of job names together with their respective
    application class.

    :param when: if given, only scan that schedule (e.g. 'daily') plus the
        unscheduled (None) directory.
    :param only_scheduled: if True, exclude jobs without a schedule.
    :return: dict mapping (app_name, job_name) -> job class.
    :raises JobError: if two apps/dirs define the same job name twice.
    """
    # FIXME: HACK: make sure the project dir is on the path when executed as ./manage.py
    import sys
    try:
        cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        ppath = os.path.dirname(cpath)
        if ppath not in sys.path:
            sys.path.append(ppath)
    except Exception:
        # Narrowed from a bare `except:`; best-effort path fix-up only.
        pass
    _jobs = {}
    from django.conf import settings
    # (Removed a dead `if True:` wrapper that only added indentation.)
    for app_name in settings.INSTALLED_APPS:
        scandirs = (None, 'minutely', 'quarter_hourly', 'hourly', 'daily', 'weekly', 'monthly', 'yearly')
        if when:
            scandirs = None, when
        for subdir in scandirs:
            try:
                path = find_job_module(app_name, subdir)
                for name in find_jobs(path):
                    if (app_name, name) in _jobs:
                        raise JobError("Duplicate job %s" % name)
                    job = import_job(app_name, name, subdir)
                    if only_scheduled and job.when is None:
                        # only include jobs which are scheduled
                        continue
                    if when and job.when != when:
                        # generic job not in same schedule
                        continue
                    _jobs[(app_name, name)] = job
            except ImportError:
                # No job module -- continue scanning
                pass
    return _jobs
def fill_plug_cache():
    """
    Walk the vim-plug root directory and write the list of plugin
    directories into the current vim buffer.
    """
    cb = vim.current.buffer
    print(cb.name)
    vimplug_root = vim.eval('g:exprjlist_vimplug_root_dir')
    if 0 == len(vimplug_root):
        return
    label = 'vimplug'
    candidates = []
    # Collect each plugin directory as a "vimplug:<path>" entry.
    for item in os.listdir(vimplug_root):
        itempath = os.sep.join([vimplug_root, item])
        if os.path.isdir(itempath):
            item = label + ":" + itempath
            candidates.append(item)
    # Replace the buffer contents with the collected entries.
    cb[:] = candidates
def get_project_id():
    """ Gets the project ID. It defaults to the project declared in the
    enviorment variable PROJECT but if it can't find it there it will
    try looking for a service account and take the project ID from there

    Returns:
        str or None: the resolved project ID (None if neither the PROJECT
        variable nor a service account file is available).
    """
    service_acc_address = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
    if service_acc_address:
        # `with` closes the credentials file (the original leaked the handle).
        with open(service_acc_address, 'r') as handle:
            service_acc_project_id = json.load(handle)['project_id']
    else:
        service_acc_project_id = None
    project_id = os.environ.get('PROJECT', service_acc_project_id)
    # Warn when the two sources disagree about the project.
    if service_acc_project_id is not None and project_id != service_acc_project_id:
        logging.critical("Warning the project in ENV VAR PROJECT is \
                            not the same as your service account project")
    return project_id
def val_err_str(val: float, err: float) -> str:
    """
    Format a value/error pair in compact parenthesised notation:

    12.345 +/- 1.23  -> '12.3(12)'
    12.345 +/- 0.012 -> '12.345(12)'
    12345  +/- 654   -> '12340(650)'

    :param val: float representing the value
    :param err: float representing the error in the value
    :return: a string representation of the value/error pair
    """
    sig_figs = 2  # future upgrade path is to allow user to set this
    dps = 2 - sig_figs
    if err < 10:
        # Scale the error up until it shows the desired number of digits.
        while err < 10.:
            err, dps = err * 10, dps + 1
        err = round(err)
    else:
        # Scale the error down, then re-inflate it after rounding.
        while err > 100.:
            err, dps = err / 10, dps - 1
        err = round(err) * 10 ** (-dps)
    return f"{round(val, dps):.{max(0, dps)}f}({err:.0f})"
def compareTaggers(model1, model2, string_list, module_name):
    """
    Compare two models. Given a list of strings, prints out tokens & tags
    whenever the two taggers parse a string differently. This is for spot-checking models
    :param model1: a .crfsuite filename
    :param model2: another .crfsuite filename
    :param string_list: a list of strings to be checked
    :param module_name: name of a parser module
    """
    module = __import__(module_name)
    # Model files are expected inside the parser module's directory.
    tagger1 = pycrfsuite.Tagger()
    tagger1.open(module_name+'/'+model1)
    tagger2 = pycrfsuite.Tagger()
    tagger2.open(module_name+'/'+model2)
    count_discrepancies = 0
    for string in string_list:
        tokens = module.tokenize(string)
        if tokens:
            features = module.tokens2features(tokens)
            tags1 = tagger1.tag(features)
            tags2 = tagger2.tag(features)
            if tags1 != tags2:
                # Print a side-by-side table of the disagreeing tagging.
                count_discrepancies += 1
                print('\n')
                print("%s. %s" %(count_discrepancies, string))
                print('-'*75)
                print_spaced('token', model1, model2)
                print('-'*75)
                for token in zip(tokens, tags1, tags2):
                    print_spaced(token[0], token[1], token[2])
    print("\n\n%s of %s strings were labeled differently"%(count_discrepancies, len(string_list)))
def test_update_feed_deleted(
    db_path, make_reader, call_update_method, feed_action, entry_action
):
    """reader.update_feed should raise FeedNotFoundError if the feed is
    deleted during parsing.
    reader.update_feeds shouldn't (but should log).
    """
    parser = Parser()
    reader = make_reader(db_path)
    reader._parser = parser
    feed = parser.feed(1, datetime(2010, 1, 1))
    reader.add_feed(feed.url)
    reader.update_feeds()
    # Optionally pre-seed an entry and/or stage an entry update, per the
    # parametrized entry_action.
    if entry_action is not EntryAction.none:
        parser.entry(1, 1, datetime(2010, 1, 1))
        if entry_action is EntryAction.update:
            reader.update_feeds()
            parser.entry(1, 1, datetime(2010, 1, 2))
    if feed_action is FeedAction.update:
        feed = parser.feed(1, datetime(2010, 1, 2), title='new title')
    # The blocking parser lets the other thread delete the feed while the
    # update is mid-parse; optionally it also fails the parse itself.
    if feed_action is not FeedAction.fail:
        parser_cls = BlockingParser
    else:
        class parser_cls(BlockingParser, FailingParser):
            pass
    blocking_parser = parser_cls.from_parser(parser)
    def target():
        # Runs in a second thread: wait until parsing started, delete the
        # feed, then let the parser return — exercising the race.
        # can't use fixture because it would run close() in a different thread
        from reader import make_reader
        blocking_parser.in_parser.wait()
        reader = make_reader(db_path)
        try:
            reader.delete_feed(feed.url)
        finally:
            blocking_parser.can_return_from_parser.set()
        try:
            reader.close()
        except StorageError as e:
            if 'database is locked' in str(e):
                pass  # sometimes, it can be; we don't care
            else:
                raise
    t = threading.Thread(target=target)
    t.start()
    try:
        reader._parser = blocking_parser
        if call_update_method.__name__ == 'call_update_feed':
            with pytest.raises(FeedNotFoundError) as excinfo:
                call_update_method(reader, feed.url)
            assert excinfo.value.url == feed.url
            assert 'no such feed' in excinfo.value.message
        elif call_update_method.__name__.startswith('call_update_feeds'):
            # shouldn't raise an exception
            call_update_method(reader, feed.url)
        else:
            assert False, "shouldn't happen"
    finally:
        t.join()
def discrete_distribution(probabilities, path='', fig_name='distribution_events_states.pdf', v_labels=None,
                          h_labels=None, title=None, color_map=None, figsize=(12, 6), size_labels=16, size_values=14,
                          bottom=None, top=None, left=None, right=None, savefig=False, usetex=False):
    """
    Annotated heatmap of a given discrete distribution with 2 dimensions.
    :type probabilities: 2D array
    :param probabilities: the 2D discrete distribution.
    :type path: string
    :param path: where the figure is saved.
    :type fig_name: string
    :param fig_name: name of the file.
    :type v_labels: list of strings
    :param v_labels: labels for the first dimension (vertical).
    :type h_labels: list of strings
    :param h_labels: labels for the second dimension (horizontal).
    :type title: string
    :param title: suptitle.
    :param color_map: color map for the heatmap, see seaborn documentation.
    :type figsize: (int, int)
    :param figsize: tuple (width, height).
    :type size_labels: int
    :param size_labels: fontsize of labels.
    :type size_values: int
    :param size_values: fontsize of the annotations on top of the heatmap.
    :type bottom: float
    :param bottom: between 0 and 1, adjusts the bottom margin, see matplotlib subplots_adjust.
    :type top: float
    :param top: between 0 and 1, adjusts the top margin, see matplotlib subplots_adjust.
    :type left: float
    :param left: between 0 and 1, adjusts the left margin, see matplotlib subplots_adjust.
    :type right: float
    :param right: between 0 and 1, adjusts the right margin, see matplotlib subplots_adjust.
    :type savefig: boolean
    :param savefig: set to True to save the figure.
    :type usetex: boolean
    :param usetex: set to True if matplolib figure is rendered with TeX.
    :rtype: Figure
    :return: the figure (see matplotlib).
    """
    if color_map is None:
        color_map = seaborn.cubehelix_palette(as_cmap=True, reverse=False, start=0.5, rot=-.75)
    v_size = np.shape(probabilities)[0]
    h_size = np.shape(probabilities)[1]
    # Create annotation matrix: each cell shows its probability as a whole
    # percentage; non-zero values below 1% are rendered as "<1%".
    annot = np.ndarray((v_size, h_size), dtype=object)
    for x1 in range(v_size):
        for x2 in range(h_size):
            p = probabilities[x1, x2]
            if p == 0:
                if usetex:
                    annot[x1, x2] = r'$0$\%'
                else:
                    annot[x1, x2] = r'0%'
            elif p < 0.01:
                if usetex:
                    annot[x1, x2] = r'$<1$\%'
                else:
                    annot[x1, x2] = r'<1%'
            else:
                # floor() so the displayed percentages never overstate p
                a = str(int(np.floor(100 * p)))
                if usetex:
                    annot[x1, x2] = r'$' + a + r'$\%'
                else:
                    annot[x1, x2] = a + r'%'
    f = plt.figure(figsize=figsize)
    ax = seaborn.heatmap(probabilities, xticklabels=h_labels, yticklabels=v_labels, annot=annot, cbar=False,
                         cmap=color_map, fmt='s', square=True, annot_kws={'size': size_values})
    ax.tick_params(axis='both', which='major', labelsize=size_labels)  # font size for tick labels
    ax.set_yticklabels(v_labels, va='center')
    if title is not None:
        plt.title(title)
    plt.tight_layout()
    # Manual margin adjustment overrides tight_layout when requested.
    if bottom is not None:
        plt.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
    if savefig:
        entire_path = os.path.join(path, fig_name)
        plt.savefig(entire_path)
    return f | 5,324,844 |
def predict(model_dir, model_type, train_steps, train_data, test_data):
    """Run predictions with a trained model and write results to CSV.

    train_file_name and test_file_name are the ones split from train.csv
    without header and with target. test1_file_name is the file with header
    and no target, used for the final submission prediction.

    :param model_dir: directory containing the trained checkpoint to load
    :param model_type: estimator type passed through to build_estimator
    :param train_steps: accepted for signature compatibility; unused here
    :param train_data: path/URL of the training data
    :param test_data: path/URL of the test data
    """
    train_file_name, test_file_name, test1_file_name = maybe_download(train_data, test_data)
    # BUGFIX: a hard-coded absolute path ('/home/ethan/...') previously
    # overwrote the `model_dir` argument here, silently ignoring the caller's
    # checkpoint directory. The parameter is now respected.
    output_dir = "./results_2"
    shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir)
    m = build_estimator(model_dir, model_type)

    def _predict_to_csv(file_name, no_target, out_name):
        # Run the estimator on `file_name` and write the positive-class
        # probabilities alongside the original rows to `out_name`.
        input_func, output = input_fn(file_name, no_target, num_epochs=1, shuffle=False)
        results = m.predict(input_func, predict_keys=["probabilities"])
        output['probability'] = pd.Series(v['probabilities'][1] for v in results)
        print(output[:10])
        output.to_csv(os.path.join(output_dir, out_name))

    # Predict the training split, the held-out test split, and the unlabeled
    # submission file (header, no target).
    _predict_to_csv(train_file_name, False, "train_results_2.csv")
    _predict_to_csv(test_file_name, False, "test_results_2.csv")
    _predict_to_csv(test1_file_name, True, "submission_2.csv")
def find_map(address):
    """Look up an address in a process's /proc/PID/maps.

    Scans every mapping returned by fetch_maps() and returns the full map
    line whose [begin, end) range contains `address`, or None if no mapping
    covers it.
    """
    maps_text = fetch_maps()
    for match in re.finditer(begin_pattern, maps_text):
        low = int(match.group("begin"), 16)
        high = int(match.group("end"), 16)
        if low <= address < high:
            return match.group(0)
    return None
def _format_css_declarations(content: list, indent_level: int) -> str:
    """
    Helper function for CSS formatting that formats a list of CSS properties, like `margin: 1em;`.
    INPUTS
    content: A list of component values generated by the tinycss2 library
    indent_level: number of tab stops to indent each declaration by
    OUTPUTS
    A string of formatted CSS
    """
    output = ""
    tokens = tinycss2.parse_declaration_list(content)
    # Hold on to your butts...
    # When we alpha-sort declarations, we want to keep comments that are on the same
    # line attached to that declaration after it's reordered.
    # To do this, first create a list of sorted_declarations that is list of tuples.
    # The first tuple value is the declaration itself, and the second is a comment on the same line, if it exists.
    # While we do this, remove those same-line comments from our master list of tokens,
    # so that we don't process them twice later when we iterate over the master list again.
    sorted_declarations = []
    i = 0
    while i < len(tokens):
        if tokens[i].type == "declaration":
            if i + 1 < len(tokens) and tokens[i + 1].type == "comment":
                sorted_declarations.append((tokens[i], tokens[i + 1]))
                tokens.pop(i + 1)  # Remove from the master list
            # The comment is on the same line iff the intervening whitespace contains no newline.
            # BUGFIX: the original used regex.match(r"[^\n]+", ...), which only checked that the
            # whitespace *started* with a non-newline character, so " \n " still counted as same-line.
            elif i + 2 < len(tokens) and tokens[i + 1].type == "whitespace" and "\n" not in tokens[i + 1].value and tokens[i + 2].type == "comment":
                sorted_declarations.append((tokens[i], tokens[i + 2]))
                tokens.pop(i + 1)  # Remove from the master list
                tokens.pop(i + 1)
            else:
                # Special case in alpha-sorting: Sort -epub-* properties as if -epub- didn't exist
                # Note that we modify token.name, which DOESN'T change token.lower_name; and we use token.name
                # for sorting, but token.lower_name for output, so we don't have to undo this before outputting
                tokens[i].name = regex.sub(r"^-([a-z]+?)-(.+)", r"\2-\1-\2", tokens[i].name)
                sorted_declarations.append((tokens[i], None))
        i = i + 1
    # Actually sort declaration tokens and their associated comments, if any
    sorted_declarations.sort(key=lambda x: x[0].name)
    # Now, sort the master token list using an intermediary list, output_tokens
    # This will iterate over all tokens, including non-declaration tokens. If we encounter a declaration,
    # pull the nth declaration out of our sorted list instead.
    output_tokens = []
    current_declaration_number = 0
    for token in tokens:
        if token.type == "error":
            # BUGFIX: the f prefix was missing, so the literal "{token.message}" was printed
            raise se.InvalidCssException(f"Couldn’t parse CSS. Exception: {token.message}")
        # Append the declaration to the output based on its sorted index.
        # This will sort declarations but keep things like comments before and after
        # declarations in the expected order.
        if token.type == "declaration":
            output_tokens.append(sorted_declarations[current_declaration_number])
            current_declaration_number = current_declaration_number + 1
        else:
            output_tokens.append((token, None))
    # tokens is now a alpha-sorted list of tuples of (token, comment)
    tokens = output_tokens
    for token in tokens:
        comment = None
        if isinstance(token, tuple):
            comment = token[1]
            token = token[0]
        if token.type == "error":
            # BUGFIX: missing f prefix here as well
            raise se.InvalidCssException(f"Couldn’t parse CSS. Exception: {token.message}")
        if token.type == "declaration":
            output += ("\t" * indent_level) + token.lower_name + ": "
            output += _format_css_component_list(token.value)
            if token.important:
                output += " !important"
            output += ";"
            if comment:
                output += " /* " + comment.value.strip() + " */"
            output += "\n"
        if token.type == "comment":
            # Attach the comment to the end of the previous line when one exists
            output = output.rstrip()
            if output == "":
                output += ("\t" * indent_level) + "/* " + token.value.strip() + " */\n"
            else:
                output += " /* " + token.value.strip() + " */\n"
    return output.rstrip()
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.
    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.
    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.
    See Also
    --------
    rosen, rosen_der, rosen_hess_prod
    Examples
    --------
    >>> from scipy.optimize import rosen_hess
    >>> X = 0.1 * np.arange(4)
    >>> rosen_hess(X)
    array([[ -38.,    0.,    0.,    0.],
           [   0.,  134.,  -40.,    0.],
           [   0.,  -40.,  130.,  -80.],
           [   0.,    0.,  -80.,  200.]])
    """
    x = np.atleast_1d(x)
    # Off-diagonal terms: d2f/(dx_i dx_{i+1}) = -400*x_i on both sides.
    off_diag = 400 * x[:-1]
    hess = np.diag(-off_diag, 1) - np.diag(off_diag, -1)
    # Main diagonal: first and last points are boundary cases.
    main_diag = np.zeros(len(x), dtype=x.dtype)
    main_diag[0] = 1200 * x[0] ** 2 - 400 * x[1] + 2
    main_diag[-1] = 200
    main_diag[1:-1] = 202 + 1200 * x[1:-1] ** 2 - 400 * x[2:]
    return hess + np.diag(main_diag)
def flow_corr():
    """
    Symmetric cumulants SC(m, n) at the MAP point compared to experiment.

    Top panel: unnormalized SC; bottom panel: SC normalized by
    <v_m^2><v_n^2>. Solid lines are the substructure-model MAP prediction,
    dashed lines the calibration without nucleon substructure.
    """
    fig, axes = plt.subplots(
        figsize=figsize(0.5, 1.2), sharex=True,
        nrows=2, gridspec_kw=dict(height_ratios=[4, 5])
    )
    observables = ['sc', 'sc_normed']
    ylims = [(-2.5e-6, 2.5e-6), (-.9, .8)]
    labels = ['(4,2)', '(3,2)']
    system = 'PbPb5020'
    def label(*mn, normed=False):
        # Build the TeX y-axis label for SC(m, n), optionally normalized.
        fmt = r'\mathrm{{SC}}({0}, {1})'
        if normed:
            fmt += r'/\langle v_{0}^2 \rangle\langle v_{1}^2 \rangle'
        return fmt.format(*mn).join('$$')
    for obs, ylim, ax in zip(observables, ylims, axes.flat):
        # One curve per (m, n) pair, each in its own colormap family.
        for (mn, cmap), lbl in zip([((4, 2), 'Blues'), ((3, 2), 'Oranges')], labels):
            x = model.map_data[system][obs][mn]['x']
            y = model.map_data[system][obs][mn]['Y']
            ax.plot(x, y, color=getattr(plt.cm, cmap)(.7))
            # Annotate each curve just past its last point.
            ax.text(1.02*x[-1], y[-1], lbl, va='center', ha='left')
        ax.axhline(
            0, color='.5', lw=plt.rcParams['xtick.major.width'],
            zorder=-100
        )
        ax.set_xlim(0, 80)
        ax.set_ylim(*ylim)
        auto_ticks(ax, nbins=7, minor=2)
        if ax.is_first_col():
            ax.set_ylabel(label('m', 'n', normed='normed' in obs))
        if ax.is_first_row():
            ax.set_title('Pb-Pb 5.02 TeV')
        else:
            ax.set_xlabel('Centrality %')
    # MAP estimate for Pb-Pb collisions at 5.02 TeV, calibrated to Pb-Pb
    # data at 2.76 and 5.02 TeV using a model without nucleon substructure.
    # symmetric cumulants
    # Columns: centrality bin midpoint, SC(4,2), SC(3,2).
    SC = np.array([
        [2.5e+00, 5.8591e-09, 5.9204e-09],
        [7.5e+00, 2.1582e-08, -2.1367e-08],
        [1.5e+01, 1.2228e-07, -1.3942e-07],
        [2.5e+01, 4.3989e-07, -5.4267e-07],
        [3.5e+01, 9.4414e-07, -1.0677e-06],
        [4.5e+01, 1.4138e-06, -1.4616e-06],
        [5.5e+01, 1.4456e-06, -1.2317e-06],
        [6.5e+01, 7.3726e-07, -3.3222e-07],
    ])
    # normalized symmetric cumulants (same column layout as SC)
    NSC = np.array([
        [2.5e+00, 7.3202e-02, 2.1091e-02],
        [7.5e+00, 7.6282e-02, -2.0918e-02],
        [1.5e+01, 1.5216e-01, -4.7261e-02],
        [2.5e+01, 2.4814e-01, -8.6423e-02],
        [3.5e+01, 3.4423e-01, -1.1640e-01],
        [4.5e+01, 4.5614e-01, -1.4251e-01],
        [5.5e+01, 6.1072e-01, -1.5021e-01],
    ])
    # Overlay the no-substructure calibration as dashed curves.
    for ax, obs in zip(axes, [SC, NSC]):
        x, y42, y32 = obs.T
        ax.plot(x, y42, color=plt.cm.Blues(.7), linestyle='dashed')
        ax.plot(x, y32, color=plt.cm.Oranges(.7), linestyle='dashed')
    # Proxy artists so the legend describes line styles, not specific curves.
    solid_line = lines.Line2D([], [], color=offblack)
    dashed_line = lines.Line2D([], [], linestyle='dashed', color=offblack)
    handles = [solid_line, dashed_line]
    labels = ["p-Pb, Pb-Pb 5.02 TeV", "Pb-Pb 2.76, 5.02 TeV"]
    plt.legend(handles, labels, loc=8, title='Bayesian calibration on:')
    set_tight(fig) | 5,324,849 |
def close_unclosed_camera_motions(device_id: str):
    """
    Camera motion records are opened and closed by mqtt events:
    - opened: `motion_started_at` is set but `motion_ended_at` is not;
    - closed: both fields are set.
    In a bad scenario the close event never arrives (e.g. a service crash),
    leaving motions open forever. This flags every unclosed motion for the
    given `device_id` as `closed_by_system=True`.
    """
    device = Device.objects.get(device_id=device_id)
    unclosed_motions = CameraMotionDetected.objects.select_for_update().filter(
        device=device,
        motion_ended_at__isnull=True,
        closed_by_system=False,
    )
    with transaction.atomic():
        # Rows are locked by select_for_update for the duration of the transaction.
        for motion in unclosed_motions:
            motion.closed_by_system = True
        CameraMotionDetected.objects.bulk_update(unclosed_motions, ['closed_by_system'], batch_size=100)
def dodecagon(samples=128, radius=1):
    """Create a dodecagon (12-sided regular polygon) mask.

    Parameters
    ----------
    samples : `int`, optional
        number of samples in the square output array
    radius : `float`, optional
        radius of the shape in the square output array; radius=1 fills
        the array

    Returns
    -------
    `numpy.ndarray`
        binary ndarray representation of the mask
    """
    n_sides = 12
    return regular_polygon(n_sides, samples=samples, radius=radius)
def test_launch_mesos_container():
    """Launches a Mesos container with a simple command."""
    app_def = apps.mesos_app(app_id='/mesos-container-app')
    app_id = app_def["id"]
    client = marathon.create_client()
    client.add_app(app_def)
    common.deployment_wait(service_id=app_id)
    # After the deployment settles, exactly one task should be running
    # inside a native Mesos (UCR) container.
    app = client.get_app(app_id)
    tasks = client.get_tasks(app_id)
    task_count = len(tasks)
    assert task_count == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(task_count)
    assert app['container']['type'] == 'MESOS', "The container type is not MESOS"
def port_name(name, nr=0):
    """Return the node output name for output number `nr` (``name:nr``)."""
    return ":".join((name, str(nr)))
def workflow_run_jobs_page_info(response):
    """Report the job IDs found in a workflow-run jobs search response."""
    job_ids = [job['id'] for job in response.json()['jobs']]
    info('Workflow run job IDs: {}', job_ids)
def url(ticker, start_date, end_date):
    """Build the data-API URL for `ticker` between the two dates.

    Appends the API key from the environment (API_KEY_ENV) when present.
    """
    base_url = API_BASE_PATH + ticker + '.csv'
    params = {'start_date': start_date, 'end_date': end_date}
    api_key = os.environ.get(API_KEY_ENV)
    if api_key is not None:
        params['api_key'] = api_key
    return base_url + '?' + urllib.parse.urlencode(params)
def deploy_htdf_faucet(conftest_args):
    """
    Deploy the htdf faucet contract and verify the deployment transaction.

    run this test case, if only run single test
    run this test case, if run this test file

    The deployed contract's bech32 address is appended to the module-level
    `htdf_faucet_contract_address` list for use by later tests.
    """
    time.sleep(5)
    # Deployment parameters: contract creation sends 0 satoshi and carries
    # the contract bytecode in the tx data field (empty `to` address).
    gas_wanted = 3000000
    gas_price = 100
    tx_amount = 0
    data = BYTECODES
    memo = 'test_deploy_htdf_faucet'
    htdfrpc = HtdfRPC(chaid_id=conftest_args['CHAINID'], rpc_host=conftest_args['RPC_HOST'], rpc_port=conftest_args['RPC_PORT'])
    from_addr = Address(conftest_args['ADDRESS'])
    # new_to_addr = HtdfPrivateKey('').address
    private_key = HtdfPrivateKey(conftest_args['PRIVATE_KEY'])
    time.sleep(10)
    from_acc = htdfrpc.get_account_info(address=from_addr.address)
    print('from_acc balance: {}'.format(from_acc.balance_satoshi))
    assert from_acc is not None
    # The sender must be able to cover the maximum possible fee plus amount.
    assert from_acc.balance_satoshi > gas_price * gas_wanted + tx_amount
    signed_tx = HtdfTxBuilder(
        from_address=from_addr,
        to_address='',
        amount_satoshi=tx_amount,
        sequence=from_acc.sequence,
        account_number=from_acc.account_number,
        chain_id=htdfrpc.chain_id,
        gas_price=gas_price,
        gas_wanted=gas_wanted,
        data=data,
        memo=memo
    ).build_and_sign(private_key=private_key)
    tx_hash = htdfrpc.broadcast_tx(tx_hex=signed_tx)
    print('tx_hash: {}'.format(tx_hash))
    tx = htdfrpc.get_transaction_until_timeout(transaction_hash=tx_hash, timeout_secs=5000/5)
    pprint(tx)
    assert tx['logs'][0]['success'] == True
    txlog = tx['logs'][0]['log']
    # txlog = json.loads(txlog)
    # Verify the broadcast tx round-tripped with the parameters we signed.
    assert tx['gas_wanted'] == str(gas_wanted)
    assert int(tx['gas_used']) <= gas_wanted
    tv = tx['tx']['value']
    assert len(tv['msg']) == 1
    assert tv['msg'][0]['type'] == 'htdfservice/send'
    assert int(tv['fee']['gas_wanted']) == gas_wanted
    assert int(tv['fee']['gas_price']) == gas_price
    assert tv['memo'] == memo
    mv = tv['msg'][0]['value']
    assert mv['From'] == from_addr.address
    assert mv['To'] == ''  # new_to_addr.address
    assert mv['Data'] == data
    assert int(mv['GasPrice']) == gas_price
    assert int(mv['GasWanted']) == gas_wanted
    assert 'satoshi' == mv['Amount'][0]['denom']
    assert tx_amount == int(mv['Amount'][0]['amount'])
    pprint(tx)
    time.sleep(10)  # wait for chain state update
    # to_acc = htdfrpc.get_account_info(address=new_to_addr.address)
    # assert to_acc is not None
    # assert to_acc.balance_satoshi == tx_amount
    # The sender's balance should have decreased by exactly fee + amount,
    # and its sequence number should have advanced by one.
    from_acc_new = htdfrpc.get_account_info(address=from_addr.address)
    print("from_acc_new balance is {}".format(from_acc_new.balance_satoshi))
    assert from_acc_new.address == from_acc.address
    assert from_acc_new.sequence == from_acc.sequence + 1
    assert from_acc_new.account_number == from_acc.account_number
    assert from_acc_new.balance_satoshi == from_acc.balance_satoshi - (gas_price * int(tx['gas_used'])) - tx_amount
    # Extract the new contract address from the tx log and convert to bech32.
    log = tx['logs'][0]['log']
    conaddr = log[log.find("contract address:") : log.find(", output:")]
    contract_address = conaddr.replace('contract address:', '').strip()
    contract_address = Address.hexaddr_to_bech32(contract_address)
    htdf_faucet_contract_address.append(contract_address)
    pass | 5,324,856 |
def make_vgroup(module, array):
    """Create a volume group, optionally with bandwidth and/or IOPs QoS limits.

    :param module: AnsibleModule whose params carry the vgroup name and the
        optional `bw_qos` / `iops_qos` limits
    :param array: purestorage FlashArray connection
    Exits the module with changed=True on success; fails with a message on
    invalid QoS values or API errors. In check mode nothing is created.
    """
    changed = True
    if not module.check_mode:
        api_version = array._list_available_rest_versions()
        # BUGFIX: the original condition read `bw_qos or (iops_qos and
        # VG_IOPS_VERSION in api_version)` because `and` binds tighter than
        # `or`, so a bw_qos-only request bypassed the API-version check.
        # Parenthesize so QoS support is required for either limit.
        if (module.params['bw_qos'] or module.params['iops_qos']) and VG_IOPS_VERSION in api_version:
            if module.params['bw_qos'] and not module.params['iops_qos']:
                # Bandwidth limit must be between 1 MB/s and 512 GB/s.
                if 549755813888 >= int(human_to_bytes(module.params['bw_qos'])) >= 1048576:
                    try:
                        array.create_vgroup(module.params['name'],
                                            bandwidth_limit=module.params['bw_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
            elif module.params['iops_qos'] and not module.params['bw_qos']:
                # IOPs limit must be between 100 and 100 million.
                if 100000000 >= int(human_to_real(module.params['iops_qos'])) >= 100:
                    try:
                        array.create_vgroup(module.params['name'],
                                            iops_limit=module.params['iops_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='IOPs QoS value {0} out of range.'.format(module.params['iops_qos']))
            else:
                # Both limits supplied: validate both before creating.
                bw_qos_size = int(human_to_bytes(module.params['bw_qos']))
                if 100000000 >= int(human_to_real(module.params['iops_qos'])) >= 100 and 549755813888 >= bw_qos_size >= 1048576:
                    try:
                        array.create_vgroup(module.params['name'],
                                            iops_limit=module.params['iops_qos'],
                                            bandwidth_limit=module.params['bw_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='IOPs or Bandwidth QoS value out of range.')
        else:
            try:
                # NOTE(review): this branch reads params['vgroup'] while the QoS
                # branches read params['name'] -- confirm the intended key
                # against the module's argument spec before unifying them.
                array.create_vgroup(module.params['vgroup'])
            except Exception:
                module.fail_json(msg='creation of volume group {0} failed.'.format(module.params['vgroup']))
    module.exit_json(changed=changed)
def test_mnist_basic():
    """
    Validate MnistDataset
    """
    logger.info("Test MnistDataset Op")

    def _count_rows(dataset):
        # Iterate the dataset for a single epoch and return the row count.
        total = 0
        for _ in dataset.create_dict_iterator(num_epochs=1):
            total += 1
        return total

    # case 1: test loading whole dataset
    assert _count_rows(ds.MnistDataset(DATA_DIR)) == 10000
    # case 2: test num_samples
    assert _count_rows(ds.MnistDataset(DATA_DIR, num_samples=500)) == 500
    # case 3: test repeat
    assert _count_rows(ds.MnistDataset(DATA_DIR, num_samples=200).repeat(5)) == 1000
    # case 4: test batch with drop_remainder=False
    data4 = ds.MnistDataset(DATA_DIR, num_samples=100)
    assert data4.get_dataset_size() == 100
    assert data4.get_batch_size() == 1
    data4 = data4.batch(batch_size=7)  # drop_remainder is default to be False
    assert data4.get_dataset_size() == 15
    assert data4.get_batch_size() == 7
    assert _count_rows(data4) == 15
    # case 5: test batch with drop_remainder=True
    data5 = ds.MnistDataset(DATA_DIR, num_samples=100)
    assert data5.get_dataset_size() == 100
    assert data5.get_batch_size() == 1
    data5 = data5.batch(batch_size=7, drop_remainder=True)  # the rest of incomplete batch will be dropped
    assert data5.get_dataset_size() == 14
    assert data5.get_batch_size() == 7
    assert _count_rows(data5) == 14
def AC3(csp, queue=None, removals=None, arc_heuristic=csp.dom_j_up):
    """[Figure 6.3]

    Arc-consistency algorithm AC-3. Returns (consistent, checks): whether
    the CSP could be made arc-consistent, and how many constraint checks
    were performed.

    NOTE(review): the default `arc_heuristic=csp.dom_j_up` is evaluated at
    definition time against the module-level name `csp`, which the first
    parameter then shadows inside the body -- confirm this import layout is
    intended.
    """
    if queue is None:
        # Start with every directed arc (Xi, Xk) in the constraint graph.
        queue = {(Xi, Xk) for Xi in csp.variables for Xk in csp.neighbors[Xi]}
    csp.support_pruning()
    queue = arc_heuristic(csp, queue)
    checks = 0
    while queue:
        (Xi, Xj) = queue.pop()
        revised, checks = revise(csp, Xi, Xj, removals, checks)
        if revised:
            if not csp.curr_domains[Xi]:
                return False, checks  # CSP is inconsistent
            # Xi's domain shrank, so arcs into Xi must be re-examined.
            for Xk in csp.neighbors[Xi]:
                if Xk != Xj:
                    queue.add((Xk, Xi))
    return True, checks | 5,324,859 |
def load(
    name: str,
    ids: Optional[List[Union[str, int]]] = None,
    limit: Optional[int] = None,
) -> pandas.DataFrame:
    """Load dataset records into a pandas DataFrame.

    Args:
        name:
            The dataset name.
        ids:
            If provided, only records with these ids are loaded.
        limit:
            Maximum number of records to retrieve.

    Returns:
        The dataset as a pandas DataFrame.

    Examples:
        >>> import rubrix as rb
        >>> dataframe = rb.load(name="example-dataset")
    """
    client = _client_instance()
    return client.load(name=name, limit=limit, ids=ids)
def _stringify(values):
"""internal method: used to convert values to a string suitable for an xml attribute"""
if type(values) == list or type(values) == tuple:
return " ".join([str(x) for x in values])
elif type(values) == type(True):
return "1" if values else "0"
else:
return str(values) | 5,324,861 |
def xml_safe(s):
    """Returns the XML-safe version of a given string.

    Escapes the XML special characters "&" and "<" (ampersand first, so the
    generated escapes are not themselves re-escaped), strips carriage
    returns, and converts newlines to "<br/>" tags.
    """
    # BUGFIX: the escapes had been mangled into the no-ops
    # replace("&", "&") / replace("<", "<"); restore proper XML entities.
    new_string = s.replace("&", "&amp;").replace("<", "&lt;")
    new_string = new_string.replace("\r", "").replace("\n", "<br/>")
    return new_string
def range_values(ent):
    """Extract values from the range and cached label."""
    data = {}
    # First sub-entity whose cached label belongs to the 'range' family;
    # assumes at least one exists (IndexError otherwise) -- TODO confirm
    # callers guarantee this.
    range_ = [e for e in ent.ents if e._.cached_label.split('.')[0] == 'range'][0]
    values = re.findall(FLOAT_RE, range_.text)
    # Reject the match unless every captured value looks like an integer token.
    if not all([re.search(INT_TOKEN_RE, v) for v in values]):
        raise RejectMatch
    # Label suffixes (everything after 'range.') name the extracted fields,
    # paired positionally with the captured values.
    keys = range_.label_.split('.')[1:]
    for key, value in zip(keys, values):
        data[key] = to_positive_int(value)
    range_._.data = data
    # Relabel the entity as a 'count' for downstream processing.
    range_._.new_label = 'count'
    return range_ | 5,324,863 |
def plot_mw_nii_bars(ax, snr_min = None, shaded_kwargs = {}, **kwargs):
    """
    Plots vertical lines and bars on bpt Diagram for Tilted Disk where only
    NII/HA line is detected
    Parameters
    ----------
    ax 'matplotlib.pyplot.figure.axes'
        axes to plot lines on
    snr_min: 'number', optional, must be keyword
        Miniumum sigma detection level to plot
        Default of 2 sigma
    shaded_kwargs: 'dict', optional, must be keyword
        kwargs passed to ax.fill_betweenx for shaded error boxes
    **kwargs: 'dict', optional, must be keywords
        passed to ax.plot for lines
    """
    # Default line color
    if "color" not in kwargs:
        kwargs["color"] = pal[4]
    if "facecolor" not in shaded_kwargs:
        shaded_kwargs["facecolor"] = kwargs["color"]
    # Default zorder: shaded boxes are drawn one layer below the lines
    if "zorder" not in kwargs:
        if "zorder" in shaded_kwargs:
            kwargs["zorder"] = shaded_kwargs["zorder"] + 1
        else:
            kwargs["zorder"] = 2
            shaded_kwargs["zorder"] = 1
    else:
        if "zorder" not in shaded_kwargs:
            shaded_kwargs["zorder"] = kwargs["zorder"] - 1
    # Default alpha
    if "alpha" not in kwargs:
        kwargs["alpha"] = 0.5
    if "alpha" not in shaded_kwargs:
        shaded_kwargs["alpha"] = 0.1
    # Default line style
    if "ls" not in kwargs:
        kwargs["ls"] = ':'
    # Default line width
    if "lw" not in kwargs:
        kwargs["lw"] = 2
    # Default SNR
    if snr_min is None:
        snr_min = 2.
    # Default Label
    if "label" not in kwargs:
        kwargs["label"] = r'$>{0:2.1f}\sigma$ Tilted Disk'.format(snr_min)
    # Load Data
    nii_ha_data_filepath = os.path.join(directory, "mw_data/WHAM_NII_HA_DATA_021219.fits")
    nii_ha_data = Table.read(nii_ha_data_filepath)
    # SNR Cut Data: require both NII and HA above the detection threshold
    snr_cut = (nii_ha_data["NII_SIGMA_LEVEL"] > snr_min) & (nii_ha_data["HA_SIGMA_LEVEL"] > snr_min)
    # in Tilted Disk OIII/HB points cut
    # (rows 0, 6, 7 are excluded by hand -- presumably pointings with
    # OIII/HB detections; TODO confirm against the data file)
    oiii_hb_cut = np.ones(len(nii_ha_data), dtype = bool)
    oiii_hb_cut[0] = False
    oiii_hb_cut[6] = False
    oiii_hb_cut[7] = False
    nii_ha_data = nii_ha_data[snr_cut & oiii_hb_cut]
    # One vertical line per pointing at its log(NII/HA) value, with a shaded
    # box spanning the lower/upper uncertainty bounds. Only the first line
    # carries the legend label.
    for ell, entry in enumerate(nii_ha_data):
        if ell == 0:
            ax.plot([entry["log_nii_ha"], entry["log_nii_ha"]], [-2-ell/3.,2],
                    **kwargs)
            del kwargs["label"]
        else:
            ax.plot([entry["log_nii_ha"], entry["log_nii_ha"]], [-2-ell/3.,2],
                    **kwargs)
        ax.fill_betweenx([-2,2],
                         [entry["log_nii_ha_lower"], entry["log_nii_ha_lower"]],
                         x2 = [entry["log_nii_ha_upper"], entry["log_nii_ha_upper"]],
                         **shaded_kwargs)
    return ax | 5,324,864 |
def get_weights(connections):
    """Return the synaptic weights of the given connections.

    :param connections: NEST connection handles to query
    :return: numpy array of the connections' "weight" values
    """
    weight_status = nest.GetStatus(connections, keys="weight")
    return np.array(weight_status)
def w2v_matrix_vocab_generator(w2v_pickle):
    """
    Build a word-to-index dict and an embedding matrix from a pickled dataframe.

    The dataframe must have 'token' and 'vector' columns, every vector having
    the same length. Row i of the returned matrix is the embedding of the word
    mapped to index i. Two extra zero rows are appended for the special
    "<UNK>" and "<padding>" tokens, which get the last two indexes.

    :param w2v_pickle: path or buffer of the pickled dataframe.
    :return: (vocab dict, numpy matrix of shape (num words + 2, embedding dim)).
    """
    w2v_df = pd.read_pickle(w2v_pickle)
    embedding_dim = len(w2v_df.iloc[0, 1])
    vocab = dict()
    # +2 rows reserved for the <UNK> and <padding> tokens (left as zeros).
    weights = np.zeros(shape=(w2v_df.shape[0] + 2, embedding_dim))
    for token, vector in zip(w2v_df["token"], w2v_df["vector"]):
        index = len(vocab)
        vocab[token] = index
        weights[index, :] = np.array(vector)
    vocab["<UNK>"] = len(weights) - 2
    vocab["<padding>"] = len(weights) - 1
    return vocab, weights
def concat(
    dfs, axis=0, join="outer", uniform=False, filter_warning=True, ignore_index=False
):
    """Concatenate, handling some edge cases:
    - Unions categoricals between partitions
    - Ignores empty partitions

    Parameters
    ----------
    dfs : list of DataFrame, Series, or Index
    axis : int or str, optional
    join : str, optional
    uniform : bool, optional
        Whether to treat ``dfs[0]`` as representative of ``dfs[1:]``. Set to
        True if all arguments have the same columns and dtypes (but not
        necessarily categories). Default is False.
    ignore_index : bool, optional
        Whether to allow index values to be ignored/dropped during
        concatenation. Default is False.
    """
    # A single frame needs no concatenation at all.
    if len(dfs) == 1:
        return dfs[0]
    # Dispatch on the concrete type of the first frame.
    func = concat_dispatch.dispatch(type(dfs[0]))
    return func(
        dfs,
        axis=axis,
        join=join,
        uniform=uniform,
        filter_warning=filter_warning,
        ignore_index=ignore_index,
    )
def resize(dataset: xr.Dataset, invalid_value: float = 0) -> xr.Dataset:
    """
    Pixels whose aggregation window exceeds the reference image are truncated in the output products.
    This function returns the output products with the size of the input images : add rows and columns that have been
    truncated. These added pixels will have bit 0 = 1 ( Invalid pixel : border of the reference image )
    in the validity_mask and will have the disparity = invalid_value in the disparity map.
    :param dataset: Dataset which contains the output products
    :type dataset: xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    :param invalid_value: disparity to assign to invalid pixels ( pixels whose aggregation window exceeds the image)
    :type invalid_value: float
    :return: the dataset with the size of the input images
    :rtype : xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    """
    offset = dataset.attrs['offset_row_col']
    # No window offset means nothing was truncated: return unchanged.
    if offset == 0:
        return dataset
    c_row = dataset.coords['row']
    c_col = dataset.coords['col']
    # Extend coordinates by `offset` on every side to recover the input size.
    row = np.arange(c_row[0] - offset, c_row[-1] + 1 + offset)
    col = np.arange(c_col[0] - offset, c_col[-1] + 1 + offset)
    resize_disparity = xr.Dataset()
    # For each variable, build a full-size fill array and overlay the
    # existing values on top of it (combine_first keeps existing data).
    for array in dataset:
        if array == 'disparity_map':
            data = xr.DataArray(np.full((len(row), len(col)), invalid_value, dtype=np.float32), coords=[row, col],
                                dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(data)
        if array == 'confidence_measure':
            depth = len(dataset.coords['indicator'])
            data = xr.DataArray(data=np.full((len(row), len(col), depth), np.nan, dtype=np.float32),
                                coords={'row': row, 'col': col}, dims=['row', 'col', 'indicator'])
            resize_disparity[array] = dataset[array].combine_first(data)
        if array == 'validity_mask':
            data = xr.DataArray(np.zeros((len(row), len(col)), dtype=np.uint16), coords=[row, col], dims=['row', 'col'])
            # Invalid pixel : border of the reference image
            data += PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER
            resize_disparity[array] = dataset[array].combine_first(data).astype(np.uint16)
        if array == 'interpolated_coeff':
            data = xr.DataArray(np.full((len(row), len(col)), np.nan, dtype=np.float32), coords=[row, col],
                                dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(data)
    resize_disparity.attrs = dataset.attrs
    # The output is now full-size, so the recorded offset is reset.
    resize_disparity.attrs['offset_row_col'] = 0
    return resize_disparity | 5,324,868 |
def score(graphs, schema, url, port):
    """
    Serialize graphs to Avro and POST them to the prediction endpoint.

    graphs is expected to be a list of dictionaries, where each entry in the
    list represents a graph with
    * key idx -> index value
    * key nodes -> list of ints representing vertices of the graph
    * key edges -> list of list of ints representing edges of graph

    Returns the raw requests.Response from the service.
    """
    # Serialize all graphs into an in-memory Avro container.
    stream = BufferedWriter(BytesIO())
    writer = DataFileWriter(stream, avro.io.DatumWriter(), schema)
    # writer = DataFileWriter(open("imdb-graph.avro", "wb"), DatumWriter(), schema)
    for graph in graphs:
        # NOTE(review): vertices are read from graph["vertices"], but the
        # docstring above documents the key as "nodes" -- confirm which key
        # callers actually provide.
        writer.append({"edges": graph["edges"], "vertices": graph["vertices"], "index": graph["idx"], "label": graph.get("label")})
    # Flush before reading the buffer so all appended records are included.
    writer.flush()
    raw_bytes = stream.raw.getvalue()
    writer.close()
    # POST the Avro bytes to the unstructured-prediction endpoint.
    url = "{}:{}/predictUnstructured/?ret_mode=binary".format(url.strip("/"), port)
    payload = raw_bytes
    headers = {
        'Content-Type': 'application/octet-stream'
    }
    response = requests.request("POST", url, headers=headers, data = payload)
    return response | 5,324,869 |
def group_by_author(commits: List[dict]) -> Dict[str, List[dict]]:
    """Group GitHub commit objects by their author.

    :param commits: GitHub commit objects; each must have author.login
    :return: mapping of author login -> commits by that author, preserving
        the original commit order within each group (and insertion order of
        authors as first encountered)
    """
    grouped: Dict[str, List[dict]] = {}
    for commit in commits:
        # setdefault replaces the explicit membership-check-then-insert.
        grouped.setdefault(commit["author"]["login"], []).append(commit)
    return grouped
def convert(name):
    """Convert a CamelCase identifier to its under_score form.

    :param name: the CamelCase (or mixedCase) string to convert
    :return: the snake_case equivalent
    """
    # First pass splits before capitalized words (e.g. "HTTPResponse" -> "HTTP_Response"),
    # second pass splits remaining lower/digit-to-upper boundaries.
    first_pass = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    second_pass = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
def plot(graph, particles=None, polyline=None, particles_alpha=None, label_start_end=True,
         bgcolor='white', node_color='grey', node_size=0, edge_color='lightgrey', edge_linewidth=3, **kwargs):
    """
    Plots particle approximation of trajectory
    :param graph: NetworkX MultiDiGraph
        UTM projection
        encodes road network
        generating using OSMnx, see tools.cam_graph.py
    :param particles: MMParticles object (from inference.particles)
        particle approximation
    :param polyline: list-like, each element length 2
        UTM - metres
        series of GPS coordinate observations
    :param particles_alpha: float in [0, 1]
        plotting parameter
        opacity of routes
    :param label_start_end: bool
        whether to label the start and end points of the route
    :param bgcolor: str
        background colour
    :param node_color: str
        node (intersections) colour
    :param node_size: float
        size of nodes (intersections)
    :param edge_color: str
        colour of edges (roads)
    :param edge_linewidth: float
        width of edges (roads
    :param kwargs:
        additional parameters to ox.plot_graph
    :return: fig, ax
    """
    fig, ax = ox.plot_graph(graph, show=False, close=False,
                            bgcolor=bgcolor, node_color=node_color, node_size=node_size,
                            edge_color=edge_color, edge_linewidth=edge_linewidth,
                            **kwargs)
    ax.set_aspect("equal")
    start_end_points = None
    if particles is not None:
        # Accept a single trajectory array as well as a list of particles.
        if isinstance(particles, np.ndarray):
            particles = [particles]
        # Accumulates the start/end position averaged over all particles.
        start_end_points = np.zeros((2, 2))
        alpha_min = 0.1
        if particles_alpha is None:
            # More particles -> more transparent individual routes,
            # floored at alpha_min so a single route stays visible.
            particles_alpha = 1 / len(particles) * (1 - alpha_min) + alpha_min
        xlim = [None, None]
        ylim = [None, None]
        for i, particle in enumerate(particles):
            if particle is None:
                continue
            if len(particle) > 1:
                # Interpolate along edges so each route draws as a continuous
                # line, then convert to cartesian (UTM) coordinates.
                int_path = interpolate_path(graph, particle, t_column=True)
                cart_int_path = cartesianise_path(graph, int_path, t_column=True)
                ax.plot(cart_int_path[:, 0], cart_int_path[:, 1], color='orange', linewidth=1.5,
                        alpha=particles_alpha)
                cart_path = cartesianise_path(graph, observation_time_rows(particle), t_column=True)
            else:
                cart_path = cartesianise_path(graph, particle, t_column=True)
            ax.scatter(cart_path[:, 0], cart_path[:, 1], color='orange', alpha=particles_alpha, zorder=2)
            # Average contribution of this particle to the start/end markers.
            start_end_points[0] += cart_path[0] / len(particles)
            start_end_points[1] += cart_path[-1] / len(particles)
            # Track the bounding box of everything plotted so far.
            xlim[0] = np.min(cart_path[:, 0]) if xlim[0] is None else min(np.min(cart_path[:, 0]), xlim[0])
            xlim[1] = np.max(cart_path[:, 0]) if xlim[1] is None else max(np.max(cart_path[:, 0]), xlim[1])
            ylim[0] = np.min(cart_path[:, 1]) if ylim[0] is None else min(np.min(cart_path[:, 1]), ylim[0])
            ylim[1] = np.max(cart_path[:, 1]) if ylim[1] is None else max(np.max(cart_path[:, 1]), ylim[1])
        # Pad the view by 10% around the plotted routes.
        xlim, ylim = expand_lims(xlim, ylim, 0.1)
        ax.set_xlim(xlim[0], xlim[1])
        ax.set_ylim(ylim[0], ylim[1])
    if polyline is not None:
        poly_arr = np.array(polyline)
        ax.scatter(poly_arr[:, 0],
                   poly_arr[:, 1],
                   marker='x', c='red', s=100, linewidth=3, zorder=10)
        if particles is None:
            # With no particles, start/end markers come from the polyline.
            start_end_points = poly_arr[np.array([0, -1])]
            xlim = [np.min(poly_arr[:, 0]), np.max(poly_arr[:, 0])]
            ylim = [np.min(poly_arr[:, 1]), np.max(poly_arr[:, 1])]
            xlim, ylim = expand_lims(xlim, ylim, 0.1)
            ax.set_xlim(xlim[0], xlim[1])
            ax.set_ylim(ylim[0], ylim[1])
    if start_end_points is not None and label_start_end:
        # NOTE(review): +25 appears to offset the labels by ~25 units
        # (UTM metres) in both x and y — confirm this is the intent.
        plt.annotate('Start', start_end_points[0] + 25, zorder=12)
        plt.annotate('End', start_end_points[1] + 25, zorder=12)
    plt.tight_layout()
    return fig, ax
def non_empty_string(value):
    """Return True when *value* is a non-empty string with at least one non-whitespace character."""
    if not value:
        return False
    return bool(value.strip())
def metadataAbstractElementRequiredChildElementTest6():
"""
Optional child elements, child elements required.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementKnownChildElements,
... metadataAbstractElementRequiredChildElementTest6(),
... requiredChildElements=["foo"],
... optionalChildElements=["bar"])
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test>
<foo />
<bar />
</test>
"""
return ElementTree.fromstring(metadata) | 5,324,874 |
def index():
    """
    List containers.

    Renders the containers overview page. Containers whose state is
    'STOPPED' are additionally offered as clone sources.
    """
    containers = g.api.get_containers()
    # Only stopped containers are clonable; a comprehension replaces the
    # manual append loop.
    clonable_containers = [
        container['name']
        for container in containers
        if container['state'] == 'STOPPED'
    ]
    context = {
        'containers': containers,
        'clonable_containers': clonable_containers,
        'host': g.api.get_host(),
    }
    return render_template('containers.html', **context)
def spatial_discounting_mask():
    """
    Build the spatial discounting mask tensor of shape (1, 1, H, W).

    Reads cfg.spatial_discounting_mask (the gamma base),
    cfg.context_mask_shape (H, W), cfg.discounted_mask and cfg.use_cuda.
    When discounting is enabled each pixel gets
    max(gamma**min(i, H-i), gamma**min(j, W-j)), i.e. values decay towards
    the centre of the mask; otherwise the mask is all ones.

    Introduced in "Generative Image Inpainting with Contextual Attention",
    Yu et al.
    """
    gamma = cfg.spatial_discounting_mask
    height, width = cfg.context_mask_shape
    if cfg.discounted_mask:
        values = np.ones((height, width))
        for row in range(height):
            for col in range(width):
                values[row, col] = max(
                    gamma ** min(row, height - row),
                    gamma ** min(col, width - col))
        # Prepend the two singleton (batch, channel) axes -> (1, 1, H, W).
        values = values[np.newaxis, np.newaxis]
    else:
        values = np.ones((1, 1, height, width))
    mask = torch.tensor(values, dtype=torch.float32)
    if cfg.use_cuda:
        mask = mask.cuda()
    return mask
def does_file_exist(filepath: Path) -> bool:
    """
    Return True if *filepath* exists on disk, logging the outcome.

    :param filepath: path to check (Path or str accepted by os.path.exists)
    :return: True when the path exists, False otherwise

    Fixes the original log messages, which contained a stray ``\\.``
    (a literal backslash printed before the final period).
    """
    if os.path.exists(filepath):
        # Lazy %-formatting: the message is only built when INFO is enabled.
        LOG.info("Data path detected:\n%s.", filepath)
        return True
    LOG.info("Data path\n%s\nnot detected. Downloading now...", filepath)
    return False
def pca_results(scaled, pca):
    """
    Visualise the explained variance of a fitted PCA as a bar chart and
    return the per-dimension breakdown.

    :param scaled: pd.DataFrame the PCA was fitted on, already scaled
        (e.g. via sklearn.preprocessing.scale).
    :param pca: fitted sklearn.decomposition.PCA object.
    :return: DataFrame indexed by 'Dimension i' with an
        'Explained Variance' column followed by the feature weights.
    """
    n_components = len(pca.components_)
    dim_labels = ['Dimension {}'.format(i) for i in range(1, n_components + 1)]
    # Feature weights per principal component, rounded for readability.
    weights = pd.DataFrame(np.round(pca.components_, 4), columns=scaled.keys())
    weights.index = dim_labels
    # Explained variance ratio as a single-column frame, same index.
    ratios = pca.explained_variance_ratio_.reshape(n_components, 1)
    variance_ratios = pd.DataFrame(np.round(ratios, 4), columns=['Explained Variance'])
    variance_ratios.index = dim_labels
    # Bar chart of the feature weights per dimension.
    fig, ax = plt.subplots(figsize=(14, 8))
    weights.plot(ax=ax, kind='bar')
    ax.set_ylabel("Feature Weights")
    ax.set_xticklabels(dim_labels, rotation=0)
    # Annotate each dimension with its explained variance ratio.
    for i, ev in enumerate(pca.explained_variance_ratio_):
        ax.text(i - 0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f" % (ev))
    return pd.concat([variance_ratios, weights], axis=1)
def get_uid_from_user(user):
    """Return the integer UID for *user* (looked up via the passwd database),
    or None when no account with that name exists.
    """
    try:
        passwd_entry = pwd.getpwnam(str(user))
    except KeyError:
        # Unknown user name.
        return None
    return passwd_entry.pw_uid
def get_approval_distance(election_1: ApprovalElection, election_2: ApprovalElection,
                          distance_id: str = None) -> float or (float, list):
    """ Return: distance between approval elections, (if applicable) optimal matching """
    # distance_id encodes "main-inner" identifiers; extract both parts.
    inner_distance, main_distance = extract_distance_id(distance_id)
    # Distances that take no inner (per-pair) metric.
    metrics_without_params = {
        'flow': mad.compute_flow,
        'hamming': mad.compute_hamming,
    }
    # Distances parameterised by the inner metric parsed from distance_id.
    metrics_with_inner_distance = {
        'approvalwise': mad.compute_approvalwise,
        'coapproval_frequency': mad.compute_coapproval_frequency_vectors,
        'pairwise': mad.compute_pairwise,
        'voterlikeness': mad.compute_voterlikeness,
        'candidatelikeness': mad.compute_candidatelikeness,
    }
    if main_distance in metrics_without_params:
        return metrics_without_params.get(main_distance)(election_1, election_2)
    elif main_distance in metrics_with_inner_distance:
        return metrics_with_inner_distance.get(main_distance)(election_1, election_2,
                                                              inner_distance)
    # NOTE(review): an unrecognised main_distance falls through and
    # implicitly returns None — confirm callers handle that case.
def test_plot_part_of():
    """Plot both the standard 'is_a' field and the 'part_of' relationship."""
    fout_log = "plot_relationship_part_of.log"
    runner = _Run()
    # names = ["heartjogging"]
    with open(fout_log, 'w') as prt:
        # Plot every named GO-id group from the shared fixture mapping.
        for name in NAME2GOIDS:
            runner.plot_all(NAME2GOIDS[name], name, prt)
    print("  WROTE: {LOG}\n".format(LOG=fout_log))
def get_base_folder():
    """Return the directory containing this module (the ProfileQC base folder)."""
    module_path = Path(__file__)
    return module_path.parent
def u16le_list_to_byte_list(data):
    """! @brief Convert a list of 16-bit halfwords into a little-endian byte list (low byte first)."""
    return [
        byte
        for halfword in data
        for byte in (halfword & 0xff, (halfword >> 8) & 0xff)
    ]
def test_Add2():
    """ x+i+y, i=0..100
    """
    # Evaluate x + n + y for n = 100 down to 0 (101 evaluations),
    # matching the original while-countdown.
    for n in range(100, -1, -1):
        x + n + y
def test_list_non_positive_integer_min_length_1_nistxml_sv_iv_list_non_positive_integer_min_length_2_5(mode, save_output, output_format):
    """
    Type list/nonPositiveInteger is restricted by facet minLength with
    value 6.
    """
    # Round-trips the NIST schema/instance pair through the binding
    # machinery; mode/save_output/output_format are presumably pytest
    # fixtures from the suite's conftest — TODO confirm.
    assert_bindings(
        schema="nistData/list/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-list-nonPositiveInteger-minLength-2.xsd",
        instance="nistData/list/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-list-nonPositiveInteger-minLength-2-5.xml",
        class_name="NistschemaSvIvListNonPositiveIntegerMinLength2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def process_IBM_strings(string):
    """
    Normalise an IBM string: lowercase every whitespace-separated token
    and rejoin the tokens with single spaces (collapsing runs of
    whitespace).

    Unlike the original manual loop, this returns "" for empty or
    whitespace-only input instead of raising IndexError on parts[0].

    :param string: raw input string
    :return: normalised lowercase string
    """
    return " ".join(part.lower() for part in string.split())
def get_sample_size(number_of_clones, fold_difference, error_envelope_x_vals, error_envelope_y_vals, number_of_error_bars):
    """
    Find the number of cells per sample that produces the requested error
    bar for a parent population with *number_of_clones* clones.

    This is the inverse of the calculation performed by
    error_bar_on_fit_qD, solved numerically with bisect_eb over the
    error-envelope tables; the target error bar is
    Delta/number_of_clones = fold_difference - 1.0.
    """
    target_error_bar = fold_difference - 1.
    return bisect_eb(eb, number_of_clones, error_envelope_x_vals,
                     error_envelope_y_vals, number_of_error_bars, target_error_bar)
def get_r2(y, yhat):
    """Calculate the coefficient of determination (R^2) of *yhat* against *y*."""
    residual_ss = np.sum((y - yhat) ** 2)
    total_ss = np.sum((y - np.mean(y)) ** 2)
    return 1 - residual_ss / total_ss
def get_context() -> Dict[str, Any]:
    """
    Retrieve a copy of the current Server Context.

    Returns:
        - Dict[str, Any]: a copy of the current context, or an empty dict
          when no context has been set
    """
    ctx = _context.get()  # type: ignore
    if ctx is None:
        return {}
    assert isinstance(ctx, dict)
    # Copy so callers cannot mutate the stored context in place.
    return ctx.copy()
def kiinteisto_alueiksi(kiinteisto):
    """
    Estimate each postcode's share of a constituency from the property
    register (kiinteisto).

    An artificial property/constituency division is made so postcodes can
    be regionalised: the relative number of residential properties in a
    constituency is brute-force distributed over its postcodes.
    (Constituencies are generally smaller than postcode areas; the paid
    register would also include apartment counts for a finer split.)

    Returns:
        DataFrame with columns ['Alue', 'Postinumero', 'Osuus'] — the
        estimated share of each constituency per postcode.
    """
    import pandas as pd
    # Residential properties only (Käyttötarkoitus == 1), counted per
    # (municipality, constituency, postcode).
    residential = (kiinteisto[kiinteisto['Käyttötarkoitus'] == 1]
                   .reset_index()
                   .groupby(['Kuntanumero', 'Alue', 'Postinumero'], as_index=False)
                   .count())
    residential = residential[['Alue', 'Postinumero', 'index']]
    # Totals per (postcode, constituency) and per constituency.
    per_postcode = residential.reset_index().groupby(
        ['Postinumero', 'Alue'], as_index=False).sum()[['Alue', 'Postinumero', 'index']]
    per_area = residential.reset_index().groupby(
        ['Alue'], as_index=False).sum()[['Alue', 'index']]
    # Join on constituency and compute the brute-force share.
    shares = pd.merge(per_postcode, per_area, how='inner', on='Alue')
    shares['Osuus'] = shares['index_x'] / shares['index_y']
    return shares[['Alue', 'Postinumero', 'Osuus']]
def tokenizeBedStream(bedStream):
    """
    Iterate through a BED file stream, yielding each line as a list of
    whitespace-separated tokens.

    Blank and whitespace-only lines are skipped. The original
    ``line != ''`` guard never fired for file iteration (lines keep
    their trailing newline), so blank lines leaked through as empty
    token lists; checking the token list itself fixes that.
    """
    for line in bedStream:
        tokens = line.split()
        if tokens:
            yield tokens
def test_constructor_types():
    """Every converter registered on a freshly built TaxonomyConverter is a torch Module."""
    converter_map = TaxonomyConverter().convs
    for _name, converter in converter_map.items():
        assert isinstance(converter, torch.nn.Module)
def G1DListCrossoverSinglePoint(genome, **args):
    """
    Single Point crossover for G1DList genomes.

    Expects args["mom"], args["dad"] and args["count"] (1 or 2 offspring).

    .. warning:: You can't use this crossover method for lists with just one element.
    """
    mother = args["mom"]
    father = args["dad"]
    if len(mother) == 1:
        utils.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
    # Cut point in [1, len) so both sides of the split are non-empty.
    cut = prng.randint(1, len(mother))
    sister = None
    brother = None
    if args["count"] >= 1:
        sister = mother.clone()
        sister.resetStats()
        sister[cut:] = father[cut:]
    if args["count"] == 2:
        brother = father.clone()
        brother.resetStats()
        brother[cut:] = mother[cut:]
    return (sister, brother)
def optional_observation_map(env, inner_obs):
    """
    Apply env's `observation` transformation to inner_obs when the env
    provides one (i.e. when some wrapper is an ObservationWrapper);
    otherwise pass inner_obs through unchanged.
    """
    if not hasattr(env, 'observation'):
        return inner_obs
    return env.observation(inner_obs)
def RunMetadataLabels(run_metadata):
    """Return all timeline labels in *run_metadata*.

    Flattens run_metadata.step_stats.dev_stats -> node_stats into a single
    list of node_stats.timeline_label values, in traversal order.
    """
    # Nested comprehension replaces the manual double append loop.
    return [
        node_stats.timeline_label
        for dev_stats in run_metadata.step_stats.dev_stats
        for node_stats in dev_stats.node_stats
    ]
def run(DEBUG, HOSTNAME, PORT, SQL_ROUTE):
    """
    Start the application with the supplied configuration.

    This function actually runs the application using some
    configuration variables located at the config module.
    This function has to be run from the run.py file.

    :param DEBUG: Flask debug flag
    :param HOSTNAME: host/interface to bind to
    :param PORT: port to listen on
    :param SQL_ROUTE: path/connection string handed to DataHandler
    """
    # Publish the handler through the module-level global so request
    # handlers defined elsewhere in this module can reach it.
    global dh
    dh = DataHandler(SQL_ROUTE)
    setup_table(dh)
    # Blocking call: serves requests until the process is stopped.
    app.run(host=HOSTNAME, debug=DEBUG, port=PORT)
def ShouldPackageFile(filename, target):
    """Returns true if the file should be a part of the resulting archive."""
    # NOTE(review): `target` is unused here — kept for interface
    # compatibility with callers.
    # Platform-specific pattern of build intermediates to leave out.
    if chromium_utils.IsMac():
        excluded_pattern = r'^.+\.(a|dSYM)$'
    elif chromium_utils.IsLinux():
        excluded_pattern = r'^.+\.(o|a|d)$'
    else:
        raise NotImplementedError('%s is not supported.' % sys.platform)
    if re.match(excluded_pattern, filename):
        return False
    # Skip files that we don't care about. Mostly directories.
    return filename not in chromium_utils.FileExclusions()
def is_entity_extractor_present(interpreter: Interpreter) -> bool:
    """Checks whether the interpreter's pipeline contains an entity extractor.

    Uses truthiness rather than comparing against a list literal: the
    original ``extractors != []`` is True for ANY non-list value
    (including an empty set), so the check was wrong whenever
    get_entity_extractors returns a container other than a list.
    """
    return bool(get_entity_extractors(interpreter))
def get_workflow(name, namespace):
    """Get a single Argo Workflow custom object by name from *namespace*."""
    # Argo Workflow CRD coordinates.
    group = "argoproj.io"
    version = "v1alpha1"
    plural = "workflows"
    client = _get_k8s_custom_objects_client()
    return client.get_namespaced_custom_object(group, version, namespace,
                                               plural, name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.