content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def dmlab_level_label(level) -> str:
    """Return the human-readable label for a DMLab level name.

    Underscores become spaces and each word is title-cased.
    """
    words = level.split('_')
    return ' '.join(words).title()
|
332de2e4b0a834083ec6ed6b3921c43c99b0678a
| 21,280
|
def get_classifier_index(chain, i, point=None):
    """
    Walk ``i`` links along an ensemble chain and return the id of the
    classifier found there.

    :param chain: Ensemble chain
    :param i: index of the classifier in the chain
    :param point: optional chain key to start from (defaults to the
        chain's start element)
    :return: The classifier id in the index i, or None if the start
        cannot be resolved or the chain is shorter than i links
    """
    # Resolve the starting component: the chain's start, or `point` if given.
    c = chain.get(chain.get_start())
    if point is not None:
        c = chain.get(point)
    if c is not None:
        next = c.component_id
        i_ = 0
        # Hop forward one link per iteration: component -> trigger ->
        # first component of that trigger.
        while i_ != i:
            trigger = chain.get(next)
            if trigger is None:
                # chain ended before reaching index i
                return None
            c = chain.get(trigger.component_ids[0])
            if c is None:
                return None
            next = c.component_id
            i_ += 1
    else:
        # No starting component could be resolved.
        return None
    return c.id
|
9adc727436f1faf9111a9deed5f39b82d7a4656c
| 21,281
|
import os
def _process_exists(pid):
    """Check if a process with PID ``pid`` exists

    Uses ``os.kill(pid, 0)``, which performs only an existence/permission
    check without delivering a signal.

    Bug fix: the original caught all OSError as "not running", but
    ``os.kill`` raises PermissionError (EPERM) when the process EXISTS and
    is merely owned by another user — that case must report True.

    :param pid: PID to check
    :type pid: ``int``
    :returns: ``True`` if process exists, else ``False``
    :rtype: ``Boolean``
    """
    try:
        os.kill(pid, 0)  # signal 0: existence check only
    except ProcessLookupError:  # ESRCH: no such process
        return False
    except PermissionError:  # EPERM: process exists, owned by someone else
        return True
    except OSError:  # any other failure: treat as not running
        return False
    return True
|
c382daa8b47c8c05d23e5c5962ec796b77e14911
| 21,282
|
import time
def epochnow():
    """Returns now as UNIX timestamp (float seconds since the epoch).

    Invariant:
        epochnow() ~= datetime_to_timestamp(utcnow())
    """
    return time.time()
|
02e253c154971fd41cfb015ae249b14d13b3e580
| 21,283
|
from typing import Dict
from typing import List
from typing import Tuple
def get_raw_dependency_information_from_dep_file(dep_file: str) -> Dict[str, List[Tuple[str, str]]]:
    """return RAW dependency information contained in dep_file in the form of a dictionary.
    Format: {source_line: [(sink_line, var_name)]}
    :param dep_file: path to dependency file
    :return: RAW dictionary
    """
    raw_dependencies: Dict[str, List[Tuple[str, str]]] = dict()
    with open(dep_file) as f:
        for line in f.readlines():
            line = line.replace("\n", "")
            # format of dependency entries in _dep.txt-file:
            # sourceLine NOM RAW sinkLine|variable
            if " NOM " not in line:
                continue
            split_line = line.split(" NOM ")
            source_line = split_line[0]
            # split entries
            # Each entry begins with a dependency-type keyword (RAW/WAR/
            # WAW/INIT); subsequent words belong to the current entry
            # until the next keyword is seen.
            entries = []
            current_entry = ""
            for word in split_line[1].split(" "):
                word = word.replace(" ", "")
                if word == "RAW" or word == "WAR" or word == "WAW" or word == "INIT":
                    # a keyword starts a new entry; flush the previous one
                    if len(current_entry) > 0:
                        entries.append(current_entry)
                    current_entry = ""
                if len(current_entry) > 0:
                    current_entry += " " + word
                else:
                    current_entry += word
            # flush the trailing entry, if any
            if len(current_entry) > 0:
                entries.append(current_entry)
            if source_line not in raw_dependencies:
                raw_dependencies[source_line] = []
            for entry in entries:
                # filter for RAW dependencies
                split_entry = entry.split(" ")
                if split_entry[0] != "RAW":
                    continue
                # entry looks like "RAW sinkLine|variable"
                split_sink_line_var = split_entry[1].split("|")
                sink_line = split_sink_line_var[0]
                var_name = split_sink_line_var[1].replace(".addr", "")
                raw_dependencies[source_line].append((sink_line, var_name))
    return raw_dependencies
|
84a137b0620215f27fc04b92d23d1004e6241b7d
| 21,284
|
def flag(s):
    """Turn 'flag_name' into `--flag-name`."""
    return '--{}'.format(str(s).replace('_', '-'))
|
3f76ba5a765d9f050576e1535d0d72bbd260bc43
| 21,288
|
def pmi(financedamount, pmirate):
    """Return annual private mortgage insurance cost.
    :param financedamount: Amount of money borrowed.
    :type financedamount: double
    :param pmirate: Rate charged when loan-to-value > 80%.
    :type pmirate: double
    :return: double
    """
    annual_cost = financedamount * pmirate
    return annual_cost
|
5290fd7bd6e90d8d6b5447e4d367b6e91f94ced3
| 21,289
|
def backend(entry):
    """Custom spam checker backend for testing Zinnia.

    Always reports "not spam" (returns False), regardless of the entry.
    """
    return False
|
6c1505fec7d76043da53e33cff42b7c12ae2bed1
| 21,290
|
from OpenSSL.SSL import Error
import ssl
import sys
def is_ssl_error(error=None):
    """Checks if the given error (or the current one) is an SSL error.

    Fix: the original wrapped ``exc_types += (Error,)`` in an ImportError
    guard, but the ``from OpenSSL.SSL import Error`` was at module top
    level, so a missing pyOpenSSL crashed the whole module and the guard
    was dead code. Importing inside the guard makes the fallback real.

    :param error: exception instance to test; defaults to the exception
        currently being handled (``sys.exc_info()[1]``).
    :return: True if the error is an ssl/pyOpenSSL SSL error.
    """
    exc_types = (ssl.SSLError,)
    try:
        # Optional dependency: only extend the check when pyOpenSSL exists.
        from OpenSSL.SSL import Error as _OpenSSLError
        exc_types += (_OpenSSLError,)
    except ImportError:
        pass
    if error is None:
        error = sys.exc_info()[1]
    return isinstance(error, exc_types)
|
aa69b5df2b0fbe39ac779de96ae8070464835de5
| 21,291
|
def get_relevant_files(session_data: dict):
    """
    Generates the pipeline's "starting node".

    Parameters
    ----------
    session_data : dict
        A dictionary with the locations of all necessary session's data

    Returns
    -------
    str, str
        The "dwi" and "fmap" entries (None for any missing key)
    """
    dwi = session_data.get("dwi")
    fmap = session_data.get("fmap")
    return dwi, fmap
|
ceac08ed24fa081f63fb931cb97e8c433646114e
| 21,292
|
import hashlib
def md5hex(src: bytes) -> str:
    """Return the MD5 digest of ``src`` as a hex string.

    Args:
        src: source to hash; must be a binary byte array (bytes)

    Returns:
        32-character lowercase hexadecimal MD5 string
    """
    return hashlib.md5(src).hexdigest()
|
04bed72aa6e420b50f433757bd9d731f1fc220eb
| 21,293
|
def _last_index(x, default_dim):
    """Return the index of x's last dimension, or default_dim when the
    rank of x is unknown (get_shape().ndims is None)."""
    shape = x.get_shape()
    if shape.ndims is None:
        return default_dim
    return len(shape) - 1
|
e6fe7892329b623c59c7e1c41f51bf7d9b1e81b6
| 21,296
|
def qsize(queue):
    """Return the (approximate) queue size where available; -1 where not (OS X)."""
    try:
        size = queue.qsize()
    except NotImplementedError:
        # macOS multiprocessing queues don't implement qsize()
        return -1
    return size
|
155dc900a4d31be6b3e1cdad2f3b9caf39c096b4
| 21,297
|
def get_color(r, g, b):
    """Pack 8-bit red, green and blue components into one 0xRRGGBB integer."""
    red = (r & 0xFF) << 16
    green = (g & 0xFF) << 8
    blue = b & 0xFF
    return red | green | blue
|
e77322dc3ee7fc172bf9f543c7a26ae216906d83
| 21,298
|
import torch
def get_gradient(params):
    """
    Flatten and concatenate the gradients of a set of torch parameters.

    Parameters with no gradient contribute a zero vector of matching size.

    Args:
        params (list): parameters whose gradients are collected.

    Returns:
        1-D tensor of all gradients concatenated.
    """
    chunks = [
        p.new(p.numel()).zero_() if p.grad is None else p.grad.view(-1)
        for p in params
    ]
    return torch.cat(chunks, 0)
|
61e5ead705df5285db1b52fc965658bd38df7d1c
| 21,299
|
def layer_point_to_map(map_layer, point):
    """Convert a pair of coordinates from layer projection to map projection."""
    tile_w = map_layer.data.tilewidth
    tile_h = map_layer.data.tileheight
    x, y = point
    return [x / tile_w, y / tile_h]
|
178472c93947405f855d1860b84105bd0290edcd
| 21,301
|
def prodigal_gene_start(rec_description: str) -> int:
    """Get a gene start index from a Prodigal FASTA header.

    The header fields are '#'-delimited; the gene start is the second field.

    Examples
    --------
    >>> prodigal_gene_start("k141_2229_1 # 197 # 379 # 1 # ID=4_1;partial=00;start_type=ATG;rbs_motif=AGGAGG;rbs_spacer=5-10bp;gc_cont=0.437")
    197

    Parameters
    ----------
    rec_description : str
        SeqRecord description of Prodigal FASTA header

    Returns
    -------
    int
        Gene start index
    """
    fields = rec_description.split('#')
    return int(fields[1].strip())
|
d0aaa9d09d67dea75537f2f48c550a9df31bcf45
| 21,302
|
def dummy_file_to_dataset(file):
    """Find dataset name associated to file
    Can parse the file name, query a data base, whatever
    :param file: Path of the input file
    :type file: string
    :return: Dataset name associated to that file
    :rtype: string
    """
    # Placeholder implementation: always maps to the "dummy" dataset.
    return "dummy"
|
22e8a529002907eba67a3e470bbf3272518e508f
| 21,304
|
from typing import List
def split_segment(segment: str) -> List[str]:
    """Split a segment on whichever delimiter a payer used.

    Different payers use different characters ('*' or '|') to delineate
    elements; the delimiter producing more fields wins (pipe on a tie).
    """
    by_asterisk = segment.split('*')
    by_pipe = segment.split('|')
    return by_asterisk if len(by_asterisk) > len(by_pipe) else by_pipe
|
2225d8309e8e64f24afa5d8b4468d394c3e68c4a
| 21,305
|
def ini_conf_to_bool(value):
    """
    INI interpreters often hand back false-ish values as plain strings,
    so treat common textual spellings of "false" as boolean False.

    :param value: value of ini parameter
    :return: boolean value
    """
    false_spellings = ('False', 'false', '0', 'off', 'no')
    if value in false_spellings:
        return False
    return bool(value)
|
f9e5e14066bf4d2e17bbdb5cb97f3b2f1ba867c7
| 21,308
|
def strip_sensitive_data(data: dict):
    """
    Strip request data before it is sent to our graylog server.

    Works on a copy so the caller's request dict is not modified; returns
    the scrubbed dict rendered as a string.
    """
    censored = dict(data)  # don't overwrite request!
    for sensitive_key in ("password", "response", "user_responses"):
        if sensitive_key in censored:
            censored[sensitive_key] = "[CENSORED]"
    return str(censored)
|
6c52fe7d0e0f9294a7b922e2427530cccb12a748
| 21,309
|
def shift(aid, afold, bid, bfold):
    """Determine direction of change between two fold values.

    pos to neg - proximal
    neg to pos - distal
    fold change does not flip - whether both increased or decreased
    (returns None implicitly when bfold == 0, matching prior behavior)
    """
    if aid > bid:
        # normalize so the lower id comes first
        aid, afold, bid, bfold = bid, bfold, aid, afold
    assert afold != 0
    if afold < 0 and bfold > 0:
        return "distal"
    if afold < 0 and bfold < 0:
        return "decreased"
    if afold > 0 and bfold < 0:
        return "proximal"
    if afold > 0 and bfold > 0:
        return "increased"
|
bdbbff188b2ec7cbed55d563d1a61a99e458622b
| 21,310
|
def has_cloned_parent(c, p):
    """Return True if p has a cloned parent within the @rst tree."""
    root = c.rstCommands.root
    ancestor = p.parent()
    # Walk up the tree until we hit the @rst root (or run out of parents).
    while ancestor and ancestor != root:
        if ancestor.isCloned():
            return True
        ancestor.moveToParent()
    return False
|
1e0964520e188dad082b0f3e7c70f17bb945c063
| 21,312
|
def auto_num(num, model, **kwargs):
    """Return ``num`` if given; otherwise the next available number.

    The next number is max(existing num) + 1 for the filtered rows, or 1
    when no matching rows exist.
    """
    if num:
        return num
    query = model.query.filter_by(**kwargs)
    if not query.all():
        return 1
    newest = query.order_by(model.num.desc()).first()
    return newest.num + 1
|
ddddc30738ac68d8fd8369128c925e03cc78f2f9
| 21,313
|
import re
def get_depth_of_exec_function(backtrace):
    """Return the frame depth of the first executor (`Exec*`) function in a
    gdb backtrace, or None if no such frame is present.

    >>> get_depth_of_exec_function(["#1 0x00007f29e6eb7df5 in standard_ExecutorRun (queryDesc=0x562aad346d38,"])
    1
    >>> get_depth_of_exec_function(["#4 0x00007f29e6eb7df5 in ExecEvalNot (notclause=<optimized out>,"])
    4
    >>> get_depth_of_exec_function(["#21 standard_ExecutorRun (queryDesc=0x562aad0b46f8, direction=<optimized out>,"])
    21
    >>> bt = ["#0 palloc0 (size=size@entry=328)", \
              "#1 0x0000562aac6c9970 in InstrAlloc (n=n@entry=1, instrument_options=4)", \
              "#2 0x0000562aac6bdddb in ExecInitNode (node=node@entry=0x562aad49e818,"]
    >>> get_depth_of_exec_function(bt)
    2
    """
    exec_pattern = re.compile(r"#([0-9]+) .*Exec[a-zA-Z]+ \(")
    for frame in backtrace:
        match = exec_pattern.search(frame)
        if match:
            return int(match.group(1))
    return None
|
8e5af7d4cda5db53f3be87916a60d2f6f146ed6c
| 21,314
|
def get_parse_script_date( job ):
    """
    The last time the job log file was parsed for the start/stop dates.

    Returns 0 when the job has never been parsed.
    """
    return getattr(job, 'parse_script_date', 0)
|
c6174aebf3e468a62019d7598f74dacf65ad462e
| 21,316
|
def read_weights_file(weights_file):
    """
    Parse a tab-separated file mapping phylogenetic-tree leaf names
    (column one) to branch-length multipliers (column two).

    Blank lines are ignored; any other malformed line raises.

    :param weights_file: Path to a tab-separated text file described above.
    :return: dictionary with leaf names as keys and multipliers as values
    """
    weights = dict()
    with open(weights_file) as infile:
        for raw_line in infile:
            entry = raw_line.rstrip()
            fields = entry.split('\t')
            if len(fields) == 2:
                try:
                    weights[fields[0]] = float(fields[1])
                except ValueError:
                    raise ValueError('The second column in your weights file ({}) must be a number. Please fix the '
                                     'following line: {}'.format(weights_file, entry))
            elif entry != '':
                raise RuntimeError('One of the lines in your weights file ({}) is not formatted correctly. '
                                   'Correct format is leafname\tweight, tab-separated. '
                                   'Offending line was: {}'.format(weights_file, entry))
    return weights
|
68b1af3238f1f2b564c18139e55ee644f0bd4da0
| 21,317
|
def modelSpin(model, nodes):
    """
    Determines and reports spin state of nodes.

    Args:
        model: an instance of a Model object (currently unused).
        nodes: a dictionary of node objects.

    Returns:
        state: a list of spin signs, '+' for positive spin, '-' otherwise.
    """
    return ['+' if nodes[name].getSpin() > 0 else '-' for name in nodes]
|
bf64721f47ab061cb0dc0c4a5f606a1987debc3e
| 21,318
|
import argparse
def create_arg_parser():
    """Build the ArgumentParser used to prepare SCHISM input files."""
    arg_parser = argparse.ArgumentParser(description='Prepare SCHISM input files.')
    arg_parser.add_argument(dest='main_inputfile', default=None,
                            help='main input file name')
    return arg_parser
|
24531b243936bc0d019ce11c23fd02d5bd878f36
| 21,319
|
def engineer_data(data):
    """
    Add aggregate 'left'/'right' features (weight * distance) and drop the
    original weight and distance columns.

    :param data: dataframe to work with (gains the new columns in place)
    :return: modified dataframe
    """
    for side in ('left', 'right'):
        data[side] = data[f'{side}_weight'] * data[f'{side}_distance']
    dropped = ['left_weight', 'left_distance', 'right_weight', 'right_distance']
    return data.drop(dropped, axis=1)
|
38774fe9213c95eb743679b5162daadeefe8f2ac
| 21,320
|
def feast(beast: str, dish: str) -> bool:
    """
    Decide whether a beast may bring a dish to the feast: allowed exactly
    when the dish starts and ends with the same letters as the beast's name.

    Both arguments are lowercase strings of at least two characters; they
    may contain internal hyphens/spaces but no digits.

    :param beast: the animal's name
    :param dish: the dish's name
    :return: True if the dish is allowed
    """
    same_first = beast[0] == dish[0]
    same_last = beast[-1] == dish[-1]
    return same_first and same_last
|
490bbd83b99855578384725aab36eb38c001cd09
| 21,322
|
def filter_items(value, startswith=None, strip_prefix=False):
    """Jinja2 filter used to filter a dictionary's keys by specifying a
    required prefix.

    Returns key/value tuples (the raw items view when no prefix is given).

    .. code-block:: jinja
        {{ my_dict|filter_items }}
        {{ my_dict|filter_items("MY_PREFIX_") }}
        {{ my_dict|filter_items("MY_PREFIX_", True) }}

    This is most useful in combination with the special
    :ref:`_all_env <all_env>` variable that shpkpr injects into every template.
    For example, to iterate over only the template variables that start with
    ``LABEL_`` you could do:

    .. code-block:: jinja
        {% for k, v in _all_env|filter_items("LABEL_", strip_prefix=True) %}
        "{{k}}": "{{v}}",
        {% endfor %}
    """
    if startswith is None:
        return value.items()
    filtered = [(k, v) for k, v in value.items() if k.startswith(startswith)]
    if strip_prefix:
        filtered = [(k.replace(startswith, "", 1), v) for k, v in filtered]
    return filtered
|
456bb82c4c76de40795a64930d2c00163bd79055
| 21,323
|
import os
def check_user_proxy():
    """
    Check whether there is a user proxy.

    Uses $X509_USER_PROXY when set, otherwise the conventional
    /tmp/x509up_u<euid> location; returns True iff the file exists.
    """
    proxy_path = os.environ.get('X509_USER_PROXY')
    if proxy_path is None:
        proxy_path = '/tmp/x509up_u%d' % os.geteuid()
    return os.path.exists(proxy_path)
|
da412d98299b8bee27a80c07f1b201aeebe0f7eb
| 21,324
|
def convert_8_int_to_tuple(int_date):
    """ Converts an 8-digit integer date (e.g. 20161231) to a date tuple (Y,M,D).
    """
    digits = str(int_date)
    return (int(digits[:4]), int(digits[4:6]), int(digits[6:8]))
|
733982c2de2c74c5116c15a1a91adf03c3bd6871
| 21,325
|
import pytz
from datetime import datetime
def epoch_seconds_to_datetime(secs, tz=pytz.utc):
    """Convert epoch seconds to a timezone-aware datetime in ``tz``
    (UTC by default)."""
    aware_utc = datetime.utcfromtimestamp(secs).replace(tzinfo=pytz.utc)
    # convert it to tz
    return tz.normalize(aware_utc.astimezone(tz))
|
5b105e2fb3b027c7add9d8e1c3c5ebc5d3c0bbc3
| 21,326
|
def _ShouldGenerateVM(options):
    """Returns true if we will need a VM version of our images."""
    # VM images are needed only when VM mode is on AND some payload suite
    # (basic or full) is being generated for vm testing.
    generating_payloads = options.basic_suite or options.full_suite
    return options.vm and generating_payloads
|
2932e7c8621a689cf627e54e29c446d67158e999
| 21,327
|
def clean(iterator) -> list:
    """
    Strip each string in ``iterator`` and drop those that are empty
    after stripping (i.e. consisted entirely of whitespace).
    """
    return [item for item in (s.strip() for s in iterator) if item]
|
9c24da4a8cdfe59bf92fee3cea4b98b4b479147d
| 21,328
|
import os
def check_if_folder_exists(ctx, param, value):
    """Ensure the folder at ``value`` exists (creating it if needed) and
    return its absolute path with a trailing slash.

    ``ctx`` and ``param`` are unused (click-callback signature).
    """
    folder = os.path.abspath(value)  # making path absolute
    if not os.path.exists(folder):
        os.makedirs(folder)
    return folder + '/'
|
3ec5bd39908a499e2eaf7d4dbc8e46f6e6a6c0b8
| 21,329
|
import json
def load_js(fname):
    """
    Load and parse a JSON file.

    Parameters
    ----------
    fname: str
        path to the json file

    Returns
    -------
    obj
        content of the json file, generally dict
    """
    with open(fname, "r") as infile:
        return json.load(infile)
|
6742fd1744eb30d51e937c7aa4069c161bb459c5
| 21,331
|
from typing import Union
import pathlib
from typing import Optional
import os
def delete_file(filepath: Union[pathlib.Path, str]) -> Optional[str]:
    """Deletes the file given by `filepath`.

    Args:
        filepath (Union[pathlib.Path, str]): Path to the file to be deleted.

    Returns:
        str: The error message if the file could not be deleted, else None.
    """
    try:
        os.remove(filepath)
    except OSError as exc:
        return f"Error: {exc.filename} - {exc.strerror}."
    return None
|
2fcabb180a95e118f92498cacf27ddc4dbb1ba69
| 21,332
|
def clean_submission(submission):
    """
    Take a Reddit submission object and turn it into a plain dictionary.

    Empty-string values are normalized to the string "None", and a missing
    author (deleted account, ``submission.author is None``) is recorded as
    "None".

    Fix: the original used a bare ``except``, which also swallows
    KeyboardInterrupt/SystemExit; only AttributeError is expected here.
    """
    try:
        submission_author = submission.author.name
    except AttributeError:
        # author is None for deleted accounts
        submission_author = "None"
    data = {
        "id": submission.id,
        "title": submission.title,
        "score": submission.score,
        "url": submission.url,
        "name": submission.name,
        "author": submission_author,
        "is_video": submission.is_video,
        "selftext": submission.selftext,
        "shortlink": submission.shortlink,
        "subreddit_subscribers": submission.subreddit_subscribers,
        "thumbnail": submission.thumbnail,
        "ups": submission.ups,
        "downs": submission.downs,
        "created": submission.created
    }
    # Normalize empty strings so downstream consumers always see "None".
    for k, v in data.items():
        if v == "":
            data[k] = "None"
    return data
|
725426160b275f9c132e2119eac792140ab72675
| 21,334
|
from pathlib import Path
def default_path_factory(refname: str, ispkg: bool) -> Path:
    """Default path factory for markdown.

    Dotted ``refname`` becomes a directory path; packages map to an
    ``index.md`` inside their directory, modules to ``<name>.md``.
    """
    base = Path(*refname.split("."))
    return base / "index.md" if ispkg else base.with_suffix(".md")
|
96fb86391567269695b3638fec80d7f32f88407a
| 21,337
|
def loadtxt(filename):
    """Read a file and return its lines as a list of stripped strings."""
    with open(filename, 'r') as fh:
        return [line.strip() for line in fh]
|
ce4d3c411d571a4fc629d1664ee06d15a11b8614
| 21,338
|
import os
import logging
def check_project_src_path(project_src_path):
    """Returns True if |project_src_path| exists (logging an error if not)."""
    if os.path.exists(project_src_path):
        return True
    logging.error(
        'PROJECT_SRC_PATH: %s does not exist. '
        'Are you mounting it correctly?', project_src_path)
    return False
|
0aa54ee3d4bd8c9deb446cf4bb9a8517c8785c73
| 21,339
|
import torch
def deltaE(lab1, lab2):
    """Delta E (CIE 1976): Euclidean distance over the channel dimension.

    lab1: Bx3xHxW
    lab2: Bx3xHxW
    return: Bx1xHxW

    >>> lab1 = torch.tensor([100., 75., 50.]).view(1, 3, 1, 1)
    >>> lab2 = torch.tensor([50., 50., 100.]).view(1, 3, 1, 1)
    >>> deltaE(lab1, lab2).item()
    75.0
    """
    diff = lab1 - lab2
    return torch.norm(diff, p=2, dim=1, keepdim=True)
|
cbf123e42c74c15e4a4851e17ab5f475280387b1
| 21,340
|
def _execute_with_retries(request):
    """Executes a request (single attempt).

    NOTE(review): the original docstring claimed "retrying with exponential
    backup", but no retry logic was ever implemented — the request runs
    exactly once. Documented honestly here rather than inventing retry
    semantics, since the retriable exception types are unknown at this layer.

    Args:
        request: Request to be executed; must expose ``execute()``.

    Returns:
        Response from server.
    """
    return request.execute()
|
41bff6ea5ead548174b7a9be6d7dcab8a8d13040
| 21,341
|
def derivative_relu(relu_output):
    """ Compute derivative of ReLu function, in place.

    Elements > 0 become 1 and the rest become 0; the input array is
    modified and also returned.
    """
    positive = relu_output > 0
    relu_output[~positive] = 0
    relu_output[positive] = 1
    return relu_output
|
eb5d3d2f3fe912c4426cbd60a33bc96b81bfe5a0
| 21,344
|
import torch
def pdist2(x, y):
    """
    Compute the distance between each pair of row vectors in x and y.

    Args:
        x: tensor of shape n*p
        y: tensor of shape m*p

    Returns:
        dist: tensor of shape n*m
    """
    n, p = x.shape
    m = y.shape[0]
    # Tile x column-wise then reshape so each row of x is repeated m times;
    # tile y row-wise n times, giving all n*m row pairs in order.
    x_rep = torch.cat([x] * m, dim=1).view(-1, p)
    y_rep = torch.cat([y] * n, dim=0)
    return torch.pairwise_distance(x_rep, y_rep).view(n, m)
|
2e3694f58c3b7b7b743c57c64a0aeacbb78288b6
| 21,345
|
import json
def handle_success(resp):
    """
    Extract the "response" payload from a response object.

    Some responses have an empty/non-JSON body; in that case the raw text
    is returned instead.
    """
    try:
        return resp.json().get("response", {})
    except (json.JSONDecodeError, json.decoder.JSONDecodeError):
        return resp.text
|
56de367045a1f428c625a6c8654bf2812d8291e2
| 21,349
|
def run(a, b):
    """Return the sum of a and b.

    >>> run(3, 30)
    33
    """
    total = a + b
    return total
|
3d96bfa0c2230ef155ba27f780d1a239263d4303
| 21,350
|
import os
def read_datapoint(path):
    """
    Parse an image's metadata from its file name.

    The stem must look like ``<id>_<timestamp>_<steering>``; raises when
    it does not split into exactly three parts.
    """
    stem = os.path.splitext(os.path.basename(path))[0]
    parts = stem.split("_")
    if len(parts) != 3:
        raise Exception('Invalid data set: ' + path)
    # return id, timestamp, steering value
    return int(parts[0]), int(parts[1]), float(parts[2])
|
28e396d38d2c4191eeed80daabe35d3cff0e374a
| 21,351
|
import os
import re
def _check_new_pkg(module, package, repository_path):
    """
    Check if the package of fileset is correct name and repository path.

    Lists the repository with ``installp -l -MR -d`` and matches
    ``package`` against the listing.

    :param module: Ansible module arguments spec.
    :param package: Package/fileset name ('all' matches everything).
    :param repository_path: Repository package path.
    :return: Bool, package information. On any failure this calls
        module.fail_json (which normally does not return).
    """
    if os.path.isdir(repository_path):
        installp_cmd = module.get_bin_path('installp', True)
        # -l: list, -MR: list installable options, -d: device/directory
        rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
        if rc != 0:
            module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
        if package == 'all':
            pkg_info = "All packages on dir"
            return True, pkg_info
        else:
            pkg_info = {}
            # Collect name -> version for every listing line matching the
            # package name (treated as a regex by re.findall).
            for line in package_result.splitlines():
                if re.findall(package, line):
                    pkg_name = line.split()[0].strip()
                    pkg_version = line.split()[1].strip()
                    pkg_info[pkg_name] = pkg_version
            # NOTE(review): returns True even when no line matched
            # (pkg_info stays empty) — confirm this is intended.
            return True, pkg_info
        return False, None
    else:
        module.fail_json(msg="Repository path %s is not valid." % repository_path)
|
4825ac69953c57cd1a776aff8fc212d151965773
| 21,353
|
import json
def format_json(item, **kwargs):
    """ formats a datatype object to a json value

    Values that are natively JSON-serializable are returned as-is;
    time/date typed values fall back to their isoformat string. Any other
    non-serializable value re-raises the TypeError.
    """
    try:
        json.dumps(item.value)
    except TypeError:
        type_name = item.class_type.lower()
        if 'time' in type_name or 'date' in type_name:
            return item.value.isoformat()
        raise
    return item.value
|
8fd6a6732b0de8964a84c2980895791b47662ff3
| 21,354
|
def s2ca(s):
    """Split a string of concatenated 314-character cipher texts into a
    list of individual cipher texts (only complete chunks are returned).

    Bug fix: the original sliced ``s[i:i+314]`` while ``i`` advanced by 1
    per iteration, so every chunk after the first overlapped the previous
    one instead of starting at the next 314-character boundary.
    """
    chunk_size = 314
    usable = (len(s) // chunk_size) * chunk_size  # drop any partial tail
    return [s[start:start + chunk_size]
            for start in range(0, usable, chunk_size)]
|
c92bb8d2ec4bf48b2ca1c1b2b8143450ae6a3ac3
| 21,355
|
def is_dict(value):
    """Return True if ``value`` is a dict (or dict subclass)."""
    return isinstance(value, dict)
|
73244012a40dd73cc8bdfea34a9453b03af5941f
| 21,356
|
def rec_binary_search(array, l, u, num):
    """Recursively binary-search a sorted ``array`` for ``num``.

    l = lower index (start with 0)
    u = upper index, inclusive (start with len(array) - 1)
    num = target value

    Returns True/False, or None (after printing a message) when the array
    contains non-numeric values.

    Bug fix: the recursion guard was ``l < u``, which skipped the last
    remaining candidate — e.g. it failed to find the only element of a
    one-element array, or the final element of any array. With the
    inclusive ``mid - 1`` / ``mid + 1`` convention the guard must be
    ``l <= u``.
    """
    if all(isinstance(x, (int, float)) for x in array):
        if l <= u:
            mid = (l+u)//2  # find midway point
            if array[mid] == num:
                return True
            elif num < array[mid]:
                return rec_binary_search(array, l, mid-1, num)
            else:
                return rec_binary_search(array, mid+1, u, num)
        else:
            return False
    else:
        print('all values in array must be numbers')
|
3e88d892b95d1304221aa583ff8885d0eeb6d5ca
| 21,359
|
import collections
def dict_constructor(loader, node):
    """YAML mapping constructor that preserves key order via OrderedDict."""
    pairs = loader.construct_pairs(node)
    return collections.OrderedDict(pairs)
|
8f8e4220566ded1081f04500d19d90718e90ab9a
| 21,360
|
def _kernel_seq(inputs, estimator):
    """
    General-purpose kernel: run ``estimator`` on a pair of sequences.

    Used here for causal discovery/estimation with CCM-based methods, but
    works for any function of two sequences that returns a dict.

    Parameters
    ----------
    inputs : tuple
        ``(pair_index, (index_x, index_y, sequence_x, sequence_y))``.
        ``pair_index`` is usually produced by enumerate and is carried
        through so order can be reconstructed after asynchronous execution.
    estimator : function
        Function computing something on two arrays, returning a dict.

    Returns
    -------
    out : dict
        The estimator's results, plus "index_pair"/"index_x"/"index_y".
    """
    pair_index, packed = inputs
    idx_x, idx_y, seq_x, seq_y = packed
    # Seed the output with the bookkeeping indices, then merge in the
    # estimator's results.
    estimates = {"index_pair": pair_index, "index_x": idx_x, "index_y": idx_y}
    estimates.update(estimator(seq_x, seq_y))
    return estimates
|
bd1b02ae84e959f9dabf712d7193408f5ddc6ff0
| 21,362
|
def _lookup_response_str(status_code):
    """
    Map a Ping StatusCode to a human-readable response string.
    :param status_code: int:
    :return: str: Response string ('Unknown StatusCode' for unmapped codes)
    """
    responses = {0: 'Success',
                 11001: 'Buffer Too Small',
                 11002: 'Dest Net Unreachable',
                 11003: 'Dest Host Unreachable',
                 11004: 'Dest Protocol Unreachable',
                 11005: 'Dest Port Unreachable',
                 11006: 'No Resources',
                 11007: 'Bad Option',
                 11008: 'Hardware Error',
                 11009: 'Packet Too Big',
                 11010: 'Timed Out',
                 11011: 'Bad Request',
                 11012: 'Bad Route',
                 11013: 'TTL Expired Transit',
                 11014: 'TTL Expired Reassembly',
                 11015: 'Parameter Problem',
                 11016: 'Source Quench',
                 11017: 'Option Too Big',
                 11018: 'Bad Destination',
                 11032: 'Negotiating IPSEC',
                 11050: 'General Failure'}
    try:
        return responses[status_code]
    except KeyError:
        return 'Unknown StatusCode'
|
984b857b4fbc0bd407d5da592ead5e19160cadcd
| 21,363
|
def __create_python_code_block(message):
    """Wrap ``message`` in a Markdown python code fence."""
    return "```python\n{}```".format(message)
|
8397187487af0780542e8a227118994e1fc8ced8
| 21,364
|
def label_map(value):
    """ Map a glucose level to a diagnosis string.

    :param value: Glucose level
    :return: one of 'Hypoglycemia' (< 70), 'In_Range' (70-180),
             'Hyperglycemia' (> 180), 'Severe_Hyperglycemia' (> 240)
    """
    hypoglycemia_threshold = 70
    hyperglycemia_threshold = 180
    severe_hyperglycemia_threshold = 240
    if value < hypoglycemia_threshold:
        return 'Hypoglycemia'
    if value > severe_hyperglycemia_threshold:
        return 'Severe_Hyperglycemia'
    if value > hyperglycemia_threshold:
        return 'Hyperglycemia'
    return 'In_Range'
|
7c9798dbce01c3de3ec4a09b20523ef2b3ba5888
| 21,365
|
def unshift(arr: list, *args):
    """
    Prepend one or more elements to the list (modifying it in place) and
    return the list's new length.
    """
    arr[:0] = args
    return len(arr)
|
5e4dfc5f7f84eca7c910ad406988423943ebb0bd
| 21,366
|
def getSizeOfVST(vst):
    """
    Return the size of the vector space of a vst variable: the length of
    the first non-None vector found, or 0 when every entry is None.

    NB: exists only so callers don't have to pass the size separately.
    """
    for vector in vst.values():
        if vector is not None:
            return len(vector)
    return 0
|
c8704d51fac22ad3ada5cf14bf2df268f7f0558e
| 21,367
|
def within_bounds(
    x: float, y: float, min_x: float, min_y: float, max_x: float, max_y: float
):
    """
    Are x and y within the (inclusive) bounds?

    >>> within_bounds(1, 1, 0, 0, 2, 2)
    True
    """
    in_x = min_x <= x <= max_x
    in_y = min_y <= y <= max_y
    return in_x and in_y
|
d57c8f63b8548dd62efd126ccb68a92c3d4ca5af
| 21,372
|
import socket
def is_ipv6(host):
    """
    Return True if ``host`` parses as an IPv6 address.
    """
    try:
        socket.inet_pton(socket.AF_INET6, host)
    except socket.error:
        return False
    return True
|
1c7be22644cbed238019a05609e1601de7a0f455
| 21,373
|
def validate_sp(sp):
    """Validate seasonal periodicity.

    Parameters
    ----------
    sp : int or None
        Seasonal periodicity

    Returns
    -------
    sp : int or None
        Validated seasonal periodicity

    Raises
    ------
    ValueError
        If sp is not a non-negative integer.

    Bug fix: the original condition ``not isinstance(sp, int) and (sp >= 0)``
    never rejected negative integers (first operand False short-circuits)
    and raised TypeError, not ValueError, for non-comparable inputs.
    """
    if sp is None:
        return sp
    if not isinstance(sp, int) or sp < 0:
        raise ValueError(f"Seasonal periodicity (sp) has to be a positive integer, but found: "
                         f"{sp} of type: {type(sp)}")
    return sp
|
265bed38cad2f6eae96c3611a90e8e4b3ef4d620
| 21,375
|
from typing import List
def convertFrom(data) -> List[dict]:
    """
    Converts scores data from old to new format.

    :param data: Old format data (list of {'Team': {...}, 'Scores': ...})
    :return: New format data (list of {'teamno', 'teamname', 'scores'})
    """
    converted = []
    for team in data:
        converted.append({
            'teamno': team['Team']['Number'],
            'teamname': team['Team']['DisplayName'],
            'scores': team['Scores'],
        })
    return converted
|
e48e7619a1b031a2900547b9c5052a3d6e79875a
| 21,376
|
def integrate(func, interval=None, rects=100000):
    """
    Approximate the definite integral of ``func`` over the inclusive
    interval (a, b) with a left Riemann sum of ``rects`` rectangles.

    If no (or a non-tuple) interval is given, the user is prompted
    interactively. Evaluation errors at individual sample points are
    printed and skipped.
    """
    if interval is None or not isinstance(interval, tuple):
        # SECURITY: eval on raw user input is dangerous — acceptable only
        # for interactive/trusted use; never feed this untrusted input.
        interval = eval(input('Interval (a, b): '))
    a, b = interval
    if a > b:
        print('note: the calculated area will be negative')
    if b - a > rects:
        rects = b - a
    dx = (b - a) / rects
    total = 0
    x = a
    for _ in range(rects):
        try:
            total += func(x) * dx
        except Exception as err:
            print('Error:', err)
        x += dx
    return total
|
1068e78718c411c151952de27bca5c9b6bb3dcf5
| 21,377
|
import os
import subprocess
def clean_directory(dir):
    """ Recursively remove all directories and files including the hidden ones in the given directory

    Returns a status dict: {"statusFlag": True} on success, or a dict with
    statusFlag False plus stdout/stderr on shell failure.
    NOTE(review): if ``dir`` does not exist, no branch runs and the
    function implicitly returns None — confirm callers handle that.
    """
    if os.path.exists(dir):
        if os.path.isdir(dir):
            # The three globs together match: hidden entries of 3+ chars
            # (./..?*), hidden entries not named '..' (./.[!.]*), and all
            # regular entries (./*) — i.e. everything inside `dir`.
            myCmd = "rm -rf ./..?* ./.[!.]* ./*"
            myProcess = subprocess.Popen(
                myCmd,
                shell=True,
                cwd=dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            myStdout, myStderr = myProcess.communicate()
            if myProcess.returncode != 0:
                return {
                    "statusFlag": False,
                    "statusDescr": "'%s' in %s finished with return code %d." %
                    (myCmd,
                     dir,
                     myProcess.returncode),
                    "stdout": myStdout.rstrip(),
                    "stderr": myStderr.rstrip()}
        else:
            # Path exists but is a regular file: just delete it.
            os.remove(dir)
        return {"statusFlag": True}
|
b86e8fe6ee83f2241313dc541afadc856af1f195
| 21,378
|
def log_hide(message):
    """Hide security sensitive information from log messages.

    Non-dict messages are returned unchanged. For dicts, a scrubbed COPY
    is returned with "token"/"AccessToken" values masked.

    Fixes: the original mutated the caller's dict in place (clobbering the
    real token for the rest of the program), and its ``type(...) != dict``
    check skipped dict subclasses such as OrderedDict.
    """
    if not isinstance(message, dict):
        return message
    scrubbed = dict(message)
    for key in ("token", "AccessToken"):
        if key in scrubbed:
            scrubbed[key] = "xxxxxxxx-xxxx-xx"
    return scrubbed
|
cb19624b7e6153c4eba70024c5b584edf42e5b9a
| 21,379
|
def extractLabels(data, label_column):
    """
    Split the label column out of the data.

    Arguments:
        data {Dict/List of time series} -- Time series (dict of frames or
            a single frame)
        label_column {str} -- Name of the column to extract

    Returns:
        (data_without_labels, labels)
    """
    if isinstance(data, dict):
        labels = {key: frame[label_column] for key, frame in data.items()}
        data = {
            key: frame[[col for col in frame.columns if col != label_column]]
            for key, frame in data.items()
        }
    else:
        labels = data[label_column]
        data = data[[col for col in data.columns if col != label_column]]
    return data, labels
|
19990c64bfa2055b739928797f875656724995fa
| 21,380
|
from functools import reduce
def product(nums):
    """
    Like sum, but for product. Raises TypeError on an empty iterable
    (reduce with no initializer), matching prior behavior.
    """
    return reduce(lambda acc, val: acc * val, nums)
|
fdc492d6aa94ccbeb83de21d4c5164c62f1b2ca8
| 21,381
|
def get_published_templateid(context):
    """
    Return the template id, as of the PUBLISHED variable, or None
    """
    # NOTE(review): ``has_key`` implies a Zope REQUEST mapping (or a
    # Python 2 dict) — plain Python 3 dicts have no has_key; left as-is.
    request = context.REQUEST
    if request.has_key('PUBLISHED'):
        # The __name__ of the published object is its template id.
        return request['PUBLISHED'].__name__
    return None
|
5e036a957e2085690437a8e6d927a5bafc75d53f
| 21,382
|
def get_result():
    """Demo function for test_offloading: always returns the constant 'abc'."""
    return 'abc'
|
6569a387828ade976792e1f7f2ce37a818515bc5
| 21,384
|
def pyBoolToSQL(inBool: bool):
    """ Simple function for converting a python bool to a database insertable integer(bit).
    :param inBool: The bool to insert to the database.
    :return: 1 for a truthy value, 0 otherwise.
    note:: Author(s): Mitch """
    return 1 if inBool else 0
|
b5fdb11f7e736a4336bb5867c5e8a6eb82405935
| 21,385
|
import subprocess
def cpucorethreads():
    """Return the number of hardware threads per CPU core.

    Used to specify CPU affinity dedicating the maximal amount of CPU
    cache L1/2. Linux-only: shells out to ``lscpu`` and extracts the
    "Thread(s) per core" value with sed.
    """
    # -r or -E - extended regex syntax, -n - quiet output, /p - print the match
    return int(subprocess.check_output(
        [r"lscpu | sed -rn 's/^Thread\(s\).*(\w+)$/\1/p'"], shell=True))
|
702e8855a6689bbf711c13996020f6a533297b17
| 21,386
|
import os
def get_configfile_path(args):
    """Return the path to the test suite config file.

    :param args: mapping that may contain a 'config_file' entry
    :raises FileNotFoundError: if the entry is missing/empty or the
        resulting path does not exist
    """
    config_file = args.get('config_file')
    if not config_file:
        raise FileNotFoundError('Config file is not provided')
    # os.path.join returns `config_file` unchanged when it is absolute.
    path = os.path.join(os.getcwd(), config_file)
    if os.path.exists(path):
        return path
    raise FileNotFoundError(
        'Config file path does not exist {PATH}'.format(PATH=path))
|
910d0dc9e3bd05acba7d513c3c3047d849ff40af
| 21,387
|
def CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model):
    """Return the time step size in the numerical approximation.

    Call Signature:
        CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model)

    Parameters
    ----------
    CFL: float
        Treated as the diffusion number for diffusion equations and as the
        Courant number for convection equations (not the strict CFL
        definition).
    diff: float
        Physics-specific diffusion coefficient (e.g. kinematic viscosity,
        thermal diffusivity).
    conv: float
        Physics-specific convection coefficient (e.g. speed of sound in the
        first-order linear wave equation).
    dX: float
        Grid step size along X-axis.
    dY: float
        Grid step size along Y-axis (required for 2D cases only).
    Dimension: str
        "1D" or "2D".
    Model: str
        Governing-equation model name ("DIFFUSION", "FO_WAVE",
        "INV_BURGERS", "VISC_BURGERS"; case-insensitive).

    Returns
    -------
    TimeStep: float
        Time step for the model equation. Unrecognized Model/Dimension
        combinations fall through and return None (original behavior kept).
    """
    print("Calculating time step size for the simulation: Completed.")
    model = Model.upper()
    dim = Dimension.upper()
    # Arithmetic expression shapes are kept identical to the original so the
    # floating-point results match bit-for-bit.
    if model == "DIFFUSION":
        dX2 = dX*dX
        if dim == "1D":
            return CFL*dX2/diff
        if dim == "2D":
            dY2 = dY*dY
            return CFL*(1.0/((1/dX2) + (1/dY2)))/diff
    elif model == "FO_WAVE":
        if dim == "1D":
            return CFL*dX/conv
    elif model == "INV_BURGERS":
        if dim == "1D":
            return CFL*dX
    elif model == "VISC_BURGERS":
        if dim == "1D":
            dX2 = dX*dX
            return CFL*dX2
|
0ecbfb3c9179140b920947240e25f6e91296e387
| 21,390
|
import math
def smooth(x):
    """Smooth *x* with a half cosine wave (expects 0.0 <= x <= 1.0).

    Maps 0 -> 0 and 1 -> 1 along an S-shaped curve.
    """
    return (1 - math.cos(math.pi * x)) / 2
|
5b8c041835e49cd858f439a4a902935bb78b87a7
| 21,391
|
from pathlib import Path
def create_temp_files(tmpdir):
    """Return a factory that creates empty temp files under *tmpdir*."""
    base = Path(tmpdir)
    def _create_temp_files(number):
        """Create *number* temp files, each in its own separate directory, yielding paths."""
        for idx in range(number):
            temp_file = base / str(idx) / f'file{idx}.temp'
            temp_file.parent.mkdir(parents=True)
            temp_file.touch()
            yield temp_file
    return _create_temp_files
|
9f4ccb032f05ebe9033df4ae41d1559e934d7ed0
| 21,392
|
import ast
def matches(value, pattern):
    """Check whether `value` matches `pattern`.

    Parameters
    ----------
    value : ast.AST
    pattern : ast.AST

    Returns
    -------
    matched : bool
    """
    # Types must match exactly (not merely isinstance-compatible).
    if type(value) != type(pattern):
        return False
    # Primitive leaf (None, True, a string, a number, ...): compare directly.
    # Since the types are equal, checking only `pattern` is sufficient.
    if not isinstance(pattern, ast.AST):
        return value == pattern
    # Recurse over every field the pattern actually carries.
    return all(
        matches(getattr(value, name), getattr(pattern, name))
        for name in pattern._fields
        if hasattr(pattern, name)
    )
|
a7e05fc31e6387794f28d04e5aedfdbe7eb5f9bc
| 21,393
|
def int_to_base36(integer: int) -> str:
    """Convert a non-negative integer to its base36 string representation.

    Raises ValueError for negative input.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if integer < 0:
        raise ValueError("Negative base36 conversion input.")
    if integer < 36:
        return digits[integer]
    # Collect least-significant digit first, then reverse once at the end
    # instead of prepending to a string on every iteration.
    chunks = []
    while integer:
        integer, rem = divmod(integer, 36)
        chunks.append(digits[rem])
    return ''.join(reversed(chunks))
|
115091a7b8766fe4488127d9b5fcfc904f67bae0
| 21,394
|
def _fit_estimator(clf, X, y):
"""Helper to fit estimator"""
return clf.fit(X, y)
|
1f99369f29260336f5a5f2a6e10e9c5721421831
| 21,395
|
import torch
def sign(input, *args, **kwargs):
    """
    Returns a tree of new tensors with the signs of the elements of input.

    This is a thin wrapper that forwards everything to :func:`torch.sign`;
    the tree-structured dispatch shown in the examples presumably comes from
    decoration applied elsewhere in the treetensor package (not visible
    here) -- confirm against the package sources.

    Examples::
        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.sign(ttorch.tensor([12, 0, -3]))
        tensor([ 1, 0, -1])
        >>> ttorch.sign(ttorch.tensor({
        ...     'a': [12, 0, -3],
        ...     'b': {'x': [[-3, 1], [0, -2]]},
        ... }))
        <Tensor 0x7f1c81d02d30>
        ├── a --> tensor([ 1, 0, -1])
        └── b --> <Tensor 0x7f1c81d02a60>
            └── x --> tensor([[-1, 1],
                              [ 0, -1]])
    """
    # Pure delegation: positional and keyword arguments pass through unchanged.
    return torch.sign(input, *args, **kwargs)
|
768b4a6a4d12d4f7de5854c060fe1d7a494e8e6a
| 21,396
|
def _get_included_folds(folds, holdout_index = None):
"""
Returns a list that contains the holds to include whe joining.
If a holdout index is specified, it will not be included.
:param folds: the folds to join
:param holdout_index: an index to holdout
:return: the folds to use when joining
"""
return [folds[index] for index in range(len(folds)) if index != holdout_index]
|
f6715734620d3acbb43fc1146b84d312199ad741
| 21,398
|
def determine_colors(percentage):
    """
    Determine the color of the duplicated section.

    The first color reflects the amount of code duplication (green is best,
    red is worst); the second is always green, for the non-duplicated part.

    Bug fix: a percentage of exactly 0 previously fell through every branch
    and was painted red; 0% duplication is now green. Negative input still
    falls through to red, as before.
    """
    GREEN = "rgb(121, 185, 79)"
    YELLOW = "rgb(255, 204, 5)"
    ORANGE = "rgb(251, 135, 56)"
    RED = "rgb(204, 5, 5)"
    if 0 <= percentage <= 3:
        shade = GREEN
    elif 3 < percentage <= 5:
        shade = YELLOW
    elif 5 < percentage <= 20:
        shade = ORANGE
    else:
        shade = RED
    return [shade, GREEN]
|
3b9aa1071476a34c30fef274fb2a0d86f3eeb687
| 21,399
|
def fetch_vertex(known_vertices, chromosome, pos, gene_id):
    """Look for a vertex matching the input criteria. Throw error if not found.

    :param known_vertices: nested mapping chromosome -> pos -> iterable of vertices
    :param chromosome: chromosome name
    :param pos: position key
    :param gene_id: gene id the vertex must carry
    :raises ValueError: if no matching vertex exists

    Bug fixes: the broad ``except Exception`` is narrowed to the KeyError a
    missing chromosome/pos actually raises, and a position whose vertices all
    have a different gene_id now also raises (it previously returned None
    silently, contradicting this docstring).
    """
    try:
        vertex_matches = known_vertices[chromosome][pos]
    except KeyError:
        vertex_matches = []
    for vertex in vertex_matches:
        if vertex.gene_id == gene_id:
            return vertex
    raise ValueError('Vertex at ' + chromosome + ':' + str(pos) +
                     ' does not exist!')
|
79bc55a25bb41d390661b06356ee0f130da0cbe4
| 21,400
|
import fileinput
def import_muts(path, effect=False, gene_filter=None):
    """Import mutations from a tab-separated file into a dictionary.

    :param path: path to the input file; the first line is a header
    :param effect: when True, expect 3 columns (gene, mutation, effect) and
        decompose the mutation string; otherwise expect >= 8 columns keyed by
        the header
    :param gene_filter: optional collection of gene names to keep
    :return: dict mapping a mutation id string to its attribute dict
    """
    mutations = {}
    with fileinput.input(path) as fun_file:
        header = next(fun_file).strip().split('\t')
        for raw in fun_file:
            fields = raw.strip().split('\t')
            if effect:
                if gene_filter is not None and fields[0] not in gene_filter:
                    continue
                key = ' '.join(fields[:2])
                # fields[1] looks like e.g. "R175H": ref aa, position, alt aa
                mutations[key] = {'effect': fields[2],
                                  'gene': fields[0],
                                  'pos_aa': fields[1][1:-1],
                                  'ref_aa': fields[1][:1],
                                  'alt_aa': fields[1][-1]}
            else:
                if gene_filter is not None and fields[6] not in gene_filter:
                    continue
                key = ''.join((fields[6], ' ', fields[4], fields[3], fields[5]))
                mutations[key] = {header[i]: fields[i] for i in range(8)}
    return mutations
|
664b49e7b379481bee54d6f6dd9173927d94a42f
| 21,401
|
def init_table_config_data():
    """Initialize the dictionary used to store channel master table data.

    "channels" holds the channel/attribute full names as a list;
    "channels_data" maps each full name to its configuration dict.

    Returns:
        dict: empty configuration dictionary
    """
    return {
        "channels": [],
        "channels_data": {},
    }
|
cc87d1987b6451ca6fcd83b6001eaa29bd10a4fc
| 21,402
|
def equal_near(item_1: float, item_2: float, thresold: float = 0.1) -> bool:
    """Are two items close to equal (relative to *item_2*)?

    Returns True if the relative difference is less than the threshold.
    Note the comparison is asymmetric: it measures |1 - item_1/item_2|.

    Args:
        item_1 (float): First item.
        item_2 (float): Second item (reference value).
        thresold (float, optional): Threshold for the comparison.
            Defaults to 0.1 (the docstring previously said 0.01 -- fixed).

    Returns:
        bool: True if the relative difference is less than the threshold.

    Bug fix: item_2 == 0 previously raised ZeroDivisionError; zero is now
    handled explicitly (only zero is "near" zero in relative terms).
    """
    if item_2 == 0:
        return item_1 == 0
    return abs(1 - (item_1 / item_2)) < thresold
|
e9d61f1e14d7c09a42444d32da19e0d81be56af2
| 21,403
|
def _pg_utcnow(element, compiler, **kw):
"""Postgresql-specific compilation handler."""
return "(CURRENT_TIMESTAMP AT TIME ZONE 'utc')::TIMESTAMP WITH TIME ZONE"
|
f66ffec3883b4b3fcd2ac8fae531a8beb9da2c12
| 21,406
|
import itertools
def reject(iterable, conditional=None):
    """Yield the values of *iterable* that FAIL the truth test *conditional*.

    The opposite of filter. With conditional=None, items that are themselves
    falsy are kept.

    params: iterable, conditional
        iterable -> list, sequence, set, dictionary, generator etc
        conditional -> callable taking one element and returning truthiness

    Examples:
        >>> odds = reject([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0)
        >>> list(odds)
        [1, 3, 5]
    """
    # filterfalse already implements exactly this; keep the delegation.
    return itertools.filterfalse(conditional, iterable)
|
02baa409454d20c07e328b669014768fdd971e5c
| 21,407
|
import os
def transform_id(id: str, rel_url: str):
    """Normalize an anchor id to the form ``foo/bar/section:id``.

    The section name is the url's file name without its extension; the
    directory part (if any) is kept as a prefix.
    """
    directory, filename = os.path.split(rel_url)
    section = os.path.splitext(filename)[0]
    prefix = directory + '/' if directory else ''
    return f'{prefix}{section}:{id}'
|
e36dcad483cb81cfb39e1b20a001ce2427b1aa90
| 21,408
|
import shlex
def extract_info_from_line(line):
    """Translate one line from a txt file into execution arguments.

    :param line: arguments for execution,
        e.g. instance="tm1srv01" process="Bedrock.Server.Wait" pWaitSec=2
    :return: instance_name, process_name, parameters (dict)
    """
    parameters = {}
    for token in shlex.split(line):
        key, value = token.split("=")
        cleaned = value.strip('"').strip()
        # 'instance' and 'process' are treated case-insensitively; all other
        # parameter names (e.g. pWaitSec) are case sensitive in the TM1 REST API
        if key.lower() in ('process', 'instance'):
            parameters[key.lower()] = cleaned
        else:
            parameters[key] = cleaned
    instance_name = parameters.pop("instance")
    process_name = parameters.pop("process")
    return instance_name, process_name, parameters
|
f55c5d711f5dd4762a01abd199b916c62f74d2a9
| 21,409
|
def build_msg(request, msg_type, name, drink):
    """Personalize an SMS template by substituting customer details.

    Looks up the template in request.form under *msg_type*, then replaces the
    <firstName> and <productType> tags with the given values.
    """
    template = request.form.get(msg_type)
    for tag, value in (('<firstName>', name), ('<productType>', drink)):
        template = template.replace(tag, value)
    return template
|
2a22f1b48717d9a874e2397791f734fce483e703
| 21,410
|
from typing import Dict
from typing import Any
def temperature_module_set_temp(
    temperature_module_default: Dict[str, Any]
) -> Dict[str, Any]:
    """Temperature module config with user-specified temperature settings.

    Mutates the passed-in default config in place and returns it.
    """
    settings = {
        "degrees-per-tick": 5.0,
        "starting": 20.0,
    }
    temperature_module_default["hardware-specific-attributes"]["temperature"] = settings
    return temperature_module_default
|
19812e01ea07199be22d6d481a6f6730dd2d79fa
| 21,411
|
import csv
import sys
def import_actual_data(fname):
    """
    Parse a csv of data into the format used for comparing with simulated data.

    Inputs:
    ======
    :param fname: Filepath/filename of the actual data to parse
    :type fname: str

    Returns:
    =======
    :return: Dictionary of actual data. Keys are column headers (stripped),
        values are lists of floats.
    :rtype: dict

    Exits the process (status 1) if any cell cannot be parsed as a float.
    """
    actual_data = {}
    with open(fname, "r") as csvfile:
        for row in csv.DictReader(csvfile, delimiter=","):
            for column in row:
                try:
                    value = float(row[column])
                except ValueError as err:
                    print(err)
                    sys.exit(1)
                actual_data.setdefault(column.strip(), []).append(value)
    return actual_data
|
1d81270bf060ac00baf7bdef515150a19fe4a85d
| 21,413
|
import argparse
def get_training_input_args():
    """
    Retrieves and parses the command line arguments provided by the user when
    they run the program from a terminal window. This function uses Python's
    argparse module to create and define these command line arguments. If
    the user fails to provide some or all of the optional arguments, then the
    default values are used for the missing arguments. The following command
    line arguments let the user specify the model training specification.
    Command Line Arguments:
      1. Image Folder as positional argument 'dir' - Required Input
      2. CNN Model Architecture as --arch with default value 'densenet121'
         (choices: 'vgg16' or 'densenet121')
      3. Hyperparameter --learning_rate with default value 0.001
      4. Hyperparameter --hidden_units with default value 1000
      5. Hyperparameter --epochs with default value 5
      6. Hyperparameter --dropout with default value 0.2
      7. Device flag --gpu (store_true; off by default, i.e. CPU)
      8. Save Directory --save_dir to specify where the checkpoint should be
         saved to (no default)
    This function returns these arguments as an argparse.Namespace object.
    Parameters:
        None - simply using argparse module to create & store command line arguments
    Returns:
        parse_args() - data structure that stores the command line arguments object
    """
    parser = argparse.ArgumentParser(description= "Process the command line arguments provide by the user")
    parser.add_argument("dir", type=str, help="Path to the folder of flower images" )
    parser.add_argument("--arch", type=str, choices=["vgg16", "densenet121"], default = "densenet121", help="Type of CNN model architecture to use")
    parser.add_argument("--learning_rate", type=float, default= 0.001, help="The learning rate of the CNN training")
    parser.add_argument("--hidden_units", type=int, default= 1000, help="Number of units in the hidden layer of the classifier")
    parser.add_argument("--epochs", type=int, default= 5, help="Number of epochs for training")
    parser.add_argument("--dropout", type=float, default= 0.2, help="Probability of unit dropout")
    parser.add_argument("--gpu", action='store_true', help="Select to run the programme with GPU")
    parser.add_argument("--save_dir", type=str,
                        help="Specify the folder path to save the trained model checkpoint to")
    return parser.parse_args()
|
46733f28fa2876f51d08ee8c62cee04675a52a47
| 21,414
|
import subprocess
def collect_and_install(supress_output: bool = False) -> bool:
    """Recursively collects requirements.txt files and pip-installs all of them.

    Args:
        supress_output (bool, optional): Produces no output if True. Defaults to False.

    Returns:
        bool: True if requirements were found (and installation was attempted).

    Bug fixes: the listing of found files and the final "Done installing."
    message previously printed even when supress_output was True, and a bare
    ``try/except: pass`` wrapped the listing for no reason -- both removed.
    """
    if not supress_output:
        print('Collecting requirements...')
    # `find` is invoked as an argument list (no shell) and emits one path per line.
    found = subprocess.run(
        ["find", ".", "-name", "requirements.txt"], stdout=subprocess.PIPE)
    files = [path for path in found.stdout.decode().split('\n') if path]
    if not files:
        if not supress_output:
            print('Found: Nothing, exiting...')
        return False
    if not supress_output:
        print('Found:')
        for path in files:
            print('>\t', path)
    requirements = ' '.join('-r {}'.format(path) for path in files)
    command = "./bin/pip install %s" % requirements
    if not supress_output:
        print('Running: %s' % command)
    subprocess.run(command.split(' '))
    if not supress_output:
        print("Done installing.")
    return True
|
c5f60040aadd301d9183bf46a053b998acdde747
| 21,415
|
def _pascal_case_to_underscore_case(value: str) -> str:
    """
    Converts a pascal case string (e.g. MyClass)
    into a lower case underscore separated string (e.g. my_class).

    Implemented as a character-level state machine:

    * ``initial``     -- nothing consumed yet
    * ``upper``       -- one uppercase letter seen since the last flush
    * ``multi-upper`` -- two consecutive uppercase letters seen
    * ``lower``       -- inside a lowercase run

    ``partial`` accumulates the current word and is flushed into ``result``
    (joined by ``_``) at each detected word boundary.

    NOTE(review): for runs of three or more consecutive capitals the flush
    fires on every second capital (observed: "ABCDef" -> "ab_cd_ef", not
    "abc_def"). This looks unintended but is the current behavior --
    confirm before relying on acronym handling.
    """
    result = ""
    state = "initial"
    partial = ""
    for char in value:
        if "A" <= char <= "Z":
            if state == "initial":
                # First character of the string is uppercase: start a word.
                state = "upper"
            elif state == "upper":
                # Second consecutive uppercase letter.
                state = "multi-upper"
            else:
                # Uppercase after a lowercase run (or after >=2 capitals):
                # flush the accumulated word and begin a new one.
                if result:
                    result += "_"
                result += partial
                partial = ""
                state = "upper"
            partial += char.lower()
        else:
            if state == "multi-upper":
                # Lowercase right after >=2 capitals: the capitals form their
                # own word, so flush before consuming this character.
                if result:
                    result += "_"
                result += partial
                partial = ""
            partial += char
            state = "lower"
    # Flush the final pending word.
    if result:
        result += "_"
    result += partial
    return result
|
540d4b7525424d3e2f40f52d357fcb6c94b77d52
| 21,416
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.