content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def superior():
    """Build and return the Lake Superior fixture object."""
    # Delegate construction entirely to the factory; no extra state is kept.
    return LakeFactory(lake_name="Lake Superior", abbrev="SU")
def hsv_mask(img, hue_mask, sat_mask, val_mask):
    """
    Combine per-channel HSV threshold masks into a single binary mask.
    :param img: The image to mask
    :param hue_mask: Tuple of (hue_min, hue_max)
    :param sat_mask: Tuple of (sat_min, sat_max)
    :param val_mask: Tuple of (val_min, val_max)
    :return: Binary image mask (1 where all three channel masks agree)
    """
    h_lo, h_hi = hue_mask
    s_lo, s_hi = sat_mask
    v_lo, v_hi = val_mask
    h_bin = h_binary(img, h_lo, h_hi)
    s_bin = s_binary(img, s_lo, s_hi)
    v_bin = v_binary(img, v_lo, v_hi)
    # Pixels pass only when every channel mask marked them with 1.
    combined = np.zeros_like(h_bin)
    combined[(h_bin == 1) & (s_bin == 1) & (v_bin == 1)] = 1
    return combined
def convert_parameter_dict_to_presamples(parameters):
    """Convert a dictionary of named parameters to the form needed for ``parameter_presamples``.

    ``parameters`` should be a dictionary with names (as strings) as keys and
    Numpy arrays as values. All Numpy arrays should have the same shape.

    Returns ``(list of names, numpy samples array)`` — note the order: the
    sorted names come first, and row ``i`` of the array holds the (flattened)
    values for ``names[i]``. (The previous docstring stated the reverse order.)

    Raises:
        ValueError: if the arrays do not all share a single shape.
    """
    names = sorted(parameters.keys())
    shapes = {obj.shape for obj in parameters.values()}
    if len(shapes) != 1:
        raise ValueError(
            "Hetergeneous array shapes ({}) not allowed".format(shapes)
        )
    # Each array becomes one row, ordered by sorted parameter name.
    return names, np.vstack([parameters[key].reshape((1, -1)) for key in names])
def convert_to_rle(annotation, width, height):
    """Convert complex polygons to COCO RLE format.
    Arguments:
    annotation: a dictionary for an individual annotation in Darwin's format
    Returns: an annotation in encrypted RLE format and a bounding box
    @author Dinis Gokaydin <d.gokaydin@nationaldrones.com>
    """
    # complex polygons have multiple "paths" (polygons)
    polygons = annotation['complex_polygon']['path']
    # One binary layer per polygon path; layers are summed below so that
    # overlapping regions can be detected as holes.
    mask = np.zeros([height, width, len(polygons)], dtype=np.uint8)
    for ind_pol, pol in enumerate(polygons):
        pol = fix_polygon(pol, width, height)# not sure whether assignment is necessary here
        all_points_y = []; all_points_x = [];
        for pt in pol:
            all_points_y.append(pt['y'])
            all_points_x.append(pt['x'])
        # Get indexes of pixels inside the polygon and set them to 1
        rr, cc = skimage.draw.polygon(all_points_y, all_points_x)
        mask[rr, cc, ind_pol] = 1
    # once we sum all the polygons any even values are holes (this should allow for "ring" holes, but it is not tested)
    mask = ((np.sum(mask, axis=2)%2) == 1).astype(np.uint8)
    # Return mask, and array of class IDs of each instance
    # NOTE(review): pycocotools requires a Fortran-ordered array; the bbox is
    # PIL's (left, upper, right, lower) of the non-zero region.
    return pycocotools.mask.encode(np.asarray(mask, order="F")), Image.fromarray(mask).getbbox()
import socket
def getfqdn(name=None):
    """return (a) local IPv4 or v6 FQDN (Fully Qualified Domain Name)
    if name is not given, returns local hostname
    may raise socket.gaierror"""
    # AF_UNSPEC lets the resolver choose IPv4 or IPv6.
    # NOTE(review): _getfqdn is defined elsewhere in this module.
    return _getfqdn(socket.AF_UNSPEC, name)
def create_answer_dict(elem, restrict_elem=None, checkbox=False):
    """
    Construct dict with choices to fulfil form's div attribute
    :param elem: ElemntTree element
    :param restrict_elem: name of element which is not included in choice text
    :param checkbox: boolean flag to work return data for checkbox problem
    :return: tuple, (constructed dict, correct_answer)
    """
    answers = {}
    correct = []
    for idx, choice in enumerate(elem.iter('choice')):
        key = 'choice{}'.format(idx)
        answers[key] = parse_text_field(choice, tagwrap=False, restrict_elem=restrict_elem)
        # The XML marks correct options via a 'correct' attribute.
        if choice.attrib['correct'] == 'true':
            correct.append(idx)
    # Checkbox problems report every correct index; otherwise just the first.
    if checkbox:
        return answers, str(correct)
    return answers, str(correct[0])
def fasta(file_allname: str):
    """Read a FASTA file and return its records as a list of strings.

    :param file_allname: path to the FASTA file (including the extension)
    :return: list of record strings, each starting with ``">"``, or ``None``
        (after printing a message) when the file cannot be read
    """
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked the open file object).
        with open(file_allname) as handle:
            content = handle.read()
        # Splitting on ">" yields an empty first chunk before the first
        # record; the truthiness filter skips it.
        return [">" + chunk for chunk in content.split(">") if chunk]
    except OSError:
        # Narrowed from a bare except; original user-facing message kept
        # (Chinese: "please enter the file name correctly").
        print("请正确输入文件名称。")
def ask_version(version):
    """ interact with user to determine what to do"""
    # NOTE(review): Python 2 code (print statements, raw_input); helpers
    # get_upgrades/get_latest/get_path are defined elsewhere in this module.
    upgrades = get_upgrades()
    latest = get_latest(version, upgrades)
    answer = False
    if latest > version:
        msg = "a new version (%s) is available. You have %s. Upgrade?" % (latest, version)
        answer = True if raw_input("%s (y/N) " % msg).lower() == 'y' else False
        if answer:
            path = get_path(version, latest, upgrades)
        else:
            # NOTE(review): this message is misleading here — it is printed
            # when the user DECLINES an available upgrade, not only when the
            # installed version already is the latest.
            print "you already have the latest revision (%s)" % latest
    if version == latest or not answer:
        while True:
            msg = "do you want to up/down grade to a different revision? If so, which version?"
            answer = raw_input("%s (rev no) " % msg)
            if not answer.isdigit():
                print "please enter a version NUMBER"
                continue
            answer = int(answer)
            path = get_path(version, answer, upgrades)
            break
    return path
def format_size(size):
    """Render a byte count as a human-readable string, e.g. ``'1.5 KB'``.

    :param float size:
    :rtype: str
    """
    amount = float(size)
    chosen_unit = 'TB'
    for candidate in ('bytes', 'KB', 'MB', 'GB'):
        if amount < 1024:
            chosen_unit = candidate
            break
        amount /= 1024
    # Trim trailing zeros and a dangling decimal point ("1.50" -> "1.5").
    trimmed = '{0:.2f}'.format(amount).rstrip('0').rstrip('.')
    return trimmed + ' ' + chosen_unit
def make_full_block_header_list(block_header):
    """Order all block header fields into a list."""
    # Extend the short header (helper defined elsewhere) with the remaining
    # fields; the ordering matters to downstream serialization.
    return make_short_block_header_list(block_header) + [
        block_header.timestamp,
        block_header.extraData,
    ]
from typing import Union
import os
def path_constructor(
    loader: Union[Loader, FullLoader, UnsafeLoader], node: Node
) -> PathLike:
    """
    Expand the environment variable embedded in a YAML path node.
    :param loader: not used
    :param node: YAML node
    :return: path with the environment variable substituted
    :raises SyntaxError: if the node value does not match the regex expression for a path-like string
    :raises KeyError: raises an exception if the environment variable is missing
    """
    raw = node.value
    matched = path_matcher.match(raw)
    if matched is None:
        raise SyntaxError("Can't match pattern")
    # The matched prefix looks like "${VAR}"; strip "${" and "}".
    env_var = matched.group()[2:-1]
    try:
        prefix = os.environ[env_var]
    except KeyError:
        raise KeyError(
            f"Missing definition of environment variable {env_var} "
            f"needed when parsing configuration file"
        )
    return prefix + raw[matched.end() :]
def rc_seq(seq=""):
    """Return the reverse complement of a DNA sequence.

    Supports upper/lower-case A/C/G/T and the ambiguity code N. Any other
    character raises ``KeyError`` (same contract as before).
    """
    complement = {
        "a": "t", "c": "g", "t": "a", "g": "c", "n": "n",
        "A": "T", "C": "G", "T": "A", "G": "C", "N": "N",
    }
    # Walk the sequence right-to-left, complementing each base; this replaces
    # the previous manual index loop (and a redundant list initialization).
    return "".join(complement[base] for base in reversed(seq))
def descriptions(path, values):
    """Build a <descriptions> element from a list of description dicts."""
    if not values:
        return
    container = E.descriptions()
    for item in values:
        node = E.description(
            item['description'], descriptionType=item['descriptionType']
        )
        # Language is optional; the helper skips empty values.
        set_non_empty_attr(node, '{xml}lang', item.get('lang'))
        container.append(node)
    return container
from typing import Mapping
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Any
def build_default_region_dataset(
    metrics: Mapping[FieldName, Union[Sequence[float], TimeseriesLiteral]],
    *,
    region=DEFAULT_REGION,
    start_date="2020-04-01",
    static: Optional[Mapping[FieldName, Any]] = None,
) -> timeseries.MultiRegionDataset:
    """Returns a `MultiRegionDataset` containing metrics in one region"""
    # Thin wrapper around build_dataset for the common single-region case.
    # NOTE(review): a present-but-empty `static` mapping (e.g. {}) is dropped
    # by the truthiness check below.
    return build_dataset(
        {region: metrics},
        start_date=start_date,
        static_by_region_then_field_name=({region: static} if static else None),
    )
import requests
import unicodedata
import time
def process(entries):
    """
    Look for Science Direct (SCOPUS) database to update the bibliography entries.
    This update evaluates only '@ARTICLE' entry types.
    :param entries: list of bibtex entries
    :return:
    """
    log.info("Seeking for Science Direct (SCOPUS) entries")
    count = 0
    for e in entries:
        # Skip entries already enriched from an online source.
        if e.online_processed:
            log.debug("Entry '%s' already processed." % e.cite_key)
            continue
        if not e.entry_type == entry.EntryType.ARTICLE:
            log.debug("Skipping non-journal entry '%s'." % e.title)
            continue
        # SECURITY NOTE(review): the Elsevier API key is hard-coded below;
        # it should be moved to configuration or an environment variable.
        sd_params = {'query': 'TITLE("%s")' % e.title, 'view': 'STANDARD'}
        sd_url = 'http://api.elsevier.com/content/search/scidir'
        sd_headers = {'X-ELS-APIKey': '6b7571677244819622d79889d590f307', 'X-ELS-ResourceVersion': 'XOCS'}
        sd_response = requests.get(sd_url, params=sd_params, headers=sd_headers)
        sd_res = sd_response.json()
        if 'error' in sd_res['search-results']['entry'][0]:
            log.debug("No results found in Science Direct for '%s'." % e.title)
            continue
        # Second query against the SCOPUS index for volume/year metadata.
        sc_params = {'query': 'TITLE("%s")' % e.title, 'view': 'STANDARD'}
        sc_url = 'http://api.elsevier.com/content/search/scopus'
        sc_headers = {'X-ELS-APIKey': '6b7571677244819622d79889d590f307', 'X-ELS-ResourceVersion': 'XOCS'}
        sc_response = requests.get(sc_url, params=sc_params, headers=sc_headers)
        sc_res = sc_response.json()
        # SCOPUS fields
        sc_res_obj = sc_res['search-results']['entry'][0]
        entry_type = _parse_entry_type(sc_res_obj['prism:aggregationType'])
        volume = sc_res_obj['prism:volume']
        year = _parse_year(sc_res_obj['prism:coverDisplayDate'])
        # Science Direct fields
        sd_res_obj = sd_res['search-results']['entry'][0]
        # Authors
        authors = ""
        for author in sd_res_obj['authors']['author']:
            if len(authors) > 0:
                authors += " and "
            authors += author['surname'] + ", " + author['given-name']
        # NOTE(review): .encode() here suggests Python 2 provenance; under
        # Python 3 this turns `authors` into a bytes object.
        authors = unicodedata.normalize('NFKD', authors).encode('ascii','ignore')
        # Other fields
        title = sd_res_obj['dc:title']
        doi = sd_res_obj['prism:doi']
        journal = sd_res_obj['prism:publicationName']
        e.merge(entry.Entry(
            entry_type=entry_type,
            title=title,
            authors=authors,
            journal=journal,
            volume=volume,
            year=year,
            doi=doi
        ))
        e.online_processed = True
        log.debug("[Science Direct] Updated entry '%s'." % e.title)
        count += 1
        # Throttle so we stay polite with the API rate limits.
        time.sleep(0.5)
    if count > 0:
        log.info("Updated %s entries according to Science Direct." % count)
    return entries
def load_espnet_model(model_path):
    """Load an end-to-end model from ESPnet.
    :param model_path: Path to the model.
    :type model_path: str
    :return: The model itself, mapping from subword to index,
    and training arguments used.
    :rtype: (torch.nn.Module, dict, dict)
    """
    model, train_args = load_trained_model(model_path)
    # Invert the char list into a subword -> index lookup table.
    char_dict = dict(zip(train_args.char_list, range(len(train_args.char_list))))
    # Inference mode: disables dropout / batch-norm updates.
    model.eval()
    return model, char_dict, train_args
from typing import Iterator
from typing import Union
from typing import Match
def full_match(nfa: NFA, text: Iterator[str]) -> Union[Match, None]:
    """
    Run the NFA over the whole text and return the match, if any.
    :param nfa: a NFA
    :param text: a text to match against
    :return: match or ``None``
    """
    # Peek-ahead iterator yielding (char, next_char); '' marks start/end.
    text_it = _peek(text, sof='', eof='')
    curr_states_set = StatesSet()
    next_states_set = StatesSet()
    # Seed with the states reachable from the NFA's start state.
    curr_states_set.extend(curr_states(
        state=nfa.state,
        captured=None,
        chars=next(text_it)))
    for char, next_char in text_it:
        if not curr_states_set:
            break
        for curr_state, captured in curr_states_set:
            if char != curr_state.char:
                continue
            if curr_state.is_captured:
                captured = captures.capture(
                    char=char,
                    prev=captured)
            next_states_set.extend(next_states(
                state=curr_state,
                captured=captured,
                chars=(char, next_char)))
        # Swap the two sets instead of reallocating on every step.
        curr_states_set, next_states_set = (
            next_states_set, curr_states_set)
        next_states_set.clear()
    try:
        captured = _get_match(curr_states_set)
    except exceptions.MatchError:
        return None
    return Match(
        captures=captures.matched(captured, nfa.groups_count),
        named_groups=nfa.named_groups)
def smiles_tokenizer(line, atoms=None):
    """
    Tokenizes SMILES string atom-wise using regular expressions. While this
    method is fast, it may lead to some mistakes: Sn may be considered as Tin
    or as Sulfur with Nitrogen in aromatic cycle. Because of this, you should
    specify a set of two-letter atoms explicitly.
    Parameters:
    atoms: set of two-letter atoms for tokenization
    """
    # Use a custom pattern when a two-letter atom set is supplied, otherwise
    # fall back to the module-level default pattern.
    pattern = _atoms_re if atoms is None else get_tokenizer_re(atoms)
    # re.split keeps captured delimiters at odd indices; those are the tokens.
    return pattern.split(line)[1::2]
from typing import Set
def merge_parameter_sets(first: Set[ParameterDefinition], second: Set[ParameterDefinition]) -> Set[ParameterDefinition]:
    """
    Merge two sets of parameter definitions (e.g. coming from different
    dependencies) into a single set, preferring required variants.
    """
    merged: Set[ParameterDefinition] = first & second
    exclusive = first ^ second
    for candidate in exclusive:
        # Keep the candidate if it is required, or if no required twin with
        # the same field alias exists among the non-shared parameters.
        required_twin = any(
            other.field_alias == candidate.field_alias and other.is_required
            for other in exclusive
        )
        if candidate.is_required or not required_twin:
            merged.add(candidate)
    return merged
import time
import sys
def pca(data, submeans=0, keep=-1, flip=1, sparse=0):
    """Auto-selecting PCA, with data in columns.
    The 'data' matrix should be ndims X npts.
    If npts > ndims, then does a PCA directly.
    If ndims > npts, then does PCA on transpose, and does appropriate normalization.
    Returns (eigvals, eigvecs)...or something equivalent.
    You can apply dot(vecs.T, data) to transform data, and dot(evecs, t) to transform back.
    'keep' determines how many dimensions to keep:
    <= 0: all dimensions (default)
    0.0 < keep <= 1.0: given percentage of total variance (must be a float)
    >= 1: given number of dimensions (must be an int)
    In all cases, at most min(data.shape) will be kept.
    If you definitely don't want to flip data, then set flip=0.
    """
    # NOTE(review): helpers (submean, cov, sortedeigs, sparseeigs, prunedims,
    # sortvals, normcols, log) and numpy names (dot, linalg, sqrt) come from
    # elsewhere in this module.
    t1 = time.time()
    if submeans:
        data, means = submean(data)
    t2 = time.time()
    log(' Done subtracting means in %0.3f secs...' % (t2-t1))
    ndims, npts = data.shape
    if npts >= ndims or not flip:
        # Direct PCA on the covariance of the data.
        if sparse:
            assert type(keep) == type(123)
            values, vecs = sparseeigs(cov(data), keep)
            return prunedims(values, vecs, keep)
        else:
            values, vecs = sortedeigs(cov(data))
            return prunedims(values, vecs, keep)
    else:
        #TODO this path is broken right now
        assert 1 == 0, 'This path is broken!'
        d2 = data.transpose()
        t3 = time.time()
        log(' Computed transpose in %0.3f secs' % (t3-t2))
        c = cov(d2) # 3.35 secs
        t4 = time.time()
        log(' Computed covariance in %0.3f secs' % (t4-t3))
        values, vecs = linalg.eigh(c) # 0.34 secs
        t5 = time.time()
        log(' Computed eigendecomposition in %0.3f secs' % (t5-t4))
        del c
        ndims = len(values)
        #log(values)
        #log(vecs)
        for i in range(ndims):
            if abs(values[i]) > 0.00001:
                values[i] = sqrt(1/values[i]/(max(data.shape))) # needed for normalization...why? # fast
        values, vecs = sortvals(values, vecs)
        # TODO prune dimensions here (before matrix-mult)?
        v2 = dot(data, vecs) * values # 2.53
        t6 = time.time()
        v2 = normcols(v2)
        t7 = time.time()
        if 0:
            log(values)
            log('Should be I: %s' % (dot(v2.T, v2),)) # identity...good
            t = dot(v2.T, data)
            log('T: ', t)
            log('%s' % (data.shape, d2.shape, values.shape, vecs.shape, v2.shape,))
            # (5788, 500) (500, 5788) (500,) (500, 500) (500, 500) (5788, 500) (5788, 500)
            log(dot(v2, t))
            log(data) # this should equal the previous line
        log(' Times were %s' % ([t2-t1, t3-t2, t4-t3, t5-t4, t6-t5, t7-t6],))
        sys.stdout.flush()
        return prunedims(values, v2, keep)
import struct
def build_udp_header(src_port, dst_port, length):
    """Pack a UDP header for a payload of ``length`` bytes.

    The wire format is four big-endian uint16 fields: source port,
    destination port, total length (payload + 8 header bytes) and checksum.

    - src_port: uint16 source port; when equal to DEFAULT_SRC_PORT the
      destination port is mirrored back as the source
    - dst_port: uint16 destination port
    - length: payload length in bytes (the header itself adds 8)
    """
    if src_port == DEFAULT_SRC_PORT:
        src_port = dst_port
    # A zero checksum means "no checksum generated" (legal for IPv4 UDP).
    checksum = 0
    total_length = length + 8
    return struct.pack('!HHHH', src_port, dst_port, total_length, checksum)
def plot_confus_mat(y_true, y_pred, classes_on=None,
                    normalize='true',
                    linewidths=0.02, linecolor='grey',
                    figsize: tuple = (4, 3),
                    ax=None, fp=None,
                    **kwargs):
    """ by default, normalized by row (true classes)
    """
    # Default to every label seen in either vector (set order is arbitrary).
    if classes_on is None:
        classes_on = list(set(y_true).union(y_pred))
    mat = metrics.confusion_matrix(y_true, y_pred, labels=classes_on,
                                   normalize=normalize)
    # return sns.heatmap(mat, linewidths=linewidths, linecolor=linecolor,
    # xticklabels=classes_on, yticklabels=classes_on,
    # **kwargs)
    mat = pd.DataFrame(data=mat, index=classes_on, columns=classes_on)
    # NOTE(review): `heatmap` is a project-local wrapper; `fp` presumably is
    # a save path forwarded to it — confirm against its definition.
    ax = heatmap(mat, figsize=figsize, ax=ax, fp=fp,
                 linewidths=linewidths, linecolor=linecolor,
                 **kwargs)
    return ax, mat
from typing import Any
import os
def set_config_option(section: str, option: str, value: Any, write_to_disk: bool = False) -> bool:
    """
    Function which updates configuration option value.
    By default if "write_to_disk" is False, value will only be updated in memory and not on disk.
    This means it won't be reflected in Plugin processes when using process plugin execution where
    each plugin is executed in a new sub process.
    """
    config = _get_config()
    config_path = os.environ.get("RADIO_BRIDGE_CONFIG_PATH", None)
    try:
        config[section][option] = value
    except KeyError:
        # Section did not exist yet; create it, then set the option.
        config[section] = {}
        config[section][option] = value
    if write_to_disk and config_path:
        LOG.debug("Writing updated config file to disk", file_path=config_path)
        # NOTE(review): opened in binary mode — assumes a configobj-style
        # object whose .write() accepts a binary handle; confirm.
        with open(config_path, "wb") as fp:
            config.write(fp)  # type: ignore
    # NOTE(review): returns True even when write_to_disk was requested but
    # no config path environment variable was set.
    return True
async def home():
    """
    Home endpoint to redirect to docs.
    """
    # NOTE(review): RedirectResponse presumably comes from FastAPI/Starlette
    # imports elsewhere in this module.
    return RedirectResponse("/docs")
def robust_hist(x, ax=None, **kwargs):
    """
    Histogram wrapper that silently drops values that are not finite
    (NaN / inf) before calling ``Axes.hist``.
    Returns:
    Axes
    """
    finite_mask = np.isfinite(x)
    # Fall back to the current pyplot axes when no axes object is supplied.
    axes = ax or plt.gca()
    axes.hist(x[finite_mask], **kwargs)
    return axes
import os
import re
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.

    :param package: path to the package directory containing ``__init__.py``
    :raises AttributeError: if no ``__version__`` assignment is found
        (``re.search`` returns None and ``.group`` fails)
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the open file object).
    with open(os.path.join(package, "__init__.py")) as init_file:
        init_py = init_file.read()
    return re.search(
        r"^__version__ = ['\"]([^'\"]+)['\"]",
        init_py,
        re.MULTILINE,
    ).group(1)
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Convert a (possibly negative) integer to a base36 string.

    Bug fixed: the previous default alphabet was missing 'w' and contained
    'x' twice, producing wrong digits for any value with a digit >= 32.
    The Python 2-only ``long`` check was dropped; ``int`` covers all sizes.

    :raises TypeError: if ``number`` is not an integer
    """
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    # Special case for zero
    if number == 0:
        return alphabet[0]
    sign = ''
    if number < 0:
        sign = '-'
        number = -number
    digits = ''
    base = len(alphabet)
    while number != 0:
        number, rem = divmod(number, base)
        digits = alphabet[rem] + digits
    return sign + digits
def pipe(*args, **kwargs):
    """A source that builds a url.
    Args:
    item (dict): The entry to process
    kwargs (dict): The keyword arguments passed to the wrapper
    Kwargs:
    conf (dict): The pipe configuration. Must contain the key 'base'. May
    contain the keys 'params' or 'path'.
    base (str): the sever name
    path (str): the resource path
    params (dict): can be either a dict or list of dicts. Must contain
    the keys 'key' and 'value'.
    key (str): the parameter name
    value (str): the parameter value
    Yields:
    dict: a url item
    Examples:
    >>> params = {'key': 's', 'value': 'gm'}
    >>> path = [{'value': 'rss'}, {'value': 'headline'}]
    >>> base = 'http://finance.yahoo.com'
    >>> conf = {'base': base, 'path': path, 'params': params}
    >>> result = next(pipe(conf=conf))
    >>> sorted(result.keys()) == [
    ...     'fragment', 'netloc', 'params', 'path', 'query', 'scheme',
    ...     'url']
    True
    >>> result['url'] == 'http://finance.yahoo.com/rss/headline?s=gm'
    True
    """
    # All of the work happens in the module-level `parser`; this wrapper
    # only forwards its arguments.
    return parser(*args, **kwargs)
import six
def get_rotation(rotation):
    """
    Return the text angle as float. The returned
    angle is between 0 and 360 deg.
    *rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.

    Raises ValueError for any other non-numeric input.
    """
    try:
        angle = float(rotation)
    except (ValueError, TypeError):
        # Dropped the `six` dependency: a plain str check is equivalent on
        # Python 3, and non-strings can never equal these literals anyway.
        if rotation is None or rotation == 'horizontal':
            angle = 0.
        elif rotation == 'vertical':
            angle = 90.
        else:
            # Bug fixed: the original message concatenated to "...value orNone".
            raise ValueError("rotation is {0} expected either 'horizontal'"
                             " 'vertical', numeric value or "
                             "None".format(rotation))
    return angle % 360
def checkWrite(request):
    """Check write"""
    # SECURITY NOTE(review): `path` comes straight from the request and is
    # used unsanitized — this lets a client write to arbitrary locations.
    try:
        _path = request.query_params.get("path")
        # NOTE(review): no separator is added, so "test.txt" is appended
        # directly to whatever string the client sent.
        _file = open(_path + "test.txt", "w")
        _file.write("engine write test")
        _file.close()
        return HttpResponse(_path + "test.txt")
    except ValueError as e:
        # NOTE(review): file I/O failures raise OSError, not ValueError, so
        # most write errors will propagate instead of hitting this handler.
        return genericApiException(e)
def most_seen_creators(event_kind=None, num=10):
    """
    Returns a QuerySet of the Creators that are associated with the most Events.
    """
    # `by_events` presumably orders creators by event count — TODO confirm
    # against the custom manager; slice keeps the top `num` rows.
    return Creator.objects.by_events(kind=event_kind)[:num]
from .extract.utils import _get_dataset_dir
from datetime import datetime
import os
def use_memmap(logger, n_files=1):
    """Memory-map array to a file, and perform cleanup after.
    .. versionadded:: 0.0.8
    Parameters
    ----------
    logger : :obj:`logging.Logger`
        A Logger with which to log information about the function.
    n_files : :obj:`int`, optional
        Number of memory-mapped files to create and manage.
    Notes
    -----
    This function is used as a decorator to methods in which memory-mapped arrays may be used.
    It will only be triggered if the class to which the method belongs has a ``memory_limit``
    attribute that is set to something other than ``None``.
    It will set an attribute within the method's class named ``memmap_filenames``, which is a list
    of filename strings, with ``n_files`` elements.
    If ``memory_limit`` is None, then it will be a list of ``Nones``.
    Files generated by this function will be stored in the NiMARE data directory and will be
    removed after the wrapped method finishes.
    """
    def inner_function(function):
        @wraps(function)
        def memmap_context(self, *args, **kwargs):
            if hasattr(self, "memory_limit") and self.memory_limit:
                self.memmap_filenames, filenames = [], []
                for _ in range(n_files):
                    # Bug fix: the module imports the `datetime` class, so the
                    # original `datetime.datetime.now()` raised AttributeError.
                    start_time = datetime.now().strftime("%Y%m%dT%H%M%S")
                    dataset_dir = _get_dataset_dir("temporary_files", data_dir=None)
                    _, filename = mkstemp(
                        prefix=self.__class__.__name__, suffix=start_time, dir=dataset_dir
                    )
                    logger.debug(f"Temporary file written to {filename}")
                    self.memmap_filenames.append(filename)
                    filenames.append(filename)
            else:
                filenames = self.memmap_filenames = [None] * n_files
            try:
                return function(self, *args, **kwargs)
            except:
                logger.error(f"{function.__name__} failed, removing temporary files")
                raise
            finally:
                # Bug fix: the original tested os.path.isfile on a leftover
                # loop variable before looping; check each file individually.
                if hasattr(self, "memory_limit") and self.memory_limit:
                    for filename in filenames:
                        if os.path.isfile(filename):
                            logger.debug(f"Removing temporary file: {filename}")
                            os.remove(filename)
        return memmap_context
    return inner_function
import argparse
def _create_parser():
"""
@rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser()
parser.add_argument("tasks", help="perform specified task and all its dependencies",
metavar="task", nargs = '*')
parser.add_argument('-l', '--list-tasks', help = "List the tasks",
action = 'store_true')
parser.add_argument('-v', '--version',
help = "Display the version information",
action = 'store_true')
parser.add_argument('-f', '--file',
help = "Build file to read the tasks from. 'build.py' is default value assumed if this argument is unspecified",
metavar = "file", default = "build.py")
return parser | af9f5087546ea5a16d8f55d9b6231419d623c1e8 | 30,132 |
def read_lexicon():
    """
    Returns the dict of {'word': string, 'score': int} represented by lexicon.txt
    """
    # Delegates parsing to read_dict (defined elsewhere); the path is
    # resolved relative to the current working directory.
    return read_dict('resources/lexicon.txt')
from tpDcc.libs.python import path
import os
def get_usd_qt_path():
    """
    Returns path where USD Qt files are located
    :return: str, or None when the platform or qt directory cannot be found
    """
    platform_dir = get_platform_path()
    if not platform_dir or not os.path.isdir(platform_dir):
        LOGGER.warning('No USD platform directory found: "{}"'.format(platform_dir))
        return None
    usd_qt_path = path.clean_path(os.path.join(platform_dir, 'qt'))
    # Bug fix: the original re-checked platform_dir here, so a missing qt/
    # sub-directory was never detected (the warning text proves the intent).
    if not os.path.isdir(usd_qt_path):
        LOGGER.warning('No USD Qt folder found: "{}"'.format(usd_qt_path))
        return None
    return usd_qt_path
import tqdm
def show_erps(Ds, align_window, labels=None, show_sem=True, co_data=None,
              **kwargs):
    """
    Use plot ERPs on electrode_grid
    Parameters
    ----------
    Ds: list
        list of D tensors (electrodes x time x trials)
    align_window: tuple
        time before and after stim in seconds
    labels: tuple
        Optional. labels for data legend
    show_sem: bool
    co_data: list
        List of RGB (0<x<1) values for the data colors. Default: cbrewer Set1
    kwargs: see electrode_grid
    Returns
    -------
    fig: plt.Figure
    axs: list(plt.Axes)
    """
    if co_data is None:
        co_data = b2mpl.get_map('Set1', 'Qualitative', 4).mpl_colors[1:]
    fig, axs, elects_to_plot = electrode_grid(xlims=align_window, **kwargs)
    h_lines = []
    for D, color in zip(Ds, co_data):
        # Restrict to the electrodes chosen by electrode_grid.
        D = D[np.array(elects_to_plot).astype(int)]
        # Mean over trials (masked mean tolerates masked samples).
        mean_erp = np.ma.mean(D, axis=2)
        tt = np.linspace(align_window[0], align_window[1], mean_erp.shape[1])
        for ax, data in tqdm(zip(axs, mean_erp), desc='Drawing data'):
            h_line, = ax.plot(tt, data, color=color, linewidth=1)
        # Keep one handle per dataset for the legend (the last axes' line).
        h_lines.append(h_line)
        if show_sem:
            # Standard error of the mean across trials.
            sem_erp = np.ma.std(D, axis=2) / np.sqrt(D.shape[2])
            for ax, data, err in tqdm(zip(axs, mean_erp, sem_erp),
                                      desc='Drawing sem'):
                ax.fill_between(tt, data - err, data + err,
                                alpha=.4, facecolor=color, edgecolor=color)
    for ax in axs:
        yl = ax.get_ylim()
        ax.set_yticks((yl[0], 0, yl[1]))
        ax.grid(True)
    if labels is not None:
        fig.legend(h_lines, labels, loc='upper right', ncol=2)
    return fig, axs
def insert_with_key_enumeration(agent, agent_data: list, results: dict):
    """
    Store simulated data under the agent's name, enumerating the key when an
    agent with the same name already stored data before.
    :param agent: agent that produced data
    :param agent_data: simulated data
    :param results: dict to store data into
    :return: dict with inserted data/name pair
    """
    name = agent.get_name()
    if name not in results:
        results[name] = agent_data
    else:
        # Count existing keys containing this name and use that as a suffix,
        # e.g. "foo" -> "foo_1" -> "foo_2".
        occurrences = sum(name in existing_key for existing_key in results)
        results[name + "_" + str(occurrences)] = agent_data
    return results
import argparse
def get_args():
    """Parse and validate command-line arguments."""
    parser = argparse.ArgumentParser(
        description='First Bank of Change',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('value', metavar='int', type=int, help='Sum')
    args = parser.parse_args()
    # Accept only values in (0, 100]; anything else aborts with a usage error.
    if 0 < args.value <= 100:
        return args
    parser.error('value "{}" must be > 0 and <= 100'.format(args.value))
def parse_condition_code(value, is_day: bool) -> str:
    """Convert WeatherAPI condition code to standard weather condition."""
    if value is None:
        return None
    try:
        code = int(value)
        if code == 1000:
            # 1000 is "clear"; day vs night decides which constant applies.
            return ATTR_CONDITION_SUNNY if is_day else ATTR_CONDITION_CLEAR_NIGHT
        # First mapping entry whose code set contains this code.
        return next(k for k, v in CONDITION_MAP.items() if code in v)
    except:  # noqa: E722 pylint: disable=bare-except
        # Any parse or lookup failure maps to "unknown condition".
        return None
def min__to__s():
    """Convert minute to second"""
    # Template string: 6.0E+1 (= 60) multiplies the variable; `{kind}` and
    # `{var}` are filled in later by the caller — presumably a
    # unit-conversion code generator (TODO confirm).
    return '6.0E+1{kind}*{var}'
import html
def home():
    """Home tab."""
    # NOTE(review): `html` here is presumably Dash's html component module
    # (dash.html / dash_html_components), not the stdlib `html` module —
    # the stdlib module has no I/Span/Li factories.
    icon = html.I(className="fas fa-home fa-lg", title="Home")
    return html.Li(html.Span(icon), id="view-info", className="active")
def score_per_term(base_t, mis_t, special_t, metric):
    """Compute one similarity score per term category.

    Each of ``base_t``, ``mis_t`` and ``special_t`` maps ``'a'`` and ``'b'``
    to the term lists of the two toponyms being compared; ``metric`` names a
    module-level similarity function resolved through ``globals()``.

    Returns
    -------
    tuple of (float, float, float)
        One score per category (base, mismatch, special), each normalized
        in [0, 1]; stays 0 when both term lists of a category are empty.
    """
    scores = [0, 0, 0]  # order: base, mismatch, special
    sides_a = (base_t['a'], mis_t['a'], special_t['a'])
    sides_b = (base_t['b'], mis_t['b'], special_t['b'])
    for idx, (terms_a, terms_b) in enumerate(zip(sides_a, sides_b)):
        if terms_a or terms_b:
            compare = globals()[metric]
            scores[idx] = compare(u' '.join(terms_a), u' '.join(terms_b))
    return scores[0], scores[1], scores[2]
import bisect
def _eliminationOrder_OLD(gm, orderMethod=None, nExtra=-1, cutoff=inf, priority=None, target=None):
    """Find an elimination order for a graphical model
    Args:
    gm (GraphModel): A graphical model object
    method (str): Heuristic method; one of {'minfill','wtminfill','minwidth','wtminwidth','random'}
    nExtra (int): Randomly select eliminated variable from among the best plus nExtra; this adds
    randomness to the order selection process. 0 => randomly from best; -1 => no randomness (default)
    cutoff (float): Quit early if ``score`` exceeds a user-supplied cutoff value (returning ``target, cutoff``)
    target (list): If the identified order is better than cutoff, write it directly into passed ``target`` list
    priority (list, optional): Optional list of variable priorities; lowest priority variables are
    eliminated first. Useful for mixed elimination models, such as marginal MAP inference tasks.
    Returns:
    list: The identified elimination order
    float: The "score" of this ordering
    Using ``target`` and ``cutoff`` one can easily search for better orderings by repeated calls:
    >>> ord, score = eliminationOrder(model, 'minfill', nExtra=2, cutoff=score, target=ord)
    """
    orderMethod = 'minfill' if orderMethod is None else orderMethod.lower()
    priority = [1 for x in gm.X] if priority is None else priority
    # Each heuristic scores a candidate variable from the adjacency structure.
    if orderMethod == 'minfill': score = lambda adj,Xj: sum([0.5*len(adj[Xj]-adj[Xk]) for Xk in adj[Xj]])
    elif orderMethod == 'wtminfill': score = lambda adj,Xj: sum([(adj[Xj]-adj[Xk]).nrStatesDouble() for Xk in adj[Xj]])
    elif orderMethod == 'minwidth': score = lambda adj,Xj: len(adj[Xj])
    elif orderMethod == 'wtminwidth': score = lambda adj,Xj: adj[Xj].nrStatesDouble()
    elif orderMethod == 'random': score = lambda adj,Xj: np.random.rand()
    else: raise ValueError('Unknown ordering method: {}'.format(orderMethod))
    # Build the variable adjacency: each var is adjacent to all vars sharing a factor.
    adj = [ VarSet([Xi]) for Xi in gm.X ]
    for Xi in gm.X:
        for f in gm.factorsWith(Xi, copy=False):
            adj[Xi] |= f.vars
    # initialize priority queue of scores using e.g. heapq or sort
    scores = [ (priority[Xi],score(adj,Xi),Xi) for Xi in gm.X ]
    reverse = scores[:]
    scores.sort()
    totalSize = 0.0
    _order = [0 for Xi in gm.X]
    for idx in range(gm.nvar):
        pick = 0
        Pi,Si,Xi = scores[pick]
        if nExtra >= 0:
            mx = bisect.bisect_right(scores, (Pi,Si,gm.X[-1])) # get one past last equal-priority & score vars
            pick = min(mx+nExtra, len(scores)) # then pick a random "near-best" variable
            pick = np.random.randint(pick)
            Pi,Si,Xi = scores[pick]
        del scores[pick]
        _order[idx] = Xi.label # write into order[idx] = Xi
        totalSize += adj[Xi].nrStatesDouble()
        if totalSize > cutoff: return target,cutoff # if worse than cutoff, quit with no changes to "target"
        # Connect Xi's neighbors to each other (fill edges), then drop Xi.
        fix = VarSet()
        for Xj in adj[Xi]:
            adj[Xj] |= adj[Xi]
            adj[Xj] -= [Xi]
            fix |= adj[Xj] # shouldn't need to fix as much for min-width?
        # Re-score every variable whose neighborhood changed.
        for Xj in fix:
            Pj,Sj,Xj = reverse[Xj]
            jPos = bisect.bisect_left(scores, (Pj,Sj,Xj))
            del scores[jPos] # erase (Pj,Sj,Xj) from heap
            reverse[Xj] = (Pj,score(adj,Xj),Xj)
            bisect.insort_left(scores, reverse[Xj]) # add (Pj,score(adj,Xj),Xj) to heap & update reverse lookup
    if not (target is None):
        target.extend([None for i in range(len(target),len(_order))]) # make sure order is the right size
        for idx in range(gm.nvar): target[idx]=_order[idx] # copy result if completed without quitting
    return _order,totalSize
import sys
def inject_module(module, *args, **kwargs):
    """
    Imports a function from a python module :module: and executes it with
    *args, **kwargs arguments. Dotted referencing can be used to specify the
    function from the module; a bare module name runs its ``debug`` function.

    For example, the following code will execute func1 and func2 from module
    mymodule with no arguments
        inject_module('mymodule.func1')
        inject_module('mymodule.func2')

    Everytime this function is called the module is reloaded so that you can
    alter your debug code while the application is running.

    The result of the function is returned; if an exception is raised it is
    printed and the exception object is returned instead.
    """
    import importlib  # local import keeps this debug helper self-contained
    try:
        parsed = module.split('.')
        if len(parsed) == 1:
            module_name, func_name = parsed[0], 'debug'
        elif len(parsed) == 2:
            module_name, func_name = parsed
        else:
            # Previously fell through and crashed with a NameError; raise a
            # descriptive error instead (still caught and returned below).
            raise ValueError('expected "module" or "module.func", got %r' % module)
        if module_name in sys.modules:
            mod = sys.modules[module_name]
            # The reload() builtin was removed in Python 3; importlib.reload
            # provides the same "pick up live edits" behaviour.
            importlib.reload(mod)
        else:
            mod = __import__(module_name)
        f = getattr(mod, func_name, None)
        if f:
            return f(*args, **kwargs)
    except Exception as e:
        print(e)
        return e
import torch
def get_pretrain_data_loader(mode, pretrain_data_setting):
    """Build the pre-training dataloader.

    Args:
        mode (str): either "train" or "valid".
        pretrain_data_setting (dict, optional): pretrain dataset setting.

    Returns:
        loader (torch.dataloader): a PyTorch dataloader over all configured
        input datasets.
    """
    training = mode == "train"
    # Augmentation is applied only while training; validation always uses
    # the plain transform.
    if training and pretrain_data_setting["use augmentation"]:
        aug = train_transform
    else:
        aug = valid_transform
    datasets = [
        SignalDataset(signal_length=info["signal length"],
                      is_train=training,
                      folder_name=info["path"],
                      aug_transform=aug)
        for info in pretrain_data_setting["datasets"]
    ]
    combined = torch.utils.data.ConcatDataset(datasets)
    batch_sampler = _ConcatBatchSampler(combined,
                                        batch_size=pretrain_data_setting["batch size"],
                                        drop_last=False,
                                        shuffle=training)
    loader = torch.utils.data.DataLoader(combined,
                                         pin_memory=True,
                                         batch_sampler=batch_sampler)
    assert len(loader) > 0, "empty data loader from %s" % pretrain_data_setting
    return loader
def do_simple_math(number1, number2, operator):
    """
    Does simple math between two numbers and an operator.

    :param number1: The first number
    :param number2: The second number
    :param operator: The operator (string), one of "*", "/", "+", "-", "^", "%"
    :return: the result, or 0 for an unrecognised operator
    """
    # The previous implementation compared strings with `is`, which only
    # works by accident of CPython interning; dispatch by equality instead.
    operations = {
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "^": lambda a, b: a ** b,
        "%": lambda a, b: a % b,
    }
    operation = operations.get(operator)
    return operation(number1, number2) if operation else 0
def test_global_settings_data():
    """Ensure that GlobalSettingsData objects are properly initialized
    per-thread.

    Five delayed tasks run concurrently; each sets its own ``testing_index``
    on the thread-local settings object and checks it survives a sleep,
    which would fail if the state were shared between workers.
    """
    def check_initialized(index):
        # Task 0 additionally verifies a fresh thread-local object starts
        # without any leftover ``testing_index`` attribute.
        if index == 0:
            sleep(0.1)
            with pytest.raises(AttributeError):
                _global_settings_data.testing_index  # pylint: disable=W0104
        _global_settings_data.testing_index = index
        # Sleep so the tasks overlap in time; overlapping writes would
        # clobber each other if the data were not thread-local.
        sleep(0.5)
        return (
            test_global_settings_data_obj.shared_state['_output_type'] is None
            and test_global_settings_data_obj.shared_state['root_cm'] is None
            and _global_settings_data.testing_index == index
        )
    results = [
        delayed(check_initialized)(index)
        for index in range(5)
    ]
assert (delayed(all)(results)).compute() | bd1229bb9150b25c88be621d5af0f8da9cf7327d | 30,146 |
def set_client(client):
    """
    Install *client* as the module-wide HTTP client for the sdk.

    Returns the client that was previously installed.
    """
    global _global_client
    replaced = _global_client
    _global_client = client
    return replaced
def readNetAddress(b, hasStamp):
    """
    Reads an encoded NetAddress from b depending on the protocol version and
    whether or not the timestamp is included per hasStamp. Some messages like
    version do not include the timestamp.

    Args:
        b (ByteArray): The encoded NetAddress.
        hasStamp (bool): Whether or not the NetAddress has a timestamp.

    Returns:
        NetAddress: The decoded NetAddress.

    Raises:
        DecredError: If b is not exactly 26 bytes (30 with a timestamp).
    """
    # Fixed layout: [4-byte stamp] 8-byte services, 16-byte IP, 2-byte port.
    expLen = 30 if hasStamp else 26
    if len(b) != expLen:
        raise DecredError(
            f"readNetAddress wrong length (hasStamp={hasStamp}) expected {expLen}, got {len(b)}"
        )
    # NOTE: The Decred protocol uses a uint32 for the timestamp so it will
    # stop working somewhere around 2106. Also timestamp wasn't added until
    # protocol version >= NetAddressTimeVersion
    stamp = b.pop(4).unLittle().int() if hasStamp else 0
    services = b.pop(8).unLittle().int()
    ip = b.pop(16)
    # IPv4 addresses arrive as IPv4-mapped IPv6; strip the 12-byte prefix.
    if ip[:12] == ipv4to16prefix:
        ip = ip[12:]
    # Sigh. Decred protocol mixes little and big endian.
    port = b.pop(2).int()
return NetAddress(ip=ip, port=port, services=services, stamp=stamp,) | 7d523c0465039008e0015c075e8282a1aacea000 | 30,148 |
def get_all_clouds(session, return_type=None, **kwargs):
    """
    Retrieve details for every available storage cloud.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.
    :type return_type: str
    :param return_type: Pass the string 'json' to get a JSON string back;
        otherwise a Python dictionary is returned (the default).
    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    return session.get_api(path='/api/v2/clouds.json',
                           return_type=return_type, **kwargs)
def _get_account_balances_by_regid(user_regid):
    """
    Return the uw_sws.models.Finance object for the given regid,
    or None when no regid is supplied.
    """
    if user_regid is None:
        return None
    return get_account_balances_by_regid(user_regid)
def scrape_meaning(word):
    """
    Scrape the meaning of *word* from a google search and return the
    parsed result.
    """
    # TODO: validate *word* before issuing the web request.
    raw_html = fetch_content_from_web(word)
    return parse_html_content(raw_html)
import os
def libname(name):
    """Map 'name' to its shared-library filename, e.g. libname.cpython-37m-darwin.so."""
    ext_filename = build_ext_cmd.get_ext_filename(name)
    stem, _ext = os.path.splitext(ext_filename)
    return build_ext_cmd.shlib_compiler.library_filename(stem, libtype)
def make_embed(msg_type='', title=None, icon=None, content=None,
               msg_colour=None, guild=None, title_url=None,
               thumbnail='', image='', fields=None, footer=None,
               footer_icon=None, inline=False):
    """Returns a formatted discord embed object.

    Define either a type or a colour.
    Types are:
    error, warning, info, success, help.

    Args:
        msg_type: one of the preset types above; selects both icon and colour
            and overrides any explicit msg_colour/icon.
        title / icon / title_url: author line of the embed.
        content: the embed description body.
        msg_colour: explicit colour; normalised through ``colour()`` unless
            it is already a discord.Colour.
        guild: used to derive a colour when no colour was supplied.
        thumbnail / image: image URLs (set only when non-empty).
        fields: mapping of field name -> value, or name -> (inline, value).
        footer / footer_icon: footer text and optional icon URL.
        inline: default inline flag for fields.
    """
    # Preset icon/colour pairs for the known message types.
    embed_types = {
        'error':{
            'icon':'https://i.imgur.com/juhq2uJ.png',
            'colour':'red'
        },
        'warning':{
            'icon':'https://i.imgur.com/4JuaNt9.png',
            'colour':'gold'
        },
        'info':{
            'icon':'https://i.imgur.com/wzryVaS.png',
            'colour':'blue'
        },
        'success':{
            'icon':'https://i.imgur.com/ZTKc3mr.png',
            'colour':'green'
        },
        'help':{
            'icon':'https://i.imgur.com/kTTIZzR.png',
            'colour':'blue'
        }
    }
    # A recognised preset type overrides any caller-supplied colour/icon.
    if msg_type in embed_types.keys():
        msg_colour = embed_types[msg_type]['colour']
        icon = embed_types[msg_type]['icon']
    if guild and not msg_colour:
        msg_colour = colour(guild)
    else:
        # Normalise whatever was passed into a discord.Colour.
        if not isinstance(msg_colour, discord.Colour):
            msg_colour = colour(msg_colour)
    embed = discord.Embed(description=content, colour=msg_colour)
    if not title_url:
        title_url = discord.Embed.Empty
    if not icon:
        icon = discord.Embed.Empty
    if title:
        embed.set_author(name=title, icon_url=icon, url=title_url)
    if thumbnail:
        embed.set_thumbnail(url=thumbnail)
    if image:
        embed.set_image(url=image)
    if fields:
        for key, value in fields.items():
            ilf = inline
            # A non-string value is an (inline, value) pair.
            if not isinstance(value, str):
                ilf = value[0]
                value = value[1]
            embed.add_field(name=key, value=value, inline=ilf)
    if footer:
        footer = {'text':footer}
        if footer_icon:
            footer['icon_url'] = footer_icon
        embed.set_footer(**footer)
return embed | 5cdeb5862ffc525160361f760b5530e15d3258c1 | 30,153 |
import torch
def dynamic_stitch(indices, data):
    """
    Interleave the tensors in *data* into one tensor according to *indices*
    (mirrors tf.dynamic_stitch).

    Args
        indices: A list of at least 1 Tensor objects with type int32.
        data: A list with the same length as indices of Tensor objects with
            the same type.

    Returns
        A Tensor. Has the same type as data.
    """
    # Output length along dim 0 is one past the largest index mentioned
    # anywhere; empty index tensors contribute 0.
    largest = max(torch.max(ix) if ix.shape[0] != 0 else 0 for ix in indices)
    first_dim = int(largest) + 1
    inner_dims = list(data[0].shape[indices[0].ndim:])
    out = torch.empty(torch.Size([first_dim] + inner_dims), dtype=data[0].dtype)
    for ix, values in zip(indices, data):
        out[ix] = values
    return out
def snake(string):
    """snake_case: join the whitespace-separated words with underscores."""
    words = string.split()
    return "_".join(words)
def parse_standard_metadata():
    """
    Gather the standard metadata information from Jenkins and the DBMS.

    Returns
    -------
    The metadata obtained from Jenkins and the DBMS (DBMS keys override
    Jenkins keys on collision).

    Warnings
    --------
    Underlying implementation is hacky right now.
    """
    metadata = dict(_parse_jenkins_env_vars())
    metadata.update(_parse_db_metadata())
    return metadata
import jinja2
import json
def load_resource(api_server):
    """Load and merge the default resource template files.

    :type api_server: str
    :rtype: dict
    """
    logger.debug('load_resource({0})...'.format(api_server))
    template_files = [
        "resources/misc.json.j2",
        "resources/api_clients.json.j2",
        "resources/api_apps.json.j2",
        "resources/api_files.json.j2",
        "resources/api_jobs.json.j2",
        "resources/api_meta.json.j2",
        "resources/api_monitors.json.j2",
        "resources/api_notifications.json.j2",
        "resources/api_postits.json.j2",
        "resources/api_profiles.json.j2",
        "resources/api_systems.json.j2",
        "resources/api_transforms.json.j2",
        "resources/api_actors.json.j2",
        "resources/api_admin.json.j2",
    ]
    context = {"api_server_base": urlparse(api_server).netloc}
    merged = {}
    for template_file in template_files:
        conf = ConfigGen(template_file)
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(HERE),
                                 trim_blocks=True,
                                 lstrip_blocks=True)
        rendered = json.loads(conf.compile(context, env))
        # Deep-merge each rendered template into the accumulated result.
        updateDict(merged, rendered)
    logger.debug('load_resource finished')
    return merged
def match_all_args(ctx, node, func, args):
    """Call match_args multiple times to find all type errors.

    Args:
        ctx: The abstract context.
        node: The current CFG node.
        func: An abstract function
        args: An Args object to match against func

    Returns:
        A tuple of (new_args, errors)
        where new_args = args with all incorrectly typed values set to Any
        errors = a list of [(type mismatch error, arg name, value)]

    Raises:
        Reraises any error that is not function.InvalidParameters
    """
    positional_names = func.get_positional_names()
    needs_checking = True
    errors = []
    # Re-match repeatedly; each pass records one category of error and
    # neutralises the offending argument (replacing it with an unsolvable /
    # Any value) so the next pass can surface the next error.
    while needs_checking:
        try:
            func.match_args(node, args)
        except FailedFunctionCall as e:
            if isinstance(e, WrongKeywordArgs):
                errors.append((e, e.extra_keywords[0], None))
                for i in e.extra_keywords:
                    args = args.delete_namedarg(i)
            elif isinstance(e, DuplicateKeyword):
                errors.append((e, e.duplicate, None))
                args = args.delete_namedarg(e.duplicate)
            elif isinstance(e, MissingParameter):
                errors.append((e, e.missing_parameter, None))
                args = args.replace_namedarg(
                    e.missing_parameter, ctx.new_unsolvable(node))
            elif isinstance(e, WrongArgTypes):
                arg_name = e.bad_call.bad_param.name
                for name, value in e.bad_call.passed_args:
                    if name != arg_name:
                        continue
                    errors.append((e, name, value))
                    # Replace positionally when the bad parameter is
                    # positional, otherwise by keyword.
                    try:
                        pos = positional_names.index(name)
                    except ValueError:
                        args = args.replace_namedarg(name, ctx.new_unsolvable(node))
                    else:
                        args = args.replace_posarg(pos, ctx.new_unsolvable(node))
                    break
                else:
                    raise AssertionError(
                        "Mismatched parameter %s not found in passed_args" %
                        arg_name) from e
            else:
                # This is not an InvalidParameters error.
                raise
        else:
            needs_checking = False
return args, errors | 88bd473876dd3a286c02330023555dab211336df | 30,158 |
import torch
def samples_from_cpprb(npsamples, device=None):
    """
    Convert samples generated by cpprb.ReplayBuffer.sample() into
    State, Action, rewards, State.

    Args:
        npsamples (dict of nparrays):
            Samples generated by cpprb.ReplayBuffer.sample()
        device (optional): The device where the outputs are loaded
            (currently unused; kept for interface compatibility).

    Returns:
        Samples(State, Action, torch.FloatTensor, State)
    """
    observations = npsamples["obs"]
    actions = npsamples["act"]
    rewards = torch.tensor(npsamples["rew"], dtype=torch.float32).squeeze()
    next_observations = npsamples["next_obs"], npsamples["done"]
    return Samples(observations, actions, rewards, next_observations)
def generateKeys():
    """
    Generate a dictionary mapping the currently used column names (keys)
    to the original column names from the LIDAR file (values), e.g.

    ws_1  : Speed Value.1
    dir_1 : Direction Value.1
    h_1   : Node RT01 Lidar Height
    """
    keys = {"ws_0": "Speed Value",
            "dir_0": "Direction Value",
            "h_0": "Node RT00 Lidar Height"}
    for idx in range(1, 11):
        keys["ws_{}".format(idx)] = "Speed Value.{}".format(idx)
        keys["dir_{}".format(idx)] = "Direction Value.{}".format(idx)
        # Height node numbers are offset by one relative to the alias index.
        keys["h_{}".format(idx)] = "Node RT{:02d} Lidar Height".format(idx + 1)
    return keys
def process_line(line):
    """Return the syntax-error score of *line* (0 when the line is valid)."""
    open_brackets = []
    for ch in line:
        if ch in '([{<':
            open_brackets.append(ch)
        elif ch != closings[open_brackets.pop()]:
            # First mismatched closer determines the score.
            return points[ch]
    return 0
def get_uniprot_homologs(rev=False):
    """Map uniprot ids to mouse uniprot homologs (reversed when rev=True)."""
    homologs = {}
    with open('data/corum_mouse_homologs.txt') as infile:
        rows = [line.strip().split('\t') for line in infile]
    for row in rows:
        source = row[1].split('|')[1]
        target = row[0]
        # First hit wins (and stays best): input must be sorted by seqid
        # in descending order!
        homologs.setdefault(source, target)
    if rev:
        return {value: key for key, value in homologs.items()}
    return homologs
import typing
def flatten(
    value: list,
    levels: typing.Optional[int] = None
) -> list:
    """Flatten a nested list.

    .. code-block:: yaml

        - vars:
            new_list: "{{ [1, 2, [3, [4, 5, [6]], 7]] | flatten }}"
            # -> [1, 2, 3, 4, 5, 6, 7]

    To flatten only the top level, use the ``levels`` argument:

    .. code-block:: yaml

        - vars:
            new_list: "{{ [1, 2, [3, [4, 5, [6]], 7]] | flatten(levels=1) }}"
            # -> [1, 2, 3, [4, 5, [6]], 7]

    .. versionadded:: 1.1

    :param levels: Number of levels to flatten. If `None` - flatten everything.
    """
    flattened = _utils.flatten(value, levels=levels)
    return list(flattened)
def GetStage(messages):
    """Return the Stage class matching the given messages module version."""
    if messages is apis.GetMessagesModule(_API_NAME, _V2_ALPHA):
        return messages.GoogleCloudFunctionsV2alphaStage
    if messages is apis.GetMessagesModule(_API_NAME, _V2_BETA):
        return messages.GoogleCloudFunctionsV2betaStage
    # Fall back to the GA surface.
    return messages.GoogleCloudFunctionsV2Stage
def get_general_case_info(adapter, institute_id=None, slice_query=None):
    """Return general information about cases

    Args:
        adapter(adapter.MongoAdapter)
        institute_id(str)
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        general(dict)
    """
    # Potentially sensitive slice queries are assumed allowed if we have got this far
    cases = adapter.cases(owner=institute_id, name_query=slice_query)

    phenotype_cases = 0
    causative_cases = 0
    pinned_cases = 0
    cohort_cases = 0

    # Family-size histogram; families of more than three individuals are
    # lumped together under 'many'.
    pedigree = {
        1: {'title': 'Single', 'count': 0},
        2: {'title': 'Duo', 'count': 0},
        3: {'title': 'Trio', 'count': 0},
        'many': {'title': 'Many', 'count': 0},
    }

    case_ids = set()
    total_cases = 0
    for case in cases:
        total_cases += 1
        # When restricted to a single institute, remember the case ids seen.
        if institute_id:
            case_ids.add(case['_id'])
        if case.get('phenotype_terms'):
            phenotype_cases += 1
        if case.get('causatives'):
            causative_cases += 1
        if case.get('suspects'):
            pinned_cases += 1
        if case.get('cohorts'):
            cohort_cases += 1
        nr_individuals = len(case.get('individuals', []))
        if nr_individuals == 0:
            continue
        bucket = nr_individuals if nr_individuals <= 3 else 'many'
        pedigree[bucket]['count'] += 1

    return {
        'total_cases': total_cases,
        'phenotype_cases': phenotype_cases,
        'causative_cases': causative_cases,
        'pinned_cases': pinned_cases,
        'cohort_cases': cohort_cases,
        'pedigree': pedigree,
        'case_ids': case_ids,
    }
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # NOTE(review): ``long``/``unicode``/``NewId`` are Python-2 / Odoo names,
    # so this presumably targets Python 2 — confirm before porting.  The
    # mutable default ``atoms`` is deliberate: built once at definition time
    # and only ever read, it avoids rebuilding the set on every call.
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()
    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,
return tuple(arg) | 1a7b930896a046357474000b8ebc598f70fbba76 | 30,166 |
def is_intersection(g, n):
    """
    Determine if a node is an intersection, i.e. has more than two
    distinct neighbours.

    graph: 1 -->-- 2 -->-- 3
    >>> is_intersection(g, 2)
    False

    graph:
    1 -- 2 -- 3
         |
         4
    >>> is_intersection(g, 2)
    True

    Parameters
    ----------
    g : networkx DiGraph
    n : node id

    Returns
    -------
    bool
    """
    neighbours = set(g.predecessors(n) + g.successors(n))
    return len(neighbours) > 2
def AxisRotation(p, ang, inplace=False, deg=True, axis='z'):
    """Rotate points *p* by angle *ang* about a coordinate axis.

    Parameters
    ----------
    p : ndarray, shape (n, 3)
        Points to rotate (columns are x, y, z).
    ang : float
        Rotation angle (degrees when ``deg`` is True, else radians).
    inplace : bool
        When True, rotate ``p`` in place and return None; otherwise rotate
        a copy and return it.
    deg : bool
        Interpret ``ang`` as degrees (default) or radians.
    axis : str
        One of 'x', 'y' or 'z' (case-insensitive).

    Raises
    ------
    ValueError
        For an unknown axis (was a bare ``Exception``; ValueError is still
        caught by existing ``except Exception`` handlers).
    """
    axis = axis.lower()
    # Validate up front, before any copying or computation.
    if axis not in ('x', 'y', 'z'):
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')
    # Copy original array if not inplace
    if not inplace:
        p = p.copy()
    # Convert angle to radians without mutating the caller's value
    # (the previous ``ang *= ...`` modified array-valued angles in place).
    if deg:
        ang = ang * np.pi / 180
    cos_a = np.cos(ang)
    sin_a = np.sin(ang)
    if axis == 'x':
        y = p[:, 1] * cos_a - p[:, 2] * sin_a
        z = p[:, 1] * sin_a + p[:, 2] * cos_a
        p[:, 1] = y
        p[:, 2] = z
    elif axis == 'y':
        x = p[:, 0] * cos_a + p[:, 2] * sin_a
        z = - p[:, 0] * sin_a + p[:, 2] * cos_a
        p[:, 0] = x
        p[:, 2] = z
    else:  # axis == 'z'
        x = p[:, 0] * cos_a - p[:, 1] * sin_a
        y = p[:, 0] * sin_a + p[:, 1] * cos_a
        p[:, 0] = x
        p[:, 1] = y
    if not inplace:
        return p
from pathlib import Path
from typing import List
def get_dir_list(path: Path) -> List[str]:
    """
    Recursively collect every directory below *path* as a list of strings.
    """
    return [str(entry) for entry in Path(path).glob("**/*") if entry.is_dir()]
import typing
def tokenize(data: typing.Union[str, typing.Sequence[str]]) -> list[str]:
    """Break a string (or sequence of strings) into tokens.

    Tokens may be separated by commas or spaces.  Separate tokens are
    created for:
    - "(" or "[" at the beginning of a token
    - ")" or "]" at the end of a token
    """
    if isinstance(data, str):
        data = [data]
    tokens: list[str] = []
    for datum in data:
        for raw in datum.replace(',', ' ').split(' '):
            if not raw:
                continue
            if len(raw) == 1:
                tokens.append(raw)
                continue
            opens = raw[0] in ['(', '[']
            closes = raw[-1] in [')', ']']
            if opens and closes:
                tokens.append(raw[0])
                # Keep the interior only when something sits between the
                # interval delimiters.
                if len(raw) > 2:
                    tokens.append(raw[1:-1])
                tokens.append(raw[-1])
            elif opens:
                tokens.extend([raw[0], raw[1:]])
            elif closes:
                tokens.extend([raw[:-1], raw[-1]])
            else:
                tokens.append(raw)
    return tokens
def de_comma(string):
    """Strip any trailing commas from *string*.

    >>> de_comma(',fred,,') == ',fred'
    True
    """
    while string.endswith(','):
        string = string[:-1]
    return string
def MakeData(ea, flags, size, tid):
    """
    Create a data item at the specified address.

    @param ea: linear address
    @param flags: FF_BYTE..FF_PACKREAL
    @param size: size of item in bytes
    @param tid: for FF_STRU the structure id
    @return: 1-ok, 0-failure
    """
    result = idaapi.do_data_ex(ea, flags, size, tid)
    return result
def get_node_network_receive(cluster_id, ip, start, end, bk_biz_id=None):
    """Query the node's network receive rate from Prometheus.

    start/end are in milliseconds (matching the data platform); the
    returned values are in KB/s.  Returns {} when no data is available.
    """
    # One data point per minute across the requested window.
    step = (end - start) // 60
    prom_query = f"""
    max(rate(node_network_receive_bytes_total{{cluster_id="{cluster_id}",job="node-exporter", instance=~"{ ip }:9100"}}[5m]))
    """  # noqa
    result = query_range(prom_query, start, end, step)
    return result.get("data") or {}
def verify_vrrpv3_summary(dut, **kwargs):
    """
    Verify entries in the "show vrrp6" summary output.

    Author: Raghukumar Rampur
    email : raghukumar.thimmareddy@broadcom.com

    :param dut:
    :param interface:
    :type string or list
    :param vrid:
    :type string or list
    :param vip:
    :type virtual-ip in string or list
    :param state:
    :type vrrp state as string or list
    :param config_prio:
    :type configured vrrp priority as list or string
    :param current_prio:
    :type Current vrrp priority as list or string
    :return: True when every requested VRRP instance is found, else False.

    Usage
    verify_vrrpv3_summary(dut1,vrid=['49','85'],state=['Master','Backup'],
                          interface=['Vlan2996','Vlan2998'],vip=['73.73.73.66','85.85.85.71'],
                          config_prio=[222,97],current_prio=[222,99])
    verify_vrrpv3_summary(dut1,vrid='49',state='Master')
    """
    ret_val = True
    cli_type = kwargs.get("cli_type", st.get_ui_type(dut))
    if cli_type in ['rest-patch', 'rest-put']:
        cli_type = 'klish'
    output = st.show(dut, 'show vrrp6', type=cli_type)
    if len(output) == 0:
        st.error("Output is Empty")
        return False
    if 'return_output' in kwargs:
        return output
    # Normalise every kwarg to a list so single values and lists of VRRP
    # instances are handled uniformly.
    for key in kwargs:
        if isinstance(kwargs[key], list):
            kwargs[key] = list(kwargs[key])
        else:
            kwargs[key] = [kwargs[key]]
    if not kwargs:
        return ret_val
    # Build one match-dict per VRRP instance (taking the i-th element of
    # every kwarg list).  NOTE: the previous ``kwargs.keys()[0]`` is a
    # TypeError on Python 3 — dict views are not subscriptable.
    first_key = next(iter(kwargs))
    input_dict_list = []
    for i in range(len(kwargs[first_key])):
        input_dict_list.append({key: kwargs[key][i] for key in kwargs})
    for input_dict in input_dict_list:
        entries = filter_and_select(output, None, match=input_dict)
        if not entries:
            st.error("DUT {} -> Match Not Found {}".format(dut, input_dict))
            ret_val = False
    return ret_val
def attributes_restore(node):
    """Restore previously unlocked attributes to their default state.

    Args:
        node (str): Node to restore attributes

    Returns:
        bool: False if the saved-state attribute doesn't exist, else True
    """
    attr_name = "attributes_state"
    base_attr = "{}.{}".format(node, attr_name)
    if not cmds.objExists(base_attr):
        return False
    # The saved state is stored as a repr'd dict on the node.
    attr_data = literal_eval(cmds.getAttr(base_attr) or "{}")
    # dict.iteritems() does not exist on Python 3 (Maya 2022+); use items().
    for _attr, values in attr_data.items():
        node_attr = "{}.{}".format(node, _attr)
        cmds.setAttr(node_attr, **values)
    # Clean up the bookkeeping attribute once everything is restored.
    cmds.deleteAttr(base_attr)
    return True
import pickle
def get_actual_data(base, n_run, log_path, subfolders):
    """
    Load the pickled ``global_save.txt`` of one experiment run.

    :param base: the sub folder name right before the _DATE_InstanceNumber
    :param n_run: the INSTANCE number in the subfolder name
    :param log_path: path to the main log folder containing all the runs of
        an experiment (e.g. ../data/CH6-14S1G1TNSV/)
    :param subfolders: the list of all the sub folders contained in log_path
    :return: the unpickled data, or None when no subfolder matches
    """
    for subfolder in subfolders:
        parts = subfolder.split('_')
        # Subfolder names look like "<base>_<DATE>_<NNN>"; match on base name
        # and the zero-padded instance number.
        if parts[0] == base and str(n_run).zfill(3) == parts[2]:
            filepath = log_path + '/' + subfolder + '/global_save.txt'
            try:
                # Context manager guarantees the file handle is closed
                # (the previous open() without `with` leaked it).
                with open(filepath, "rb") as handle:
                    data = pickle.load(handle)
            except Exception:
                # Narrowed from a bare except; behaviour kept: report and quit.
                print('Make sure your parameters are right!')
                exit()
            return data
def secs_to_str(secs):
    """Given number of seconds returns, e.g., `02h 29m 39s`"""
    parts = []
    remaining = secs
    for unit, per_next in (('s', 60), ('m', 60), ('h', 24), ('d', 7)):
        parts.append((remaining % per_next, unit))
        remaining = int(remaining / per_next)
        if not remaining:
            break
    # Whatever is left over after days are taken out is whole weeks.
    if remaining:
        parts.append((remaining, 'w'))
    return ' '.join('%02d%s' % pair for pair in reversed(parts))
def shuffle(xsets, ysets, seed=None):
    """Shuffle two datasets harmonically (with the same permutation).

    Args:
        xsets, ysets: datasets of equal length (numpy-indexable).
        seed: optional seed for reproducible shuffling.

    Returns:
        (shuffled_x, shuffled_y): tuple of the shuffled datasets.

    Raises:
        ValueError: when the two datasets differ in length.
    """
    if len(xsets) != len(ysets):
        raise ValueError
    np.random.seed(seed=seed)
    permutation = np.random.permutation(len(xsets))
    return (xsets[permutation], ysets[permutation])
import math
def get_CL_parameters(file_pointer, class_10_100_1000):
    """ Function to predict cluster count and mean size by means of clustering

    Args:
        file_pointer: string with a file path
        class_10_100_1000: image-class key, one of '10_1', '10_2', '100',
            '1000'; selects the tuned clustering parameters.

    Returns:
        tuple with(
            clusters: predicted number of clusters
            log_mean_radius: predicted mean radius in log (pixels)
        )
        Both are 0 when the DBSCAN fit fails or yields NaN.

    Raises:
        ValueError: for an unknown image class (previously this crashed with
            an unrelated NameError on the unbound ``param`` variable).
    """
    # =========================================================================
    # optimal parameters per image class, in make_param_list_11 argument order:
    # (binarize, eps, min_samples, filter_boundary, remove_large_clusters,
    #  remove_small_clusters, max_filter, eps_grain_boundary,
    #  min_sample_grain_boundary, binarize_bdr_coord, binarize_grain_coord)
    # =========================================================================
    settings_by_class = {
        '1000': (70, 2, 2, 100, 200, 10, 3, 2, 5, 100, 100),
        '100': (15, 2, 2, 100, 20, 100, 3, 2, 5, 100, 100),
        '10_1': (30, 2, 5, 100, 0, 800, 3, 2, 5, 100, 100),
        '10_2': (5, 2, 5, 100, 0, 800, 3, 2, 5, 100, 100),
    }
    if class_10_100_1000 not in settings_by_class:
        raise ValueError('unknown image class: {}'.format(class_10_100_1000))
    # make_param_list_11 expects one list per parameter; with single-element
    # lists it yields exactly one combined parameter tuple, matching the
    # original per-class boilerplate blocks.
    settings = settings_by_class[class_10_100_1000]
    param = DL_helper_functions.make_param_list_11(*[[v] for v in settings])[0]
    # define parameters (same positional layout as before)
    (binarize,
     eps,
     min_sample,
     filter_boundary,
     remove_large_clusters,
     remove_small_clusters,
     max_filter,
     eps_grain_boundary,
     min_sample_grain_boundary,
     binarize_bdr_coord,
     binarize_grain_coord) = param
    (image_X, m_real, s_real) = CL_load.load_func(file_pointer, threshold=binarize, max_filter=3)
    try:
        print('Clustering image')
        (m_CL, s_CL, clusters) = CL_DBscan.fit_DBscan(image_X,
                                                      eps,
                                                      eps_grain_boundary,
                                                      min_sample,
                                                      min_sample_grain_boundary,
                                                      filter_boundary,
                                                      remove_large_clusters,
                                                      remove_small_clusters,
                                                      binarize_bdr_coord,
                                                      binarize_grain_coord,
                                                      )
        if math.isnan(m_CL):
            (m_CL, clusters) = (0, 0)
    except Exception:
        # Narrowed from a bare except; a failed fit degrades to (0, 0).
        print('fit went wrong', str(param))
        (m_CL, clusters) = (0, 0)
    log_mean_radius = m_CL
    return (clusters, log_mean_radius)
def _nanclean(cube, rejectratio=0.25, boxsz=1):
    """
    Detects NaN values in cube and removes them by replacing them with an
    interpolation of the nearest neighbors in the data cube. The positions in
    the cube are retained in nancube for later remasking.

    Args:
        cube: 3-D array (axis 0 is spectral, axes 1-2 spatial) that may
            contain NaNs.
        rejectratio: reject a spaxel outright when more than this fraction
            of its axis-0 pixels are NaN.
        boxsz: half-width of the cubic neighborhood used for interpolation.

    Returns:
        (cleancube, badcube): the cleaned copy and the boolean mask of the
        pixels that were interpolated.
    """
    logger.info('Cleaning NaN values in the cube')
    cleancube = cube.copy()
    badcube = np.logical_not(np.isfinite(cleancube))  # find NaNs
    badmap = badcube.sum(axis=0)  # map of total nans in a spaxel
    # choose some maximum number of bad pixels in the spaxel and extract
    # positions
    badmask = badmap > (rejectratio * cleancube.shape[0])
    logger.info('Rejected %d spaxels with more than %.1f%% NaN pixels',
                np.count_nonzero(badmask), rejectratio * 100)
    # make cube mask of bad spaxels (rejected spaxels are left untouched)
    badcube &= (~badmask[np.newaxis, :, :])
    z, y, x = np.where(badcube)
    neighbor = np.zeros((z.size, (2 * boxsz + 1)**3))
    icounter = 0
    logger.info("Fixing %d remaining NaN pixels", len(z))
    # loop over samplecubes: gather every neighbour inside the box around
    # each bad pixel; out-of-bounds positions contribute NaN and are then
    # ignored by nanmean below.
    nz, ny, nx = cleancube.shape
    for j in range(-boxsz, boxsz + 1, 1):
        for k in range(-boxsz, boxsz + 1, 1):
            for l in range(-boxsz, boxsz + 1, 1):
                iz, iy, ix = z + l, y + k, x + j
                outsider = ((ix <= 0) | (ix >= nx - 1) |
                            (iy <= 0) | (iy >= ny - 1) |
                            (iz <= 0) | (iz >= nz - 1))
                ins = ~outsider
                neighbor[ins, icounter] = cleancube[iz[ins], iy[ins], ix[ins]]
                neighbor[outsider, icounter] = np.nan
                icounter = icounter + 1
    cleancube[z, y, x] = np.nanmean(neighbor, axis=1)
return cleancube, badcube | 154bf994161a932505101ccbe921792e2d3c9f3b | 30,180 |
import json
def parseData(filePath):
    """
    Try to import a JSON JobShop PRO file into the program.

    :return: (machinesList, itinerariesList); both empty when the file does
        not match the expected structure.
    """
    machines = []
    itineraries = []
    with open(filePath, 'r', encoding="utf8") as handle:  # read file from path
        imported = json.loads(handle.read())
        # NOTE(review): this key-order comparison assumes "itineraries"
        # precedes "machines" in the file — confirm this is intended.
        if list(imported.keys()) == ["itineraries", "machines"]:
            raw_machines = imported['machines']
            raw_itineraries = imported['itineraries']
            if len(list(raw_machines)) > 0 and len(list(raw_itineraries)) > 0:
                for machine_dict in raw_machines:
                    machines.append(Machine(machine_dict['machineName']))
                # For each itinerary, validate the structure of every task.
                for itinerary_dict in raw_itineraries:
                    itinerary = Itinerary()
                    itinerary.name = itinerary_dict['itineraryName']
                    for task_dict in itinerary_dict['tasksList']:
                        if list(task_dict.keys()) == ['taskName', 'taskMachine', 'taskDuration']:
                            task_machine = task_dict['taskMachine']
                            if list(task_machine.keys()) == ["machineName"]:
                                itinerary.tasksList.append(
                                    Task(task_dict['taskName'],
                                         float(task_dict['taskDuration']),
                                         [mac for mac in task_machine["machineName"]]))
                    # parsing of this itinerary finished; add it to the result
                    itineraries.append(itinerary)
    return machines, itineraries
def curve_fit_log(xdata, ydata, sigma):
    """Fit a power law by linear regression in log-log space.

    All inputs (x, y, and the uncertainties) are log10-transformed first,
    which weights the residuals on a logarithmic scale, and the linear
    model ``linlaw`` is fitted to the transformed data.

    Returns a tuple ``(popt_log, pcov_log, ydatafit_log)``: the optimal
    parameters and covariance of the fit in log space, plus the fitted
    curve mapped back to linear space. The original x data needs no
    inverse transform since it is already available to the caller.
    """
    log_x = np.log10(xdata)
    log_y = np.log10(ydata)
    log_sigma = np.log10(sigma)
    # A straight line in log-log space is a power law in linear space.
    popt_log, pcov_log = curve_fit(linlaw, log_x, log_y,
                                   sigma=log_sigma)
    # Undo the log10 on y to express the fit in linear space.
    fitted_y = np.power(10, linlaw(log_x, *popt_log))
    return (popt_log, pcov_log, fitted_y)
def get_computed_response_text_value(response):
    """
    Extract the text message from a Dialogflow response.

    Prefers ``query_result.fulfillment_text``; when that is empty or
    missing, falls back to the first fulfillment message's first text
    entry. Returns None when neither yields a non-empty string or the
    response structure is malformed.

    :param response: a Dialogflow detect-intent response object
    :return: the extracted text, or None
    """
    try:
        # Truthiness covers both empty string and None (the old
        # ``len(...)`` check raised on None and skipped the fallback).
        if response.query_result.fulfillment_text:
            return response.query_result.fulfillment_text
        if response.query_result.fulfillment_messages[0].text.text[0]:
            return response.query_result.fulfillment_messages[0].text.text[0]
        return None
    except (AttributeError, IndexError, TypeError):
        # Missing attributes or empty message lists mean "no text".
        return None
def tmpdir_factory(request):
    """Session-scoped accessor for pytest's temporary-directory factory.

    Returns the :class:`_pytest.tmpdir.TempdirFactory` instance stored on
    the session's config object.
    """
    factory = request.config._tmpdirhandler
    return factory
import os
def get_sims(word, language, lemmatized=False, threshold=0.70):
    """Get similar Word2Vec terms from vocabulary or trained model.

    Loads a pre-trained CLTK Word2Vec model for ``language`` ('greek' or
    'latin') and returns the terms most similar to ``word`` whose score
    exceeds ``threshold``. Returns None when the word is absent from the
    model's vocabulary; returns an empty list when matches exist but all
    fall below the threshold.

    TODO: Add option to install corpus if not available.
    """
    # Normalize incoming word string
    jv_replacer = JVReplacer()
    if language == 'latin':
        # Note that casefold() seemingly does not work with diacritic
        # Greek, likely because of it expects single code points, not
        # diacritics. Look into global string normalization to code points
        # for all languages, especially Greek.
        word = jv_replacer.replace(word).casefold()
    model_dirs = {'greek': '~/cltk_data/greek/model/greek_word2vec_cltk',
                  'latin': '~/cltk_data/latin/model/latin_word2vec_cltk'}
    assert language in model_dirs.keys(), 'Langauges available with Word2Vec model: {}'.format(model_dirs.keys())
    if lemmatized:
        lemma_str = '_lemmed'
    else:
        lemma_str = ''
    # Model filename encodes training hyperparameters (size/window/min count).
    model_name = '{0}_s100_w30_min5_sg{1}.model'.format(language, lemma_str)
    model_dir_abs = os.path.expanduser(model_dirs[language])
    model_path = os.path.join(model_dir_abs, model_name)
    try:
        model = Word2Vec.load(model_path)
    except FileNotFoundError as fnf_error:
        print(fnf_error)
        print("CLTK's Word2Vec models cannot be found. Please import '{}_word2vec_cltk'.".format(language))
        raise
    try:
        similars = model.most_similar(word)
    except KeyError as key_err:
        # Word not in vocabulary: suggest terms sharing the first 3 characters.
        print(key_err)
        possible_matches = []
        for term in model.vocab:
            if term.startswith(word[:3]):
                possible_matches.append(term)
        print("The following terms in the Word2Vec model you may be looking for: '{}'.".format(possible_matches))
        return None
    # Keep only matches above the similarity threshold.
    returned_sims = []
    for similar in similars:
        if similar[1] > threshold:
            returned_sims.append(similar[0])
    if not returned_sims:
        print("Matches found, but below the threshold of 'threshold={}'. Lower it to see these results.".format(threshold))
    return returned_sims
def collect_ips():
    """Fill IP addresses into people list. Return if all addresses collected or not.

    Runs an nmap ping scan over the module-level network ``net``, then
    matches the discovered (IP, MAC) pairs against the module-level
    ``people`` entries that do not have an IP yet.
    """
    # Ping-scan the local network; requires nmap (checked just below).
    out, rc, _ = run_cmd('sudo nmap -sn ' + net, log_error=False)
    if rc:
        print "Error: nmap is required. Run following command:"
        print "sudo apt-get -y install nmap"
        sys.exit(4)
    # Regex seeks IP @ pos 0 and MAC @ pos 2.
    addrs = re.findall('(?is)((\d+\.){3}\d+).*?(([\da-fA-F]{2}:?){6})', out)
    for a, b in enumerate(people):
        # Only fill entries whose MAC appeared in the scan output and
        # that have no IP assigned yet.
        if b.mac in out.upper() and not b.ip:
            for g in addrs:
                if b.mac in g[2].upper():
                    people[a].ip = g[0]
                    people[a].presence = True  # Avoid extra ping
                    people[a].ltsip = time.time()
    # NOTE(review): relies on module-level globals/helpers (run_cmd, net,
    # people, get_ips) defined elsewhere in this file.
    return all(get_ips())
def roles(*role_list):
    """
    Decorator that tags a task with the role names it should run against.

    A role is a key in `env` whose value is a list of one or more host
    connection strings. For example, barring a command-line override,
    ``my_func`` below will execute against the hosts listed under the
    ``webserver`` and ``dbserver`` roles::

        env.webserver = ['www1', 'www2']
        env.dbserver = ['db1']

        @roles('webserver', 'dbserver')
        def my_func():
            pass

    The decorator merely records the names on the wrapped function's
    ``.roles`` attribute; the host lists are resolved later, just before
    the function is executed.
    """
    def decorator(task_func):
        @wraps(task_func)
        def wrapper(*args, **kwargs):
            return task_func(*args, **kwargs)
        wrapper.roles = list(role_list)
        return wrapper
    return decorator
from typing import List
from typing import Optional
from typing import Any
def pool_tr(
        sents: List[str],
        # services: List[str] = None,
        max_workers: Optional[int] = -1,
        from_lang: str = "auto",
        to_lang: str = "zh",
        timeout: float = 100,
) -> List[Any]:
    # fmt: on
    """Translate sents concurrently via the free MT services.

    Each sentence is submitted to a thread pool; failed or empty
    translations are re-queued and retried, with at most 5 passes over
    the queue before giving up with an Exception.

    Args:
        sents: sentences to translate
        max_workers: thread-pool size; None or <= 0 means len(Q_SERVICES)
        from_lang: source language code ("auto" lets the service detect)
        to_lang: target language code
        timeout: seconds to wait on each batch of futures

    Returns:
        list of ((text, service, time), idx) tuples sorted by the original
        sentence index. Diagnostics are also stashed on the function
        object: pool_tr.loop, pool_tr.result, pool_tr.dist, pool_tr.rdict.
    """
    _ = """
    if services is None:
        services = FREEMT_SERVICES
    # """
    # Fall back to one worker per available queue/service.
    if max_workers is None or max_workers <= 0:
        max_workers = len(Q_SERVICES)
    pool_exec = ThreadPoolExecutor(max_workers)
    resu = []  # type: List[Any]
    # Pair each sentence with its original position so order can be restored.
    sent_idx_list = [
        *zip(sents, range(len(sents)))
    ]  # type: List[Tuple[Union[str, int], Union[str, int]]]
    q_sents = deque(
        sent_idx_list
    )  # type: Deque[Tuple[Union[str, int], Union[str, int]]]
    # preventive measure: None of the service returns anything
    # other than None or '' or ' '
    loop_ = 0
    while q_sents:
        batch = get_batch(q_sents)  # type: List[str]
        fut_dict = {}  # type: Dict[Any, Any]
        for elm in batch:
            sent, idx = elm
            # queue_tr => res, service
            args = [queue_tr, sent, from_lang, to_lang]  # type: List[Any]
            # fut_dict = {**fut_dict, **{pool_exec.submit(*args): idx}}
            # way faster
            fut_dict.update({pool_exec.submit(*args): idx})
        # fut_dict: dict {(res, service): idx}
        # collect result if available, or send sents back to q_sents
        try:
            # Drain the batch; results are fetched again below per-future.
            for fut in as_completed(fut_dict, timeout=timeout):
                fut.result(0)
        except Exception as exc:
            # print(' **as_completed(fut_dict) exc:** ', exc)
            logger.error(" **as_completed(fut_dict) exc**: %s", exc)
        # unsuccessful terms
        # [[idx, elm.result()] for idx, elm in enumerate(fut_dict) if not elm.result()[0]]
        for fut, idx in fut_dict.items():
            # idx, _ = idx_service
            try:
                # resu += [(fut.result(0), idx_service,)]
                _ = fut.result(0)
                # NOTE(review): 'time' here shadows any time module import.
                trtext, service, time = _
                # send back to the queue if trtext is None, "",
                if trtext is None or not trtext.strip():
                    q_sents.append((sents[idx], idx))  # type: ignore
                else:
                    # service in fut.result()[1]
                    resu += [(fut.result(0), idx)]
            except Exception as exc:
                # print('resu += [fut.result(0), idx_service] exc:', exc)
                logger.debug("resu += [fut.result(0), idx_service] exc: %s", exc)
                # q_sents.append((sents[idx], idx))  # type: ignore
                # Unpack/result failure: retry this sentence in a later pass.
                q_sents.append((sents[idx], idx))  # type: ignore
        loop_ += 1
        # if loop_ > len(sents):
        if loop_ > 5:
            logger.warning(
                " Too many attempts, giving up -- probably net problem or none of the services if working"
            )  # noqa
            raise Exception(
                " Too many attempts, giving up -- probably net problem or none of the services if working"
            )
    # Diagnostics exposed as function attributes.
    pool_tr.loop = loop_
    pool_tr.result = resu
    # Per-service contribution counts: (service, number of results it produced).
    _ = [
        *zip(
            FREEMT_SERVICES,
            [
                *map(
                    lambda x: len([elm for elm in resu if elm is not None and elm[0][1] == x]),
                    FREEMT_SERVICES,
                )
            ],
        )
    ]
    # sorted contribution in reverse order
    pool_tr.dist = sorted(_, key=lambda x: -x[1])
    # rdict
    _ = {
        service: [[elm[0][0], elm[1]] for elm in resu if elm is not None and elm[0][1] == service]
        for service in FREEMT_SERVICES
    }  # noqa
    pool_tr.rdict = _
    resu = [elm for elm in resu if elm is not None]
    # return sorted(resu, key=lambda x: resu[1][0])  # type: ignore
    # Restore the original sentence order via the stored index.
    return sorted(resu, key=lambda x: x[1])
def stairmaster_mets(setting):
    """Estimate the energy cost for a StairMaster 4000 PT setting.

    For use in submaximal tests on the StairMaster 4000 PT step ergometer.

    Howley, Edward T., Dennis L. Colacino, and Thomas C. Swensen. "Factors
    Affecting the Oxygen Cost of Stepping on an Electronic Stepping
    Ergometer." Medicine & Science in Sports & Exercise 24.9 (1992): n.
    pag. NCBI. Web. 10 Nov. 2016.

    args:
        setting (int): the setting of the step ergometer

    Returns:
        float: VO2:subscript:`2max` in kcal/kg*hour
    """
    # Linear model from Howley et al. (1992); cost scales with the setting.
    slope = 0.556 * 7.45
    return slope * setting
from typing import Optional
from typing import List
from pathlib import Path
from typing import Protocol
def get_uri_for_directory(directory: str,
                          excludes: Optional[List[str]] = None) -> str:
    """Derive a content-addressable URI from a directory's contents.

    Every file under the directory is hashed (honoring *excludes*) and the
    digest is embedded in the package name, producing a URI of the form
    ``_ray_pkg_<HASH_VAL>.zip``, e.g.
    ``_ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip``.

    Examples:

    .. code-block:: python

        >>> get_uri_for_directory("/my_directory")
        .... _ray_pkg_af2734982a741.zip

    Args:
        directory (str): The directory.
        excludes (list[str]): The dir or files that should be excluded.

    Returns:
        URI (str)

    Raises:
        ValueError if the directory doesn't exist.
    """
    if excludes is None:
        excludes = []
    directory = Path(directory).absolute()
    if not (directory.exists() and directory.is_dir()):
        raise ValueError(f"directory {directory} must be an existing"
                         " directory")
    digest = _hash_directory(directory, directory,
                             _get_excludes(directory, excludes))
    pkg_name = RAY_PKG_PREFIX + digest.hex()
    return f"{Protocol.GCS.value}://{pkg_name}.zip"
def clean_cancer_dataset(df_training):
    """
    Drop rows containing impossible values from the mammography dataset.

    Column constraints enforced (rows violating them are removed in
    place): bi-rads allows 1-5 (ordinal), shape allows 1-4 (nominal),
    margin allows 1-5 (nominal), density allows 1-4 (ordinal). Age cannot
    be negative.

    :return: cleaned up dataframe, count of removed points
    """
    initial_rows = df_training.shape[0]
    # (column, maximum allowed value) pairs, applied in order.
    upper_limits = (
        ('bi_rads', 5),
        ('shape', 4),
        ('margin', 5),
        ('density', 4),
    )
    for column, max_value in upper_limits:
        df_training.drop(df_training.index[df_training[column] > max_value],
                         inplace=True)
    return df_training, initial_rows - df_training.shape[0]
from datetime import datetime
def str_to_date(date, form=None):
    """
    Parse a date string into a ``datetime`` object.

    :param form: strptime format string; when None it is inferred from
        the string via ``get_form``.
    :param date: str date
    :return: datetime date
    """
    if form is None:
        form = get_form(date)
    # Fix: the module imports the class directly (``from datetime import
    # datetime``), so ``datetime.datetime.strptime`` raised AttributeError;
    # call ``strptime`` on the imported class itself.
    return datetime.strptime(date, form)
def get_model_results(corpus, texts, ldamodel=None):
    """function extract model result such as topics, percentage distribution and return it as pandas dataframe

    For each document, finds the dominant (highest-contribution) topic,
    its percentage contribution, and that topic's keywords, and pairs
    them with the original text.

    in: corpus : encoded features (bag-of-words corpus fed to the model)
    in: text : main text, one entry per document
    in: ldamodel: the trained model
    out: dataframe with columns
        [Text, Topic_Keywords, Dominant_Topic_Number, Percentage_Contribution]
    """
    # Collect one row per document, then build the frame once: the old
    # per-row DataFrame.append was O(n^2) and is removed in pandas >= 2.0.
    rows = []
    for row_list in ldamodel[corpus]:
        # With per_word_topics the model yields (topic_probs, ...) tuples.
        row = row_list[0] if ldamodel.per_word_topics else row_list
        # Highest-contribution topic first.
        row = sorted(row, key=lambda x: (x[1]), reverse=True)
        if not row:
            continue
        topic_num, prop_topic = row[0]  # dominant topic only
        wp = ldamodel.show_topic(topic_num)
        topic_keywords = ", ".join(word for word, prop in wp)
        rows.append((int(topic_num), round(prop_topic, 4), topic_keywords))
    topics_df = pd.DataFrame(
        rows,
        columns=['Dominant_Topic_Number', 'Percentage_Contribution',
                 'Topic_Keywords'])
    # concat original text to topics_df
    contents = pd.Series(texts)
    topics_df = pd.concat([topics_df, contents], axis=1)
    topics_df.columns = ['Dominant_Topic_Number', 'Percentage_Contribution',
                         'Topic_Keywords', 'Text']
    topics_df = topics_df[["Text", "Topic_Keywords", "Dominant_Topic_Number",
                           "Percentage_Contribution"]]
    return topics_df
def rest_notify():
    """Github rest endpoint.

    Query parameters: ``sdkid`` (required), ``sdkbase`` (default
    "master") and ``repotag`` (defaults to the last path segment of
    ``sdkid``, lowercased). Dispatches the incoming GitHub webhook event
    to the matching handler.
    """
    sdkid = request.args.get("sdkid")
    # Validate before use: previously the repotag default was computed
    # from sdkid (sdkid.split(...)) BEFORE this check, so a missing sdkid
    # raised AttributeError instead of returning the error payload.
    if not sdkid:
        return {'message': 'sdkid is a required query parameter'}
    sdkbase = request.args.get("sdkbase", "master")
    sdk_tag = request.args.get("repotag", sdkid.split("/")[-1].lower())
    rest_bot = RestAPIRepoHandler(sdkid, sdk_tag, sdkbase)
    bot = BotHandler(rest_bot)
    # Dispatch table: GitHub event name -> handler callable.
    github_index = {
        'ping': ping,
        'push': push,
        'pull_request': rest_pull_request,
        'issue_comment': bot.issue_comment,
        'issues': bot.issues
    }
    # Lazily start the background worker on the first webhook delivery.
    if not _WORKER_THREAD.is_alive():
        _WORKER_THREAD.start()
    return handle_github_webhook(
        github_index,
        request.headers['X-GitHub-Event'],
        request.get_json()
    )
def lib_pt_loc(sys_chars_vals, tolerance = 1e-12):
    """Compute the non-dimensional libration point locations of a P1-P2 system.

    L4 and L5 (the equilateral points) have closed-form solutions; the
    collinear points L1, L2 and L3 are obtained by Newton-Raphson on
    their fifth-degree polynomials.

    Parameters
    ----------
    sys_chars_vals: object
        Object of Class sys_char
    tolerance: float
        convergence tolerance for Newton-Raphson Method

    Returns
    -------
    lib_loc: numpy ndarray (5x3)
        5 Libration Points, [nd]
    """
    mu = sys_chars_vals.mu
    lib_loc = np.zeros((5, 3))
    # Equilateral points: analytical solutions known.
    lib_loc[3, :] = [0.5 - mu, 3**0.5 / 2, 0]   # L4
    lib_loc[4, :] = [0.5 - mu, -(3**0.5) / 2, 0]  # L5
    # Quintic polynomial coefficients for L1, L2 and L3 (one row each).
    quintic_coeffs = np.array(
        [
            [1, mu - 3, 3 - 2 * mu, -mu, 2 * mu, -mu],
            [1, 3 - mu, 3 - 2 * mu, -mu, -2 * mu, -mu],
            [1, 2 + mu, 1 + 2 * mu, mu - 1, 2 * mu - 2, -1 + mu],
        ]
    )
    # First-order derivatives of the rows of quintic_coeffs.
    quintic_derivs = np.array(
        [
            [0, 5, 4 * (mu - 3), 3 * (3 - 2 * mu), 2 * -mu, 2 * mu],
            [0, 5, 4 * (3 - mu), 3 * (3 - 2 * mu), 2 * -mu, -2 * mu],
            [0, 5, 4 * (2 + mu), 3 * (1 + 2 * mu), 2 * (mu - 1), 2 * mu - 2],
        ]
    )
    guesses = np.array([0.9, 1.1, -1])
    # (base, sign) pairs mapping each converged root to the rotating-frame
    # x-coordinate: L1 -> 1-mu-x, L2 -> 1-mu+x, L3 -> -mu-x.
    x_transform = ((1 - mu, -1), (1 - mu, 1), (-mu, -1))
    for i in range(3):
        powers = np.vander([guesses[i]], 6)
        step = np.dot(powers, quintic_coeffs[i, :]) / np.dot(powers, quintic_derivs[i, :])
        while abs(step) >= tolerance:
            powers = np.vander([guesses[i]], 6)
            step = np.dot(powers, quintic_coeffs[i, :]) / np.dot(powers, quintic_derivs[i, :])
            lib_loc[i, 0] = guesses[i] - step
            guesses[i] = lib_loc[i, 0]
        base, sign = x_transform[i]
        lib_loc[i, 0] = base + sign * lib_loc[i, 0]
    return lib_loc
def max_pool_forward_naive(x, pool_param):
    """
    A naive implementation of the forward pass for a max pooling
    layer.

    Inputs:
    - x: Input data, of shape (N, C, H, W)
    - pool_param: dictionary with the following keys:
      - 'pool_height': The height of each pooling region
      - 'pool_width': The width of each pooling region
      - 'stride': The distance between adjacent pooling regions

    Returns a tuple of:
    - out: Output data, of shape (N, C, H', W') where
      H' = 1 + (H - pool_height) // stride and
      W' = 1 + (W - pool_width) // stride
    - cache: (x, pool_param)
    """
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    out_h = 1 + (H - pool_height) // stride
    out_w = 1 + (W - pool_width) // stride
    out = np.empty((N, C, out_h, out_w), dtype=x.dtype)
    # Slide the pooling window over the spatial dims; for each position
    # take the max over the window for all examples/channels at once.
    for i in range(out_h):
        for j in range(out_w):
            h0, w0 = i * stride, j * stride
            window = x[:, :, h0:h0 + pool_height, w0:w0 + pool_width]
            out[:, :, i, j] = window.max(axis=(2, 3))
    cache = (x, pool_param)
    return out, cache
def _find_connection_file(connection_file):
    """Resolve a connection file name to an absolute path.

    When *connection_file* is None, the current kernel's own connection
    file is returned; otherwise the lookup is delegated to
    ``jupyter_client.find_connection_file``.
    """
    if connection_file is not None:
        return jupyter_client.find_connection_file(connection_file)
    # Nothing specified: use the running kernel's connection file.
    return get_connection_file()
from typing import List
def _symbols_of_input(label: str) -> List[str]:
"""Extracts FST symbols that compose complex input label of the rewrite rule.
FST symbols of a complex input label is;
- Epsilon symbol if the complex input label is an epsilon symbol
(e.g. ['<eps>'] for label '<eps>').
- Digits of the complex input label if it is only composed of digits
without any feature analysis tags (e.g. ['9', '0'] for the label '90').
- Tokenized inflectional group boundaries, inflectional or derivational
morphemes, proper noun and feature analyses tags, numbers, and punction
if the complex input label is composed of these units (e.g. [')([VN]',
'-YAn[Derivation=PresNom]'] for the label
')([VN]-YAn[Derivation=PresNom]').
Args:
label: complex input label of a morphotactics FST rewrite rule.
Returns:
FST symbols that are used in the complex input label of the rewrite rule.
For labels that do not represent epsilon, FST symbols are returned in the
same order as they appear in the complex input label, and duplicate symbols
are preserved.
"""
if label == common.EPSILON:
return [label]
# We add a state transition arc for each digit of a multi-digit number.
if "[" not in label:
return list(label)
# We add a state transition arc for each inflectional or derivational
# morpheme, inflectional group boundary, and proper noun analysis tag.
return _SYMBOLS_REGEX.findall(label) | 8298a242701aa586ba50ffa6059a8e33e4cf01f3 | 30,198 |
def preset_select_func(area, preset):
    """Build the packet that selects *preset* in *area* (fade bank 0)."""
    packet = DynetPacket.select_area_preset_packet(area, preset, 0)
    return packet
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.