content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def delete_line_breaks(text, joiner):
    """Remove line breaks from *text*, joining the pieces with *joiner*.

    :param text: input string possibly containing newlines
    :param joiner: string placed between the split parts
                   (" " for abstracts, "" for titles)
    :return: the joined single-line string
    """
    return joiner.join(text.split('\n'))
|
b05a701d4828d57060af55389ec785a64e094392
| 30,142
|
def get_adm_fields(adm_level, field_name="name"):
    """Build the adm-field names from adm1 up to ``adm_level`` (inclusive)."""
    fields = []
    for level in range(1, adm_level + 1):
        fields.append(f"adm{level}_{field_name}")
    return fields
|
75a216ab7448f53417c5b8f5e951a5c96312b1e3
| 30,145
|
def return_tuple():
    """Return a fixed three-element tuple.

    Return:
        tuple: the constant tuple ``(4, 5, 6)``
    """
    result = (4, 5, 6)
    return result
|
fefd06d24293e6a1c1aab85666e710a742f4250d
| 30,146
|
def get_vplex_gatherfacts_parameters():
    """Build the argument spec for the Ansible gather-facts module on VPLEX.

    Returns a dict mapping each module parameter name to its Ansible
    option definition (type, required flag, choices, sub-options).
    """
    subset_choices = [
        'stor_array', 'stor_vol', 'port', 'be_port', 'initiator',
        'stor_view', 'virt_vol', 'cg', 'device', 'extent',
        'dist_device', 'dist_cg', 'dist_virt_vol', 'device_mig_job',
        'extent_mig_job', 'amp',
    ]
    operator_choices = ['equal', 'greater', 'lesser',
                        'greater-equal', 'lesser-equal']
    filter_options = dict(
        filter_key=dict(type='str', required=True),
        filter_operator=dict(type='str', required=True,
                             choices=operator_choices),
        filter_value=dict(type='str', required=True),
    )
    return dict(
        cluster_name=dict(type='str', required=False, default=''),
        gather_subset=dict(type='list', required=False, elements='str',
                           choices=subset_choices),
        filters=dict(type='list', required=False, elements='dict',
                     options=filter_options),
    )
|
6ce423be29db1312cfd7b8c70f09570b461cb15d
| 30,147
|
def comeXpayY(come_x, pay_y, per_head, pax):
    """Promotion pricing: every full group of ``come_x`` guests pays for
    only ``pay_y`` of them (e.g. "come 4, pay 3").

    :param come_x: group size that triggers the promotion
    :param pay_y: number of guests charged per full group
    :param per_head: food cost per guest
    :param pax: number of guests at the table
    :return: total food cost
    """
    full_groups, remainder = divmod(pax, come_x)
    return full_groups * pay_y * per_head + remainder * per_head
|
8d92d51322b7d4b729577399301b3f462d2f2ae4
| 30,148
|
def prepare_actions(actions, enabled_analyzers):
    """Expand each build action once per enabled analyzer.

    Sets the analyzer type on every action; with multiple source
    analyzers each action appears once per analyzer.
    """
    return [
        action.with_attr('analyzer_type', analyzer)
        for analyzer in enabled_analyzers
        for action in actions
    ]
|
a8a8624c5921f9addaef9c9e532f333726e35618
| 30,149
|
import click
def style_bold(txt, fg):
    """Style *txt* with the given foreground color and a bold weight."""
    styled = click.style(txt, fg=fg, bold=True)
    return styled
|
adecdf207ecb27d202f298a56f8de54576e72ecd
| 30,150
|
from typing import Optional
from typing import Tuple
import re
def find_github_owner_repo(url: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
    """Find the owner's name and repository name from the URL representing the GitHub
    repository.

    Parameters
    ----------
    url : Optional[str]
        Any string (expect it to be a URL).

    Returns
    -------
    owner : str or None
        Owner's name, or None if not found.
    repo : str or None
        Repository name, or None if not found.

    Examples
    --------
    >>> owner, repo = find_github_owner_repo("https://github.com/poyo46/lilili.git#foo")
    >>> assert owner == "poyo46"
    >>> assert repo == "lilili"
    >>> owner, repo = find_github_owner_repo("https://www.example.com")
    >>> assert owner is None
    >>> assert repo is None
    """
    if url is None:
        return None, None
    # BUG FIX: the dot in "github.com" is escaped; the original bare '.'
    # matched any character, so hosts like "githubxcom" were accepted.
    m = re.match(r"[^:/]+://github\.com/(?P<owner>[^/]+)/(?P<repo>[^/#]+)", url)
    if m is None:
        return None, None
    repo = m.group("repo")
    if repo.endswith(".git"):
        repo = repo[:-4]  # drop the trailing ".git" suffix
    return m.group("owner"), repo
|
81ba5533546efde91a7c6ef5f48e65d1cdd2c6c4
| 30,151
|
from typing import Union
def celsius_to_kelvin(temperature_in_celsius: Union[int, float]) -> float:
    """Convert a Celsius temperature to Kelvin.

    Raises ValueError for non-numeric input or for temperatures below
    absolute zero (-273.15 °C).

    >>> celsius_to_kelvin(0)
    273.15
    >>> celsius_to_kelvin(-273.15)
    0.0
    >>> celsius_to_kelvin(-274.15)
    Traceback (most recent call last):
    ...
    ValueError: Argument must be greater than -273.15
    >>> celsius_to_kelvin('one')
    Traceback (most recent call last):
    ...
    ValueError: Argument must be int or float
    """
    if not isinstance(temperature_in_celsius, (float, int)):
        raise ValueError('Argument must be int or float')
    if temperature_in_celsius < -273.15:
        raise ValueError('Argument must be greater than -273.15')
    return temperature_in_celsius + 273.15
|
1aa5b214e20c0d47a3ab50388132f174cd33dbde
| 30,152
|
def union(x, y=None):
    """
    Return the union of x and y, as a list. The resulting list need not
    be sorted and can change from call to call.

    INPUT:
    - ``x`` - iterable
    - ``y`` - iterable (may optionally be omitted)

    OUTPUT: list
    """
    elements = set(x)
    if y is not None:
        elements.update(y)
    return list(elements)
|
67b20db26081c05a8c706d5529897bc753726f6a
| 30,153
|
from typing import List
from typing import Any
def split(values: List[Any], split_count: int) -> List[Any]:
    """
    Split ``values`` into ``split_count`` contiguous, near-equal pieces.
    """
    total = len(values)
    pieces = []
    for index in range(split_count):
        start = total * index // split_count
        stop = total * (index + 1) // split_count
        pieces.append(values[start:stop])
    return pieces
|
35819aeaf543386f2318dc1bd22eef433d16f090
| 30,155
|
def msd_ratio(track, fram1=3, fram2=100):
    """Calculate the MSD ratio of the input track between two frames.

    Parameters
    ----------
    track : pandas.core.frame.DataFrame
        At a minimum, must contain a 'Frame' and an 'MSDs' column.
    fram1 : int
        First frame at which to calculate the MSD ratio.
    fram2 : int
        Last frame at which to calculate the MSD ratio; must be greater
        than ``fram1``.

    Returns
    -------
    ratio : numpy.float64
        MSD ratio defined as [MSD(fram1)/MSD(fram2)] - [fram1/fram2].
        For Brownian motion it is 0; for restricted motion it is < 0;
        for directed motion it is > 0.
    """
    assert fram1 < fram2, "fram1 must be less than fram2"
    msd_part = track['MSDs'][fram1] / track['MSDs'][fram2]
    frame_part = track['Frame'][fram1] / track['Frame'][fram2]
    return msd_part - frame_part
|
6e35571067f0150240829f4f7ce62c5c78cd260a
| 30,156
|
from typing import List
def define_frontal_base_direction(robot: str) -> List:
    """Define the robot-specific frontal base direction in the base frame.

    Only iCubV2_5 is supported; any other robot raises.
    """
    if robot == "iCubV2_5":
        # For iCubV2_5, the reversed x axis of the base frame points forward.
        return [-1, 0, 0]
    raise Exception("Frontal base direction only defined for iCubV2_5.")
|
eac7536aca5e8cd1ba06160a4dd4f5f6332fb5ed
| 30,158
|
from typing import Callable
from typing import Any
def get_or_create_state(*, state: object, key: str, factory: Callable[[], Any]) -> Any:
    """Return ``state.<key>``, creating it via ``factory`` when absent or None."""
    existing = getattr(state, key, None)
    if existing is not None:
        return existing
    created = factory()
    setattr(state, key, created)
    return created
|
128b709c342b4ec517a680302e3de27860f25222
| 30,159
|
import json
async def load(file):
    """Awaitable wrapper around ``json.load``.

    NOTE(review): ``json.load`` itself is synchronous, so this coroutine
    still blocks the event loop while reading and parsing; it only
    provides an awaitable interface.

    :param file: a readable file-like object containing JSON text
    :return: the deserialized Python object
    """
    return json.load(file)
|
8f4d44ac35ebb9d501ec8999eec1910bdd5f516a
| 30,160
|
import sys
from abc import abstractmethod
def abstract_property(func):
    """
    Python2/3 compatible abstract property decorator.

    :param func: method to decorate
    :type func: callable
    :returns: decorated function
    :rtype: callable
    """
    if sys.version_info > (3, 3):
        return property(abstractmethod(func))
    # BUG FIX: the original referenced the bare name `abstractproperty`
    # without importing it, so this branch raised NameError on old
    # interpreters. Import it locally; it only exists on that path.
    from abc import abstractproperty
    return abstractproperty(func)
|
46bf3c6c6987fc559edb7b7c692e5cc7db11ea44
| 30,161
|
from typing import List
from typing import Dict
from pathlib import Path
def collect_file_pathes_by_ext(
    target_dir: str, ext_list: List[str]
) -> Dict[str, List[Path]]:
    """Collect files under ``target_dir`` grouped by extension.

    Args:
        target_dir (str): Directory to search (recursively).
        ext_list (List[str]): File extensions, with or without a leading dot.

    Returns:
        Dict[str, List[Path]]: Maps each (dot-less) extension to the list
        of matching ``Path`` objects found anywhere under ``target_dir``.
        (The original docstring wrongly described a list of lists.)
    """
    target = Path(target_dir)
    found: Dict[str, List[Path]] = {}
    for ext in ext_list:
        # Normalize by dropping only a leading dot.  The original used
        # `ext.strip(".")`, which also removed trailing dots, and indexed
        # ext[0], which crashed on an empty string.
        key = ext[1:] if ext.startswith(".") else ext
        found[key] = list(target.glob(f"**/*.{key}"))
    return found
|
18b6287d12a301b7cff98e37a4122474ae3a7958
| 30,162
|
import subprocess
def get_version():
    """Get version of gr1c as detected by TuLiP.

    Runs ``gr1c -V`` and parses the reported version string.  Failure to
    find the gr1c program or errors in parsing the received version
    string will cause an exception.

    @return: (major, minor, micro), a tuple of int
    """
    try:
        output = subprocess.check_output(["gr1c", "-V"],
                                         universal_newlines=True)
    except OSError:
        raise OSError('gr1c not found')
    version_word = output.split()[1]
    try:
        # Unpacking raises ValueError for a wrong number of components,
        # int() for non-numeric ones — both map to the same error.
        major, minor, micro = (int(piece)
                               for piece in version_word.split("."))
    except ValueError:
        raise ValueError('gr1c version string is not recognized: '
                         + str(version_word))
    return (major, minor, micro)
|
4b9f294d9c14c57332bea0881e6592c1f564b828
| 30,164
|
def node_is_empty(node):
    """Handle different ways the regulation represents no content."""
    stripped = node.text.strip()
    return stripped == ''
|
64c80be5ad40ab388664e6f391fb729b6fc9ebb6
| 30,165
|
def getChecked(data):
    """
    Analyze the modified tree object returned by the visualization and
    collect the biosources and TFs selected by the user.

    :param data: list of biosource nodes; each node is a dict with
        "item", "checked" and "children" keys, the children being TF
        nodes with "item", "type" and "checked" keys.
    :return: dict mapping each biosource with at least one selected TF
        to the list of its selected [item, type] pairs.
    """
    only_checked = {}
    for biosource_obj in data:
        children = biosource_obj["children"]
        if biosource_obj["checked"]:
            # A checked biosource selects every one of its TFs.
            selected = [[tf["item"], tf["type"]] for tf in children]
        else:
            selected = [[tf["item"], tf["type"]]
                        for tf in children if tf["checked"]]
        # Biosources with no selected TFs are omitted entirely.
        if selected:
            only_checked[biosource_obj["item"]] = selected
    return only_checked
|
071f100a79f92ec72e04f918267d937d8199f7fd
| 30,166
|
def find_dict_in_list_from_key_val(dicts, key, value):
    """Look up the first dict in ``dicts`` whose ``key`` maps to ``value``.

    Parameters
    ----------
    dicts: (list) list of dictionaries
    key: (str) specific key to look for in each dict
    value: value to match

    Returns
    -------
    dict: the first matching dict, otherwise None
    """
    # next() with a default replaces the manual loop; the loop variable
    # is no longer named `dict`, which shadowed the builtin.
    return next((d for d in dicts if key in d and d[key] == value), None)
|
02c98b64086266a21c2effdb72d1b681f77cbc26
| 30,167
|
from typing import List
from typing import Any
def get_combinations(*args: List[Any]) -> List[List]:
    """Take K lists as arguments and return their Cartesian product.

    Cartesian product means all possible lists of K items where the
    first element is from the first list, the second from the second,
    and so on.

    Returns:
        All possible combinations of items from the function's arguments.
    """
    combos: List[List] = [[]]
    for candidates in args:
        extended = []
        for prefix in combos:
            for item in candidates:
                extended.append(prefix + [item])
        combos = extended
    return combos
|
db3d2f48e650e647cec79d7bab0b6e555238cb3a
| 30,169
|
def unique_from_list_field(records, list_field=None):
    """
    Return the unique values collected from ``record[list_field]`` across
    all ``records``; records missing that field (or with an empty one)
    are skipped.  Result order is unspecified.
    """
    collected = set()
    for record in records:
        field_values = record.get(list_field)
        if field_values:
            collected.update(field_values)
    return list(collected)
|
9b30a2afb80e95479aba9bbf4c44ecdc0afaed34
| 30,170
|
def calculate_scan_time(metadata):
    """Parse the scan time from gantry metadata.

    Returns:
        timestamp string, or None when no cleaned metadata entry is found
    """
    # `metadata` is either a cleaned-metadata dict or an iterable of
    # sub-metadata entries — presumably as produced by the terraref
    # pipeline; TODO confirm against callers.
    scan_time = None
    if 'terraref_cleaned_metadata' in metadata and metadata['terraref_cleaned_metadata']:
        # Cleaned dict form: the timestamp lives directly under
        # gantry_variable_metadata.
        scan_time = metadata['gantry_variable_metadata']['datetime']
    else:
        # List form: each entry may wrap its payload in a 'content' key.
        # If several cleaned entries exist, the last one wins.
        for sub_metadata in metadata:
            if 'content' in sub_metadata:
                sub_metadata = sub_metadata['content']
            if 'terraref_cleaned_metadata' in sub_metadata and sub_metadata['terraref_cleaned_metadata']:
                scan_time = sub_metadata['gantry_variable_metadata']['datetime']
    return scan_time
|
9dc76c40163c7190c93e898fbc5c7153030083e0
| 30,171
|
def space_replacer(string):
    """Replace spaces with underscores, collapsing any resulting run of
    consecutive underscores into a single one.

    :type string: str
    :rtype: str
    """
    chars = []
    for ch in string:
        if ch == " ":
            ch = "_"
        if ch == "_" and chars and chars[-1] == "_":
            continue  # collapse consecutive underscores
        chars.append(ch)
    return "".join(chars)
|
726adafe5e6308b5788382fea1b97d1bbd89bdac
| 30,172
|
def update_weights(weights, error, l_rate, vector):
    """Return the weights moved along ``vector``, scaled by the learning
    rate and the error.

    :param weights: current weight values
    :param error: prediction error
    :param l_rate: learning rate
    :param vector: input vector paired element-wise with the weights
    :return: list of updated weights
    """
    updated = []
    for component, weight in zip(vector, weights):
        updated.append(weight + component * l_rate * error)
    return updated
|
299c432aa0463a2c224959cc05b9291bebe5cc6c
| 30,173
|
def get_action_mapping(n, exclude_self=True):
    """In a matrix view, map each 1d action index to its 2d coordinate.

    :param n: matrix dimension
    :param exclude_self: when True, diagonal cells (i == j) are omitted
    :return: list of (row, col) tuples in row-major order
    """
    pairs = []
    for row in range(n):
        for col in range(n):
            if row == col and exclude_self:
                continue
            pairs.append((row, col))
    return pairs
|
0b707650202d6350b61dca79ca64ef69c179f08a
| 30,174
|
def human_size(qbytes, qunit=2, units=None):
    """Return a human readable string representation of bytes.

    ``qunit`` selects the starting unit (2 = MB by default); the value
    is halved into the next unit while it is 1024 or more.
    """
    if units is None:
        units = [' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'][qunit:]
    if qbytes < 1024:
        return str(qbytes) + units[0]
    # Shift right by 10 bits (divide by 1024) and move to the next unit.
    return human_size(qbytes >> 10, 1, units[1:])
|
2d573c61ae5241383054936e99adcc38260fc9ae
| 30,175
|
from typing import OrderedDict
def archive_metadata():
    """
    Map column names in the archive data file to internal variables.

    This is the only place where the original column names should be
    mentioned.

    :return: (columns, list_columns, key_column) where ``columns`` maps
        original names to internal names, ``list_columns`` names the
        columns parsed as lists, and ``key_column`` is the record key.
    """
    columns = OrderedDict()
    columns['Id'] = 'label'
    columns['Postnr'] = 'db_id'
    columns['Museum/objekt'] = 'museum_obj'
    columns['Fotonummer'] = 'photo_ids'
    return columns, ('Fotonummer', ), 'Postnr'
|
fbc56761d1460ea15e340ad01b5d4e8a65829ccd
| 30,176
|
import time
def parse_event_response(client, event, fpid, href):
    """
    Build the required event JSON object from an event response.

    :param client: client instance (its ``url`` is used to build links)
    :param event: event indicator from the response
    :param fpid: unique id of the event, i.e. EventId
    :param href: reference link of the event
    :return: dict with the fields consumed downstream
    """
    observed_time = time.strftime(
        '%b %d, %Y %H:%M', time.gmtime(float(event['timestamp'])))
    name = event.get('info', '')
    uuid = event.get('uuid', '')
    if uuid:
        # Render the name as a markdown link into the client UI.
        link = client.url + '/home/technical_data/iocs/items/' + uuid
        name_str = '[{}]({})'.format(name, link)
    else:
        name_str = name
    tags_value = ', '.join(tag['name'] for tag in event.get('Tag', []))
    return {
        'Observed time (UTC)': observed_time,
        'Name': name_str,
        'Tags': tags_value,
        'EventCreatorEmail': event.get('event_creator_email', ''),
        'EventId': fpid,
        'Href': href,
    }
|
799bb6e1f5433aa86d44d3ab299ec5f17adb43f5
| 30,177
|
def flesch(df):
    """
    Add a ``Flesch`` readability score column to ``df``.

    The formula and its interpretation is given in this wiki page:
    https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests

    Needed features:
        Avg_words_per_sentence
        Avg_syllables_per_word
    Adds column:
        Flesch - Flesch formula score for the text
    """
    words = df["Avg_words_per_sentence"]
    syllables = df["Avg_syllables_per_word"]
    df["Flesch"] = 206.835 - 1.015 * words - 84.6 * syllables
    return df
|
8bf6efa9e9c2ddd8795688ed0d11b687c20a4c36
| 30,178
|
def state_size_dataset(sz):
    """Get dataset key part for state size.

    Parameters
    ----------
    sz : `int`
        State size.

    Returns
    -------
    `str`
        Dataset key part.
    """
    return '_ss{:d}'.format(sz)
|
2ab9b2247309b3441437baddd9f258f1772cd5ae
| 30,179
|
def find_max_score(scores):
    """Return the index of the highest-scoring student in ``scores``.

    :param scores: list of objects exposing a ``score`` attribute
    :return: index of the first entry holding the maximal score
    """
    best_index = 0
    for index, entry in enumerate(scores):
        if entry.score > scores[best_index].score:
            best_index = index
    return best_index
|
59051729339f70e662c33e9100a83ea0920bea70
| 30,181
|
def trunc(number, n_digits=None, *, is_round=False):
    """
    Truncate (or optionally round) a float to ``n_digits`` decimal places.

    >>> from snakypy import helpers
    >>> helpers.calcs.trunc(1.9989, 2)
    1.99
    >>> helpers.calcs.trunc(1.9989, 2, is_round=True)
    2.0

    Args:
        number (float): Must receive a float number
        n_digits (int): Number of places kept after the decimal point.
        is_round (bool): If the value is True, round the number instead.
    Returns:
        Returns a float number; the input is returned unchanged when
        ``n_digits`` is None and ``is_round`` is False.
    """
    if is_round:
        return round(number, n_digits)
    if n_digits is not None:
        # BUG FIX: the original tested `if n_digits`, so n_digits=0 was
        # treated as "no truncation" and the number came back unchanged.
        scale = 10 ** n_digits
        return int(number * scale) / scale
    return number
|
41f1e84271795fb0972e17a9c3a9208d1db8b816
| 30,182
|
import re
def comment_modules(modules, content):
    """
    Disable modules in file content by commenting out every non-comment
    line that loads ``<module>.so``.

    :param modules: iterable of module names
    :param content: configuration file content to rewrite
    :return: the rewritten content
    """
    for module in modules:
        # re.escape guards against regex metacharacters in module names;
        # the original interpolated the raw name into the pattern.
        content = re.sub(
            r'^([ \t]*[^#\s]+.*{0}\.so.*)$'.format(re.escape(module)),
            r'#\1',
            content,
            flags=re.MULTILINE
        )
    return content
|
ba1c1a6a9e5fd9a655e7c6c00975843196b4c1db
| 30,183
|
import pathlib
import shutil
def dir_sample_patches(sample_patch1, sample_patch2, tmpdir_factory):
    """Directory of sample image patches for testing.

    Builds a temporary directory containing both sample patches —
    symlinked when the platform allows it, copied otherwise.
    """
    dir_path = pathlib.Path(tmpdir_factory.mktemp("data"))
    try:
        # BUG FIX: the first link originally pointed at sample_patch2,
        # so both directory entries resolved to the same file.
        dir_path.joinpath(sample_patch1.name).symlink_to(sample_patch1)
        dir_path.joinpath(sample_patch2.name).symlink_to(sample_patch2)
    except OSError:
        # Symlinks unavailable (e.g. Windows without privilege): copy.
        shutil.copy(sample_patch1, dir_path.joinpath(sample_patch1.name))
        shutil.copy(sample_patch2, dir_path.joinpath(sample_patch2.name))
    return dir_path
|
5210983887f2775fdc5b4b505d4c8cc43947e90a
| 30,184
|
def return_value(value):
    """Identity helper: return *value* unchanged."""
    return value
|
2c17a5b6e4612cf2355de6cb8b5cd209abbac8fc
| 30,185
|
import logging
def get_logger(logger_name):
    """
    Get the dev or prod logger (avoids having to import 'logging' in calling code).

    Parameters:
        logger_name - 'dev' or 'prod'
    Returns:
        Logger to use for logging statements.
    Raises:
        ValueError - if logger_name is not an allowed name.
    """
    valid_logger_names = ['dev', 'prod']
    if logger_name not in valid_logger_names:
        # Guard clause; the original also had an unreachable
        # `return None` after this raise, which is removed.
        raise ValueError("Invalid logger name: allowed values are "
                         + ",".join(valid_logger_names))
    return logging.getLogger(logger_name)
|
2e4c41ef0eb226e11d9627090d80bcbbf9bfc475
| 30,186
|
def get_out_path(config):
    """
    Return the path where summary output is copied after d-blink finishes
    running.

    :param config: ConfigTree
    :return: destination path if exactly one 'copy-files' step exists,
        otherwise None
    :raises NotImplementedError: if more than one 'copy-files' step exists
    """
    copy_steps = [step for step in config.get_list('dblink.steps')
                  if step.get_string('name') == 'copy-files']
    if not copy_steps:
        return None
    if len(copy_steps) > 1:
        raise NotImplementedError('Too many copy-files steps')
    return copy_steps[0].get_string('parameters.destinationPath')
|
330e32fe84f473b5e46a3d033fe94653ed33e551
| 30,187
|
def _is_within_close_bracket(s, index, node):
"""Fix to include right ']'."""
if index >= len(s) - 1:
return False
return s[index] == ']' or s[index + 1] == ']'
|
e3f5829132fc950237b8d9732c153dd166ef647e
| 30,188
|
def _clean(item):
"""Return a stripped, uppercase string."""
return str(item).upper().strip()
|
92e3319345149b5b645b21250389ddf3ce04e636
| 30,191
|
def get_talkroom_timerange(talk_room_list):
    """
    Return the overall time range of the talk rooms.

    :param talk_room_list: non-empty list of rooms exposing ``Rstart``
        and ``Rfinish`` attributes
    :return: (earliest start, latest finish)
    """
    earliest = min(room.Rstart for room in talk_room_list)
    latest = max(room.Rfinish for room in talk_room_list)
    return earliest, latest
|
cd9722482fc9b0e47d057170a042447320227c5a
| 30,193
|
def validation_email_context(notification):
    """Build the email template context used to verify a user's address."""
    user = notification.user
    context = {
        'protocol': 'https',
        'token': user.generate_validation_token(),
        'site': notification.site,
    }
    return context
|
0475223827d57f93aba25f49080f02cf164628d6
| 30,194
|
import os
def quick_mkdir(name):
    """
    Create a folder under the current working directory.

    :param name: folder name
    :return: full path of the (possibly pre-existing) folder, with a
        trailing path separator
    """
    # os.path.join + os.sep keep this portable; the original hard-coded
    # '\\' separators and therefore only behaved correctly on Windows.
    new_directory = os.path.join(os.getcwd(), name) + os.sep
    if not os.path.exists(new_directory):
        try:
            os.mkdir(new_directory)
        except Exception as e:
            # Best-effort, matching the original: log and still return
            # the computed path.
            print(e)
    return new_directory
|
09701aabea6d5f69bb085580f33deb8777375934
| 30,195
|
def parse_path(path):
    """Parse a path of /hash/action/my/path returning a tuple of
    ('hash', 'action', 'my/path'), with None for parts missing from a
    shorter path.

    :param path: :py:class:`string`: the path
    :rtype: :py:func:`tuple`
    """
    if path == '/':
        return None, None, None
    # Split off at most one segment and drop empty strings, exactly as
    # the original filtering did.
    segments = [seg for seg in path[1:].split('/', 1) if seg]
    if len(segments) == 1:
        return segments[0], None, None
    file_hash, remainder = segments
    segments = [seg for seg in remainder.split('/', 1) if seg]
    if len(segments) == 1:
        return file_hash, segments[0], None
    action, rest = segments
    return file_hash, action, rest
|
d6b9ea79104503888c862e8c42c8815e91a885fb
| 30,196
|
import six
def _contains(values, splitter):
"""Check presence of marker in values.
Parameters
----------
values: str, iterable
Either a single value or a list of values.
splitter: str
The target to be searched for.
Return
------
boolean
"""
if isinstance(values, six.string_types):
values = (values,)
try:
if any([splitter in x for x in values]):
return True
return False
except TypeError:
return False
|
1f41e366309e7160d66a54115e1f20d4c3fe5f52
| 30,197
|
def get_top_namespace(node):
    """Return the top namespace of the given node.

    If the node has no namespace (only root), ":" is returned;
    otherwise the first namespace after the root is returned.

    :param node: the node to query
    :type node: str
    :returns: The top level namespace.
    :rtype: str
    :raises: None
    """
    # Keep only the node name (in case a full dag path was passed) and
    # strip the root namespace marker.
    leaf = node.rsplit("|", 1)[-1].lstrip(":")
    top, separator, _ = leaf.partition(":")
    # No ':' left means the node lives in the root namespace.
    return top if separator else ":"
|
c7314b4b2dea934da4ab5710ee8cd1c1e7c87035
| 30,200
|
def football_points(win: int, draw: int, loss: int):
    """Calculate league points: 3 per win, 1 per draw, 0 per loss
    (``loss`` is kept for the signature but never contributes)."""
    points = 3 * win + draw
    return points
|
692d115e3955847e2a2e9ede2995fedbc752a00c
| 30,201
|
def perc_diff(bigger, smaller):
    """Percentage difference between two numbers, rounded to 2 decimals.

    :param bigger: greater value
    :param smaller: smaller value (must be non-zero, it is the divisor)
    :return: the difference as a percentage of ``smaller``
    """
    ratio = (bigger - smaller) / smaller
    return round(ratio * 100, 2)
|
3e4a19e907afed549c598cd03d78454c2e140db1
| 30,202
|
def uploads_to_list(environment, uploads_list):
    """Return the list of upload URLs (``environment`` is unused; kept
    for the template-filter signature)."""
    urls = []
    for upload in uploads_list:
        urls.append(upload.get("url"))
    return urls
|
7771dfb0190b76ad3de55168720640170e72f26c
| 30,203
|
def _element_fill_join(elements, width):
    """Create a multiline string with a maximum width from a list of strings,
    without breaking lines within the elements.

    Elements are joined with ", "; a newline is inserted whenever adding
    the next element would run past ``width``.  Returns '' for an empty
    or falsy ``elements``.
    """
    s = ''
    if(elements):
        L = 0  # length of the current (last) output line
        for i in range(len(elements) - 1):
            s += str(elements[i]) + ', '
            L += len(elements[i]) + 2  # +2 accounts for the ", " separator
            # Break before the next element if it would exceed the width.
            if(L + len(elements[i+1]) >= width - 2):
                s += '\n'
                L = 0
        s += elements[-1]  # NOTE(review): not str()-wrapped, unlike the others
    return(s)
|
2cb08a9be5dfc5a6af99f9fbd95487fbaebf11ca
| 30,204
|
def hmsToHour(s, h, m, sec):
    """Convert signed RA/HA hours, minutes, seconds to floating point hours.

    :param s: sign (+1 or -1)
    :param h: hours
    :param m: minutes
    :param sec: seconds
    :return: signed decimal hours
    """
    hours = h + m / 60.0 + sec / 3600.0
    return s * hours
|
900f6a942d126eb9af0f4f0ccfc57daac73e0e53
| 30,205
|
def convert_to_indices(vert_list):
    """
    Convert a flattened components list to a list of vertex indices.

    :param vert_list: list<str>, flattened vertex names following Maya's
        convention: ['{object_name}.v[0]', '{object_name}.v[1]' ...]
    :return: list of int indices, e.g. [0, 1, 2, 3 ...]
    """
    return [int(entry.split('[')[-1].split(']')[0]) for entry in vert_list]
|
b98f79fc8e6c5f8126698ef0ce0b1fb77e29250d
| 30,206
|
def get_client_rad(access_route, logger=None):
    """Pick the real client IP out of ``access_route``.

    Usually the route holds the client IP and the load-balancer IP, but
    it may also contain a client-side proxy IP (three entries) or a
    leading "unknown" entry, e.g.::

        [111.111.111.111]                                   direct access
        [111.111.111.111, 130.211.0.0/22]                   via load balancer
        [172.16.18.111, 111.111.111.111, 130.211.0.0/22]    via client proxy
        ['unknown', '111...', '222...', '130.211.0.0/22']   with unknown

    NOTE: a leading "unknown" entry is removed from the list in place.
    """
    if len(access_route) > 2 and access_route[0] == "unknown":
        if logger:
            logger.error('delete "unknown" from:{}'.format(access_route))
        access_route.pop(0)  # mutate in place, matching original behavior
    if len(access_route) == 3:
        # Three hops: the first is the client's own proxy, so the real
        # client address is the middle entry.
        return access_route[1]
    # Direct access or via the load balancer: the client comes first.
    return access_route[0]
|
d9fc2bf6b4d88e282afd0a5b351d1f14d4429968
| 30,208
|
def guess_feature_group(feature):
    """Given a feature name, returns a best-guess group name.

    The first matching prefix wins, so more specific prefixes are listed
    before shorter ones (e.g. 'V.K' before 'V.').  Returns 'NA' when no
    prefix matches.
    """
    prefix_to_group = (
        ('Sha', 'Shadow'),
        ('miRNA', 'miRNA'),
        ('chr', 'mRNA'),
        ('Fir', 'Firmicutes'),
        ('Act', 'Actinobacteria'),
        ('Bac', 'Bacterodetes'),
        ('Pro', 'Proteobacteria'),
        ('Ami', 'Amino Acid'),
        ('Pep', 'Peptide'),
        ('Car', 'Carbohydrate'),
        ('Ene', 'Energy'),
        ('Lip', 'Lipid'),
        ('Nuc', 'Nucleotide'),
        ('Cof', 'Cofactor or Vitamin'),
        ('Xen', 'Xenobiotics'),
        ('Gen', 'Genus OTU'),
        ('MET', 'Metabolite'),
        ('OTU', 'OTU'),
        ('V.K', 'Vaginal Functional'),
        ('F.K', 'Fecal Functional'),
        ('V.G', 'Vaginal Gene'),
        ('F.G', 'Fecal Gene'),
        ('V.', 'Vaginal OTU'),
        ('F.', 'Fecal OTU'),
    )
    return next((group for prefix, group in prefix_to_group
                 if feature.startswith(prefix)), 'NA')
|
f25d5c368cf832b48f8f154d525c96999fae39fb
| 30,209
|
def _tensorize(d, dtype=None, name=None, as_ref=False):
"""Tensor conversion function presuming `hasattr(d, '_value')`."""
return d._value(dtype, name, as_ref) # pylint: disable=protected-access
|
499368ac608bd99135ba84fb17cb8e833989ca3d
| 30,210
|
import re
def input_server_config(
    ip_prompt: str = "Enter the IP of where to host the server: ",
    port_prompt: str = "Enter the Port of where to host the server: ",
) -> tuple[str, int]:
    """
    Provides a built-in way to obtain the IP and port of where the server
    should be hosted, through :func:`input()`

    :param ip_prompt: A string, specifying the prompt to show when
        asking for IP.
        Default is "Enter the IP of where to host the server: "
    :type ip_prompt: str, optional
    :param port_prompt: A string, specifying the prompt to show when
        asking for Port
        Default is "Enter the Port of where to host the server: "
    :type port_prompt: str, optional
    :return: A two-element tuple, consisting of IP and Port
    :rtype: tuple[str, int]
    """
    # Dotted-quad shape check (trailing spaces tolerated, as before).
    ip_pattern = r"^((\d?){3}\.){3}(\d\d?\d?)[ ]*$"

    def _octets_in_range(candidate: str) -> bool:
        # Range check: every octet must fit in 0..255.
        return all(int(octet) <= 255 for octet in candidate.split("."))

    ip = input(ip_prompt)
    # BUG FIX: the original initialized its range flag to True and never
    # cleared it for a valid first answer, so even a correct IP was
    # rejected once and re-prompted.
    while not (re.search(ip_pattern, ip) and _octets_in_range(ip)):
        ip = input(f"\033[91mE: Invalid IP\033[0m\n{ip_prompt}")
    port = input(port_prompt)
    while not port.isdigit() or int(port) > 65535:
        # Port must be numeric and fit in 16 bits.
        port = input(f"\033[91mE: Invalid Port\033[0m\n{port_prompt}")
    return ip, int(port)
|
0ee997a6fa58cb1e76b4739006113e3ce3cfa861
| 30,211
|
def add_lane_lines(img, lane_line_img):
    """
    Color lane-line pixels in a copy of ``img``.

    Pixels where ``lane_line_img``'s red channel is set become pure red;
    pixels where its blue channel is set become pure blue (blue wins on
    overlap, as it is applied last).
    """
    result = img.copy()
    red_mask = lane_line_img[:, :, 0] > 0
    blue_mask = lane_line_img[:, :, 2] > 0
    result[red_mask] = (255, 0, 0)
    result[blue_mask] = (0, 0, 255)
    return result
|
2cab2fe7615f9072d998afc3c28a31ff82a7e8fd
| 30,212
|
import re
def parse(instruction):
    """
    Parse an instruction string into (output wire, input specification).

    Example input specifications:
    * ("ID", "6") for constant input
    * ("ID", "a") for direct wire input "a"
    * ("NOT", "ab") for negated wire "ab"
    * ("AND", "76", "xy") for bitwise-AND between constant and wire "xy"

    Keyword arguments:
    instruction --- an instruction formatted according to the challenge rules

    Raises ValueError for anything that does not match the grammar.
    """
    top = re.search("^(.*) -> ([a-z]+)$", instruction)
    if not top:
        raise ValueError("Illegal instruction:", instruction)
    expression, output_wire = top.groups()
    if " " not in expression:
        # Bare constant or wire name.
        return (output_wire, ("ID", expression))
    if "NOT" in expression:
        unary = re.search(r"^NOT (\w+)$", expression)
        if not unary:
            raise ValueError("Illegal instruction:", instruction)
        return (output_wire, ("NOT", unary.group(1)))
    binary = re.search(r"^(\w+) ([A-Z]+) (\w+)$", expression)
    if not binary:
        raise ValueError("Illegal instruction:", instruction)
    return (output_wire, (binary.group(2), binary.group(1), binary.group(3)))
|
8a17605b4a4a93a2ca01e32bee083ece6d75627b
| 30,213
|
import math
def _ms_to_us(time_ms, interval_us=1e3, nearest_up=True):
"""Convert [ms] into the (not smaller/greater) nearest [us]
Translate a time expressed in [ms] to the nearest time in [us]
which is a integer multiple of the specified interval_us and it is not
smaller than the original time_s.
Example:
::
_ms_to_us(1.0) => 1000 [us]
_ms_to_us(1.0, 1024) => 1024 [us]
_ms_to_us(1.1) => 2000 [us]
_ms_to_us(1.1, 1024) => 2048 [us]
:param time_ms: time in milliseconds
:type time_ms: float
:param interval_us: the result will be an integer multiple o this value
(default = 1e3)
:type time_ms: int
:param nearest_up: convert to not smaller nearest if True, to not greater
otherwise (default = True)
:type nearest_up: bool
"""
if nearest_up:
return interval_us * int(math.ceil((1e3 * time_ms) / interval_us))
return interval_us * int(math.floor((1e3 * time_ms) / interval_us))
|
fcc6c03ba55451dccfce42b30579298d963ef696
| 30,215
|
def generate_dmenu_options(optlist: list) -> str:
    """Join the options into one newline-separated string for dmenu."""
    separator = "\n"
    return separator.join(optlist)
|
6bcb1e601973d12d66c1d28f86a8eb4743408a4d
| 30,216
|
import time
def _is_active(log_stream: dict, cut_off: int) -> bool:
"""
Determine if the given stream is still active and worth tailing.
:param log_stream:
A dictionary returned by `describe_log_streams` describing the
log_stream under consideration.
:param cut_off:
The number of seconds to wait before calling a log_stream inactive.
If the stream does not have a `lastIngestionTime` more recent than
`cut_off` seconds ago it will be considered not active.
:return:
Whether the given log stream is active.
"""
last_ingest = log_stream["lastIngestionTime"] / 1000
return time.time() - last_ingest < cut_off
|
210dcd5a965dcf2535e55af694e11f17918666e6
| 30,217
|
def box_df_broadcast_failure(request):
    """
    Fixture equivalent to `box`, covering the common failing case where
    the DataFrame operation tries to broadcast incorrectly.
    """
    return request.param
|
3fc1105f740b949464958af13cc5b025a97421fe
| 30,219
|
def find_edges(silhouette):
    """Return a per-row map flagging rows of the silhouette that have an
    edge with a filled portion above and empty space below.

    :param silhouette: 2d array; zero entries are empty space
    :return: list of booleans, one per row (row 0 is always True so the
        top edge is always offered for decomposition)
    """
    rows, cols = silhouette.shape
    row_map = [False] * rows
    for y in range(rows):
        # Flag the row if any column is empty here but filled in the row
        # above.  (Index -1 wraps for y == 0, but row 0 is forced True
        # below anyway, matching the original behavior.)
        row_map[y] = any(
            silhouette[y, x] == 0 and silhouette[y - 1, x] != 0
            for x in range(cols)
        )
    row_map[0] = True
    return row_map
|
b9fbf100badf584330ec4a895a1d5d96a7f40858
| 30,220
|
def _check_regex(regex, value):
"""Return true if *value* matches regex."""
try:
return regex.search(value) is not None
except TypeError:
if value is regex:
return True # <- EXIT!
value_repr = repr(value)
if len(value_repr) > 45:
value_repr = value_repr[:42] + '...'
msg = 'expected string or bytes-like object, got {0}: {1}'
exc = TypeError(msg.format(value.__class__.__name__, value_repr))
exc.__cause__ = None
raise exc
|
62fa0f3d3b346cad03f3565cc6a03795c621cdbb
| 30,221
|
import re
def mountpoint_dataset(mountpoint: str):
    """
    Return the ZFS dataset mounted at *mountpoint*, or None if it is not a 'zfs' mount.

    Scans /proc/mounts for a line whose mount point equals *mountpoint* and
    whose filesystem type is ``zfs``; the first whitespace-separated field of
    that matching line is the dataset name.

    :param mountpoint: absolute mount-point path to look up
    :return: dataset name, or None if not found
    """
    # BUGFIX: escape the mountpoint so regex metacharacters in the path
    # (e.g. '+', '(') cannot corrupt or widen the match.
    target = re.compile(r'^.*\s+' + re.escape(mountpoint) + r'\s+zfs\b')
    with open("/proc/mounts") as f:
        mount = next((ds for ds in f.read().splitlines() if target.search(ds)), None)
    return None if mount is None else mount.split()[0]
|
b549a94915b0089fcb3267b6273bde6f3243bd65
| 30,223
|
def _add_necessary_columns(args, custom_columns):
"""
Convenience function to tack on columns that are necessary for
the functionality of the tool but yet have not been specifically
requested by the user.
"""
# we need to add the variant's chrom, start and gene if
# not already there.
if custom_columns.find("gene") < 0:
custom_columns += ", gene"
if custom_columns.find("start") < 0:
custom_columns += ", start"
if custom_columns.find("alt") < 0:
custom_columns += ", alt"
return custom_columns
|
931b7ed9b181462220e665e6fb895bb7a1a835dc
| 30,224
|
import random
def next_img_pair_to_grow_reconstruction(n_imgs, init_pair, resected_imgs, unresected_imgs, img_adjacency):
    """
    Given initial image pair, resect images between the initial ones, then extend reconstruction in both directions.

    :param n_imgs: Number of images to be used in reconstruction
    :param init_pair: tuple of indicies of images used to initialize reconstruction
    :param resected_imgs: List of indices of resected images
    :param unresected_imgs: List of indices of unresected images
    :param img_adjacency: Matrix with value at indices i and j = 1 if images have matches, else 0
    :return: (resected_idx, unresected_idx, prepend) — the anchor image, the next
        image to register, and whether the new image precedes the anchor.
    """
    if len(unresected_imgs) == 0: raise ValueError('Should not check next image to resect if all have been resected already!')
    straddle = False
    if init_pair[1] - init_pair[0] > n_imgs/2 : straddle = True #initial pair straddles "end" of the circle (ie if init pair is idxs (0, 49) for 50 images)
    init_arc = init_pair[1] - init_pair[0] + 1 # Number of images between and including initial pair

    # Phase 1: fill in images between the initial pair.
    if len(resected_imgs) < init_arc:
        if straddle == False: idx = resected_imgs[-2] + 1
        else: idx = resected_imgs[-1] + 1
        while True:
            if idx not in resected_imgs:
                prepend = True
                unresected_idx = idx
                resected_idx = random.choice(resected_imgs)
                return resected_idx, unresected_idx, prepend
            # BUGFIX: was `idx = idx + 1 % n_imgs`; `%` binds tighter than `+`,
            # so the index never wrapped around the circle.
            idx = (idx + 1) % n_imgs

    # Phase 2: alternately extend the reconstruction on both sides of the arc.
    extensions = len(resected_imgs) - init_arc # How many images have been resected after the initial arc
    if straddle == True: #smaller init_idx should be increased and larger decreased
        if extensions % 2 == 0:
            unresected_idx = (init_pair[0] + int(extensions/2) + 1) % n_imgs
            resected_idx = (unresected_idx - 1) % n_imgs
        else:
            unresected_idx = (init_pair[1] - int(extensions/2) - 1) % n_imgs
            resected_idx = (unresected_idx + 1) % n_imgs
    else:
        if extensions % 2 == 0:
            unresected_idx = (init_pair[1] + int(extensions/2) + 1) % n_imgs
            resected_idx = (unresected_idx - 1) % n_imgs
        else:
            unresected_idx = (init_pair[0] - int(extensions/2) - 1) % n_imgs
            resected_idx = (unresected_idx + 1) % n_imgs
    prepend = False
    return resected_idx, unresected_idx, prepend
|
3d05076f42bd8439a5ee0af662709a3d7b819922
| 30,225
|
def select_attr(obj, name):
    """Bundle *obj* and the attribute *name* into the dict expected by set_attr."""
    return dict(object=obj, name=name)
|
d32116d3ae28455dc463ed4cc016de5e8c2434fc
| 30,227
|
def get_part_of_speech(s):
    """
    Detect part-of-speech encoding from an entity string, if present.

    :param s: Entity string, slash-delimited
    :return: part-of-speech encoding ("n", "a", "v" or "r"), or "" if absent
    """
    tokens = s.split("/")
    if len(tokens) <= 4:
        return ""
    # Only the four WordNet POS tags are recognised.
    return tokens[4] if tokens[4] in ("n", "a", "v", "r") else ""
|
8498e4ff112bad0946b1f66b466007d9049b5e2f
| 30,228
|
def get_pam_and_tool_from_filename(score_filename):
    """Extracts pam/tool info from a score filename.

    Args:
        score_filename: specific score file name (including .txt)
    Returns:
        (pam, pam_tool) tuple
    Example:
        Example score file: 'aggt_Chimera.txt'
        output: ('aggt', 'Chimera')
    """
    # Split on the first underscore only, so tool names that themselves
    # contain underscores (e.g. 'aggt_My_Tool.txt') no longer raise.
    pam, pam_tool_txt = score_filename.split('_', 1)
    pam_tool = pam_tool_txt.split('.')[0]
    return pam, pam_tool
|
319c211d98591ab1702689e08d15cb5025ca7a6c
| 30,229
|
from typing import Dict
from typing import Union
def parse_websocket_action(data: Dict[str, str]) -> Union[str, None]:
    """
    Returns the websocket action from a given decoded websocket message or None if the action
    doesn't exist or isn't a non-empty string.
    """
    # isinstance against typing.Dict is discouraged (and unnecessary):
    # check the concrete runtime type instead.
    if isinstance(data, dict):
        websocket_action = data.get('action')
        if websocket_action and isinstance(websocket_action, str):
            return websocket_action
    return None
|
7a7a2136c0f5809e51641ba10839070152b2df98
| 30,230
|
def get_object(conn, key, error='object not found', version_id=None):
    """Get an object from S3.

    :param conn: dict with an S3 'client' and a 'bucket' name
    :param key: object key to fetch
    :param error: message for the ValueError raised when the key is missing
    :param version_id: optional object version to fetch
    :return: dict with key, version_id, body, content_length, content_type,
        metadata and last_modified
    :raises ValueError: when the key does not exist
    """
    request = {'Bucket': conn['bucket'], 'Key': key}
    if version_id is not None:
        request['VersionId'] = version_id
    try:
        k = conn['client'].get_object(**request)
    except conn['client'].exceptions.NoSuchKey:
        raise ValueError(error)
    return {'key': key,
            'version_id': k['VersionId'],
            'body': k['Body'],
            'content_length': k['ContentLength'],
            'content_type': k['ContentType'],
            'metadata': k['Metadata'],
            'last_modified': k['LastModified']}
|
506dfdf11c98d9cd6cf6d7d6938c71b4b6d4ae5c
| 30,231
|
def containedStructure(containee, container):
    """
    Check whether the structure of *containee* is contained in *container*.

    Similar to sameStructure, except only a subset has to match: every key of
    a dict containee must exist in the container with a recursively contained
    value; a list containee must be a subset of a list container; any other
    value only has to match the container's type.  None matches everything.

    :param containee: The containee which structure has to be contained in
        the container.
    :param container: The container that has to contain the structure of the
        containee.
    :returns: True if the structure of the containee is contained in the
        container, False otherwise.
    """
    if isinstance(containee, dict):
        if not isinstance(container, dict):
            return False  # containee is a dict, but container isn't
        # Every key must exist and its value must be recursively contained.
        return all(
            key in container and containedStructure(value, container[key])
            for key, value in containee.items()
        )
    if isinstance(containee, list):
        if not isinstance(container, list):
            return False  # containee is a list, but container isn't
        return set(containee).issubset(set(container))
    if containee is None:
        return True  # None acts as a wildcard
    # Otherwise the container must carry the containee's type.
    return isinstance(container, type(containee))
|
9d883f3fb8937a31593a335ab9161fcdfcc156fc
| 30,232
|
import math
def add_buffer_to_label(sparsity, label_windows, min_label, max_label, window_scale_limit=2, max_sparsity=0.01):
    """
    Add buffers to windows to produce the desired anomaly sparsity. If the current sparsity is greater
    than the max_sparsity, then this function does not do anything. This will not expand windows beyond
    window_scale_limit * (current window size). Merge any overlapping windows.
    @param sparsity (float): The current sparsity (total length of label windows)/(length of timeseries)
    @param label_windows (list): List of (start, end) indices. Should be sorted by start index.
    @param min_label (int): Don't allow windows to expand before this index
    @param max_label (int): Don't allow windows to expand after this index
    @param window_scale_limit (float): The largest amount the windows to be expanded by. For example, for
                                       window_scale_limit=2, the new windows will be at most
                                       2 * (current window size).
    @param max_sparsity (int): The max sparsity of the window after increasing the window size.
    @return (list): New list of (start, end) windows; the input list itself is
                    returned unchanged when no expansion is needed.
    """
    # No windows at all -> nothing to expand.
    if sparsity == 0:
        return label_windows
    new_windows = []
    # Fraction of the current total window length to add, capped by both the
    # target sparsity ratio and the per-window scale limit.
    expand_amount = min(max_sparsity/sparsity, window_scale_limit) - 1
    if expand_amount > 0:
        for i in range(len(label_windows)):
            start = label_windows[i][0]
            end = label_windows[i][1]
            # Half the expansion goes on each side; ceil so tiny windows
            # still grow by at least one index.
            buff_amount = math.ceil(expand_amount/2 * (end - start+1))
            # Clamp to the allowed label range.
            new_start = max(min_label, start - buff_amount)
            new_end = min(max_label, end + buff_amount)
            # Merge overlapping windows. Overlapping windows do not happen frequently and
            # usually only overlap with adjacent window.
            while len(new_windows) > 0 and new_windows[-1][1] >= new_start-1:
                new_start = min(new_windows[-1][0], new_start)
                new_end = max(new_windows[-1][1], new_end)
                del new_windows[-1]
            new_windows.append((new_start, new_end))
    else:
        # Already at or above the target sparsity: keep the windows as-is.
        new_windows = label_windows
    return new_windows
|
856a5c76936766bbf9b0ae519234329f9b3aa4a7
| 30,233
|
import fnmatch
def filter_repos(config, repo_dir=None, vcs_url=None, name=None):
    """Return a :py:obj:`list` of repos from the (expanded) config file.

    ``repo_dir``, ``vcs_url`` and ``name`` all support fnmatch patterns.

    Parameters
    ----------
    config : dict
        the expanded repo config in :py:class:`dict` format.
    repo_dir : str, Optional
        directory of checkout location, fnmatch pattern supported
    vcs_url : str, Optional
        url of vcs remote, fnmatch pattern supported
    name : str, Optional
        project name, fnmatch pattern supported

    Returns
    -------
    list :
        Repos
    """
    matches = []
    if repo_dir:
        matches += [repo for repo in config
                    if fnmatch.fnmatch(repo['parent_dir'], repo_dir)]
    if vcs_url:
        matches += [repo for repo in config
                    if fnmatch.fnmatch(repo.get('url', repo.get('repo')), vcs_url)]
    if name:
        matches += [repo for repo in config
                    if fnmatch.fnmatch(repo.get('name'), name)]
    return matches
|
587d81e0292f785fd8a4a4511487752a1d5ebfe7
| 30,234
|
def extend_smallest_list(a, b, extension_val=None):
    """Extend the smaller list in place to match the length of the larger one.

    If extension_val is None, the extension repeats the shorter list's last
    element; otherwise extension_val is used as the filler.

    Arg(s):
        a - A list.
        b - A list.
        extension_val - Extension value.
    Returns:
        The input lists with the smallest list extended to match the size of
        the longest list.
    """
    gap = abs(len(a) - len(b))
    if len(a) > len(b):
        filler = b[-1] if extension_val is None else extension_val
        b.extend([filler] * gap)
    else:
        filler = a[-1] if extension_val is None else extension_val
        a.extend([filler] * gap)
    return a, b
|
2d80690073cbf258ac918b1677b6cd0b7313d17d
| 30,235
|
import os
import json
def get_canned_json(name_of_file):
    """Read a canned Grid request from a JSON file in the ``test_files`` directory.

    Args:
        name_of_file: file name (with extension) inside ``test_files``.
    Returns:
        The parsed JSON content.
    Raises:
        FileNotFoundError: if the file does not exist; the message names the
            path that was tried instead of a generic string.
    """
    location = os.path.join(os.path.dirname(__file__), "test_files", name_of_file)
    try:
        with open(location, "r") as grid_file:
            return json.load(grid_file)
    except FileNotFoundError as exc:
        # Re-raise with the offending path so the failure is diagnosable.
        raise FileNotFoundError("json file not found: {}".format(location)) from exc
|
e439d4ecede15d4cc354393b6b59e444724d459d
| 30,236
|
def doc(l, b, i):
    """
    !d Return the main docstring of a module
    !a <module>
    !r developer
    """
    for m in l.get_modules():
        if i.args[0] == m.__name__:
            if m.__doc__ is not None:
                for line in m.__doc__.split('\n'):
                    b.l_say(line, i, 0)
            else:
                b.l_say('No doc found for that module.', i, 0)
            return True
    # BUGFIX: the not-found replies used to live in an `else` attached to the
    # name check *inside* the loop, so every module after the first was never
    # examined.  Only report failure once all modules have been checked.
    if 'modules.' in i.args[0]:
        return b.l_say('No doc found for that module.', i, 0)
    return b.l_say('No doc found for that module. '
                   'Maybe try modules.%s?' % i.args[0],
                   i, 0)
|
8565dc75d9d78eafb2892213dbef20df3defe39f
| 30,237
|
import os
def list_dirs(path, suffix):
    """
    Recursively list all directories under *path* whose name ends with *suffix*.

    Matching directories are not descended into further.
    """
    matched = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if not os.path.isdir(full):
            continue
        if full.endswith(suffix):
            matched.append(full)
        else:
            matched.extend(list_dirs(full, suffix))
    return matched
|
086d7987167d7d5a77ff51e7f62a334897bbc22d
| 30,238
|
import sys
from pathlib import Path
def get_config_directory():
    """Returns the Dakara config directory to use for the current OS.

    Returns:
        path.Path: Path of the Dakara config directory. Value is not expanded,
        so you have to call `.expand()` on the return value.

    Raises:
        NotImplementedError: for platforms other than Linux and Windows.
    """
    if "linux" in sys.platform:
        return Path("~") / ".config" / "dakara"
    # BUGFIX: the previous check was `"win" in sys.platform`, which also
    # matched macOS ("darwin") and wrongly returned the Windows path there.
    if sys.platform.startswith("win"):
        return Path("$APPDATA") / "Dakara"
    raise NotImplementedError(
        "This operating system ({}) is not currently supported".format(sys.platform)
    )
|
0c1a6e3f388ff50385406719b4842b9a13d9df1a
| 30,239
|
def to_celsius(temp):
    """Convert temperature measured on the Fahrenheit scale to Celsius.

    Parameters:
        temp (int): temperature value to convert

    Returns
        float: temperature value converted to Celsius, rounded to 3 places
    """
    # Use the exact ratio 5/9 instead of the truncated 0.5556 approximation,
    # which drifted by about 0.01 degrees per 100 F (212 F gave 100.008).
    return round((int(temp) - 32) * 5 / 9, 3)
|
da86d46794ef1e8d4d34ae2b23a48488b75643a0
| 30,240
|
def ReadToken(stream, binary, remove_tail_space = True):
    """Read string token from input stream.

    A token is a maximal run of characters up to the next space, newline or
    end-of-stream.  In text mode, leading spaces/newlines are skipped first.

    Args:
        stream: An opened KaldiInputStream (must support Peek(n)/Read(n),
            where Peek does not consume and Read does).
        binary: If the input stream is in binary.
        remove_tail_space: Whether to remove the tailing space.
    Returns:
        A string read from the input stream ('' on immediate end-of-stream).
    """
    res = []
    if not binary :
        # Text mode: skip any leading whitespace before the token.
        while True:
            c = stream.Peek(1)
            if not c:
                # End of stream while skipping: return what we have (empty).
                return ''.join(res)
            if c == ' ' or c == '\n':
                stream.Read(1)
            else:
                break
    # Accumulate characters until the next delimiter or end-of-stream.
    while True:
        c = stream.Peek(1)
        if not c:
            return ''.join(res)
        if c == ' ' or c == '\n':
            # Delimiter reached; leave it unconsumed for the tail-space check.
            break
        else:
            res.append(c)
        stream.Read(1)
    if remove_tail_space:
        # Consume a single trailing space (but not a newline).
        if stream.Peek(1) == ' ':
            stream.Read(1)
    return ''.join(res)
|
11b11416abb6aeb548ac4c3cecf9b7376217b4fe
| 30,241
|
def readable_bool(b):
    """Takes a boolean variable and returns "yes" for true and "no" for false
    as a string object.

    :param b: boolean variable to use
    """
    return "yes" if b else "no"
|
dc30a0dda31943538283b1f18dd6351ca0613614
| 30,243
|
import re
def is_url(_str):
    """Return True if the str input contains a URL."""
    # Raw string avoids the invalid-escape-sequence DeprecationWarnings the
    # old literal emitted; a single search is enough — the full match list
    # was only ever used for its length.
    pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    return re.search(pattern, _str) is not None
|
7a298424c2f39f62a9d4197a7f822806e68e424f
| 30,244
|
import torch
def log_sum_exp(x):
    """
    Numerically stable log-sum-exp over the mixture dimension (dim=1).

    :param x: Tensor. shape = (batch_size, num_mixtures, height, width)
    :return: max-shifted log of the sum of exp over dim 1
    """
    # Per-batch max over the mixture dim, kept (keepdim=True) so it
    # broadcasts against x in the subtraction below.
    m2 = torch.max(x, dim=1, keepdim=True)[0]
    m = m2.unsqueeze(1)
    # NOTE(review): `m` carries an extra singleton dim (B,1,1,H,W) while the
    # non-keepdim sum below is (B,H,W); the addition relies on broadcasting
    # producing a higher-rank result — confirm the intended output shape
    # against the caller before changing anything here.
    return m + torch.log(torch.sum(torch.exp(x - m2), dim=1))
|
5f5c30070458727b032219bbc8158d88f607b74b
| 30,245
|
from typing import Union
from typing import List
def common_missing_fills(variable_type: str) -> Union[List[int], List[str]]:
    """Return a list of common missing-value stand-ins.

    Parameters
    ----------
    variable_type : str
        What type of variable to return the missing stand-ins for, either
        "numeric" or "string".

    Returns
    -------
    list
        list of strings if variable_type is "string" and of ints if
        variable_type is "numeric".
    """
    if variable_type == "numeric":
        # -1, then runs of nines (99, 999, 9999), then their negatives.
        nines = [int("9" * length) for length in range(2, 5)]
        return [-1] + nines + [-value for value in nines]
    if variable_type == "string":
        return ["NA", "missing", "N/A", "NaN"]
    raise NotImplementedError
|
1e3e189c7d2fd2895028ca5be59e3ada3a90a688
| 30,246
|
def _tc_enc(value: str, code: int = 32) -> str:
"""Encode error message values with terminal colors.
See:
- <https://i.stack.imgur.com/9UVnC.png>
- <https://stackoverflow.com/a/61273717>
For Python's somewhat-poorly-documented encodings:
<https://docs.python.org/3/library/codecs.html#text-encodings>
Some color options:
- 30: black
- 31: red
- 32: green
- 33: yellow
- 34: blue
- 35: magenta
- 36: cyan
- 37: white
"""
return f"\u001b[{code}m{value}\u001b[0m"
|
dfa56f27aa3f6dda51fbeb4cfabe70e25b19f3da
| 30,247
|
def list_reduce(fn, lst, dftl):
    """Fold *lst* left-to-right with *fn*, starting from the default *dftl*."""
    accumulator = dftl
    for item in lst:
        accumulator = fn(accumulator, item)
    return accumulator
|
8069bb4a5615a3e618532e2d85ccae6ad90a2909
| 30,248
|
def _MonkeypatchTokenizerHelpers(module, func_names):
"""Replaces preprocessor functions.
Pygments defines some functions that are called when a lexer is constructed,
returning opaque objects that we can't inspect. We must replace those objects
with ones that simply note the name of the function called.
For example, instead of calling bygroups (which returns a matcher function),
we must note that bygroups is called at that point.
Args:
module: the module to patch.
func_names: the list of function names in that module to replace.
"""
def _MakeRecorder(func_name):
# Ignore the keyword args, which we don't support.
return lambda *args, **kwargs: (func_name, args)
for func_name in func_names:
getattr(module, func_name) # First make sure the function exists.
setattr(module, func_name, _MakeRecorder(func_name))
|
28f8adb98bce961be8d07e371d6f18b461d88293
| 30,249
|
def jwt_claim(request):
    """
    Holds parametrized information about client_ids and the response codes
    they should return.
    """
    return request.param
|
65d95086b7f183b56190431bae197f9f12fb07d8
| 30,250
|
def obtain_ECG(tensec_data):
    """ obtain ECG values of ten second data

    The interleaved stream holds ECG at the odd indices, so demultiplexing is
    a strided slice starting at index 1.

    :param tensec_data: 10 seconds worth of heart rate data points
    :return ECGData: ECG unmultiplexed data
    """
    return tensec_data[1::2]
|
ce63b58435b67b6995d19e04a64bcc24d9687cd5
| 30,252
|
def get_nic(parent, host, port, community):
    """ This is a plugin to be loaded by Ecks3

    Info on the data returned is available at http://www.oidview.com/mibs/0/IF-MIB.html

    Returns an array of tuples containing (name, type, mtu, speed, mac,
    admin_status, oper_status, last_changed, in_bytes, in_u_pkts, in_nu_pkts,
    in_discard, in_err, in_unknown, out_bytes, out_u_pkts, out_nu_pkts,
    out_discard, out_err, out_queue) for each ethernet interface.

    The type return value is an integer that can be looked up at
    http://www.iana.org/assignments/ianaiftype-mib — most common type is
    6 (ethernet).
    """
    oid = (1, 3, 6, 1, 2, 1, 2, 2, 1)  # IF-MIB::ifEntry
    data = parent.get_snmp_data(host, port, community, oid, 1)
    # (column index, value type) for every IF-MIB field, in output order:
    # name(2,str), ifType(3), MTU(4), speed(5), MAC(6,str), then the
    # admin/oper status, last-change and in/out counter columns 7..21 as int.
    columns = [(2, str), (3, int), (4, int), (5, int), (6, str)]
    columns += [(index, int) for index in range(7, 22)]
    extracted = [parent._extract(data, value_type, index)
                 for index, value_type in columns]
    return list(map(parent._build_answer, *extracted))
|
26941d04ed030d305969ecb021bf8e71182640a5
| 30,253
|
def is_int(s: str) -> bool:
    """Returns True iff the field can be interpreted as an int.

    Valid value examples: 3, 3.0, "3", "3.0"
    """
    try:
        num = float(s)
    except (ValueError, TypeError):
        return False
    # NaN compares unequal to itself; short-circuit before rounding it.
    return num == num and num == round(num)
|
f1ef8efd9134bf87862fdcb2d97c7fffa010bdee
| 30,255
|
def generate_tissue_box(image_file, bounding_box):
    """Cut a small piece of tissue from an image opened in PIL.

    :param image_file: image object supporting PIL's ``crop`` interface
    :param bounding_box: (left, upper, right, lower) crop rectangle
    :return: the cropped image region
    """
    return image_file.crop(bounding_box)
|
95b66ffd10b306cc7b3939226f96678d813b6518
| 30,257
|
def tert_to_offset_t_j2000(tert):
    """Return the number of days elapsed since the J2000.0 epoch, based on Terrestrial Time.

    :param tert: object exposing ``terrestrial_time`` as a Julian date
    """
    # 2451545.0 is the Julian date of the J2000.0 epoch.
    J2000_EPOCH_JD = 2451545.0
    return tert.terrestrial_time - J2000_EPOCH_JD
|
1ce628b5271d3b6eec7482153266de4e5f6b4e91
| 30,258
|
import ast
def str2dict(d_s):
    """Safely evaluate a dictionary literal given as a string.

    :d_s: Dictionary string
    :returns: Evaluated dictionary
    """
    # literal_eval only accepts Python literals, so arbitrary code in the
    # string cannot execute.
    return ast.literal_eval(d_s)
|
959da3c3197b5f8338cc33a7d9998303c50dd424
| 30,260
|
def project_08_largest_product(count):
    """ Problem 8: Find the largest product of n adjacent digits in a hardcoded series.

    Args:
        count (int): The number of adjacent numbers to determine product for.
    Returns:
        int: the largest product of `count` adjacent digits in the series.
    """
    def product(sequence):
        """Product of an iterable of ints."""
        result = 1
        for term in sequence:
            result *= term
        return result
    series = '73167176531330624919225119674426574742355349194934' \
             '96983520312774506326239578318016984801869478851843' \
             '85861560789112949495459501737958331952853208805511' \
             '12540698747158523863050715693290963295227443043557' \
             '66896648950445244523161731856403098711121722383113' \
             '62229893423380308135336276614282806444486645238749' \
             '30358907296290491560440772390713810515859307960866' \
             '70172427121883998797908792274921901699720888093776' \
             '65727333001053367881220235421809751254540594752243' \
             '52584907711670556013604839586446706324415722155397' \
             '53697817977846174064955149290862569321978468622482' \
             '83972241375657056057490261407972968652414535100474' \
             '82166370484403199890008895243450658541227588666881' \
             '16427171479924442928230863465674813919123162824586' \
             '17866458359124566529476545682848912883142607690042' \
             '24219022671055626321111109370544217506941658960408' \
             '07198403850962455444362981230987879927244284909188' \
             '84580156166097919133875499200524063689912560717606' \
             '05886116467109405077541002256983155200055935729725' \
             '71636269561882670428252483600823257530420752963450'
    max_product = 0
    # BUGFIX: the previous loop ran to len(series) - count - 1 and therefore
    # never examined the final two windows of the series.  The correct last
    # start index is len(series) - count, inclusive.
    for start_index in range(0, len(series) - count + 1):
        terms = [int(digit) for digit in series[start_index:start_index + count]]
        term_product = product(terms)
        if term_product > max_product:
            max_product = term_product
    return max_product
|
49d7e5d5d2b90bc22b07d9af7863b8367d16501d
| 30,262
|
def load_input_segs(cand_segs, ref_segs, src_segs=None):
    """
    Load input files specified in the CL arguments into memory.

    Returns a list of 5-tuples: (segment_id, origin, src_segment,
    candidate_segment, reference_segment)

    "origin" is always None (present for compatibility with other modules
    handling sdlxliff files).  "src_segment" is None if the source file was
    not passed on the CL.
    """
    # The src file is optional; substitute a None placeholder per segment.
    src_segs = src_segs or [None] * len(cand_segs)
    assert len(src_segs) == len(cand_segs) == len(ref_segs)
    segments = []
    for seg_id, (src, cand, ref) in enumerate(zip(src_segs, cand_segs, ref_segs), 1):
        stripped_src = src.strip() if src else src
        segments.append((seg_id, None, stripped_src, cand.strip(), ref.strip()))
    return segments
|
e6d8f6b92d00603dc7c06ca1484bf86ebea84273
| 30,263
|
from typing import Union
def fibonacci_k_n_term(n: int, k: int) -> Union[int, NotImplementedError]:
    """
    Returns the nth fibonacci_k number,
    where F_{k,n+1} = k*F_{k,n} + F_{k,n-1} for n >= 1.

    Note: for negative n a NotImplementedError instance is *returned* (not
    raised), matching the existing contract.
    """
    if n < 0:
        return NotImplementedError('negative n is not implemented')
    if n in (0, 1):
        return n
    # Closed form via the metallic ratio, the positive root of x^2 = k*x + 1.
    metallic_ratio = (k + (k ** 2 + 4) ** 0.5) / 2
    binet_value = (metallic_ratio ** n - (-metallic_ratio) ** (-n)) \
        / (metallic_ratio + 1 / metallic_ratio)
    return round(binet_value)
|
135e130f63e0ab9317eb145f21e55268412973da
| 30,264
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.