content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def StopW_Punct():
    """Return a list of punctuation characters to be used as stop words."""
    # One character per entry; note the backslash is intentionally absent.
    return list('!"#$%&\'()*+,-./:;<=>?@[{|}~^_]`')
def baskets(items, count):
    """Distribute ``items`` round-robin into at most ``count`` baskets.

    Original order is not preserved; empty baskets are dropped.

    Example:
        > baskets([1,2,3,4,5,6,7,8, 9, 10], 3)
        [[1, 4, 7, 10], [2, 5, 8], [3, 6, 9]]
    """
    groups = [[] for _ in range(count)]
    for index, element in enumerate(items):
        groups[index % count].append(element)
    # Drop baskets that received nothing (when count > len(items)).
    return [bucket for bucket in groups if bucket]
def dates(df, params, field='dates'):
    """
    Return the rows of ``df`` whose ``field`` value lies in an inclusive range.
    Args:
        df (pd.DataFrame): The input data
        params (Tuple[dt.datetime, dt.datetime]): Dates, must be in order of MIN, MAX
        field (str): The date field used to find matching values
    Returns:
        pd.DataFrame with a fresh 0-based index
    """
    lower, upper = params
    in_range = (df[field] >= lower) & (df[field] <= upper)
    return df[in_range].reset_index(drop=True)
def tox_configure(config):
    """Adjust the tox ``config`` in place and return it.

    ``print_deps_only`` skips sdist building; ``current_env`` additionally
    forces env recreation and whitelists every external command.
    """
    opts = config.option
    if opts.print_deps_only:
        config.skipsdist = True
    elif opts.current_env:
        opts.recreate = True
        config.skipsdist = True
        for name in config.envconfigs:
            config.envconfigs[name].whitelist_externals = "*"
    return config
def get_hyperparams_wd(optimizer_def):
    """Return the weight-decay value stored on an optimizer definition's
    hyper-parameters."""
    hyper_params = optimizer_def.hyper_params
    return hyper_params.weight_decay
import os
def exists(file):
    """Return True if ``file`` exists on the filesystem, else False.

    Thin wrapper kept for backward compatibility; os.path.exists already
    returns a bool, so the old if/else was redundant.
    """
    return os.path.exists(file)
import os
def getScriptPath(design):
    """
    Return the scripts directory for ``design``:
    <result_dir>/<top_name>/<lib_name>/scripts/
    """
    parts = (design.result_dir, design.top_name, design.lib_name, "scripts/")
    return os.path.join(*parts)
import os
def select_dir(*dirs):
    """Return the first existing directory among ``dirs``.

    Helps write code that works for multiple users/machines with
    different directory layouts.

    Raises:
        RuntimeError: if none of the candidates is an existing directory.
    """
    for candidate in dirs:
        if os.path.isdir(candidate):
            return candidate
    raise RuntimeError('None of the given dirs exists.')
def sobloo_opensearch_params(latitude, longitude, max_cloud_cover):
    """Build the Sobloo Opensearch query for the latest Sentinel-2 image
    with acceptable cloud cover at the requested location.
    Arguments:
        latitude {float} -- latitude of search point
        longitude {float} -- longitude of search point
        max_cloud_cover {float} -- max cloud cover percentage (0 to 1)
    Returns:
        dict -- parameters of GET request to Sobloo Opensearch endpoint
    """
    delta = 0.001
    # Tiny square around the point, closed (first corner repeated last).
    corners = [
        (longitude - delta, latitude - delta),
        (longitude + delta, latitude - delta),
        (longitude + delta, latitude + delta),
        (longitude - delta, latitude + delta),
        (longitude - delta, latitude - delta),
    ]
    # Serialize to WKT.
    wkt = 'POLYGON ((%s))' % ', '.join('%s %s' % corner for corner in corners)
    filters = [
        'state.services.wmts:eq:true',
        'identification.collection:eq:Sentinel-2',
        'contentDescription.cloudCoverPercentage:lt:%i' % int(
            max_cloud_cover * 100),
    ]
    # Newest matching product only.
    return {
        'f': filters,
        'sort': '-timeStamp',
        'size': 1,
        'gintersect': wkt,
    }
from pathlib import Path
def data_file(path):
    """
    Return the path to ``path`` inside the 'data' directory next to this file.
    """
    data_dir = Path(__file__).parent / 'data'
    return str(data_dir / path)
def _map_boolean_to_human_readable(boolean, resource, token):
"""
Map a boolean into a human readable representation (Yes/No).
:param boolean: boolean with the value that we want to transform
:param resource: resource containing all the values and keys
:param token: user token
"""
if boolean:
return 'Yes'
else:
return 'No' | 380a9ff38cc5999a9e062b2487a7e54158c02a69 | 35,946 |
def clean_message(message):
    """(str) -> str
    Return a copy of the message containing only its alphabetic
    characters, in uppercase
    >>>clean_message('Hello there')
    'HELLOTHERE'
    >>>clean_message('yH*&23 ?')
    'YH'
    """
    # Upper-case first, then keep the alphabetic characters only.
    return ''.join(ch for ch in message.upper() if ch.isalpha())
def lower(value):
    """Return ``value`` lower-cased when it supports ``.lower()``,
    otherwise return it unchanged.
    For example::
        >>> print(lower('HELLO'))
        hello
        >>> print(lower(5))
        5
    """
    lower_method = getattr(value, 'lower', None)
    if lower_method is None:
        return value
    return lower_method()
import struct
def read_sprite_info_(rom, column_count_offset, sprite_data_offset):
    """Read sprite column counts and data offsets from ``rom``.

    Starting at ``column_count_offset`` the rom holds, per sprite, a
    ushort column count followed by a little-endian uint32 offset relative
    to ``sprite_data_offset``; a zero column count terminates the table.
    SNA3D-style, simpler.
    """
    rom.seek(column_count_offset)
    entries = []
    while True:
        columns = rom.read_ushort()
        if not columns:
            break
        relative, = struct.unpack('<I', rom.read(4))
        entries.append({
            'column_count': columns,
            'offset': sprite_data_offset + relative,
        })
    return entries
def json_path_components(path):
    """Normalize a JSON path into a list of components.

    :param path: JSON path, either an iterable of components or a
        dot-separated string
    :return: list of path components
    """
    components = path.split(".") if isinstance(path, str) else path
    return list(components)
import re
def strip(data):
    """Chop the surrounding quotes off ``data`` and decode escape sequences.

    ASCII runs captured by the split that contain a backslash are run
    through the 'unicode_escape' codec; all other pieces pass unchanged.
    """
    inner = data[1:-1]
    pieces = []
    for token in re.split('([\000-\200]+)', inner):
        if '\\' in token:
            token = token.encode('utf-8').decode('unicode_escape')
        pieces.append(token)
    return ''.join(pieces)
def find_kth(nums1, nums2, k):
    """Return the k-th smallest element (1-based) of two sorted arrays.
    >>> find_kth([1, 3], [2], 2)
    2
    >>> find_kth([2], [1, 3], 1)
    1
    >>> find_kth([1, 3], [2], 3)
    3
    >>> find_kth([1], [2], 1)
    1
    >>> find_kth([1], [2], 2)
    2
    """
    # Keep nums1 as the shorter array.
    if len(nums2) < len(nums1):
        nums1, nums2 = nums2, nums1
    # Only nums2 left: answer is directly indexable.
    if not nums1:
        return nums2[k - 1]
    # Smallest overall element.
    if k == 1:
        return min(nums1[0], nums2[0])
    # Divide and conquer: discard half of k at each step.
    half = k // 2
    rest = k - half
    if len(nums1) < half:
        # nums1 cannot supply its half, so the first `rest` items of nums2
        # are all below the k-th element and can be discarded safely.
        return find_kth(nums1, nums2[rest:], half)
    pivot1, pivot2 = nums1[half - 1], nums2[rest - 1]
    if pivot1 == pivot2:
        return pivot1
    if pivot1 < pivot2:
        return find_kth(nums1[half:], nums2[:rest], rest)
    return find_kth(nums1[:half], nums2[rest:], half)
def _vimdiff(filepath, local_file_path, remote_file_path):
"""
Tried for a ludicrous amount of time to get it to open vimdiff automagically.
Instead we settled on just letting user know what command they should run.
"""
command = "vimdiff -f -d -c 'wincmd J' {merged} {local} {remote}".format(
merged=filepath, local=local_file_path, remote=remote_file_path)
return '''
~*Currently under development*~
To open the diff use this command:
$> {}'''.format(command) | d3b17aa9f85fd066e9dcd85c3d68752df59615d1 | 35,953 |
def _dataset_transform_first_fn(x):
"""Named transform function since lambda function cannot be pickled."""
return x | ac98761f2f5c32ca867be0fd8539ce3be741773c | 35,954 |
def get_menu_html(menu_data):
    """Render HTML for: menu + [sub-menu] + permission (url) links.

    Recursively walks ``menu_data``: entries with a 'url' become links,
    entries with 'children' become collapsible menu sections.
    """
    option_str = """
    <div class='rbac-menu-item'>
        <div class='rbac-menu-header'>{menu_title}</div>
        <div class='rbac-menu-body {active}'>{sub_menu}</div>
    </div>
    """
    url_str = """
    <a href="{permission_url}" class="{active}">{permission_title}</a>
    """
    """
    menu_data = [
        {'id': 1, 'title': '订单管理', 'parent_id': None, 'status': True, 'open': False,
         'children': [{'title': '查看订单', 'url': '/order', 'menu_id': 1, 'status': True, 'open': False}]},
        {'id': 2, 'title': '库存管理', 'parent_id': None, 'status': True, 'open': True,
         'children': [{'title': '查看库存清单', 'url': '/stock/detail', 'menu_id': 2, 'status': True, 'open': False},
                      {'title': '入库', 'url': '/stock/in', 'menu_id': 2, 'status': True, 'open': True}]},
        {'id': 3, 'title': '生产管理', 'parent_id': None, 'status': True, 'open': False,
         'children': [{'title': '查看生产订单', 'url': '/produce/detail', 'menu_id': 3, 'status': True, 'open': False},
                      {'title': '排单', 'url': '/produce/new', 'menu_id': 3, 'status': True, 'open': False}]},
        {'id': 4, 'title': '生产调查', 'parent_id': None, 'status': True, 'open': False,
         'children': [{'title': '产出管理', 'url': '/survey/produce', 'menu_id': 4, 'status': True, 'open': False},
                      {'title': '工时管理', 'url': '/survey/labor', 'menu_id': 4, 'status': True, 'open': False}]}
    ]
    """
    menu_html = ''
    for item in menu_data:
        if not item['status']: # skip menus the user has no permission under (item['status'] is False)
            continue
        else:
            if item.get('url'): # innermost level reached: this entry is a permission url
                menu_html += url_str.format(permission_url=item['url'],
                                            active="rbac-active" if item['open'] else "",
                                            permission_title=item['title'])
            else:
                # Non-leaf entry: render the section header and recurse into children.
                menu_html += option_str.format(menu_title=item['title'],
                                               sub_menu=get_menu_html(item['children']),
                                               active="" if item['open'] else "rbac-hide")
    return menu_html | 24ded42649cb3fc20d5a45f02453cbfa9a6db34a | 35,956 |
def inertia_update(iteration,n_iterations,wmin,wmax):
    """
    Linearly decreasing (time-varying) acceleration inertia:
        w^k = wmax - (wmax - wmin)/kmax * k
    Parameters:
        iteration: int
            The number of the current iteration (k)
        n_iterations: int
            The number of total iterations (kmax)
        wmin: float
            The minimum value of the inertia weight
        wmax: float
            The maximum value of the inertia weight
    Returns: float
        The new inertia weight value
    """
    slope = (wmax - wmin) / n_iterations
    return wmax - slope * iteration
def add_p(msg):
    """
    Wrap ``msg`` in HTML paragraph tags, followed by two newlines.
    :input:
        message (string)
    """
    return f'<p>{msg}</p>\n\n'
def crop_scores(player):
    """Extract only the score fields we track from a full player object.

    Args:
        player (dict): player object from the game data

    Returns:
        score (dict): selected score values plus 'finished' flag and 'rank'
    """
    # Dead/open slots never finished, but the last seen player may still
    # carry a valid finish rank.
    if player['username'] in ('dead', 'open'):
        return {'finished': 0, 'rank': player['finishrank']}
    wanted = (
        'capitalships',
        'freighters',
        'planets',
        'starbases',
        'militaryscore',
        'percent',
    )
    raw = player['score']
    score = {key: raw.get(key) for key in wanted}
    score['finished'] = 1
    score['rank'] = player['finishrank']
    return score
def sqrt(number):
    """
    Calculate the floored square root of a number
    Args:
        number(int): Number to find the floored squared root
    Returns:
        int: Floored Square Root, or None for negative / non-numeric input
    """
    # Validate input. TypeError covers None and other non-numerics (the old
    # code only caught ValueError, so sqrt(None) crashed); the sign check is
    # done inside the try because e.g. "abc" < 0 also raises TypeError.
    try:
        value = int(number)
        negative = number < 0
    except (ValueError, TypeError):
        return None
    if negative:
        return None
    if value == 0:
        return 0
    # Binary search on the integer part. Searching on int(value) also fixes
    # an infinite loop the old code hit for fractional floats (the midpoint
    # could stop making progress); floor(sqrt(x)) == isqrt(floor(x)) for
    # x >= 0, so flooring first is safe.
    low = 1
    high = value
    while high > low + 1:
        mid = (low + high) // 2
        squared = mid * mid
        if squared > value:
            high = mid
        elif squared < value:
            low = mid
        else:
            return mid
    # No exact match: `low` is the floor of the square root.
    return low
def get_header_item_group(header, group):
    """
    Collect header items whose keyword starts with ``group`` but is not
    exactly ``group`` (e.g. 'CTYPE' matches 'CTYPE1', 'CTYPE2', ...).
    Returns an empty list when nothing matches.
    Parameters
    ----------
    header: dictionary
        The dictionary that contains the header of the SST cube file.
    group: string
        Keyword prefix you want to find.
    Returns
    -------
    list: list
        Matching (key, value) pairs; empty if there is no match.
    """
    matches = []
    for key, value in header.items():
        if key.startswith(group) and key != group:
            matches.append((key, value))
    return matches
def title(title):
    """ Generate reST title directive.

    Wraps ``title`` in '=' overline/underline rules of matching length,
    with blank lines around the block.

    :Examples:

    >>> title('ab') == '\\n==\\nab\\n==\\n'
    True
    """
    # Rules must be exactly as long as the title text for valid reST.
    title_len = len(title)
    return '\n'.join((
        '',
        '=' * title_len,
        title,
        '=' * title_len,
        '',
    ))
from typing import Sequence
from typing import List
def _chord_options_upper_extensions(semitones: Sequence[int], _rec: int) -> List[int]:
"""
Returns possible names for further extensions
"""
return [] | 83cd37aed1a142f1b3101dd337c9fa70b59a43c9 | 35,965 |
from functools import reduce
def concat_cols(df, cols, delim):
    """
    Join several dataframe columns into one delimited string Series.
    Args:
        df (DataFrame): input DataFrame
        cols (list-like): columns to concatenate
        delim (str): delimiter placed between column values
    Returns:
        Series with concatenated column values.
    """
    pieces = [df[name].astype(str) for name in cols]
    return reduce(lambda left, right: left + delim + right, pieces)
def event_handler(relative_priority):
    """Decorator factory that tags an event handler with a relative priority."""
    def attach(func):
        """Attach the priority attribute and return the handler unchanged."""
        func.relative_priority = relative_priority
        return func
    return attach
import inspect
def import_subclass_from_module(parent_cls, imported_module):
    """
    Collect the classes found in ``imported_module`` that are subclasses
    of ``parent_cls`` (excluding ``parent_cls`` itself, matched by name).
    """
    found = []
    for cls_name, _ in inspect.getmembers(imported_module, inspect.isclass):
        if cls_name == parent_cls.__name__:
            continue
        candidate = getattr(imported_module, cls_name)
        if issubclass(candidate, parent_cls):
            found.append(candidate)
    return found
import random
def generar_tll():
    """Draw a log-normal sample, rejecting values outside [9, 219]."""
    while True:
        sample = random.lognormvariate(3.05653, 1.5459)
        if 9 <= sample <= 219:
            return sample
def update_dictionary(default_dict, overwrite_dict=None, allow_unknown_keys=True):
    """Merge user overrides on top of default values.

    Items from ``overwrite_dict`` win over those in ``default_dict``.

    Parameters
    ----------
    default_dict: `dict`
        Dictionary of default values.
    overwrite_dict: `dict` or None, optional, default None
        User-provided dictionary that overrides the defaults.
    allow_unknown_keys: `bool`, optional, default True
        If false, raises an error if ``overwrite_dict`` contains a key that
        is not in ``default_dict``.

    Raises
    ------
    ValueError
        if ``allow_unknown_keys`` is False and ``overwrite_dict`` has keys
        that are not in ``default_dict``.

    Returns
    -------
    updated_dict : `dict`
        ``overwrite_dict`` with default values filled in from ``default_dict``.
    """
    overrides = {} if overwrite_dict is None else overwrite_dict
    if not allow_unknown_keys:
        unknown = overrides.keys() - default_dict.keys()
        if unknown:
            raise ValueError(f"Unexpected key(s) found: {unknown}. "
                             f"The valid keys are: {default_dict.keys()}")
    return dict(default_dict, **overrides)
def comb(n: int, k: int) -> int:
    """Number of combinations C(n, k) ("n choose k").

    Args:
        n (int): total number of items
        k (int): number of chosen items

    Returns:
        int: the binomial coefficient; 0 when k is out of range
             (the old recursive version wrongly returned 1 for k > n,
             and ran in exponential time)
    """
    # Out-of-range choices select nothing.
    if k < 0 or k > n:
        return 0
    # Exploit symmetry C(n, k) == C(n, n - k) to keep the loop short.
    k = min(k, n - k)
    result = 1
    # Multiplicative formula evaluated so every intermediate stays integral:
    # C(n, k) = prod_{i=1..k} (n - k + i) / i
    for i in range(1, k + 1):
        result = result * (n - k + i) // i
    return result
def identify_phecode_from_ranged_list(sorted_ranged_list, icd10):
    """Map an ICD-10 code to the set of phecodes whose [low, high] range
    contains it (string comparison, dots stripped from the code)."""
    stripped = icd10.replace('.', '')
    return {
        phecode
        for (low, high, phecode) in sorted_ranged_list
        if low <= stripped <= high
    }
def find_in_mapping(sequence, key, value):
    """
    Search a sequence of mappings for one with a matching key-value pair.
    Only searches one level deep.
    Args:
        sequence (list(dict)): Sequence of mappings.
        key: Key to match.
        value: value to match
    Returns:
        The first matching mapping, or ``None`` if no such mapping exists.
    """
    for candidate in sequence:
        try:
            # Return the mapping itself on the FIRST match, as documented.
            # (The old code kept scanning and returned the matched VALUE of
            # the LAST match instead of the mapping.)
            if candidate[key] == value:
                return candidate
        except (KeyError, TypeError):
            # Entry lacks the key or is not subscriptable; skip it.
            pass
    return None
def uncomplement_base(base, state, traits):
    """
    Undo reverse-complement encoding: for a reverse-complement state,
    return the complementary base (3 - base) and the forward state;
    otherwise pass both through unchanged.
    """
    complement_map = traits.reverse_complements
    if state not in complement_map:
        return base, state
    return 3 - base, complement_map[state]
from typing import List
from typing import Dict
import logging
def decode_one_sequence_label_to_span(sequence_label: List[str]) -> List[Dict]:
    """
    Decode a BIO tag sequence into a list of span dicts. For example:
    ["B-Per", "I-Per", "O", "B-Loc"] ->
    [ {"label": "Per", "begin": 0, "end": 2},
      {"label": "Loc", "begin": 3, "end": 4} ]
    :param sequence_label: the BIO tag sequence.
    :return: list of decoded span dicts ("end" is exclusive)
    """
    # Two-state machine: idel_state = outside any span, span_state = inside one.
    idel_state, span_state = 0, 1
    spans = list()
    begin = None
    tag = None
    state = idel_state
    for i, label in enumerate(sequence_label):
        if state == idel_state:
            if label[0] == "B":
                # Span opens here; remember start index and label text.
                begin = i
                tag = label[2:]
                state = span_state
            elif label[0] == "O":
                pass
            elif label[0] == "I":
                # "I" without a preceding "B" violates the BIO scheme;
                # warn (message intentionally kept in Chinese) and skip.
                logging.warning(f"{sequence_label} 有不满足 BIO 格式的问题")
            else:
                raise RuntimeError(f"{label} schema 不符合 BIO")
        elif state == span_state:
            if label[0] == "B":
                # Close the current span and immediately open a new one.
                span = {"label": tag,
                        "begin": begin,
                        "end": i}
                spans.append(span)
                begin = i
                tag = label[2:]
                state = span_state
            elif label[0] == "O":
                # Close the current span and return to the idle state.
                span = {"label": tag,
                        "begin": begin,
                        "end": i}
                spans.append(span)
                begin = None
                tag = None
                state = idel_state
            elif label[0] == "I":
                # Still inside the same span.
                state = span_state
            else:
                raise RuntimeError(f"{label} schema 不符合 BIO")
        else:
            raise RuntimeError(f"{state} 错误,应该是 在 [{idel_state}, {span_state}] ")
    # Flush a span that runs to the end of the sequence.
    if state == span_state:
        span = {"label": tag,
                "begin": begin,
                "end": len(sequence_label)}
        spans.append(span)
    return spans | 5df60c24b5ab1276568d2193bf276ef3ef8e54fd | 35,979 |
from typing import Any
def cast_number(number: Any) -> float:
    """Cast ``number`` to a float."""
    return float(number)
def readInput(inFile):
    """
    Reads in the key values.
    @ In, inFile, Python file object, file containing 'var = value' lines
    @ Out, (x,y,z), tuple(float,float,float), input values
    @ Raises, IOError, if any of x, y, z is missing from the input file
    """
    # Start as None so the missing-value check below can actually fire.
    # (The old code initialized to 0, which made the `is None` check dead
    # and silently returned zeros for missing variables.)
    x, y, z = None, None, None
    for line in inFile:
        var, val = line.strip().split('=')
        name = var.strip()
        if name == 'x':
            x = float(val)
        elif name == 'y':
            y = float(val)
        elif name == 'z':
            z = float(val)
    if x is None or y is None or z is None:
        raise IOError('x,y,z were not all found in input file', inFile)
    return x, y, z
from typing import List
def moving_average_batch_python(nums: List, average_length: int) -> List:
    """
    Compute the simple moving average of ``nums`` using plain Python lists.
    """
    window_count = len(nums) - average_length + 1
    assert window_count > 0  # at least one full window must fit
    averages = []
    for end in range(average_length - 1, len(nums)):
        window = nums[end - average_length + 1:end + 1]
        averages.append(sum(window) / average_length)
    return averages
def curp(body):
    """
    Returns the CURP almost complete (currently a plain pass-through).
    """
    return body
import logging
import time
def wait_for_text(session, text, timeout):
    """
    Poll ``session.page_source`` until one of the '|'-separated candidate
    strings in ``text`` appears, the page reports itself unavailable, or
    ``timeout`` seconds (in 0.2s polls) elapse.

    Returns one of: "ready", "unavailable", "timeout".
    """
    attempts = 0
    max_attempts = timeout / 0.2
    candidates = text.split("|")
    while True:
        for candidate in candidates:
            if candidate is None:
                return "unavailable"
            if session.page_source is None:
                return "unavailable"
            if candidate in session.page_source:
                return "ready"
            if "this page is temporarily unavailable" in session.page_source:
                return "unavailable"
        if attempts > max_attempts:
            logging.error("Timeout waiting for object visibility")
            return "timeout"
        attempts += 1
        time.sleep(0.2)
def get_label(labels, index):
    """Return labels[index] when present, else a '#<index>' placeholder."""
    if index >= len(labels):
        return '#%d' % (index)
    return labels[index]
def connect_string(config):
    """Build a database connect string: username/password@db_url."""
    credentials = config['username'] + "/" + config['password']
    return credentials + "@" + config['db_url']
def katera_leta(leta, none_on):
    """
    Given a list of years and a parallel list of "on"/"none" flags,
    return the years whose corresponding flag is "on".
    """
    izbrana = []
    for i, leto in enumerate(leta):
        if none_on[i] == "on":
            izbrana.append(leto)
    return izbrana
def formatHowpublished(howpublished):
    """Format the 'howpublished' field of an unusual publication:
    the first word is capitalized."""
    capitalized = howpublished.capitalize()
    return capitalized
def apply_at(
        arr,
        func,
        mask=None,
        else_=None,
        in_place=False):
    """
    Apply ``func`` to an array, optionally restricted to a boolean mask.

    Warning! With ``in_place=True`` the input array itself is modified.

    Args:
        arr (np.ndarray): The input array.
        func (callable): Function applied to the (masked) values.
            Must have the signature: func(np.ndarray) -> np.ndarray
        mask (np.ndarray[bool]): Where ``func`` is applied.
            Must have the same shape as `arr`.
        else_ (callable|Any|None): Handling of the non-masked values:
            a callable is applied to them, a plain value is broadcast onto
            them, and None leaves them untouched.
        in_place (bool): If True the input is modified; if False a copy is.

    Returns:
        arr (np.ndarray): The output array.

    Examples:
        >>> arr = np.arange(10) - 5
        >>> print(apply_at(arr, np.abs, arr < 0))
        [5 4 3 2 1 0 1 2 3 4]
        >>> print(apply_at(arr, np.abs, arr < 2, 0))
        [5 4 3 2 1 0 1 0 0 0]
    """
    result = arr if in_place else arr.copy()
    if mask is None:
        # No mask: transform the whole array (else_ is irrelevant here).
        result[...] = func(result)
        return result
    result[mask] = func(result[mask])
    if else_ is None:
        return result
    inverse = ~mask
    if callable(else_):
        result[inverse] = else_(result[inverse])
    else:
        result[inverse] = else_
    return result
import re
def parse_data_refs(tf_content):
    """
    Look for references to other remote states. These references look like this:
    gcp_org_id = "${data.terraform_remote_state.foundation.org_id}"

    Returns the distinct state names, in order of first appearance.
    (The old set-based dedup produced a nondeterministic result order.)
    """
    pattern = re.compile(r'data\.terraform_remote_state\.([_a-z][_\-0-9a-z]*)\.')
    # dict.fromkeys removes duplicates while preserving first-seen order.
    return list(dict.fromkeys(pattern.findall(tf_content)))
import struct
def read_varint(buffer: bytearray) -> int:
    """Parse a variable-length integer from the front of ``buffer``.

    The first byte selects the width: < 0xfd means the byte itself is the
    value; 0xfd/0xfe/0xff prefix a little-endian 2/4/8-byte integer.
    The consumed bytes are deleted from ``buffer`` in place.
    """
    marker, = struct.unpack('<B', buffer[:1])
    if marker < 0xfd:
        value, consumed = marker, 1
    elif marker == 0xfd:
        value, = struct.unpack('<H', buffer[1:3])
        consumed = 3
    elif marker == 0xfe:
        value, = struct.unpack('<I', buffer[1:5])
        consumed = 5
    else:
        value, = struct.unpack('<Q', buffer[1:9])
        consumed = 9
    del buffer[:consumed]
    return value
def custom_formatwarning(msg, *a):
    """Warning formatter that keeps only the message text (plus newline);
    extra positional arguments from the warnings machinery are ignored."""
    text = str(msg)
    return text + '\n'
def calculate_interest_amount_in_years(starting_amount, number_of_years, interest_rate, stipend_rate):
    """
    Project a bank balance over time with yearly interest and a stipend.
    :param starting_amount: The amount of money the bank has to start with.
    :type starting_amount: double
    :param number_of_years: The amount of time to accrue interest.
    :type number_of_years: int
    :param interest_rate: The rate that interest is being added into the bank.
    :type interest_rate: float
    :param stipend_rate: The amount taken out each year for a stipend.
    :type stipend_rate: float
    :return: (final balance, {year: stipend}), or (-1, -1) when the stipend
        would consume all interest (money is never gained).
    """
    if stipend_rate >= interest_rate:
        return -1, -1
    balance = starting_amount
    stipends = {}
    for elapsed in range(1, number_of_years + 1):
        # Interest accrues first, then the stipend is withdrawn.
        balance += balance * interest_rate
        withdrawal = balance * stipend_rate
        balance -= withdrawal
        stipends[elapsed] = round(withdrawal, 2)
    return balance, stipends
import argparse
def get_options(cmd_args=None):
    """Build the CLI parser for SUMOActivityGen defaults and parse ``cmd_args``."""
    arg_parser = argparse.ArgumentParser(
        prog='generateDefaultsActivityGen.py', usage='%(prog)s [options]',
        description='Generate the default values for the SUMOActivityGen.')
    # Required input/output locations.
    arg_parser.add_argument('--conf', type=str, dest='conf_file', required=True,
                            help='Default configuration file.')
    arg_parser.add_argument('--od-amitran', type=str, dest='amitran_file', required=True,
                            help='OD matrix in Amitran format.')
    arg_parser.add_argument('--out', type=str, dest='output', required=True,
                            help='Output file.')
    # Tunables.
    arg_parser.add_argument('--population', type=int, dest='population', default=1000,
                            help='Population: number of entities to generate.')
    return arg_parser.parse_args(cmd_args)
import re
def _extract_zip_code(key):
"""What it says."""
search = re.search('[0-9\-]*$', key)
if search:
key = search.group()
return key | bcb7a75e3305178eae4d47e848836ebac400c6c7 | 36,000 |
def _shadow_level(kernel, threshold, sensitivity):
"""
Calculates the greyscale shadow level for a given kernel,
the diode threshold and the diode sensitivity.
:param kernel: pooling kernel for down scaling
:type kernel: float array (2 dimensional)
:param threshold: thresholds for light intensity (determines the shadowlevel)
:type threshold: list or tuple (len == 3) descending order
:param sensitivity: diode sensitivity
:type sensitivity: float
:return: shadowlevel (integer between 0 and 3)
"""
light_intensity = 0.0
for y in range(len(kernel)):
for x in range(len(kernel[0])):
light_intensity += kernel[y][x]
# Normalizing the shadowlevel with kernel size
light_intensity /= (len(kernel)*len(kernel[0]))
if light_intensity > (threshold[0] + sensitivity):
return 0
elif light_intensity > (threshold[1] + sensitivity):
return 1
elif light_intensity > (threshold[2] + sensitivity):
return 2
else:
return 3 | 6aa09975ab8933d812ca599fae2d6116fc9190e9 | 36,002 |
def combine_ctrlpts_weights(ctrlpts, weights=None):
    """ Multiply control points by their weights to get weighted control points.

    Dimension agnostic: each control point may have any number of
    coordinates; ``weights`` must be a flat sequence. Passing
    ``weights=None`` uses a vector of 1.0s, which converts a B-Spline
    basis to a NURBS basis.

    :param ctrlpts: unweighted control points
    :type ctrlpts: list, tuple
    :param weights: weights vector, or None for all-1.0 weights
    :type weights: list, tuple or None
    :return: weighted control points (each row ends with its weight)
    :rtype: list
    """
    if weights is None:
        weights = [1.0] * len(ctrlpts)
    weighted = []
    for point, weight in zip(ctrlpts, weights):
        row = [float(coord * weight) for coord in point]
        row.append(float(weight))
        weighted.append(row)
    return weighted
def word2Index_per_sentence(sentence_list):
    """Represent every word with a unique integer index (1-based).

    Indices are assigned in order of first appearance, which makes the
    mapping deterministic. (The previous implementation ordered the
    vocabulary with ``set``, so indices changed between runs because of
    string-hash randomization.)

    Args:
        sentence_list: list of sentences,
            e.g. ["the rock is destined to be the 21st century 's new conan", ...]

    Returns:
        (wordIndex_lists, voc_size): the index-encoded sentences,
            e.g. [[12891, 13909, 5241, 8905, 15342, 825], ...],
            and the vocabulary size.
    """
    # First-appearance ordered vocabulary; dict preserves insertion order.
    word_to_index = {}
    for sentence in sentence_list:
        for word in sentence.split(" "):
            if word not in word_to_index:
                # Indices start at 1 (0 is conventionally reserved, e.g. for padding).
                word_to_index[word] = len(word_to_index) + 1
    voc_size = len(word_to_index)
    # Re-encode every sentence through the now-fixed mapping.
    wordIndex_lists = [
        [word_to_index[word] for word in sentence.split(" ")]
        for sentence in sentence_list
    ]
    return wordIndex_lists, voc_size
import torch
def diagonal_mask(dim, num_diagonals):
    """Binary mask with ones on the major diagonal band.

    ``num_diagonals`` is effectively rounded up to the nearest odd number;
    1 yields the identity matrix.

    Parameters
    ----------
    dim : int
        dimension of the mask matrix
    num_diagonals : int
        number of diagonals (must be positive)

    Returns
    -------
    torch tensor
        mask tensor

    Raises
    ------
    Exception
        when ``num_diagonals`` is zero
    """
    if not num_diagonals:
        raise Exception(
            f'Expected positive number of diagonals. Found {num_diagonals=}.'
        )
    mask = torch.eye(dim)
    for offset in range(1, num_diagonals // 2 + 1):
        upper = torch.diag(torch.ones(dim - offset), diagonal=offset)
        lower = torch.diag(torch.ones(dim - offset), diagonal=-offset)
        mask = mask + upper + lower
    return mask
import os
import glob
def init_corenlp_command(corenlp_path, memory, properties):
    """
    Assemble the shell command that launches the Stanford CoreNLP pipeline.

    Verifies the properties file and the required jar files exist before
    building the java invocation.
    """
    jar_patterns = ["stanford-corenlp-?.?.?.jar",
                    "stanford-corenlp-?.?.?-models.jar",
                    "xom.jar",
                    "joda-time.jar",
                    "jollyday.jar",
                    "ejml-?.*.jar"]  # No idea what this is but it might be sentiment
    java_path = "java"
    classname = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
    # Resolve the properties file: as given, or relative to this module.
    # Changing the properties file changes the defaults, but any change in
    # output format will break parse_parser_results().
    current_dir_pr = os.path.dirname(os.path.abspath(__file__)) + "/" + properties
    if os.path.exists(properties):
        props = "-props %s" % (properties)
    elif os.path.exists(current_dir_pr):
        props = "-props %s" % (current_dir_pr)
    else:
        raise Exception("Error! Cannot locate: %s" % properties)
    # Expand and validate the classpath jars.
    jar_globs = [corenlp_path + "/" + pattern for pattern in jar_patterns]
    missing = [pattern for pattern in jar_globs if not glob.glob(pattern)]
    if missing:
        raise Exception("Error! Cannot locate: %s" % ', '.join(missing))
    jars = [glob.glob(pattern)[0] for pattern in jar_globs]
    # Optional JVM memory cap.
    limit = "-Xmx%s" % memory if memory else ""
    return "%s %s -cp %s %s %s" % (java_path, limit, ':'.join(jars), classname, props)
def locate_card_linear(cards, query):
    """Linear scan for ``query`` in the list ``cards``.

    Returns the first matching index, or -1 when absent.
    Time complexity O(N), space complexity O(1).
    """
    for idx, card in enumerate(cards):
        if card == query:
            return idx
    return -1
def string_to_tuple( word ) :
    """Convert string ``word`` into a sorted tuple of its characters,
    suitable for use as a key to a dictionary of anagrams."""
    return tuple(sorted(word))
import os
import torch
def checkpoint(model, epoch, fold, outdir):
    """ Save a model snapshot to disk.
    Parameters
    ----------
    model: Net
        the network model (saved whole via torch.save).
    epoch: int
        the epoch index.
    fold: int
        the fold index.
    outdir: str
        destination directory where 'model_<fold>_epoch_<epoch>.pth'
        will be written.

    Returns
    -------
    str: path of the written checkpoint file.
    """
    filename = "model_{0}_epoch_{1}.pth".format(fold, epoch)
    outfile = os.path.join(outdir, filename)
    torch.save(model, outfile)
    return outfile
import torch
def empty(*args, **kwargs):
    """
    In ``treetensor``, you can use ``ones`` to create a tree of tensors with
    the uninitialized values.
    Example::
        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.empty(2, 3)  # the same as torch.empty(2, 3)
        tensor([[-1.3267e-36,  3.0802e-41,  2.3000e+00],
                [ 2.3000e+00,  2.3000e+00,  2.3000e+00]])
        >>> ttorch.empty({'a': (2, 3), 'b': {'x': (4, )}})
        <Tensor 0x7ff363bb6080>
        ├── a --> tensor([[-3.6515e+14,  4.5900e-41, -1.3253e-36],
        │                 [ 3.0802e-41,  2.3000e+00,  2.3000e+00]])
        └── b --> <Tensor 0x7ff363bb66d8>
            └── x --> tensor([-3.6515e+14,  4.5900e-41, -3.8091e-38,  3.0802e-41])
    """
    # NOTE(review): this body only forwards to torch.empty; the tree-structured
    # behavior shown in the docstring is presumably added by a treetensor
    # dispatch decorator elsewhere -- TODO confirm.
    return torch.empty(*args, **kwargs) | 4a0d423f9dbdc0130edf42e0a9fed496c4e6e071 | 36,013
def findCon(name, conList):
    """
    Return the index of the first entry in ``conList`` equal to ``name``.

    Raises StopIteration when ``name`` is not present (same contract as the
    original generator-based lookup).
    """
    matches = (idx for idx, entry in enumerate(conList) if entry == name)
    return next(matches)
# | 3de9c922459175156900649800465a3546e330a1 | 36,015
def _format_results(name, internal_score, scores, metrics, use_elbo=False):
    """Build a one-line results summary, e.g. ``"dev ppl 12.50, dev bleu 30.0"``.

    ``internal_score`` is reported first (labeled "elbo" or "ppl"), followed
    by one entry per metric found in ``scores``. Falsy inputs are skipped;
    an empty string is returned when nothing is reportable.
    """
    parts = []
    if internal_score:
        label = "elbo" if use_elbo else "ppl"
        parts.append("%s %s %.2f" % (name, label, internal_score))
    if scores:
        parts.extend("%s %s %.1f" % (name, metric, scores[metric])
                     for metric in metrics)
    return ", ".join(parts)
# | c8e88bbd0ee476101837474e2ed5b2f5f41bfde7 | 36,016
def find_chunk_shape(shape, n_max=None):
    """
    Given the shape of an n-dimensional array and the maximum number of
    elements per chunk, return the largest chunk shape to use for iteration.

    Assumes C-contiguous layout, so the trailing (fastest-varying) axes are
    filled first. ``n_max=None`` means no limit: the full shape is returned.
    """
    if n_max is None:
        return tuple(shape)
    chunk = []
    budget = n_max
    # Walk from the last axis towards the first, spending the element budget.
    for dim in reversed(shape):
        if budget > dim:
            chunk.append(dim)
            budget //= dim
        else:
            chunk.append(budget)
            budget = 1
    chunk.reverse()
    return tuple(chunk)
# | 3370be64a4ba13a5d7a3f1e2e85858068202df38 | 36,018
def top_n(distributions, n=5):
    """Return the ``n`` (label, score) pairs with the highest scores.

    ``n=0`` (or any falsy n) returns the full list, still sorted by
    descending score.
    """
    limit = n or len(distributions)
    ranked = sorted(distributions, key=lambda pair: pair[1], reverse=True)
    return ranked[:limit]
# | 18c268830915201e5f4955e71cfbee98fb06f20b | 36,019
def _UsageStringFromFullArgSpec(command, spec):
  """Get a usage string from the FullArgSpec for the given command.
  The strings look like:
  command --arg ARG [--opt OPT] [VAR ...] [--KWARGS ...]
  Args:
    command: The command leading up to the function.
    spec: a FullArgSpec object describing the function.
  Returns:
    The usage string for the function.
  """
  # spec.defaults aligns with the *tail* of spec.args, so everything before
  # that tail is a required argument.
  num_required_args = len(spec.args) - len(spec.defaults)
  help_flags = []
  help_positional = []
  for index, arg in enumerate(spec.args):
    flag = arg.replace('_', '-')
    if index < num_required_args:
      help_flags.append('--{flag} {value}'.format(flag=flag, value=arg.upper()))
      help_positional.append('{value}'.format(value=arg.upper()))
    else:
      # Optional arguments are wrapped in [].
      help_flags.append('[--{flag} {value}]'.format(
          flag=flag, value=arg.upper()))
      help_positional.append('[{value}]'.format(value=arg.upper()))
  if spec.varargs:
    help_flags.append('[{var} ...]'.format(var=spec.varargs.upper()))
    help_positional.append('[{var} ...]'.format(var=spec.varargs.upper()))
  for arg in spec.kwonlyargs:
    # Keyword-only args can only be passed as flags, so both renderings
    # use the flag form; those with a recorded default are optional.
    if arg in spec.kwonlydefaults:
      arg_str = '[--{flag} {value}]'.format(flag=arg, value=arg.upper())
    else:
      arg_str = '--{flag} {value}'.format(flag=arg, value=arg.upper())
    help_flags.append(arg_str)
    help_positional.append(arg_str)
  if spec.varkw:
    help_flags.append('[--{kwarg} ...]'.format(kwarg=spec.varkw.upper()))
    help_positional.append('[--{kwarg} ...]'.format(kwarg=spec.varkw.upper()))
  commands_flags = command + ' '.join(help_flags)
  commands_positional = command + ' '.join(help_positional)
  commands = [commands_positional]
  # Emit the flag form on a second line only when it differs.
  if commands_flags != commands_positional:
    commands.append(commands_flags)
  return '\n'.join(commands) | 060bfb550b6a99ba7c9bf210a2028363a4d2041d | 36,020
def jobs_from_path(path):
    """Return the consecutive (src, dst) pairs along ``path``.

    E.g. ``['a', 'b', 'c']`` -> ``[('a', 'b'), ('b', 'c')]``; a path with
    fewer than two nodes yields no jobs.

    Args:
        path: ordered sequence of nodes.
    Returns:
        List of adjacent-node tuples, in path order.
    """
    # zip stops at the shorter operand, pairing each node with its successor
    # without manual index arithmetic (and without the range(len-1) dance).
    return list(zip(path, path[1:]))
# | 2499482867ceeffdbf78273329f7b966ca7c8f36 | 36,021
import re
def clean_name(text):
    """
    Return a cleaned version of a string: every character that is not
    alphanumeric, a newline or a dot is replaced with an underscore
    (nothing is removed, so the result has the same length as the input).
    :param str text: string to clean.
    :returns: cleaned string.
    :rtype: str
    """
    return re.sub(r'[^a-zA-Z0-9\n\.]', '_', text) | 51c9663d4d6a7f5b3099fc30b0108e99ac2608d0 | 36,022
def dir_path(d):
    """Return the path of directory *d* relative to the project root.

    Walks ``dir_parent`` links upwards, prepending each ``dir_name`` until a
    node with an empty ``dir_name`` (the root) is reached. Returns '' for a
    falsy *d*; a non-empty result ends with a trailing '/'.
    """
    path=''
    if not d:
        return path
    while True:
        if not d.dir_name:
            #path = '/' + path
            return path
        else:
            path = d.dir_name + '/' + path
            d = d.dir_parent | fe6addcb3abae9e3a4d8aa6dbdd7f10bc2da968b | 36,023
def create_test_function(source, output, lang):
    """ Create a test function for a source file.

    Reads the snippet from *source* and the expected result from *output*,
    then returns a closure suitable for attaching to a TestCase class; when
    invoked it calls ``self.do(snippet, expected, lang=lang)``.
    """
    with open(source) as f:
        snippet = f.read()
    with open(output) as f:
        res = f.read()
    # Closure captures snippet/res/lang read above; `slf` is the TestCase.
    def tst_func(slf):
        slf.do(snippet, res, lang=lang)
    return tst_func | 097741351be231c62d25cca3843a4cb9e2586bee | 36,024
def remove_non_datastore_keys(form_data):
    """Remove keys not relevant to creating a datastore object.

    Each form value is a list; only its first element is kept. Bookkeeping
    fields (csrf token, name, type, owner) are dropped entirely.
    """
    excluded = {"csrfmiddlewaretoken", "name", "type", "owner"}
    return {key: values[0]
            for key, values in form_data.items()
            if key not in excluded}
# | 0d0e561cb24eaf7cb3ee77060ed2149c55140812 | 36,025
import re
def _parse_boolean(value):
    """
    Coerce ``value`` to a boolean.

    Accepts (case-insensitively) on/true/yes/1 as True and off/false/no/0
    as False; any other value raises an Exception.
    :param value: a value
    :return: boolean
    """
    text = str(value).lower()
    if text in ("on", "true", "yes", "1"):
        return True
    if text in ("off", "false", "no", "0"):
        return False
    raise Exception("Unable to coerce value '{}' to boolean".format(value))
# | 3090f60eaccbfc65ecb3cf6f956ab3e57d572798 | 36,026
import re
def is_match(sanitizer, pattern, input_str):
    """
    Run ``pattern`` through ``sanitizer`` and test ``input_str`` against the
    resulting regex (anchored at the start, as ``re.match`` is).
    :param sanitizer: callable that turns the pattern into a regex string
    :param pattern: raw pattern to sanitize
    :param input_str: string to test
    :return: True when the sanitized regex matches, else False
    """
    regex = sanitizer(pattern)
    return bool(re.match(regex, input_str))
# | 9c818dd4f52835081750bfd730c949bd068fa385 | 36,029
import six
import os
def _resolve_stack(relative, path):
    """
    Resolve relative paths to the absolute path of the cloned Git repo.

    Recursively walks dicts and lists, joining every string leaf onto *path*
    with os.path.join; non-string, non-container leaves pass through
    unchanged. Always returns a new container (the input is not mutated).
    """
    if isinstance(relative, dict):
        absolute = {}
        for key, value in six.iteritems(relative):
            absolute[key] = _resolve_stack(value, path)
    elif isinstance(relative, list):
        absolute = []
        for item in relative:
            absolute.append(_resolve_stack(item, path))
    elif isinstance(relative, six.string_types):
        absolute = os.path.join(path, relative)
    else:
        absolute = relative
    return absolute | 9cb1ebca8133c3b01949ebc50dfacc40fa7da3e1 | 36,032
import torch
def padding_mask(x_lens):
    """
    Turn per-sample lengths into a 1-0 float mask.
    inputs:
    - x_lens: length of each sample in minibatch. # (batch_size, )
    outputs:
    - mask: 1-0 binary mask. 1 means valid and 0 means invalid.
      # tensor # (batch_size, longest_time_step, 1)
    """
    longest = max(x_lens)
    rows = []
    for length in x_lens:
        row = torch.zeros(longest, 1)
        row[:length] = 1.
        rows.append(row)
    # Stack the per-sample rows into the (batch, longest, 1) mask.
    return torch.stack(rows)
# | ea9e3c06d61f5d9b19795a59dbb2956e8bdb4385 | 36,033
def certificate_files(create_certificate):
    """Returns a dict with the certificate files
    The dict contains the following keys:
        caKeyPath
        caCrtPath
        clientKeyPath
        clientCrtPath
    """
    # Pass-through alias: presumably a pytest fixture renaming
    # `create_certificate` for readability at use sites -- TODO confirm.
    return create_certificate | b30ae8b72b5b933b8f7cb973c1b5022a9e97ed53 | 36,035
def get_seed(seed_id):
    """
    This function provides the random seed.
    :param seed_id: int
        the seed_id is the 'seeds' vector index
    :return: the fixed seed stored at that index
        (raises IndexError when seed_id is out of range)
    """
    # Hard-coded seed table so experiment runs are reproducible.
    # NOTE(review): 1024155645 appears twice (positions 18 and 53) --
    # confirm whether those two runs are meant to share a seed.
    seeds = [1859168769, 1598189534,
             1822174485, 1871883252, 694388766,
             188312339, 773370613, 2125204119,  #0,1,2,3,4,5
             2041095833, 1384311643, 1000004583,
             358485174, 1695858027, 762772169,
             437720306, 939612284, 1998078925,
             981631283, 1024155645, 1147024708,  #19
             558746720, 1349341884, 678622600,
             1319566104, 538474442, 722594620,
             1700738670, 1995749838, 1936856304,
             346983590, 565528207, 513791680,
             1996632795, 2081634991, 1769370802,
             349544396, 1996610406, 1973272912,
             1972392646, 605846893, 934100682,
             222735214, 2101442385, 2009044369,
             1895218768, 701857417, 89865291,
             144443207, 720236707, 822780843,
             898723423, 1644999263, 985046914,
             1859531344, 1024155645, 764283187,
             778794064, 683102175, 1334983095,
             1072664641, 999157082, 1277478588,
             960703545, 186872697, 425414105]
    return seeds[seed_id] | d568962485aa02f88ed0b12a2b6dcfb397773ec2 | 36,036
from typing import OrderedDict
def parse_worksheets(worksheets):
    """Parse worksheet xml objects & return cleaned values.

    Maps each worksheet's ``name`` attribute to the list of datasource
    captions found under its ``table/view/datasources`` element (children
    without a ``caption`` attribute are skipped). Worksheet order is kept.
    """
    results = OrderedDict()
    for worksheet in worksheets:
        name = worksheet.attrib['name']
        datasource = worksheet.find('table/view/datasources')
        datasource = [i.attrib['caption'] for i in datasource if 'caption' in i.attrib]
        results[name] = datasource
    return results | 819ccc65908890aef66d0367749cb9e179dd0508 | 36,037
import json
def format_json(data):
    """
    Serialize *data* as human-readable JSON: sorted keys, two-space indent.
    """
    return json.dumps(
        data,
        sort_keys=True,
        indent=2,
        separators=(',', ': '),
    )
# | a0ea13af5e95d5cd9879de0cdf5055d6fa1dd4c8 | 36,038
def format_taxa_to_js(otu_coords, lineages, prevalence, min_taxon_radius=0.5,
                      max_taxon_radius=5, radius=1.0):
    """Write a string representing the taxa in a PCoA plot as javascript
    Inputs:
    otu_coords: numpy array where the taxa is positioned
    lineages: label for each of these lineages
    prevalence: score of prevalence for each of the taxa that is drawn
    *These parameters should work more as constants and once we find out that
    there's a value that is too big to be presented, the proper checks should
    be put into place. Currently we haven't found such cases in any study*
    min_taxon_radius: minimum value for the radius of the spheres on the plot
    max_taxon_radious: maximum value for the radius of the spheres on the plot
    radius: default value size
    Outputs:
    js_biplots_string: javascript string where the taxa information is written
    to create the spheres representing each of these, will return only the
    variable declaration if the inputs are empty.
    """
    js_biplots_string = []
    js_biplots_string.append('\nvar g_taxaPositions = new Array();\n')
    # if we have prevalence scores, calculate the taxa radii values
    # (linear interpolation between min and max radius, scaled by `radius`)
    if len(prevalence):
        taxa_radii = radius*(min_taxon_radius+(max_taxon_radius-
            min_taxon_radius)*prevalence)
    else:
        taxa_radii = []
    index = 0
    # write the data in the form of a dictionary
    # NOTE: with empty prevalence, taxa_radii is empty and zip() yields
    # nothing, so only the g_taxaPositions declaration is emitted.
    for taxa_label, taxa_coord, t_radius in zip(lineages,otu_coords,taxa_radii):
        js_biplots_string.append(("g_taxaPositions['%d'] = { 'lineage': '%s', "
            "'x': %f, 'y': %f, 'z': %f, 'radius': %f};\n") % (index,
            taxa_label, taxa_coord[0], taxa_coord[1], taxa_coord[2], t_radius))
        index += 1
    js_biplots_string.append('\n')
    # join the array of strings as a single string
    return ''.join(js_biplots_string) | 5184e163f30c19b6d41c9fa3765f4d054b238d3b | 36,039
import argparse
def init_cmdparser_objects(root_parser, parser, objects):
    """Init all derived CmdParser instances with specific data.
    Args:
        root_parser: The root parser
        parser: The ArgParser node (e.g. 'run' or 'convert')
        objects: All CmdParser instances of this file
    """
    def silent_help():
        pass
    def error_message(p):
        # Build a per-subparser error handler that prints that subparser's
        # help, silences the root parser's help, and exits non-zero.
        def error(x):
            p.print_help()
            root_parser.print_help = silent_help
            exit(-1)
        return error
    def init(x):
        x.options = parser.add_parser(
            x.name,
            add_help=False,
            help=x.description,
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        x.options.error = error_message(x.options)
        x.options.set_defaults(func=x.run_command)
        x.build()
    for x in objects:
        try:
            init(x)
        except BaseException:
            # NOTE(review): swallows *all* failures (even SystemExit) so one
            # broken subcommand can't block the rest -- confirm intentional.
            pass
import math
def part2(buses: list[int]) -> int:
    """
    Chinese Remainder Theorem solution
    (see https://rosettacode.org/wiki/Chinese_remainder_theorem):
    find the earliest timestamp t such that the bus at offset i departs at
    t + i, i.e. t ≡ -i (mod bus) for every running bus; 0 entries ('x' in
    the puzzle input) are ignored.
    """
    modulus = math.prod(b for b in buses if b)
    total = 0
    for offset, bus in enumerate(buses):
        if not bus:
            continue
        residue = -offset % bus
        partial = modulus // bus
        # Modular inverse via three-argument pow (Python 3.8+).
        inverse = pow(partial, -1, bus)
        total += residue * partial * inverse
    return total % modulus
# | fb92980b9cad8e7c9bc9c3eb50c0b4d757b417e4 | 36,042
def split_attribute(attribute_string):
    """
    Split an attribute path on '.', treating anything between matching
    square brackets as opaque: sub-attribute text inside '[]' stays
    attached to its parent element, without processing, even if it
    contains '.'.
    :param attribute_string: the attribute string to split
    :return: list containing the elements of the top level attribute string
    """
    parts = []
    depth = 0   # current '[' nesting depth; 0 means top level
    start = 0   # index just past the previous top-level '.'
    for index, char in enumerate(attribute_string):
        if char == '[':
            depth += 1
        elif char == ']':
            depth -= 1
        elif char == '.' and depth == 0:
            parts.append(attribute_string[start:index])
            start = index + 1
    # trailing element (or the whole string when no top-level '.' exists)
    parts.append(attribute_string[start:])
    return parts
# | e1195486c9fdc0eece8f61324dc893e3fd78cfed | 36,044
def find_request_end_token(data):
    """Find token that indicates that request is over.

    A command list (first line "command_list_ok_begin") is complete once
    its last line is "command_list_end"; any other request is complete as
    soon as its first line is newline-terminated. Empty data is never
    complete.
    """
    lines = data.splitlines(True)
    if not lines:
        return False
    if lines[0].strip() == 'command_list_ok_begin':
        return lines[-1].strip() == 'command_list_end'
    return lines[0].endswith('\n')
# | 5b09d57fdb020940f6d16183c0ae3d84dcb2ce2c | 36,046
def set_invalid(field, render_kw=None):
    """
    Returns *render_kw* with `invalid` added to *class* on validation errors.
    Set (or appends) 'invalid' to the fields CSS class(es), if the *field* got
    any errors. 'invalid' is also set by browsers if they detect errors on a
    field.

    NOTE: mutates and returns the same render_kw dict (a new one is created
    only when None is passed).
    """
    if render_kw is None:
        render_kw = {}
    if field.errors:
        # 'class_' is only popped when 'class' is absent; when 'class' exists
        # any 'class_' key is left untouched in render_kw.
        classes = render_kw.get('class') or render_kw.pop('class_', '')
        if classes:
            render_kw['class'] = 'invalid {}'.format(classes)
        else:
            render_kw['class'] = 'invalid'
    return render_kw | 5e32005e1a5405b3ba86293c986e6368c177ad40 | 36,047
def update_previous_label(response_dict, worker_list):
    """Return a new dict mapping each worker to their previous label.

    Every worker defaults to '?'; when the response text is not the quote
    placeholder ('"'), labels present in response_dict override it.
    """
    previous = dict.fromkeys(worker_list, '?')
    if response_dict['text'] != '"':
        for key, label in response_dict.items():
            if key in previous:
                previous[key] = label
    return previous
# | 540e8b7b8973185e92bbd1c55d95d4c841be255f | 36,048
def split_by_resources(tests):
    """Split a list of tests by the resources that the tests use.
    :return: a dictionary mapping sets of resources to lists of tests
    using that combination of resources.  The dictionary always
    contains an entry for "no resources".
    """
    # Pre-seed the empty-frozenset bucket so "no resources" always exists,
    # even when every test declares resources.
    no_resources = frozenset()
    resource_set_tests = {no_resources: []}
    for test in tests:
        resources = getattr(test, "resources", ())
        # Each entry in `resources` is a (name, resource) pair; a resource
        # may itself depend on further resources via neededResources().
        all_resources = list(resource.neededResources()
                             for _, resource in resources)
        resource_set = set()
        for resource_list in all_resources:
            resource_set.update(resource_list)
        resource_set_tests.setdefault(frozenset(resource_set), []).append(test)
    return resource_set_tests | 82208bc8025d03228a0951d0df3994ff392fd19f | 36,049
def write_json(df, **kwargs):
    """
    Serialize a DataFrame to a JSON string.

    :param df: pandas DataFrame to serialize
    :param kwargs: forwarded to ``DataFrame.to_json``; ``orient`` may be
        one of index, split, records, columns, values (default: 'index')
    :return: JSON string (non-ASCII characters are preserved)
    """
    # pop() (not get()) so 'orient' is removed from kwargs; the original
    # get()+**kwargs passed 'orient' twice to to_json(), raising
    # "got multiple values for keyword argument 'orient'" whenever the
    # caller supplied it.
    orient = kwargs.pop('orient', 'index')
    return df.to_json(orient=orient, force_ascii=False, **kwargs)
# | c69ee86c158c3c93c6c988275047db5c6eb5234c | 36,050
def BFS(authors, papers, nodes, tabuset):
    """
    Return a list of connected components.
    @param authors: a list of strings, containing author names.
    @param papers: is a list of list of strings, where each inner list corresponds to a list of author names.
    @param nodes: maintains the edge set, where the key is either string (author name) or int (paper id).
    @param: tabuset: Banned reviewers or papers.

    Each component is a two-element list: [author names, paper ids].
    Nodes in `tabuset` are skipped both as BFS roots and as neighbours.
    """
    components = []
    is_visited = set()
    for a in nodes:
        if a in tabuset: continue
        if a in is_visited: continue
        # Manual queue: bfs_queue with header/tail cursors instead of a deque.
        bfs_queue, c = [], [[], []]
        header, tail = 0, 0
        is_visited.add(a)
        bfs_queue.append(a)
        tail += 1
        # Compute for one connected components.
        while tail > header:
            r = bfs_queue[header]
            header += 1
            # c[0] collects authors, c[1] collects papers.
            if r in authors:
                c[0].append(r)
            else:
                c[1].append(r)
            for p in nodes[r]:
                if p in tabuset: continue
                if p in is_visited: continue
                bfs_queue.append(p)
                is_visited.add(p)
                tail += 1
        components.append(c)
    return components | b08ad35b3447c39df7ae4895e93b7639858819f6 | 36,051
from datetime import datetime
def convert_unix_to_date(d):
    """ Convert milliseconds since epoch to 'MM/DD/YYYY HH:MI:SS' (UTC).

    Falsy input (None or 0) yields 'N/A'.
    """
    if not d:
        return 'N/A'
    return datetime.utcfromtimestamp(d / 1000).strftime('%m/%d/%Y %H:%M:%S')
# | e587839c4c8cc6464704d7b2ce1b4ae0bf9db302 | 36,052
def add_key_arguments(parser):
    """
    Adds the arguments required to create a new key to the parser given as a parameter
    Args:
        - parser: Parser where to add the key parameters
            + Type: argparse.ArgumentParser
    Return:
        - group: Argument group containing all the key arguments
            + Type: argparse._ArgumentGroup
    """
    group = parser.add_argument_group("Key management")
    # Defaults produce a 4096-bit RSA key when neither flag is given.
    group.add_argument(
        "-ks", "--key_size",
        help='Length of the new key',
        type=int,
        default=4096
    )
    group.add_argument(
        "-kt", "--key_type",
        help="Method used for generating the new key",
        choices=["dsa", "rsa"],
        default="rsa"
    )
    return group | 329a90dbf639283e62765690cfe724038ce61bbd | 36,053
def weight_normalization(weight1, weight2):
    """
    Normalize the weights of two modalities so their sum is 1 for each
    pixel of the image.
    :param weight1: the weight of modality 1, a grayscale image
    :param weight2: the weight of modality 2, a grayscale image
    :return: (weight1_normalized, weight2_normalized), the normalized
        versions of weight1 and weight2
    """
    # Hoist the common denominator so it is computed once.
    total = weight1 + weight2
    return weight1 / total, weight2 / total
# | 0e04960ba7baec3e0e657117ebb5c3da8c9bd845 | 36,054
import re
def clean_message(raw_message_dict):
    """
    Strip markup from raw_message_dict['message'] in place and return the
    (same, mutated) dict.

    First pass removes tag-like spans, space characters matched by the
    pattern, double quotes and text ending in ';}' (presumably CSS-rule
    leftovers -- TODO confirm against real payloads); second pass collapses
    runs of 5+ CRLF pairs into a single CRLF.
    """
    reg_mess = r'<[\s\S|.]*?>| |"|.*?;}'
    clean_mess = re.sub(reg_mess, '', raw_message_dict['message'])
    reg_line_break = r'(\r\n){5,}'
    clean_mess = re.sub(reg_line_break, '\r\n', clean_mess)
    raw_message_dict.update({'message': clean_mess})
    return raw_message_dict | 33f2ed796d2bb9bd15019e7f30a5620f15994b32 | 36,056
import uuid
def CreateShoppingCampaign(client, budget_id, merchant_id):
    """Creates a shopping campaign with the given budget and merchant IDs.
    Args:
        client: an AdWordsClient instance.
        budget_id: the str ID of the budget to be associated with the shopping
        campaign.
        merchant_id: the str ID of the merchant account to be associated with the
        shopping campaign.
    Returns:
        The created Shopping Campaign as a sudsobject.
    """
    campaign_service = client.GetService('CampaignService', 'v201809')
    # uuid4 suffix keeps repeated runs from colliding on the campaign name.
    campaign = {
        'name': 'Shopping campaign #%s' % uuid.uuid4(),
        # The advertisingChannelType is what makes this a shopping campaign
        'advertisingChannelType': 'SHOPPING',
        # Recommendation: Set the campaign to PAUSED when creating it to stop the
        # ads from immediately serving. Set to ENABLED once you've added targeting
        # and the ads are ready to serve.
        'status': 'PAUSED',
        # Set portfolio budget (required)
        'budget': {
            'budgetId': budget_id
        },
        'biddingStrategyConfiguration': {
            'biddingStrategyType': 'MANUAL_CPC'
        },
        'settings': [
            # All shopping campaigns need a ShoppingSetting
            {
                'xsi_type': 'ShoppingSetting',
                'salesCountry': 'US',
                'campaignPriority': '0',
                'merchantId': merchant_id,
                # Set to "True" to enable Local Inventory Ads in your campaign.
                'enableLocal': True
            }
        ]
    }
    campaign_operations = [{
        'operator': 'ADD',
        'operand': campaign
    }]
    # mutate() returns the created campaign(s) under 'value'.
    campaign = campaign_service.mutate(campaign_operations)['value'][0]
    print ('Campaign with name "%s" and ID "%s" was added.'
           % (campaign['name'], campaign['id']))
    return campaign | 208b2ad37d2fda5ee5b85827029597ed8bc6801b | 36,057
def create_temp_file(query, output_file="/tmp/query.png"):
    """Write the bytes read from *query* to *output_file* and return its path.

    :param query: object with a ``read()`` method returning bytes
        (e.g. an HTTP response or an open binary file)
    :param output_file: destination path; overwritten if it already exists
    """
    data = query.read()
    with open(output_file, "wb") as file:
        file.write(data)
    return output_file | b3c83d86bf834b02d6520a55dd1c1b4b9d5e142d | 36,058
def resizeTuple(t, newSize, newValues=None):
    """ Resize a tuple.
    @param t::() the tuple to resize
    @param newSize::int = the new size of the tuple
    @param newValues = what values to put in any new required elements
    @return a tuple of exactly newSize elements: truncated when t is too
        long, padded with newValues when too short, t itself when exact
    """
    current = len(t)
    if current == newSize:
        return t
    if current > newSize:
        # Tuple slicing already yields a tuple; no list round-trip needed.
        return t[:newSize]
    return t + (newValues,) * (newSize - current)
# | a4e125e2f761d2ce4ef4971b2c58b128abfbec71 | 36,059
import numpy as np
def kepler(path):
    """ this is temporary solution. It will accept path to the file instead of text from textField. Later, during
    changes in load system i'm going to rewrte this function """
    # Columns: first = truncated Julian-date fragment, second-to-last = value.
    data = np.loadtxt(path)
    first = data[:, 0]
    second = data[:, -2]
    out = ""
    # NOTE(review): both slices come from the same array, so the lengths can
    # never differ and this branch is unreachable.
    if len(first) != len(second):
        return "OOPS, ERROR"
    for i in range(len(first)):
        # "24" prefix presumably restores the full Julian-date prefix
        # (24xxxxx) -- TODO confirm against the data format.
        out += "24" + str(first[i]) + "\t" + str(second[i]) + "\n"
    return out | 3f201256225acfdc3a4fe9e4f14c9fc51c0c271c | 36,060
def camels():
    """Ask the user "How many camels fit in a tent?". Returns an
    integer."""
    # input() returns a string; int() raises ValueError on non-numeric input.
    no_of_camels = input("How many camels fit in a tent?")
    return int(no_of_camels) | f7fe34a8b1dcbd4f6c920c776373b11b1bbf1f92 | 36,061
from typing import Dict
from typing import Tuple
def get_conversion_endpoints(options: Dict) -> Tuple[str, str, Dict]:
    """Return the conversion direction endpoints with the updated options:
    (Text to DNA) or (DNA to Text).

    Records the source side in options["convert_from"] (the dict is
    mutated in place and also returned).
    """
    target = options["convert_to"]
    if target == "TEXT":
        source, destination = "DNA", "TEXT"
    else:
        # "DNA" and any unrecognized target fall back to TEXT -> DNA,
        # matching the original default assignment.
        source, destination = "TEXT", "DNA"
    options["convert_from"] = source
    return source, destination, options
# | 02fd713646efb72959d3b42dd1f2c3f9de343da8 | 36,063
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.