content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import os
def expand_path(path: str) -> str:
    """
    Expand user (~) and environment-variable references in *path* and
    convert the result to an absolute path.
    Args:
        path: Path to expand
    Returns:
        Expanded absolute path
    """
    home_expanded = os.path.expanduser(path)
    vars_expanded = os.path.expandvars(home_expanded)
    return os.path.abspath(vars_expanded)
def get_list_of_multiple_or_one_or_empty_from_dict(input, name, vtype=None):
    """
    Extracts objects by 'name' from the 'input' and returns them as a list.
    Tries the plural name first, then the singular (plural minus one 's').
    If vtype is specified, converts each element in the result to this type.
    :param input: - dict - Input dictionary. Event of Lambda for example.
    :param name: - str - Name of attribute (in plural form).
    :param vtype: - type - Type to be converted to. Must be callable. Tested types: str, int, float
    :return: - list - List of vtypes, or list of whatever was in input, or empty list.
    :raises ValueError: If 'input' is not a dict or 'name' is not a str.
    """
    if not isinstance(input, dict):
        raise ValueError("'input' attribute must be a dict. Received: {}".format(type(input)))
    if not isinstance(name, str):
        raise ValueError("'name' attribute must be a str. Received: {}".format(type(name)))
    # Strip exactly one trailing 's' for the singular form. The previous
    # name.rstrip('s') stripped *all* trailing 's' chars ('bypass' -> 'bypa').
    singular = name[:-1] if name.endswith('s') else name
    if name in input:
        results = input[name]
    elif singular in input:
        results = input[singular]
    else:
        return []
    # Keep legacy behaviour for an explicit None, but preserve falsy values
    # like 0 or '' (the old `.get(a) or .get(b)` / `if not results` dropped them).
    if results is None:
        return []
    # Wrap to list if not there yet.
    if not isinstance(results, (list, tuple, set)):
        results = [results]
    else:
        results = list(results)
    # Apply vtype conversion if required.
    return [x if vtype is None else vtype(x) for x in results]
def get_regular_price(game_page):
    """
    Locate and return the regular price string of a game, or None.

    The price container's element id carries an unstable numeric suffix
    (it changes throughout the day), so ids 0..49 are probed in order.
    """
    prefix = "ProductPrice_productPrice_PriceContainer-"
    price = None
    for suffix in range(50):
        element = game_page.find(id=f"{prefix}{suffix}")
        try:
            text = element.text
        except AttributeError:
            # No element with this id; try the next suffix.
            continue
        # Found the price text: strip the currency prefix and IAP note.
        price = text.replace("CAD $", "").replace(
            "+Offers in-app purchases", ""
        )
        break
    return price
def is_aligned3(a, b, c, tol=1e-2):
    """Test whether points a, b, c are (approximately) collinear.

    Parameters
    ----------
    a, b, c : indexable 2D points (np.array or sequence)
    tol : float
        Tolerance on the z-component of the cross product (default 1e-2).
    """
    # z-component of (b - a) x (c - a); zero iff the points are collinear.
    cross_z = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return abs(cross_z) < tol
from typing import Dict
def profiling_dump(durations: Dict[str, float]) -> str:
    """
    Render the time elapsed during each stage or step of the fud execution
    as a two-column table (step name, seconds rounded to 3 places).
    """
    def padded(label: str) -> str:
        # Pad the label to column 32, keeping at least one trailing space.
        return label + " " * max(32 - len(label), 1)
    header = f"{padded('step')}elapsed time (s)\n"
    rows = "\n".join(
        f"{padded(stage)}{round(secs, 3)}" for stage, secs in durations.items()
    )
    return header + rows
def lower_keys(obj: dict) -> dict:
    """
    Return a new dict whose first-level keys are lowercased.
    :param obj: dictionary to process
    :return: new dictionary with lowercase top-level keys
    """
    return {key.lower(): value for key, value in obj.items()}
def CSVWriter (iterable, outLoc, header="", ):
    """
    Writes an iterable (nested list) to a CSV file.
    :param iterable: list of lists; each inner list becomes one row
    :param outLoc: file location. Where to place it.
    :param header: header of the CSV file (written first when non-empty)
    :return: 1 on success, 0 when there was nothing to write
    """
    if not iterable:
        print ("nothing to write")
        return 0
    # Use a context manager: the original leaked the file handle (no close()).
    with open(outLoc, 'w') as out:
        if header:
            out.write(header+'\n')
        # Only works if iterable is a nested list; each item gets a trailing comma.
        for member in iterable:
            for item in member:
                out.write(str(item)+',')
            out.write('\n')
    print("write to "+outLoc+" successful.")
    return 1
import torch
def create_positive_map_for_od_labels(tokenized, label_to_positions):
    """construct a map such that positive_map[i] = j, where j is the object detection label of the token i

    Args:
        tokenized: tokenizer output exposing char_to_token() (HF-style) — TODO confirm.
        label_to_positions: dict mapping OD label -> (char_begin, char_end) span.
    Returns:
        torch.FloatTensor of shape (256,); entry i holds the label of token i,
        or -1 where no label applies.
    """
    """
    {3: [1: 5)}
    256 : -1 3 3 3 3 -1 .. 8 8 ..
    the woman in the garden
    -1 -1 -1 -1 -1
    """
    positive_map = torch.ones(256, dtype=torch.float) * -1  # -1 means no match
    keys = list(label_to_positions.keys())
    for j, key in enumerate(keys):
        tok_list = label_to_positions[key]
        # one label only mapps to one location
        beg, end = tok_list
        beg_pos = tokenized.char_to_token(beg)
        end_pos = tokenized.char_to_token(end - 1)
        # char_to_token returns None for characters with no token (e.g.
        # whitespace); probe neighbouring characters as a fallback.
        if beg_pos is None:
            try:
                beg_pos = tokenized.char_to_token(beg + 1)
                if beg_pos is None:
                    beg_pos = tokenized.char_to_token(beg + 2)
            except:
                beg_pos = None
        if end_pos is None:
            try:
                end_pos = tokenized.char_to_token(end - 2)
                if end_pos is None:
                    end_pos = tokenized.char_to_token(end - 3)
            except:
                end_pos = None
        # Skip labels whose character span could not be mapped to tokens.
        if beg_pos is None or end_pos is None:
            continue
        assert beg_pos is not None and end_pos is not None
        positive_map[beg_pos: end_pos + 1].fill_(key)
    return positive_map
def pyThreads():
    """
    Python 'threads' are something of a misnomer -- they do not actually use multi-threaded
    code ... they actually refer to async. i/o features of the Python run-time.
    In the traditional definition of threads, a process is a single "main" thread, executing in
    a single CPU, using dedicated stack and heap space memory. A process main thread, however,
    can create multiple child threads that share the parent / main thread's memory and can in
    theory utilize other available CPUs and run in parallel. Some algorithms can greatly benefit
    from multi-threading, if there are lots of available CPUs and if access to the shared memory
    does not involve too much mutual exclusion locking, etc. The advantages and disadvanteges
    tend to derive from the extent that any shared resource(s) can be efficiently accessed, etc.
    """
    # NOTE: the docstring above doubles as this function's return value,
    # so it must not be edited without intending to change the output.
    return pyThreads.__doc__
import os
def is_safe_path(basedir, path, follow_symlinks=True):
    """Return True if *path* resolves to a location inside *basedir*.

    Uses os.path.commonpath instead of a raw startswith prefix test, which
    wrongly accepted sibling paths such as '/base-evil' for basedir '/base'.
    See https://security.openstack.org/guidelines/dg_using-file-paths.html
    """
    # realpath also resolves symbolic links; abspath does not.
    resolve = os.path.realpath if follow_symlinks else os.path.abspath
    resolved_base = resolve(basedir)
    resolved_path = resolve(path)
    return os.path.commonpath([resolved_path, resolved_base]) == resolved_base
def get_waterfalldata(space, items, length, reverseitems=None):
    """Create a DataFrame for drawing a waterfall chart.

    :param space: model space whose cells are framed
    :param items: keys of the cells to include
    :param length: number of periods (frame covers range(length))
    :param reverseitems: optional list of outflow columns to negate
        (default None; previously a mutable [] default, which is a bug-prone
        pattern even though it was never mutated here)
    """
    data = type(space.cells)(
        space.cells, space.cells.impl, keys=items).to_frame(range(length))
    for outflow in (reverseitems or []):
        # Outflows are shown as negative bars in the waterfall.
        data[outflow] = -1 * data[outflow]
    return data
def arrangeCoins_timelimit( n):
    """
    Number of complete staircase rows that n coins can fill
    (row k needs k coins).
    :type n: int
    :rtype: int
    """
    # Keep a running total instead of recomputing sum(a) on every iteration,
    # which made the original loop quadratic (hence the "timelimit" name).
    total, i = 0, 1
    while total <= n:
        total += i
        i += 1
    return i - 2
def has_text(node):
    """
    Return True when the node's direct text content is non-empty after
    stripping whitespace; False for None or whitespace-only text.
    """
    text = node.text
    return bool(text and text.strip())
def fibonacci(n):
    """
    Return the first n Fibonacci numbers (starting 0, 1) as a list.

    Parameters
    ----------
    n : int
        Length of the desired prefix of the Fibonacci sequence.

    Returns
    -------
    list[int]
    """
    seq = [0, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    # Slice handles n < 2 (including n == 0 -> []).
    return seq[:n]
import torch
def inject_noise(X, epsilon=0.1, bound=(0, 1)):
    """Return the perturbation delta from adding clamped Gaussian noise to X.

    Computes (X + epsilon * noise).clamp(*bound) - X, i.e. the additive
    perturbation itself rather than the perturbed input.
    """
    noise = torch.randn_like(X) * epsilon
    perturbed = (X + noise).clamp(*bound)
    return perturbed - X
def setup_path(sub, root, exp_part):
    """
    Create (if needed) and return the task directory for one subject.
    The structure loosely follows BIDS: <root>/data/sub-XX/<exp_part>

    Parameters
    ----------
    sub : int
        Subject number (zero-padded to two digits in the folder name).
    root : pathlib.Path
        Root directory of the experiment.
    exp_part : str
        Name of the experiment part / task.

    Returns
    -------
    pathlib.Path
        The task directory.
    """
    data_dir = root / "data"
    if not data_dir.exists():
        print("Making the data directory...")
        data_dir.mkdir()
    sub_dir = data_dir / f"sub-{sub:02d}"
    if not sub_dir.exists():
        print("Making the subject directory...")
        sub_dir.mkdir()
    task_dir = sub_dir / exp_part
    if not task_dir.exists():
        print("Making the task directory...")
        task_dir.mkdir()
    return task_dir
import math
def get_number_elements_per_split(total_elements, num_splits):
    # type: (int, int) -> int
    """Return the (ceiling) number of elements placed in each split."""
    per_split = total_elements / float(num_splits)
    return math.ceil(per_split)
def percent(amount, total):
    """
    Percentage of *amount* over *total*, clamped to the range [0, 100]
    (the original code skipped the clamp, contradicting its own doctests).

    >>> percent(512, 4096)
    12.5
    >>> percent(811, 700)
    100
    >>> percent(-7, 91)
    0
    """
    return min(100, max(0, (amount / total) * 100))
import os
def _put_in_artifact_path(environment, filename):
    """Determines the artifact path for a file with the specified name
    @param environment Environment providing ARTIFACT_DIRECTORY and
        get_build_directory_name()
    @param filename Filename for which the artifact path will be returned
    @returns The artifact path for a file with the specified name"""
    build_dir = environment.get_build_directory_name()
    artifact_directory = os.path.join(environment['ARTIFACT_DIRECTORY'], build_dir)
    return os.path.join(artifact_directory, filename)
def fit_in_range(value, min_value, max_value):
    """Clamp *value* into the range min_value..max_value."""
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
def permute(self, *dims):
    """
    Reorder dimensions: forwards the collected *dims* tuple to transpose().
    """
    axes = dims
    return self.transpose(axes)
def combmir(p, boxl):
    """Make lists for calculating naked and hidden combination.

    Args:
        p: per-cell candidate table; p[i][j] == 1 means digit j+1 is a
           candidate for cell i — presumably Sudoku solver state; verify.
        boxl: list of units, each a list of cell indices.
    Returns:
        (comb, mirror): comb[u] holds, per cell of unit u, the tuple of its
        candidate digits; mirror[u] = [digits, positions] maps each digit
        present in the unit to the tuple of in-unit cell positions holding it.
    """
    comb = []
    mirror = []
    for bo in boxl:
        co = []
        mi = []
        for i in bo:
            # c: tuple of candidate digits (1-9) for cell i.
            c = ()
            for j in range(9):
                if p[i][j] == 1:
                    c = c + (j + 1,)
            co.append(c)
        comb.append(co)
        # n: sorted list of every digit appearing somewhere in this unit.
        n = []
        for i in co:
            for j in i:
                if j not in n:
                    n.append(j)
        n = sorted(n)
        for i in n:
            # m: in-unit positions whose candidate tuples contain digit i.
            m = ()
            for j in range(len(co)):
                if i in co[j]:
                    m = m + (j,)
            mi.append(m)
        mirror.append([n, mi])
    return (comb, mirror)
def transpose_list(list_of_dicts):
    """Transpose a list of dicts into a dict of lists.
    :param list_of_dicts: to transpose, as in the output from a parse call
    :return: dict mapping each key to the list of its values, in order
    """
    transposed = {}
    for record in list_of_dicts:
        for key, value in record.items():
            transposed.setdefault(key, []).append(value)
    return transposed
def crop_shifted_images(images, shift):
    """
    Crop a batch of images along axis 1 (x) and axis 2 (y) according to
    (x_shift, y_shift): a negative shift trims the end of the axis, a
    positive shift trims the start.
    """
    dx, dy = shift
    if dx < 0:
        images = images[:, :dx]
    elif dx > 0:
        images = images[:, dx:]
    if dy < 0:
        images = images[:, :, :dy]
    elif dy > 0:
        images = images[:, :, dy:]
    return images
def get_cond_latents_at_level(cond_latents, level, hparams):
    """Return the conditional latent(s) at 'level', or None.

    conv_net / conv3d_net encoders keep one latent per condition, so a list
    is gathered; pointwise / conv_lstm encoders index directly.
    """
    if not cond_latents:
        return None
    encoder = hparams.latent_dist_encoder
    if encoder in ("conv_net", "conv3d_net"):
        return [cond_latent[level] for cond_latent in cond_latents]
    if encoder in ("pointwise", "conv_lstm"):
        return cond_latents[level]
    return None
def prepare_fetch_incidents_query(fetch_timestamp: str,
                                  fetch_severity: list,
                                  fetch_table: str,
                                  fetch_subtype: list,
                                  fetch_fields: str,
                                  fetch_limit: str) -> str:
    """
    Prepares the SQL query for the fetch-incidents command.
    Args:
        fetch_limit: How many incidents should be queried.
        fetch_timestamp: The date from which threat logs should be queried.
        fetch_severity: Severities associated with the incident ('all' disables the filter).
        fetch_subtype: Log subtypes ('all' disables the filter).
        fetch_table: Table to fetch from.
        fetch_fields: Fields to fetch from the table.
    Returns:
        SQL query matching the arguments.
    """
    clauses = [
        f'SELECT {fetch_fields} FROM `{fetch_table}` ',  # guardrails-disable-line
        f'WHERE time_generated Between TIMESTAMP("{fetch_timestamp}") '
        f'AND CURRENT_TIMESTAMP',
    ]
    if fetch_subtype and 'all' not in fetch_subtype:
        ored = " OR ".join(f'sub_type.value = "{st}"' for st in fetch_subtype)
        clauses.append(f' AND ({ored})')
    if fetch_severity and 'all' not in fetch_severity:
        ored = " OR ".join(f'vendor_severity.value = "{sev}"' for sev in fetch_severity)
        clauses.append(f' AND ({ored})')
    clauses.append(f' ORDER BY time_generated ASC LIMIT {fetch_limit}')
    return "".join(clauses)
def fancy_retrieve(broker, num_messages=1):
    """
    Convenience wrapper: connect, subscribe, pop messages, disconnect.
    :param broker:
        The Broker instance
    :param num_messages:
        The number of messages to retrieve from the queue
    :rtype:
        list
    :return:
        The list of messages kept in the internal message buffer
    """
    broker.connect()
    broker.subscribe()
    batch = broker.pop_messages(num_messages)
    broker.disconnect()
    return batch
import subprocess
def sysctl(command):
    """Run a sysctl command and parse the output.
    Args:
        command: A sysctl command with an argument, for example,
            ["sysctl", "hw.memsize"].
    Returns:
        The second whitespace-separated token of the output, as an int
        when possible, otherwise as raw bytes.
    """
    output = subprocess.check_output(command)
    value = output.split(b" ")[1]
    try:
        parsed = int(value)
    except ValueError:
        return value
    return parsed
def HasLib(parsed, name, version=None):
    """Check if the parsed yaml has specified the given library.
    Args:
      parsed: parsed from yaml to python object (has a `libraries` attribute).
      name: str, Name of the library.
      version: str, If specified, the library version must also match.
    Returns:
      True if a matching library (optionally with the given version) is present.
    """
    for lib in (parsed.libraries or []):
        if lib.name != name:
            continue
        if version is None or lib.version == version:
            return True
    return False
from typing import Tuple
def find_group(line: str, start: int) -> Tuple[str, int]:
    """Capture the run of alphanumeric characters beginning at *start*.
    Returns the captured group and the end position (exclusive)."""
    end = start
    while end < len(line) and line[end].isalnum():
        end += 1
    return line[start:end], end
import requests
def robot(access_token, content):
    """
    Group robot: send a plain-text message through a DingTalk custom robot.
    https://developers.dingtalk.com/document/robots/custom-robot-access
    :param str access_token:
    :param str content:
    :return: decoded JSON response
    """
    url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}'
    payload = {
        'msgtype': 'text',
        'text': {
            'content': content
        }
    }
    response = requests.post(url, json=payload)
    return response.json()
def find_biggest_value_per_day(day_data):
    """
    Return the largest (systolic, diastolic) pressure reading of the day.
    Tuples compare lexicographically, so equal systolic values are broken
    by the diastolic value.
    """
    return max((row[2], row[3]) for row in day_data)
def db_row_count(cursor, schema_table):
    """
    :param cursor: open database cursor (DB-API 2.0)
    :type cursor: psycopg2.cursor
    :param schema_table: schema.table
    :type schema_table: str|unicode
    :return: number of rows in the specified schema.table
    :rtype: int
    """
    # schema_table is interpolated directly; it must come from trusted code.
    cursor.execute("""SELECT COUNT(1) FROM %s;""" % schema_table)
    rows = cursor.fetchall()
    return int(rows[0][0])
def analyzed_function(tweet):
    """Dummy filter: keep tweets whose actor preferredUsername exceeds 7 chars."""
    try:
        return len(tweet['actor']['preferredUsername']) > 7
    except KeyError:
        return False
def datasetsListIntoTree(dList):
    """Nest a flat list of dataset records into a dict keyed by year.
    Quarterly records go into a 4-slot list at index quartal-1; yearly
    records (falsy quartal) are stored directly under the year."""
    tree = {}
    for rec in dList:
        if rec.quartal:
            tree.setdefault(rec.year, [None] * 4)[rec.quartal - 1] = rec
        else:
            tree[rec.year] = rec
    return tree
def show_available_province(instance):
    """
    Collect the distinct, non-empty 'Province/State' values across all
    action files of the instance.
    :param instance: class instance exposing _actions_files and get_reader()
    :return: list of unique province/state names, in first-seen order
    """
    seen = []
    for action_file in instance._actions_files.keys():
        for row in instance.get_reader(action_file):
            province = dict(row).get("Province/State")
            if province and province not in seen:
                seen.append(province)
    return seen
def dict_merge(dictA, dictB, path=None):
    """ Recursively merge dictionary dictB into dictA (in place).
    Keys unique to dictB are copied into dictA.  When both values are
    dicts, the merge recurses; when they differ otherwise, dictB's value
    overwrites dictA's.  *path* tracks the key trail during recursion.
    Returns dictA.
    """
    path = [] if path is None else path
    for key, b_val in dictB.items():
        if key not in dictA:
            dictA[key] = b_val
            continue
        a_val = dictA[key]
        if isinstance(a_val, dict) and isinstance(b_val, dict):
            dict_merge(a_val, b_val, path + [str(key)])
        elif a_val != b_val:
            dictA[key] = b_val
        # equal values need no processing
    return dictA
import hashlib
def sha256_hex(hash_input):
    """ Return the 64-character hex SHA256 digest of the input.
    Args:
        hash_input (obj): any python object; its str() form is hashed.
    Returns:
        hexadecimal string of SHA256 over the utf-8 encoding of
        str(hash_input).
    Example:
        >>> sha256_hex("abc")
        'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
    """
    digest = hashlib.sha256(str(hash_input).encode('utf-8'))
    return digest.hexdigest()
def jogador_jogada():
    """Prompt the player for the row and column of their move on the board.

    No parameters.
    Returns:
        dict with keys 'l' (row) and 'c' (column), each in range 0..2.
    """
    # Ask for the row until it falls inside the 3x3 board.
    while True:
        linha = int(input('Escolha a linha em que você vai jogar: '))
        if 0 <= linha <= 2:
            break
        print("\033[31mErro!! linha ínvalida, tente novamente\033[m")
    # Ask for the column until it falls inside the 3x3 board.
    while True:
        coluna = int(input('Agora, escolha a coluna: '))
        if 0 <= coluna <= 2:
            break
        print("\033[31mErro!! coluna ínvalida, tente novamente\033[m")
    return dict(l=linha, c=coluna)
import shutil
def tpl_repo(request, tmp_path, tpl_path):
    """
    Pytest fixture: create a temporary folder containing a set of test
    templates, used for testing installation of template directories.

    The templates to copy can be selected via the ``tpl_repo_contents``
    mark; the default set is ``hello_world`` and ``test``.

    Args:
        request: pytest fixture request (used to read the marker).
        tmp_path: pytest-provided temporary directory.
        tpl_path: directory containing the source templates.
    Returns:
        Path of the populated template repository.
    """
    repo_dir = tmp_path / "template_repository"
    repo_dir.mkdir()
    marker = request.node.get_closest_marker("tpl_repo_contents")
    templates = ["hello_world", "test"]
    if marker is not None:
        templates = [a for a in marker.args]
    for tpl in templates:
        tpl_dir = tpl_path / tpl
        # Template names that do not exist on disk are silently skipped.
        if tpl_dir.is_dir():
            shutil.copytree(tpl_dir, repo_dir / tpl)
    return repo_dir
import os
import shutil
import zipfile
def decompress_files(reports):
    """Expand any .zip entries in *reports* into ./static/tmp and return the
    flattened list of report names.

    Non-zip entries pass through unchanged; each zip entry is replaced by
    the names of the files currently present in the tmp directory after
    extraction.  The tmp directory is wiped before processing.

    :param reports: iterable of path strings or file-like objects exposing
        a ``name`` attribute.
    :return: list of report names / objects.
    """
    reps_to_ret = []
    workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
    dir_path = os.path.join(workpath, './static/tmp')
    # Start from a clean extraction directory.
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    for report in reports:
        if isinstance(report,str):
            rep_name = report
        else:
            rep_name = report.name
        if rep_name.endswith('zip'):
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            with zipfile.ZipFile(report, 'r') as zip_ref:
                zip_ref.extractall(dir_path)
            # NOTE(review): this re-lists the whole tmp dir after each zip,
            # so contents of earlier zips are added again — confirm intended.
            if os.path.exists(dir_path):
                files = os.listdir(dir_path)
                reps_to_ret.extend(files)
        else:
            reps_to_ret.append(report)
    return reps_to_ret
def get_base_name(full_name: str) -> str:
    """Extract the base name of a package.
    Args:
        full_name: Full name of the package of interest, e.g., pandas.testing
    Returns:
        Base name of the provided package, e.g., pandas
    """
    return full_name.partition('.')[0]
def project_directory(packaged_scene, package_root, source_scene):
    """
    Project directory line to use in scene settings.
    The arguments are accepted for interface compatibility but unused;
    the returned expression is evaluated by Nuke at load time.
    Returns:
        Project directory str
    """
    line = ' project_directory "[python nuke.script_directory()]"\n'
    return line
import argparse
def parseArgs():
    """Parse command line arguments.
    Returns:
        dict of parsed arguments (key 'clusterid').
    """
    scriptname = "calculate_emr_cost.py"
    parser = argparse.ArgumentParser(scriptname)
    parser.add_argument('-c', '--cluster-id', dest="clusterid", required=True,
                        help='Provide the emr cluster-id to calculate the total cost')
    args = parser.parse_args()
    return vars(args)
def color_str_yellow(s):
    """Wrap *s* in ANSI escape codes so it prints YELLOW on STDOUT/STDIN."""
    return f"\033[93m{s}\033[00m"
from typing import List
def __check_request(method: str = "") -> str:
    """
    Normalize a request spec: uppercase a known leading HTTP verb, or
    prefix "GET " when no known verb starts the string.
    :param method: raw request spec, e.g. "get /users"
    :return: empty string when the input is blank after trimming
    """
    methods: List[str] = ["get", "post", "put", "patch", "delete", "*"]
    request: str = method.lower()
    request = request.strip()
    if len(request) == 0:
        return ""
    for verb in methods:
        if request.startswith(verb):
            # Uppercase only the leading verb. The previous str.replace()
            # also clobbered later occurrences of the verb inside the path
            # (e.g. "get /getdata" -> "GET /GETdata").
            request = verb.upper() + request[len(verb):]
            break
    else:
        request = "GET {}".format(request)
    return request
import json
def _getappconfig(instance, cmd_args, rc):
    """Fetch an application configuration by name and print its properties.

    Property values that parse as JSON are pretty-printed; others raw.

    :param instance: instance to query — presumably a Streams Instance
        object exposing get_application_configurations(); verify.
    :param cmd_args: parsed CLI args carrying ``config_name``.
    :param rc: return code to propagate unchanged.
    :return: tuple (rc, properties dict).
    :raises NameError: when no configuration with that name exists.
    :raises Exception: when the configuration has no properties.
    """
    config_name = cmd_args.config_name
    configs = instance.get_application_configurations(name = config_name)
    # Check if any configs by that name
    if (not configs):
        raise NameError("No application configuration by the name {}".format(config_name))
    config = configs[0]
    config_props = config.properties
    if not config_props:
        raise Exception("The {} application configuration has no properties defined".format(config_name))
    for key, value in config_props.items():
        try:
            # Round-trip through json to pretty-print structured values.
            json_value = json.loads(value)
            json_value = json.dumps(json_value, indent=2)
        except ValueError:
            json_value = value
        print(key + "=" + json_value)
    return (rc, config_props)
def discern_possession_and_margin(event, current_margin, home_has_ball):
    """ Update possession and score margin from one play-by-play event.
    @param event (pd.Series): pandas Series containing a row from a
        play-by-play DataFrame
    @param current_margin (int): Current score margin relative to the
        home team
    @param home_has_ball (bool): True if the home team has possession
    Returns:
        home_has_ball (bool): updated possession flag
        current_margin (int): updated score margin relative to the home team
    """
    # Determine possession. Use identity checks against None rather than
    # `!= None` (PEP 8; also safer with objects overriding __eq__).
    home_desc = event['HOMEDESCRIPTION'] is not None
    visitor_desc = event['VISITORDESCRIPTION'] is not None
    if home_desc and not visitor_desc:
        home_has_ball = True
    elif visitor_desc and not home_desc:
        home_has_ball = False
    elif home_desc and visitor_desc:
        # Both teams mentioned: a steal/block credited to the home team
        # means the home team ends up with the ball.
        if ('STEAL' in event['HOMEDESCRIPTION']) \
                or ('BLOCK' in event['HOMEDESCRIPTION']):
            home_has_ball = True
        else:
            home_has_ball = False
    # Determine score margin ('TIE' means zero margin).
    if event['SCOREMARGIN'] is not None:
        current_margin = 0
        if event['SCOREMARGIN'] != 'TIE':
            current_margin = int(event['SCOREMARGIN'])
    return home_has_ball, current_margin
from typing import Optional
import ssl
def get_verify_ssl(ssl_context: Optional[ssl.SSLContext]) -> bool:
    """
    NETunnel expects an SSLContext object (None meaning "verify"), while
    JWThenticator expects a boolean.  Degrade the context to a bool:
    False only for an explicit False or a context with CERT_NONE.
    """
    if ssl_context is False:
        return False
    if isinstance(ssl_context, ssl.SSLContext) and ssl_context.verify_mode is ssl.VerifyMode.CERT_NONE:
        return False
    return True
def fib_iter(n: int):
    """Iteratively compute the n-th Fibonacci number (1-indexed: 1, 1, 2, 3, ...).

    Bottom-up: only the two most recent values are kept, so space is O(1).
    """
    if n < 3:
        return 1
    a, b = 1, 1
    # The original returned the debug tuple (b, a, b) here, which was
    # inconsistent with the int returned for n < 3; return just b.
    for _ in range(3, n + 1):
        a, b = b, a + b
    return b
def cyclic_fetch_elements_in_array(array, start_index, searchable_size):
    """
    Fetch `searchable_size` elements starting at `start_index`, wrapping
    around to the beginning of the array when the end is reached.
    Args:
        array (list of Any): any list, can contain ADTs
        start_index (int): the starting index to slice from
        searchable_size (int): number of elements to include
    Returns:
        list of Any
    """
    total = len(array)
    # How many elements spill past the end of the array (if any).
    excess = start_index + searchable_size - total
    if excess <= 0:
        # No wrap-around needed; plain slice suffices.
        return array[start_index:start_index + searchable_size]
    # Concatenate the tail from start_index with the wrapped head.
    return array[start_index:] + array[:excess]
def filename_from_string(string):
    """Create a safe filename from an unsafe string: keep only alphanumerics,
    spaces, dots and underscores, then strip trailing whitespace."""
    allowed = (" ", ".", "_")
    kept = (ch for ch in string if ch.isalnum() or ch in allowed)
    return "".join(kept).rstrip()
def value_for_key(data, keypath, default=None, exception_on_miss=False):
    """Returns the value at the given *keypath* within *data*.

    A key path is a list of components delimited by dots (periods); each
    component is interpreted as a dictionary key, so 'a.b' yields 'c' for
    ``{'a': {'b': 'c'}}``.  Missing components yield *default*, or raise
    KeyError when *exception_on_miss* is true.

    Ported to Python 3: the original used the Python-2-only
    ``raise KeyError, msg`` syntax and ``dict.has_key()``.
    """
    v = data
    for component in keypath.split('.'):
        # Mappings expose keys(); strings/lists do not, matching the old
        # has_key() duck-type check.
        if v is not None and hasattr(v, 'keys') and component in v:
            v = v[component]
        else:
            if exception_on_miss:
                raise KeyError("Could not locate required tag: '%s'" % component)
            v = default
    return v
def is_json(input_file):
    """
    Heuristically check whether a file is in JSON format by reading its
    first character: returns True iff it is "{".
    :param input_file: file name (string)
    :return: bool
    """
    with open(input_file) as unknown_file:
        return unknown_file.read(1) == '{'
def make_increasing(sequence: list) -> list:
    """
    Force a sequence to be non-decreasing by replacing each element with
    the running maximum (used on DELPHI confidence intervals).
    :param sequence: list, sequence of values (mutated in place)
    :return: list, the same list, now non-decreasing
    """
    running_max = None
    for idx, val in enumerate(sequence):
        running_max = val if running_max is None else max(running_max, val)
        sequence[idx] = running_max
    return sequence
from pathlib import Path
from datetime import datetime
def detect_new_day(lock_file: Path = Path('.daylock')):
    """
    Return True when executed on a different day than the last time.

    The last seen day-of-year is persisted in *lock_file*; the file is
    rewritten on every call (both branches of the original wrote it, so
    the duplicated write logic has been collapsed into one write).
    """
    day_of_the_year = datetime.now().timetuple().tm_yday
    new_day = True
    if lock_file.exists():
        new_day = int(lock_file.read_text()) != day_of_the_year
    lock_file.write_text(str(day_of_the_year) + '\n')
    return new_day
from typing import List
from typing import Dict
import collections
def ip_stats(ips: List[Dict]) -> str:
    """ Format counts of ipv4/ipv6/onion entries in *ips* as a pretty string. """
    counts: Dict[str, int] = collections.defaultdict(int)
    for entry in ips:
        if entry is not None:
            counts[entry['net']] += 1
    return "{:6d} {:6d} {:6d}".format(counts['ipv4'], counts['ipv6'], counts['onion'])
from typing import Iterable
import pathlib
from typing import Sequence
def get_modified_files(
    files: Iterable[pathlib.Path], *, since: float
) -> Sequence[pathlib.Path]:
    """Return the files whose mtime is strictly newer than *since*."""
    modified = []
    for candidate in files:
        if candidate.stat().st_mtime > since:
            modified.append(candidate)
    return modified
def anomaly(idata):
    """Return deviations of *idata* from its month-of-year climatology."""
    climatology = idata.groupby('time.month').mean(dim='time')
    return idata.groupby('time.month') - climatology
def crop_like(input, target):
    """Crop a Tensor's last two (spatial) dims to match another's size.
    Args:
        input (torch.Tensor): Input Tensor
        target (torch.Tensor): Tensor whose spatial size is matched.
    Returns:
        torch.Tensor: `input` unchanged when sizes already match, else a
        view cropped to target's height and width.
    """
    if input.size()[2:] == target.size()[2:]:
        return input
    height, width = target.size(2), target.size(3)
    return input[:, :, :height, :width]
import json
def getSetting(name):
    """Load the base configuration of the strategy called *name*.

    Reads ./CTA_setting1.json (a JSON list of setting dicts), picks the
    entry whose 'name' matches, and forces 'backtesting' to True.  When no
    entry matches, returns just {'backtesting': True}.
    """
    setting_c = {}
    settingFileName = './CTA_setting1.json'
    with open(settingFileName) as f:
        l = json.load(f)
        for setting in l:
            if setting['name'] == name:
                setting_c = setting
    setting_c[u'backtesting'] = True
    return setting_c
def get_recursive_content_as_str(doc):
    """
    THIS METHOD IS DEPRECATED!
    Flatten nested str/dict/list content into newline-terminated text.
    """
    if isinstance(doc, str):
        return doc.strip() + '\n'
    if isinstance(doc, dict):
        return ''.join(get_recursive_content_as_str(v) for v in doc.values())
    if isinstance(doc, list):
        return ''.join(get_recursive_content_as_str(item) for item in doc)
    raise Exception('cannot parse document recursively, ' + str(type(doc)))
def share_percentage(df, columns):
    """
    Show the value balance in each requested column of a dataframe:
    maps every unique value to its percentage of rows (rounded to 3 dp).
    """
    total_rows = len(df)
    result = dict()
    for col in columns:
        result[col] = {
            val: round(len(df[df[col] == val]) / total_rows * 100.0, 3)
            for val in df[col].unique()
        }
    return result
def is_suffix_of(suffix, trace) -> bool:
    """
    Args:
        suffix: candidate suffix sequence
        trace: sequence in question
    Returns:
        True if trace ends with suffix.
    """
    return len(trace) >= len(suffix) and trace[-len(suffix):] == suffix
def edit_distance(s: str, t: str):
    """
    Return the edit (Levenshtein) distance between strings s and t:
    the minimal number of insertions, deletions and mismatches needed to
    transform one into the other.  Uses a single rolling DP row, so space
    is O(len(s)).
    """
    dist = list(range(len(s) + 1))
    for j, tc in enumerate(t, start=1):
        diag = dist[0]  # dist[i-1] from the previous row
        dist[0] = j
        for i, sc in enumerate(s, start=1):
            substitution_cost = 0 if sc == tc else 1
            best = min(
                diag + substitution_cost,
                dist[i] + 1,
                dist[i - 1] + 1,
            )
            diag = dist[i]
            dist[i] = best
    return dist[-1]
def getAttrText(elem, attr):
    """
    Return the text value of the named attribute of the supplied DOM
    element node.
    """
    value = elem.getAttribute(attr)
    return value
import requests
def get_page(url):
    """Fetch ``url`` over HTTP and return the response body as text.

    Returns an empty string (after logging a notice) when the URL lacks a
    scheme; any other request error propagates to the caller.
    """
    try:
        return requests.get(url).text
    except requests.exceptions.MissingSchema:
        print(f"Incorrect url missing schema, skipping {url}")
        return ""
def fasta_to_histo(fastalines):
    """Aggregate fastaline tuples (as produced by read_fasta(...)) into a
    histogram indexed by sequence length.

    Returns a list where entry ``L`` is
    dict(tally=..., at_bases=..., gc_bases=...) summed over all records of
    length ``L``.
    """
    histo = []
    for record in fastalines:
        # Grow the histogram so that index ``record.length`` exists.
        while len(histo) <= record.length:
            histo.append(dict(tally=0, at_bases=0, gc_bases=0))
        bucket = histo[record.length]
        bucket['tally'] += 1
        bucket['at_bases'] += record.at_bases
        bucket['gc_bases'] += record.gc_bases
    return histo
def generate_annotation(x, y, text, color='black', fsize=12, textangle=0, xref='paper',
                        yref='paper', align='left'):
    """
    Build a plotly annotation as a plain dictionary, ready to be appended
    to ``layout.annotations``.

    :param x: x coordinate in plot
    :param y: y coordinate in plot
    :param text: text content
    :param color: color of text
    :param fsize: font size
    :param textangle: angle of text
    :param xref: reference to 'paper'/canvas or plot/axis coordinates
    :param yref: reference to 'paper'/canvas or plot/axis coordinates
    :param align: horizontal alignment of the text
    :return: the annotation dict
    """
    return {
        'y': y,
        'x': x,
        'showarrow': False,
        'textangle': textangle,
        'text': text,
        'font': {'color': color, 'size': fsize},
        'align': align,
        'xref': xref,
        'yref': yref,
    }
def filterDataFrameByValue(df, column, argument):
    """ Return the subset of a (Geo)Pandas dataframe whose ``column``
    equals ``argument``.

    Only handles a single equality test on one categorical column; code
    more complicated cases directly or extend this function.

    Args:
        df: the dataframe to be filtered
        column (str): name of the column to filter on
        argument: the value determining which rows to return
    Returns:
        a filtered copy of the original df
    """
    mask = df[column] == argument
    return df[mask]
import torch
def _check_singular_jacobian(x):
"""Check for singular scalars/matrices in batch and replace with NaNs."""
bad_idxs = torch.isclose(torch.linalg.det(x), torch.zeros_like(x[:, 0, 0]))
if bad_idxs.any():
print(
f'🔔 Found {bad_idxs.sum()} singular Jacobian(s) in current batch during root-finding. Inserted NaNs so optimizer can skip batch.'
)
x[bad_idxs] = float('NaN')
return x | a64a6a8472aee96dc48929f2234a46f31c2333ed | 48,434 |
def find_defining_class(obj, meth_name):
    """Return the class in ``type(obj)``'s MRO whose own namespace defines
    ``meth_name`` (i.e. the class that supplies the attribute when it is
    looked up on ``obj``), or ``None`` if no class defines it."""
    for klass in type(obj).mro():
        if meth_name in vars(klass):
            return klass
    return None
import math
def getEndTime(datetime, duration):
    """
    Compute the end time of a free date suggestion from its starting
    datetime and a duration, rounding the duration up to the next
    multiple of 15 minutes.

    :param datetime: Starting datetime
    :type datetime: datetime.datetime
    :param duration: Duration in minutes
    :type duration: int
    :return: End datetime
    :rtype: datetime.datetime
    """
    from datetime import timedelta
    # Round duration minutes up to the next quarter hour.
    duration = int(math.ceil(float(duration) / 15.)) * 15
    # Plain datetime arithmetic handles hour/day/month rollover; the previous
    # hand-rolled hour/minute normalisation raised ValueError whenever the
    # end time crossed midnight (hour >= 24 passed to .replace()).
    return datetime + timedelta(minutes=duration)
import argparse
def argparser():
    """
    Build and return the command line argument parser for the script.
    """
    parser = argparse.ArgumentParser(
        description="Replaces Unicode characters in input text with ASCII approximations.")
    parser.add_argument('-d', '--directory', default=None,
                        help="Directory for output (stdout by default)")
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help="Verbose output")
    parser.add_argument('file', nargs='+', help='Input text file')
    return parser
def longest_substring(number: str):
    """Return the longest run of digits whose parity strictly alternates
    (odd/even/odd/...), or "00" when no run longer than two characters
    exists.

    fun fact - 100 000 iteration per second for strings of len == 30
    """
    best = "00"
    run = []
    prev_parity = None  # parity of the previous digit; None before the first
    for ch in number:
        parity = int(ch) % 2
        if parity == prev_parity:
            # Same parity twice in a row ends the current run.
            candidate = "".join(run)
            if len(candidate) > len(best):
                best = candidate
            run = []
        run.append(ch)
        prev_parity = parity
    # Flush the trailing run.
    candidate = "".join(run)
    if len(candidate) > len(best):
        best = candidate
    return best
def has_main_loop(f):
    """
    Return True if ``f`` names a readable ``.py`` file containing a line
    that starts with ``def main_loop():``.

    Best-effort: any I/O or decoding problem simply yields False (matching
    the original swallow-everything behaviour), but the file handle is now
    always released via ``with`` instead of manual close() on every path.
    """
    if not f.lower().endswith(".py"):
        return False
    try:
        with open(f) as handle:
            for line in handle:
                if line.startswith("def main_loop():"):
                    return True
    except (OSError, UnicodeDecodeError):
        return False
    return False
import argparse
def parse_input():
    """
    Parse the command line input arguments.

    Returns
    ----------
    args: dict
        Dictionary of parsed arguments keyed by argument name
        (e.g., args['fname_in']).
    """
    parser = argparse.ArgumentParser()
    # Hack taken from
    # https://stackoverflow.com/questions/24180527/argparse-required-arguments-listed-under-optional-arguments
    # `argparse` normally lists every argument under 'optional arguments';
    # popping the default groups and re-adding named ones makes
    # `parser.print_help()` distinguish required from optional arguments.
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    optional.add_argument("-d", "--date", dest="date",
                          help="Date for the meeting. 'DD/MM/YYYY'",
                          type=str)
    optional.add_argument("-f", "--fname_in", dest="fname_in",
                          help="Filename for the HDF5 data file containing "
                          "the cities, the institutes at each city and the "
                          "groups at each institute. "
                          "Default: './data/astro3d_data.hdf5'",
                          default="./data/astro3d_data.hdf5", type=str)
    return vars(parser.parse_args())
def eager_callback(callback):
    """Wrap ``callback`` for use as the callback of an eager click option.

    The returned wrapper does nothing when the option value is falsy or the
    context is in resilient parsing mode; otherwise it invokes ``callback``
    with the extra positional/keyword arguments and exits the context.
    """
    def callback_wrapper(ctx, param, value, *args, **kwds):
        """Wrapped callback."""
        if value and not ctx.resilient_parsing:
            callback(*args, **kwds)
            ctx.exit()
    return callback_wrapper
def _action_message_helper(obj_cls, n_updated, message=None,
force_plural=False):
"""Helper function for getting notification messages after actions"""
if n_updated == 1 and not force_plural:
ret = "1 %s " % obj_cls._meta.verbose_name.title()
else:
ret = "%s %s " % (n_updated, obj_cls._meta.verbose_name_plural.title())
if not message:
message = "was updated successfully."
if not message.endswith('.'):
message = "%s." % message
return "%s %s" % (ret, message) | 9df14c0dccf5c47903ef270bad92c2d590d7a8a8 | 48,449 |
def region_matches_partition(region, partition):
    """
    Returns True if the provided region and partition are a valid pair.

    The prefix table is ordered longest-first: the previous version
    checked ``us-gov-`` before ``us-gov-iso-`` and ``us-gov-iso-b-``, so
    the ISO entries were unreachable and their regions could never match
    the ``aws-iso``/``aws-iso-b`` partitions.
    """
    valid_matches = [
        ("us-gov-iso-b-", "aws-iso-b"),
        ("us-gov-iso-", "aws-iso"),
        ("us-gov-", "aws-us-gov"),
        ("cn-", "aws-cn"),
    ]
    for prefix, expected_partition in valid_matches:
        if region.startswith(prefix):
            return partition == expected_partition
    # Anything without a special prefix belongs to the commercial partition.
    return partition == "aws"
def op_pop(stack):
    """Pop and return the top element of ``stack``.

    Raises SyntaxError when the stack is empty, signalling a malformed
    expression to the caller."""
    if not stack.isEmpty():
        return stack.pop()
    raise SyntaxError()
from typing import OrderedDict
def _sort_values_by_step(vals_dict):
"""Sorts values dictionary by step
Args:
vals_dict (dict): a values dictionary as created by extract_values
Returns:
dict: values dictionary with sorted steps
"""
for k, v in vals_dict.items():
vals_dict[k] = OrderedDict(sorted(v.items()))
return vals_dict | 087232b8766b420552d1b0adaae27471ba3e8d6c | 48,453 |
from typing import Tuple
from typing import Optional
def split_package(name: str) -> Tuple[Optional[str], str]:
    """
    Split a (possibly relative) dotted module name into (package, name).

    The package part is ``None`` for toplevel modules; leading dots of a
    relative name are preserved on the package part.
    """
    if not isinstance(name, str):
        raise TypeError(f"Expected 'str', got instance of {type(name)!r}")
    if not name:
        raise ValueError(f"Invalid module name {name!r}")
    stripped = name.lstrip(".")
    n_dots = len(name) - len(stripped)
    if not stripped or ".." in stripped:
        raise ValueError(f"Invalid module name {name!r}")
    package, _, basename = stripped.rpartition(".")
    if n_dots:
        package = ("." * n_dots) + package
    if package == "":
        return None, basename
    return package, basename
import argparse
from typing import List
def add_special_targets(subparsers, common: argparse.ArgumentParser) -> List[str]:
    """Register the 'generate' and 'purge' subcommands.

    Args:
        subparsers: subparsers object used to create the new subparsers
        common: parser holding the shared arguments
    Returns:
        ["generate", "purge"] — the list of special targets
    """
    generate = subparsers.add_parser(
        "generate",
        help="Generate a build cache directory. Defaults to generating a release build cache",
        parents=[common],
        add_help=False,
    )
    generate.add_argument(
        "-Dxyz",
        action="append",
        nargs=1,
        default=[],
        help="Pass -D flags through to CMakes",
    )
    purge = subparsers.add_parser(
        "purge",
        help="Purge build cache directories",
        parents=[common],
        add_help=False,
    )
    purge.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        help="Purges the build directory by force. No confirmation will be requested.",
    )
    return ["generate", "purge"]
import torch
def rand_mat_list_like_parameters(problem, V):
    """Create a list of random tensors, one per model parameter, each of
    shape ``(V, *param.shape)`` and placed on the problem's device."""
    mats = []
    for param in problem.model.parameters():
        mats.append(torch.rand(V, *param.shape, device=problem.device))
    return mats
def apply_smart_metering(
        enduse,
        fuel_y,
        sm_assump,
        strategy_vars,
        curr_yr
    ):
    """Reduce yearly fuel demand according to smart meter penetration.

    The saving is proportional to the growth in smart meter penetration
    since the base year, scaled by the enduse-specific saving potential.

    Arguments
    ----------
    enduse : str
        Enduse
    fuel_y : array
        Yearly fuel per fueltype
    sm_assump : dict
        Smart meter assumptions (saving potential per enduse and the
        base-year penetration)
    strategy_vars : dict
        Simulation parameters holding the per-year penetration
    curr_yr : int
        Current simulation year

    Returns
    -------
    fuel_y : array
        New fuel per year
    """
    # Saving potential of this enduse when a smart meter is installed.
    savings_p = sm_assump['savings_smart_meter'][enduse]
    # Penetration in the current and the base year (share of people with meters).
    penetration_cy = strategy_vars['smart_meter_p'][curr_yr]
    penetration_by = sm_assump['smart_meter_p_by']
    saved_fuel = fuel_y * (penetration_cy - penetration_by) * savings_p
    return fuel_y - saved_fuel
def has_delays(trip):
    """
    Return True when the first leg of an ns-trip reports an actual
    departure or arrival time that differs from the planned one.
    Legs without actual times count as on schedule (False).
    """
    leg = trip["legs"][0]
    origin = leg["origin"]
    destination = leg["destination"]
    if "actualDateTime" not in origin or "actualDateTime" not in destination:
        return False
    on_time = (origin["plannedDateTime"] == origin["actualDateTime"] and
               destination["plannedDateTime"] == destination["actualDateTime"])
    return not on_time
import os
def get_src_libdir(src_dir, abi):
    """Return the ABI-specific ``libs`` subdirectory of an NDK project."""
    libs_dir = os.path.join(src_dir, 'libs')
    return os.path.join(libs_dir, abi)
import requests
from bs4 import BeautifulSoup
def get_meaning(word):
    """
    Scrape googledictionary.freecollocation.com for ``word`` and return its
    meanings as a list of cleaned strings.

    Args:
        word (str): word to look up
    Returns:
        list: one entry per meaning; the part after the first dash is
        dropped, newlines are removed and semicolons become " or"
    """
    url = "https://googledictionary.freecollocation.com/meaning?word=" + word
    response = requests.get(url)
    soup = BeautifulSoup(response.content, features="html.parser")
    entries = soup.findAll("li", {"style": "list-style:decimal"})
    meanings = []
    for entry in entries:
        raw = entry.text
        # Keep only the text before the first dash, when one is present.
        if "-" in raw:
            raw = raw[:raw.index("-")]
        cleaned = raw.replace("\n", "").replace(";", " or")
        meanings.append(cleaned)
    return meanings
import codecs
from bs4 import BeautifulSoup
def get_ldc_in_soup(fname, lan='ch'):
    """
    Parse an LDC document into a BeautifulSoup object.

    :param fname: must be the full file name with path
    :param lan: 'ch' (gb2312-decoded) or 'en' (raw bytes)
    :return: soup object, or None for an unknown ``lan``

    The file handle is now closed via ``with`` (the previous version
    leaked it on every call).
    """
    if lan == 'ch':
        with codecs.open(fname, 'rb', encoding='gb2312') as fhd:
            return BeautifulSoup(fhd.read(), 'html.parser')
    elif lan == 'en':
        with open(fname, 'rb') as fhd:
            return BeautifulSoup(fhd.read(), 'html.parser')
    else:
        print('Usage: lan="ch" or "en"')
        return
def utf8_encode(space, input):
    """ Re-encode a latin-1 byte string as utf-8 and wrap it in the
    object space.
    """
    converted = input.decode("latin-1").encode("utf-8")
    return space.wrap(converted)
def partition(lis, predicate):
    """
    Split ``lis`` into two lists based on ``predicate``: the first holds
    the elements for which the predicate is True, the second the rest.

    The predicate must return exactly ``True`` or ``False``; any other
    value raises an Exception.
    """
    true_list = []
    false_list = []
    for element in list(lis):
        verdict = predicate(element)
        if verdict is True:
            true_list.append(element)
        elif verdict is False:
            false_list.append(element)
        else:
            raise Exception("Invalid predicate")
    return true_list, false_list
def de_digits_for_type(maxvalue, base):
    """Find the max no of digits needed to represent a value in a given base.

    Keyword arguments:
    maxvalue -- the maximum decimal value possible for the data type
    base -- the base to which the values would be converted

    Uses integer floor division; the previous ``int(maxvalue / base)``
    went through float division, which loses precision (or raises
    OverflowError) for values beyond float's 53-bit mantissa.
    """
    digits = 0
    remaining = maxvalue
    while remaining > 0:
        digits += 1
        remaining //= base
    return digits
def saludar(nombre):
    """Return a Spanish greeting for the given name."""
    return f'Hola {nombre}, buenos dias.'
from typing import Optional
from typing import List
import os
def read_pages(fn: Optional[str] = None) -> List[str]:
    """Read the dictionary page files under ``data/dict/``.

    Args:
        fn: when given, only the file with this exact name is read;
            otherwise every ``.txt`` file in the directory is read in
            sorted order.

    Returns:
        All non-empty, non-comment lines from the selected files as a
        single list (the original line text, untrimmed).
    """
    base_path = "data/dict/"
    result: List[str] = []
    for name in sorted(os.listdir(base_path)):
        if fn and name != fn:
            continue
        path = os.path.join(base_path, name)
        if not os.path.isfile(path) or not name.endswith(".txt"):
            continue
        # ``with`` releases the handle promptly; the previous version also
        # shadowed its loop variable by reusing ``file`` for the handle.
        with open(path, "r") as handle:
            for line in handle.read().split("\n"):
                stripped = line.strip()
                # Skip all empty lines and comments.
                if not stripped or stripped.startswith("#"):
                    continue
                result.append(line)
    return result
def rank_ordered_neighbors_original(G):
    """Original implementation of rank-ordered number of neighbors: nodes
    sorted by descending neighbor count (ties keep the nodes' original
    order, since ``sorted`` is stable)."""
    def neighbor_count(node):
        return len(list(G.neighbors(node)))
    return sorted(G.nodes(), key=neighbor_count, reverse=True)
def contiguity(fragment):
    """ A vague measurement of the average contiguity from byte to byte.

    Returns a one-element list holding the mean absolute difference between
    the character codes of adjacent characters.  Fragments shorter than two
    characters yield [0.0]; the previous version divided by zero for them.
    """
    if len(fragment) < 2:
        return [0.0]
    total_diff = sum(abs(ord(a) - ord(b)) for a, b in zip(fragment, fragment[1:]))
    return [total_diff / (len(fragment) - 1)]
def modified_T(t, delta, opts):
    """
    Compute the chromaticity-corrected nonlinear strength.

    See Webb et al. "Chromatic and Dispersive Effects in Nonlinear
    Integrable Optics" for the requirements on transforming the strength
    parameter t when computing the adjusted invariant.

    Arguments:
        t (float): strength parameter of the nonlinear magnet
        delta (float): relative momentum deviation of the particle
        opts (options.Options): options instance providing ``full_tune``,
            ``tune`` and ``Ch``

    Returns:
        float: the corrected effective strength t / correction
    """
    mu0 = opts.full_tune
    nu0 = opts.tune
    chromaticity = opts.Ch
    correction = 1.0 - mu0 * delta * chromaticity / nu0
    return t / correction
def rescale(volume, min, max):
    """Linearly rescale the values of a volume so they span [min, max]."""
    lo = volume.min()
    span = volume.max() - lo
    factor = float(max - min) / float(span)
    return (volume - lo) * factor + min
def quickview(pd_db, items=None, add=True):
    """View the paper database restricted to the essential columns.

    Args:
        pd_db: pandas DataFrame of papers.
        items: extra column names.  Appended to the defaults when ``add``
            is True, used instead of them when ``add`` is False.  The
            previous mutable default argument (``items=[]``) is replaced
            with None — same behaviour, without the shared-list pitfall.
        add: whether ``items`` extend or replace the default columns.

    Returns:
        The DataFrame restricted to the selected columns.
    """
    views = ["year", "author1", "author", "title", "journal", "doi"]
    items = items or []
    if len(items) > 0 and add:
        views = views + items
    elif len(items) > 0 and not add:
        views = items
    #print('.... columns: {}'.format(views))
    return pd_db[views]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.