| content | sha1 | id |
|---|---|---|
import os
def split_path(path):
    """Split a path into an ordered list of its components."""
    folders = []
    while True:
        path, folder = os.path.split(path)
        if folder != "":
            folders.insert(0, folder)
        else:
            if path != "":
                folders.insert(0, path)
            break
    return folders
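A quick sanity check of the traversal (POSIX-style paths, so results follow os.path.split):
assert split_path("/usr/local/bin/python") == ["/", "usr", "local", "bin", "python"]
assert split_path("a/b/c") == ["a", "b", "c"]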
|
66be3e333214584b97966f1c82f088145d7bc522
| 23,470
|
def get_unique_actions_set(classifier_set):
    """Returns a set containing the unique actions advocated by the
    classifiers in the classifier set.
    """
    unique_actions = set()
    for classifier in classifier_set:
        unique_actions.add(classifier.rule.action)
    return unique_actions
|
b6274413764bd8f2bac18700556e00e2855a0534
| 23,471
|
def default_pylogger_config(name="ctl"):
    """
    The default python logging setup to use when no `log` config
    is provided via the ctl config file
    """
    return {
        "version": 1,
        "formatters": {"default": {"format": "[%(asctime)s] %(message)s"}},
        "handlers": {
            "console": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "default",
                "stream": "ext://sys.stdout",
            }
        },
        "loggers": {
            name: {"level": "INFO", "handlers": ["console"]},
            "usage": {"level": "INFO", "handlers": ["console"]},
        },
    }
|
b8cb08d7d9bcd0dc28518608b45f398d7ec8f1ae
| 23,472
|
def _first_day(row):
    """
    Get the first trading day of week, month, year.
    """
    first_dotw = row['dotw'] < row['__prev_dotw__']
    first_dotm = row['dotm'] < row['__prev_dotm__']
    first_doty = row['doty'] < row['__prev_doty__']
    return first_dotw, first_dotm, first_doty
|
e24ab3b633f37b77106275807eb2549aa5b6878a
| 23,474
|
def counter(start=0, step=1, number=1):
    """Returns a counter function.
    The counter starts at `start` and advances by `step`.
    The value is incremented once for every `number` calls
    of the counter function.
    """
    i = 0  # number of calls since the last increment
    count = start  # the counter value
    def incrementer():
        nonlocal i, count
        i += 1
        if i > number:
            i = 1
            count += step
        return count
    return incrementer
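A short sketch of the closure's behaviour with number=2, i.e. the value advances every second call:
tick = counter(start=0, step=1, number=2)
print([tick() for _ in range(6)])  # [0, 0, 1, 1, 2, 2]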
|
b1e93a1e15b9d70baa35fe3bd2843a8a8e1c44c1
| 23,475
|
import struct
def interpret_header(header, fmt, names):
    """
    Unpack a binary header according to `fmt` and map the values onto
    `names`; values for repeated names are concatenated (str) or
    collected into lists (other types).
    """
    values = struct.unpack(fmt, header)
    hdr = {}
    i = 0
    for name in names:
        if name in hdr:
            if isinstance(values[i], str):
                hdr[name] = hdr[name] + values[i]
            else:
                try:
                    hdr[name].append(values[i])
                except AttributeError:
                    hdr[name] = [hdr[name], values[i]]
        else:
            hdr[name] = values[i]
        i = i + 1
    return hdr
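A minimal illustration of the repeated-name handling, using a made-up three-field little-endian header:
raw = struct.pack("<3h", 1, 2, 3)
print(interpret_header(raw, "<3h", ["a", "b", "b"]))  # {'a': 1, 'b': [2, 3]}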
|
fcd26a31b56bb2df58025f7d70686554a162323d
| 23,476
|
def embedded_dg(original_apply):
    """
    Decorator to add interpolation and projection steps for embedded
    DG advection.
    """
    def get_apply(self, x_in, x_out):
        if self.discretisation_option in ["embedded_dg", "recovered"]:
            def new_apply(self, x_in, x_out):
                self.pre_apply(x_in, self.discretisation_option)
                original_apply(self, self.xdg_in, self.xdg_out)
                self.post_apply(x_out, self.discretisation_option)
            return new_apply(self, x_in, x_out)
        else:
            return original_apply(self, x_in, x_out)
    return get_apply
|
b92002f707fbcd402e888a3607312c9d56f1d3af
| 23,477
|
def ascii_to_hex(__ascii):
    """
    translates ASCII string into an array of hex ASCII codes
    """
    return [hex(ord(c)).replace('0x', '') for c in __ascii]
|
433b731ee1aad51757b2e0e4a0781c565c54eea4
| 23,481
|
def generate_date_lookup():
    """ Generate a table to map dates """
    res = {}
    _month_lookup = {
        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
        'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
        'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
    month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    for m in month:
        for d in range(1, 32):
            o_date = m + " " + str(d).zfill(2)
            res[o_date] = _month_lookup[m] + "-" + str(d).zfill(2)
    return res
|
41ea7011409fa3e41e44dbf466f7ff925b421d23
| 23,482
|
def mass_surface_solid(
        chord,
        span,
        density=2700,  # kg/m^3, defaults to that of aluminum
        mean_t_over_c=0.08
):
    """
    Estimates the mass of a lifting surface constructed out of a solid piece of material.
    Warning: Not well validated; spar sizing is a guessed scaling and not based on structural analysis.
    :param chord: wing mean chord [m]
    :param span: wing span [m]
    :param density: material density [kg/m^3]
    :param mean_t_over_c: wing thickness-to-chord ratio [unitless]
    :return: estimated surface mass [kg]
    """
    mean_t = chord * mean_t_over_c
    volume = chord * span * mean_t
    return density * volume
|
1bb2e1c571b6b9fee137bcd226d517151dd51fbd
| 23,483
|
def largest(die):
    """Return the largest value die can take on."""
    return max(die)
|
17ad51ecb0960d3c83c2c8c5e8e67465046d8745
| 23,484
|
import math
def dist(x1, y1, x2, y2):
    """
    Computes Euclidean distance between two points.
    Parameters
    ----------
    x1: float
        point A x
    y1: float
        point A y
    x2: float
        point B x
    y2: float
        point B y
    Returns
    -------
    float
        distance from A to B
    """
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
|
ea2cf04ec831f0e045407cb7cb2b4c8a68bebf37
| 23,485
|
from typing import Tuple
from typing import Optional
def restore_postgres_db(
    backup_file: str, postgres_db: str
) -> Tuple[Optional[str], bytes]:
    """Restore postgres db from a file."""
    try:
        # restore postgres with pg_restore and terminal
        pass
        # check if command succeeded
        return None, bytes()
    except Exception as e:
        return f"Issue with the db restore : {e}", bytes()
|
9b9b9a43d97118807d190b1a1f317e73b9445484
| 23,486
|
def LinearVector(slope=0.0, offset=0.0):
    """A point pair set that has points in a straight line."""
    return [[float(i), float(i * slope) + offset] for i in (10, 20, 30, 40)]
|
a0354db57886894929a368ece958c60b2e2f4d60
| 23,487
|
def _call_connected(manager, *args, **kwargs):
    """Helper function to get connected status"""
    return manager.connected, False
|
0c2971e99e6ad49a6a11742674e5f125aef92b1e
| 23,490
|
def write_outfile(df, abs_filepath, abs_original_data_folder, analysis_folder):
    """
    write out the processed ABS data to the ABS data folder and the analysis folder
    Parameters
    ----------
    df: pandas dataframe to write out
    abs_filepath: Path object of original ABS file
    abs_original_data_folder: Path object of path to ABS data folder
    analysis_folder: Path to folder containing all NOM unit record parquet files
    Returns
    -------
    None
    """
    # ABS NOM filenames are of the type xxxx2018q1.sas...
    # Want to extract the date component: 2018q1
    date_start = abs_filepath.stem.find("2")
    if date_start != -1:  # if a '2' is found
        filename_date = abs_filepath.stem[date_start:]
        # append '_p' if it's a preliminary file
        if abs_filepath.stem[0] == "p":
            filename_date = filename_date + "_p"
    else:
        raise ValueError(
            f"Chris - filename {abs_filepath.stem} does not appear to have a 20XXqY date in it"
        )
    filename = "traveller_characteristics" + filename_date + ".parquet"
    # Write to original ABS folder:
    # to keep as history for comparison with updated preliminary/final files
    df.to_parquet(abs_original_data_folder / filename)
    # Write to folder for analysis
    df.to_parquet(analysis_folder / filename)
    # if a final file replaces a preliminary file - delete it from the analysis folder
    if abs_filepath.stem[0] == "f":
        preliminary_filename = (
            "traveller_characteristics" + filename_date + "_p" + ".parquet"
        )
        preliminary_path = analysis_folder / preliminary_filename
        if preliminary_path.exists():
            preliminary_path.unlink()
    return None
|
58e5864ae731b646a3843698ef9b6f86649edf61
| 23,491
|
def peaks_inside(comb, peaks):
    """
    Collect the peaks that fall inside comb
    :param comb: Tuple of coordinates
    :param peaks: List of peaks to check
    :return: List of the peaks inside comb
    """
    cpt = []
    if len(comb) == 0:
        return cpt
    for peak in peaks:
        if comb[0] < peak < comb[-1]:
            cpt.append(peak)
    return cpt
|
c176e03a49f0a3b12ab5abac98c055de9ce19e0f
| 23,492
|
def json_pointer(jsonpointer: str) -> str:
    """Replace escape sequences in a JSON pointer (RFC 6901):
    '~1' becomes '/', then '~0' becomes '~'."""
    return jsonpointer.replace("~1", "/").replace("~0", "~")
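The replacement order matters per RFC 6901: replacing '~0' first would wrongly turn the escaped token '~01' into '/':
assert json_pointer("/a~1b/c~01") == "/a/b/c~1"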
|
a0bd02c5cdc6d97ce76ac38ef0b254f0ed592b43
| 23,493
|
def expand_buses(pins_nets_buses):
    """
    Take list of pins, nets, and buses and return a list of only pins and nets.
    """
    # This relies on the fact that a bus is an iterable of its nets,
    # and pins/nets return an iterable containing only a single pin/net.
    pins_nets = []
    for pnb in pins_nets_buses:
        pins_nets.extend(pnb)
    return pins_nets
|
6addae03775fba5b12f5e7c691a5426c12f7e0f7
| 23,494
|
def loss_batch(model, loss_func, x, y, opt=None):
    """
    The function to perform backpropagation
    :param model: Training model
    :param loss_func: Function to calculate training loss
    :param x: Features in the train dataset
    :param y: Labels in the train dataset
    :param opt: Optimizer
    :return: Loss for the batch, length of the features
    """
    loss = loss_func(model(x), y)
    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()
    return loss.item(), len(x)
|
a24a379bbfad4db0e7a549f23a9be500cead1610
| 23,496
|
import os
def get_virtualenv_path():
    """Return the virtualenvwrapper WORKON_HOME directory if it is set,
    otherwise False."""
    return os.environ.get('WORKON_HOME', False)
|
00d6879bbfed8f09c271bc3a0609a2f6b41c2c87
| 23,497
|
def XlaLaunchOpCount(labels):
    """Count how many XlaLaunch labels are present."""
    return sum("XlaLaunch(" in x for x in labels)
|
e3b083de64bf1627ca98c427a268412cacf9f43b
| 23,499
|
def recurrence_str_to_sec(recurrence_str):
    """Convert recurrence string to seconds value.
    Args:
        recurrence_str: The execution recurrence formatted as a numeric value
            and interval unit descriptor, e.g., 1d for a daily recurrence.
    Returns:
        Recurrence in seconds or None if input is misformatted.
    """
    if not recurrence_str or len(recurrence_str) < 2:
        return None
    value = int(recurrence_str[:-1])
    assert value > 0
    unit = recurrence_str[-1]
    if unit == 'w':
        return 7 * 24 * 60 * 60 * value
    elif unit == 'd':
        return 24 * 60 * 60 * value
    elif unit == 'H':
        return 60 * 60 * value
    elif unit == 'M':
        return 60 * value
    else:
        return None
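For example:
assert recurrence_str_to_sec("2d") == 2 * 24 * 60 * 60
assert recurrence_str_to_sec("30M") == 30 * 60
assert recurrence_str_to_sec("5x") is None  # unrecognised unit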
|
234ea3ce9d5748ac0afaf38820078ef6566540f5
| 23,501
|
import sys
def get_perc(freq, total):
    """
    Get percent
    """
    freq = int(freq)
    total = float(total)
    if freq == 0 and total == 0:
        return 1000
    if total == 0:
        sys.exit("cannot calculate percent as total is 0!")
    return freq / total * 100
|
fa936c660689da59991c12d54a6b317c156fa596
| 23,502
|
from typing import Dict
from typing import List
import copy
def find_routes(segments: Dict[str, List[str]],
                route: List[str], rule: str) -> List[List[str]]:
    """Find complete routes from start to finish."""
    routes = []
    for next_segment in segments[route[-1]]:
        work_route = copy.deepcopy(route)
        work_route.append(next_segment)
        if next_segment == 'end':
            routes.extend([work_route])
        elif rule == 'part one':  # Each small cave can be visited at most once.
            if next_segment[0].islower() and next_segment in route:
                continue
            routes.extend(find_routes(segments, work_route, rule))
        else:
            # One small cave can be visited twice,
            # the rest of the small caves only once.
            small_caves = set()
            twice_occurrence = False
            for cave in work_route:
                if cave[0].islower():  # Small cave
                    if cave not in small_caves:
                        small_caves.add(cave)
                    elif twice_occurrence:
                        break
                    else:
                        twice_occurrence = True
            else:
                routes.extend(find_routes(segments, work_route, rule))
    return routes
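A sketch on a hypothetical three-cave graph (caves named in the Advent of Code style the signature suggests; lowercase names are small caves):
segments = {'start': ['A', 'b'], 'A': ['b', 'end'], 'b': ['A', 'end']}
print(len(find_routes(segments, ['start'], 'part one')))  # 5 complete routes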
|
681d960ba2f84376625df7295d8caeec70e7db37
| 23,504
|
def read_parse_sql_file(path_sql_script):
    """
    Function to read and parse a sql file
    Parameters:
        path_sql_script (str): path of the sql script to read and parse
    Returns:
        (list): list of string of sql requests
    """
    with open(path_sql_script, 'r') as dml_file:
        dml = dml_file.read().strip().split(';')[:-1]
    return dml
|
719ae91664c9f61fc1e0af44edb150e91e0f99e7
| 23,505
|
import zipfile
import os
def extractZipfile(file, dir=".", verbose=False):
    """Extract zipfile contents to directory. Return list of names in the zip archive upon success."""
    z = zipfile.ZipFile(file)
    namelist = z.namelist()
    dirlist = [x for x in namelist if x.endswith('/')]
    filelist = [x for x in namelist if not x.endswith('/')]
    pushd = os.getcwd()
    if not os.path.isdir(dir):
        os.mkdir(dir)
    os.chdir(dir)
    # create directory structure
    dirlist.sort()
    for dirs in dirlist:
        dirs = dirs.split('/')
        prefix = ''
        for dir in dirs:
            dirname = os.path.join(prefix, dir)
            if dir and not os.path.isdir(dirname):
                os.mkdir(dirname)
            prefix = dirname
    # extract files
    for fn in filelist:
        try:
            fnDir = os.path.dirname(fn)
            if fnDir not in ['', '.'] and not os.path.isdir(fnDir):
                os.makedirs(fnDir)
            z.extract(fn)
        finally:
            if verbose:
                print(fn)
    os.chdir(pushd)
    return namelist
|
9ecfaa0dc4e33679150c6c7b1e2c4958ea2c9a03
| 23,506
|
def parse_service_url(url):
    """Given a URL, extract the 'group' and 'specific_path'
    (See the get_service function.)"""
    parts = url.split('/phylotastic_ws/')
    return (parts[0].split(':')[1],
            parts[1])
|
cb8578418deabebc1cfbca1a363bf774acb1d745
| 23,507
|
import webbrowser
def _open_image_page(profile_id):
    """
    Input : Profile Id
    Output : Opens a new tab with graph search results
    """
    try:
        new_url = "https://www.facebook.com/search/" + profile_id + "/photos-of"
        webbrowser.open_new_tab(new_url)
        return 1
    except Exception as e:
        print(e)
        return -1
|
e192036ce6bfd5033d3182100ca0e5fa37d80c55
| 23,509
|
def get_symbols_with_positions(symbol_vector):
    """
    Find all symbols in a casadi symbol vector and return a list with tuples (name, i, j) where the symbol is located at symbol_vector[i:j].
    """
    symbols = []
    for i in range(symbol_vector.size()[0]):
        for j in range(i, symbol_vector.size()[0] + 1):
            try:
                name = symbol_vector[i:j].name()
                symbols.append((name, i, j))
            except RuntimeError:
                pass
    return symbols
|
c7afa5836d62fa2c250ccf61df77f1e0abd81449
| 23,511
|
def stencil(request):
    """Run a test for all stencils."""
    return request.param
|
d4d600c1266af00b9f1cb1ea139ad13c7b029b06
| 23,512
|
def frequency_model_validation_to_text(model_params):
    """Generate readable text from validation of frequency model.
    Validates the model type on the input data and parameters and calculate the
    Mean Absolute Percent Error (MAPE).
    Args:
        model_params: Model parameters from the validation.
    Returns:
        Text string with the result of the validation.
    """
    output_text = f"""Frequency Model: {model_params['frequency_model']}
Customers modeled for validation: {model_params['num_customers_cohort']} \
({model_params['perc_customers_cohort']}% of total customers)
Transactions observed for validation: {model_params['num_transactions_validation']} \
({model_params['perc_transactions_validation']} % of total transactions)
Mean Absolute Percent Error (MAPE): {str(model_params['mape'])}%"""
    return output_text
|
e3b251e4d077ebc296682a089c6b9a5e9cdffd07
| 23,513
|
def semester_str(semester_abs: int) -> str:
    """ String representation of a semester """
    year, half = divmod(semester_abs, 2)
    sem = 'осень' if half else 'весна'  # 'осень' = autumn, 'весна' = spring
    return f'{year}, {sem}'
|
18cda79b2caec4d563d04d9594ca704c10edc061
| 23,515
|
def vectf2(function):
    """
    :param function:
    :return:
    >>> from m2py.misc import vectorize as m
    >>> from math import *
    >>> def f(x, y, z):
    ...     return sqrt(x**2+y**2+z**2)
    ...
    >>>
    >>> fv = m.vectf2(f)
    >>>
    >>> fv([[1,23,5], [23, 49,5], [12,4,6]])
    [23.558437978779494, 54.35991169970753, 14.0]
    """
    vectorized = lambda rows: [function(*x) for x in rows]
    vectorized.__doc__ = function.__doc__
    return vectorized
|
6d215471b5125b71005e0222fdd2d4ba67d65120
| 23,517
|
def get_arsc_info(arscobj):
    """
    Return a string containing all resources packages ordered by packagename, locale and type.
    :param arscobj: :class:`~ARSCParser`
    :return: a string
    """
    buff = ""
    for package in arscobj.get_packages_names():
        buff += package + ":\n"
        for locale in arscobj.get_locales(package):
            buff += "\t" + repr(locale) + ":\n"
            for ttype in arscobj.get_types(package, locale):
                buff += "\t\t" + ttype + ":\n"
                try:
                    tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(
                        package, locale).decode("utf-8", 'replace').split("\n")
                    for i in tmp_buff:
                        buff += "\t\t\t" + i + "\n"
                except AttributeError:
                    pass
    return buff
|
f0dee00dfc983c3f88543f708faa53c755575fb5
| 23,519
|
import random
def random_simple_split(data, split_proportion=0.8):
    """Splits incoming data into two sets, randomly and with no overlap. Returns the two resulting data objects along
    with two arrays containing the original indices of each element.
    Args:
        data: the data to be split
        split_proportion: proportion of the data to be assigned to the first split subset. As this function returns
            two subsets, this parameter must be strictly between 0.0 and 1.0 (Default value = 0.8)
    Returns:
        the two resulting datasets and the original index lists
    """
    assert 0.0 < split_proportion < 1.0
    indices = list(range(len(data)))  # all indices in data
    random.shuffle(indices)
    split_index = int(len(data) * split_proportion)
    return data[indices[:split_index]], data[indices[split_index:]], indices[:split_index], indices[split_index:]
|
547900ccda505416b10a07a94836649a4c84172a
| 23,521
|
from datetime import datetime
from pathlib import Path
def create_run_path(checkpoints_path):
    """create the run path to save the checkpoints of the model
    Arguments:
        checkpoints_path {str} -- the path to save the checkpoints
    Returns:
        Path -- the path to save the checkpoints
    """
    run_folder = 'run_' + datetime.now().strftime('%Y%m%d_%H:%M.%S')
    run_path = Path(checkpoints_path) / run_folder
    return run_path
|
5930ff28ade4edb2c9b02b483b1e41934a519aeb
| 23,523
|
def TaillePx(xImg):
    """
    Input parameter:
        xImg : Int
            Width of the image along the x axis (in pixels)
    Output parameter:
        ModulePx : Int
            Side length of one QR Code module (in pixels)
    """
    iMpx = 1
    iTotalpx = iMpx * 21 + iMpx * 8
    while iTotalpx < xImg:
        iMpx += 1
        iTotalpx = iMpx * 21 + iMpx * 8
    ModulePx = iMpx
    return ModulePx
|
7012f00e73282bbeca2a8683a77cdb5db48201d4
| 23,524
|
def ensure_iterability(var, len_=None):
    """ This function ensures iterability of a variable (and optionally checks its length). """
    if hasattr(var, "__iter__") and not isinstance(var, str):
        if isinstance(len_, int) and len(var) != len_:
            raise ValueError("Length of variable differs from %i." % len_)
    else:
        len_ = len_ or 1
        var = [var] * len_
    return var
|
c9031c42ffb0b3c175407631689de067087aafd2
| 23,525
|
import requests
def get_html(url: str) -> str:
    """Fetch the URL and return the body of the HTML document"""
    return requests.get(url=url, headers={'User-Agent': 'Custom'}).text
|
f95a9faf27d9a7c971b3bda0eeb8b23aca9b4ae4
| 23,527
|
def elo_expected(d: float, f: float = 400) -> float:
    """ Expected points scored in a match by White player
    :param d: Difference in rating (Black minus White)
    :param f: "F"-Factor
    :return:
    """
    if d / f > 8:
        return 0.0
    elif d / f < -8:
        return 1.0
    else:
        return 1. / (1 + 10 ** (d / f))
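Sanity checks: evenly matched players split the point, and a 400-point rating deficit leaves White with roughly a 9% expectation:
assert elo_expected(0) == 0.5
assert round(elo_expected(400), 3) == 0.091  # 1 / (1 + 10**1)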
|
739b01e6c641e1d0bd163cc104deb50dcf76187a
| 23,528
|
def volt2lux(volt):
    """
    Convert the voltage read by the ADC to the resistance across the LDR
    and convert the resistance to an approximate value of light
    intensity.
    https://emant.com/316002
    """
    res = (3.3 / volt) - 1
    return 50 / res
|
d612948ae9d8e6f21262d4e66a9b11a1d5b0b2fa
| 23,529
|
import importlib
import inspect
def loadInputProcessors(X):
    """
    Load Input Processors for 1 LPU.
    Parameters
    ----------
    X: List of dictionaries
        Each dictionary contains the following key/value pairs:
        'module': str,
            specifying the module that the InputProcessor class
            can be imported
        'class': str,
            name of the InputProcessor class.
        and other keys should correspond to the arguments of the InputProcessor
    """
    inList = []
    record = []
    for a in X:
        d = importlib.import_module(a.pop('module'))
        processor = getattr(d, a.pop('class'))
        sig = inspect.signature(processor)
        arg_dict = {param_name: a.get(param_name) if param.default is param.empty
                    else a.get(param_name, param.default)
                    for param_name, param in sig.parameters.items()}
        input_processor = processor(**arg_dict)
        inList.append(input_processor)
        record.append(input_processor.record_settings)
    # for a in X:
    #     if a['name'] == 'InIGaussianNoise':
    #         inList.append(InIGaussianNoise(a['node_id'], a['mean'], a['std'], a['t_start'], a['t_end']))
    #     elif a['name'] == 'InISinusoidal':
    #         inList.append(InISinusoidal(a['node_id'], a['amplitude'], a['frequency'], a['t_start'], a['t_end'], a['mean'], a['shift'], a['frequency_sweep'], a['frequency_sweep_frequency'], a['threshold_active'], a['threshold_value']))
    #     elif a['name'] == 'InIBoxcar':
    #         inList.append(InIBoxcar(a['node_id'], a['I_val'], a['t_start'], a['t_end']))
    #     elif a['name'] == 'StepInputProcessor':
    #         inList.append(StepInputProcessor(a['variable'], a['uids'], a['val'], a['start'], a['stop']))
    #     elif a['name'] == 'BU_InputProcessor':
    #         inList.append(BU_InputProcessor(a['shape'], a['dt'], a['dur'], a['id'], a['video_config'],
    #                                         a['rf_config'], a['neurons']))
    #     elif a['name'] == 'PB_InputProcessor':
    #         inList.append(PB_InputProcessor(a['shape'], a['dt'], a['dur'], a['id'], a['video_config'],
    #                                         a['rf_config'], a['neurons']))
    return inList, record
|
51c1375e0afe90582c25a55fbac86c9c93581606
| 23,530
|
from typing import Any
from typing import Iterable
def to_iterable(val: Any) -> Iterable:
    """Get something we can iterate over from an unknown type
    >>> i = to_iterable([1, 2, 3])
    >>> next(iter(i))
    1
    >>> i = to_iterable(1)
    >>> next(iter(i))
    1
    >>> i = to_iterable(None)
    >>> next(iter(i)) is None
    True
    >>> i = to_iterable('foobar')
    >>> next(iter(i))
    'foobar'
    >>> i = to_iterable((1, 2, 3))
    >>> next(iter(i))
    1
    """
    if isinstance(val, Iterable) and not isinstance(val, (str, bytes)):
        return val
    return (val,)
|
6c24de85d822a5511adb26149ec863197164c61b
| 23,531
|
import random
def random_or_none(s):
    """Return a random element of sequence S, or return None if S is empty."""
    if s:
        return random.choice(s)
|
05ed40d21d754422c3a9ca45d52867f6947d23a4
| 23,532
|
def localizePath(path):
    """
    str localizePath(path)
    returns the localized version path of a file if it exists, else the global.
    """
    # locale = 'de_DE'
    # locPath = os.path.join(os.path.dirname(path), locale, os.path.basename(path))
    # if os.path.exists(locPath):
    #     return locPath
    return path
|
9ba452005d1fd7692811501e8d0f769cbeac03ab
| 23,533
|
from typing import Union
from typing import List
def comma_separated_string_to_list(line: str) -> Union[List[str], str]:
    """
    Converts a comma-separated string to a List of strings.
    If the input is a single item (no comma), it will be returned unchanged.
    """
    values = line.split(",")
    return values[0] if len(values) <= 1 else values
|
998a4a93d8a6cdcd31ec6ff53cbd171224aba782
| 23,534
|
def checksum(string):
    # The simple XOR checksum from https://en.wikipedia.org/wiki/NMEA_0183
    # would be:
    #     c = 0
    #     for i in string:
    #         c ^= ord(i) if isinstance(i, str) else i
    #     return c
    # The body below computes a CRC-16/CCITT instead.
    crc = 0xffff
    for i in range(len(string)):
        if isinstance(string, str):
            crc ^= ord(string[i]) << 8
        else:
            crc ^= string[i] << 8
        for ii in range(0, 8):
            if (crc & 0x8000) > 0:
                crc = (crc << 1) ^ 0x1021
            else:
                crc = crc << 1
    return crc & 0xffff
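The body implements CRC-16/CCITT-FALSE (init 0xFFFF, polynomial 0x1021), so it can be checked against that algorithm's standard test vector:
assert checksum("123456789") == 0x29B1
assert checksum(b"123456789") == 0x29B1  # bytes input takes the int branch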
|
d9fd4da996f8a801ea9be34b20bc8e74da63686a
| 23,535
|
def bezier(p1x, p1y, p2x=None, p2y=None):
    """
    Creates an interpolating function for a bezier curve defined by one or two control points. The curve starts at
    *(0, 0)* and ends at *(1, 1)*.
    - **Quadratic bezier curve:** One control point
    - **Cubic bezier curve:** Two control points
    **Example:**::
        bezier(0.5, 1)  # Quadratic bezier
        bezier(0.25, 1, 0.75, 0)  # Cubic bezier
    :param p1x:
        x-component of the first control point.
    :param p1y:
        y-component of the first control point.
    :param p2x:
        x-component of the second control point, if present.
    :param p2y:
        y-component of the second control point, if present.
    :return:
        An interpolating function.
    """
    if p2x is None or p2y is None:
        # Promote the quadratic control point to the equivalent cubic pair.
        p1x *= 0.6666666666666666
        p1y *= 0.6666666666666666
        p2x, p2y = p1x + 0.3333333333333333, p1y + 0.3333333333333333
    e = 1e-6
    cx = 3 * p1x
    bx = 3 * (p2x - p1x) - cx
    ax = 1 - cx - bx
    cy = 3 * p1y
    by = 3 * (p2y - p1y) - cy
    ay = 1 - cy - by
    def X(t):
        return ((ax * t + bx) * t + cx) * t
    def X_inv(x):
        # Newton-Raphson first, falling back to bisection.
        t2 = x
        for i in range(8):
            x2 = X(t2) - x
            if abs(x2) < e:
                return t2
            d2 = (3 * ax * t2 + 2 * bx) * t2 + cx
            if abs(d2) < e:
                break
            t2 -= x2 / d2
        t0, t1, t2 = 0, 1, x
        if t2 < t0:
            return t0
        if t2 > t1:
            return t1
        while t0 < t1:
            x2 = X(t2)
            if abs(x2 - x) < e:
                return t2
            if x > x2:
                t0 = t2
            else:
                t1 = t2
            t2 = (t1 - t0) * .5 + t0
        return t2
    def f(t):
        x = X_inv(t)
        return ((ay * x + by) * x + cy) * x
    return f
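A quick check that the endpoints are pinned; the control points here are the classic CSS 'ease' values, chosen purely for illustration:
ease = bezier(0.25, 0.1, 0.25, 1.0)
print(ease(0.0), ease(0.5), ease(1.0))  # 0.0, ~0.80, 1.0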
|
94f33a6683aa5539d2fedbd4d4f951dfc660e870
| 23,536
|
def arr_to_dict(arr):
    """
    takes in a numpy array or list and returns a dictionary with indices, values
    """
    d = {}
    for i in range(len(arr)):
        for j in range(len(arr[0])):
            d[(i, j)] = arr[i][j]
    return d
|
bf9382eaf9ca20b4dfff80e4a18afa055d78ec00
| 23,537
|
def compute_intersection_over_union(gt_bbox, pred_bbox):
    """ Compute the intersection over union for a ground truth bounding box and a prediction
    bounding box
    Params:
        gt_bbox (RectangleProto): single ground truth bbox
        pred_bbox (RectangleProto): single prediction bbox
    Returns:
        iou_width (double): intersection over union for width dimension
        iou_height (double): intersection over union for height dimension
        iou_area (double): intersection over union area
    """
    intersection_width = min(gt_bbox.max.y, pred_bbox.max.y) - max(gt_bbox.min.y, pred_bbox.min.y)
    intersection_height = min(gt_bbox.max.x, pred_bbox.max.x) - max(gt_bbox.min.x, pred_bbox.min.x)
    intersection_area = intersection_width * intersection_height
    gt_bbox_width = gt_bbox.max.y - gt_bbox.min.y
    gt_bbox_height = gt_bbox.max.x - gt_bbox.min.x
    gt_bbox_area = gt_bbox_width * gt_bbox_height
    pred_bbox_width = pred_bbox.max.y - pred_bbox.min.y
    pred_bbox_height = pred_bbox.max.x - pred_bbox.min.x
    pred_bbox_area = pred_bbox_width * pred_bbox_height
    union_width = gt_bbox_width + pred_bbox_width - intersection_width
    union_height = gt_bbox_height + pred_bbox_height - intersection_height
    union_area = gt_bbox_area + pred_bbox_area - intersection_area
    iou_width = intersection_width / union_width
    iou_height = intersection_height / union_height
    iou_area = intersection_area / union_area
    return iou_width, iou_height, iou_area
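RectangleProto is not shown in this snippet; a namedtuple stand-in is enough to exercise the arithmetic (note the function assumes the boxes actually overlap, since widths are never clamped at zero):
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
Box = namedtuple("Box", ["min", "max"])
gt, pred = Box(Point(0, 0), Point(10, 10)), Box(Point(5, 5), Point(15, 15))
iou_w, iou_h, iou_a = compute_intersection_over_union(gt, pred)
print(round(iou_w, 3), round(iou_h, 3), round(iou_a, 3))  # 0.333 0.333 0.143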
|
b556b9f2d36118c05de14949a54b8531dbfe1baa
| 23,538
|
def _type(value):
    """ _type
    Depending on the type, might send back a dict
    rather than the standard tuple. This is to account for situations like:
    {
        "type": "integer",
        "format": "int32",
    }
    rather than:
    ("type", "string")
    """
    val_str = str(value)
    if str(value).endswith("[]"):
        # an array
        res = {"type": "array", "zero-indexes": value.zero_indexes}
        items = {}
        if value.dimensions:
            items["dimensions"] = value.dimensions
        res["items"] = _type(value.item_type)
    elif val_str == "INTEGER":
        res = {
            "type": "integer",
            "format": "int32",
        }
    elif val_str == "SMALLINT":
        res = {
            "type": "integer",
            "format": "int8",
        }
    elif val_str == "BIGINT":
        res = {
            "type": "integer",
            "format": "int64",
        }
    elif val_str == "DATETIME":
        res = {"type": "date-time"}
    elif val_str.startswith("VARCHAR"):
        # no support for minimum length right now
        pos1 = val_str.find("(")
        if val_str == "VARCHAR":
            res = {"type": "string"}
        elif pos1 > -1:
            res = {"type": "string", "maxLength": int(val_str[pos1 + 1 : -1])}
        else:
            res = {"type": val_str}
    else:
        res = {"type": val_str.lower()}
    if hasattr(value, "choices"):
        res.update(dict(choices=value.choices))
    return res
|
d4f4a7e2cf61e2764d42f3daa06461fbe3124853
| 23,539
|
def spandex_dataset_ids(input_file):
    """
    Read all dataset IDs currently in the Spandex index.
    Args:
        input_file (str): The path to a flat file with a list of dataset IDs currently in spandex
    Returns:
        A set of dataset IDs
    """
    with open(input_file, "r") as f:
        return {x.strip() for x in f if x.strip()}
|
b1ae62221a7534264ac39881b8365749f4b8d625
| 23,540
|
def get_next_chapter(current_page):
    """returns the next chapter page, if available, otherwise None."""
    next_sibling = current_page.parent.get_next_sibling()
    if not next_sibling:
        return None
    children = next_sibling.get_children()
    if children:
        return children[0]
    return None
|
46e54cceb5884be73b59b3738687758336b8e025
| 23,541
|
def compterLignes(grille, symbole):
    """Counts the maximum number of times the symbol appears on
    a row that can still be won (i.e. with no opposing symbol on it)"""
    maximum = 0
    for ligne in grille:
        count = 0
        for case in ligne:
            if case == symbole:
                count += 1
            elif case == -symbole:
                count = 0
                break
        maximum = max(count, maximum)
    return maximum
|
16cffcde3413186e4cf4499bbe0ba9fcb2d90f04
| 23,542
|
from typing import Optional
import subprocess
def get_game_pgn(id_or_username: str, search_type: str) -> tuple[Optional[str], Optional[str]]:
    """Runs cgf to get a PGN for a chess game"""
    if search_type == "id":
        proc = subprocess.run(["cgf", id_or_username, "--pgn"], capture_output=True)
    elif search_type == "player":
        proc = subprocess.run(["cgf", id_or_username, "--player", "--pgn"], capture_output=True)
    else:
        raise ValueError('search_type must be either "id" or "player"')
    error = proc.stderr.decode("utf-8")
    if error != "":
        return None, error
    return proc.stdout.decode("utf-8"), None
|
ea7983e404da06b71706f89791503ef6db515442
| 23,543
|
import os
def check_exe(name, env=None):
    """
    Ensures that a program exists
    :type name: string
    :param name: path to the program
    :param env: configuration object
    :type env: :py:class:`waflib.ConfigSet.ConfigSet`
    :return: path of the program or None
    :raises: ValueError if `name` is an empty string.
    """
    if not name:
        raise ValueError('Cannot execute an empty string!')
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(name)
    if fpath and is_exe(name):
        return os.path.abspath(name)
    else:
        env = env or os.environ
        for path in env['PATH'].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, name)
            if is_exe(exe_file):
                return os.path.abspath(exe_file)
    return None
|
6525069ea30255a033b92ca698ac4f408dab22a6
| 23,544
|
def url_join(path: str, *path_list):
    """
    Join multiple path segments.
    :param path: the base path
    :param path_list: the list of segments to append
    :return:
    """
    if path.endswith("/"):
        path = path.rstrip("/")
    for arg in path_list:
        if arg.startswith("/"):
            arg = arg.lstrip("/")
        path = f"{path}/{arg}"
    return path
|
309a707c57a641b24899a605fddfd854fe3aa712
| 23,545
|
def merge_with_cache(cached_datapoints, start, step, values, func=None, raw_step=None):
    """Merge values with datapoints from a buffer/cache."""
    consolidated = []
    # Similar to the function in render/datalib:TimeSeries
    def consolidate(func, values):
        usable = [v for v in values if v is not None]
        if not usable:
            return None
        if func == 'sum':
            return sum(usable)
        if func == 'average':
            return float(sum(usable)) / len(usable)
        if func == 'max':
            return max(usable)
        if func == 'min':
            return min(usable)
        if func == 'last':
            return usable[-1]
        raise Exception("Invalid consolidation function: '%s'" % func)
    # if we have a raw_step, start by taking only the last data point for each interval to match what whisper will do
    if raw_step is not None and raw_step > 1:
        consolidated_dict = {}
        for (timestamp, value) in cached_datapoints:
            interval = timestamp - (timestamp % raw_step)
            consolidated_dict[interval] = value
        cached_datapoints = list(consolidated_dict.items())
    # if we have a consolidation function and the step is not the default interval, consolidate to the requested step
    if func and step != raw_step:
        consolidated_dict = {}
        for (timestamp, value) in cached_datapoints:
            interval = timestamp - (timestamp % step)
            if interval in consolidated_dict:
                consolidated_dict[interval].append(value)
            else:
                consolidated_dict[interval] = [value]
        consolidated = [(i, consolidate(func, consolidated_dict[i])) for i in consolidated_dict]
    # otherwise just use the points
    else:
        consolidated = cached_datapoints
    for (interval, value) in consolidated:
        try:
            i = int(interval - start) // step
            if i < 0:
                # cached data point is earlier than the requested data point,
                # meaning we can definitely ignore the cache result.
                # note that we cannot rely on the 'except'
                # in this case since 'values[-n]='
                # is equivalent to 'values[len(values) - n]='
                continue
            values[i] = value
        except BaseException:
            pass
    return values
|
d2cb255dd277c8b4e904a1a68a4d39466aec8c31
| 23,547
|
import os
def get_profile(profile_name=None):
    """Determine and return the AWS profile. Check in order:
    the value of 'profile_name',
    the user's shell environment,
    then fall back to 'default'.
    """
    if profile_name:
        aws_profile = profile_name
    elif os.environ.get('AWS_PROFILE'):
        aws_profile = os.environ.get('AWS_PROFILE')
    else:
        aws_profile = 'default'
    return aws_profile
|
24228453768d453d9cca02b32685d252e702e545
| 23,548
|
def sortAndReturnQuantiles(values):
    """Returns minimum, 0.25-quantile, median, 0.75-quantile, maximum"""
    values.sort()
    N = len(values)
    return (values[0], values[N // 4], values[N // 2], values[(3 * N) // 4], values[N - 1])
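With floor division the indices stay valid in Python 3:
print(sortAndReturnQuantiles([5, 1, 9, 3, 7]))  # (1, 3, 5, 7, 9)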
|
067aec1fc88cfcf33f9bab4201b81dfede82f61d
| 23,549
|
def generate_diff(old_list, new_list):
    """Returns 3 lists of added, deleted and unchanged elements.
    Added elements are elements present in new_list but not in old_list.
    Deleted elements are elements present in old_list but not in new_list.
    Args:
        old_list (list): Old list.
        new_list (list): New list.
    Returns:
        tuple: Contains 3 elements:
            - A list of added elements;
            - A list of deleted elements;
            - A list of unchanged elements.
    """
    old_set = set(old_list)
    new_set = set(new_list)
    added = new_set - old_set
    deleted = old_set - new_set
    unchanged = old_set.intersection(new_set)
    return list(added), list(deleted), list(unchanged)
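A small demonstration; note that set-based diffing drops duplicates and ordering:
added, deleted, unchanged = generate_diff([1, 2, 3], [2, 3, 4])
print(sorted(added), sorted(deleted), sorted(unchanged))  # [4] [1] [2, 3]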
|
f915c0ca33b6f9fa53450dc3d40b042271ca3fc2
| 23,551
|
def use_cache():
    """
    If used to decorate a function and if fn_cache is set, it will store the
    output of the function if the output is not None. If a function output
    is None, the execution result will not be cached.
    :return:
    """
    def wrap(func):
        def wrap_f(*args, **kwargs):
            fn_cache = kwargs.pop('fn_cache', None)
            if fn_cache is None:
                results = func(*args, **kwargs)
            else:
                cached_result = fn_cache.get(
                    (func.__name__, tuple(args[0]), frozenset(kwargs.items())))
                if cached_result is not None:
                    return cached_result
                else:
                    results = func(*args, **kwargs)
                    if results is not None:
                        fn_cache.put(
                            (func.__name__, tuple(args[0]),
                             frozenset(kwargs.items())),
                            results)
            return results
        return wrap_f
    return wrap
|
27c9ed8d5e48bcec47fc17cca8766dedf4333153
| 23,554
|
def test_new_completion_unquoted_random_override(input_obj, random_unquoted_words):
    """
    Complete completely random words and ensure that the input is
    changed adequately.
    """
    words = random_unquoted_words
    # try the completion on the middle element without affecting the others
    input_obj.text = '/example %s %s %s' % (words[0], words[1], words[2])
    base = len(input_obj.text) - len(words[2]) - 1
    input_obj.pos = base
    def f(n):
        return '/example %s %s' % (words[0], words[n])
    for i in range(len(words)):
        pos = input_obj.get_argument_position(False)
        input_obj.new_completion(words[:], pos, quotify=False, override=True)
        assert f(i) + " " + words[2] == input_obj.text
        assert len(f(i)) == input_obj.pos
    assert input_obj.text == '/example %s %s %s' % (words[0], words[-1], words[2])
    pos = input_obj.get_argument_position(False)
    input_obj.new_completion(words[:], pos, quotify=False, override=True)
    assert input_obj.text == '/example %s %s %s' % (words[0], words[0], words[2])
    input_obj.reset_completion()
    # try the completion on the final element without affecting the others
    input_obj.text = '/example %s %s %s' % (words[0], words[1], words[2])
    base = len(input_obj.text)
    input_obj.pos = base
    def f2(n):
        return '/example %s %s %s' % (words[0], words[1], words[n])
    print(words)
    for i in range(len(words)):
        pos = input_obj.get_argument_position(False)
        input_obj.new_completion(words[:], pos, quotify=False, override=True)
        assert f2(i) == input_obj.text
        assert len(f2(i)) == input_obj.pos
    assert input_obj.text == '/example %s %s %s' % (words[0], words[1], words[-1])
|
69f7ad62f9f934d41ca3f899c5275ef3c7f340ff
| 23,556
|
import os
import re
def getHostDir():
    """
    Get the directory of the current host.
    Format: /home/<HOST>/
    -> where 'HOST' is either 'timmya' or 'timmy-beta'
    NOTE: THIS ONLY WORKS ON THE VPS.
    """
    runPath = os.path.realpath(__file__)
    runDir = re.search("/home/[^/]*", runPath)
    print(runPath)
    if runDir is not None:
        runDir = runDir.group(0)
    else:
        runDir = None
    print(runDir)
    return runDir
|
8f02c399031467c3450713138b17332f79bd0585
| 23,558
|
import os
def csv_command(query: str,
                outfile: str,
                config: dict,
                header: bool = False,
                account: str = 'user',
                server: str = 'vertica'):
    """Generate command line command for passing `query` into vsql
    and directing the output to `outfile`."""
    # print(os.environ.get("pw"))
    params = {}
    if server in config:
        config = config[server]
    params['host'] = config.get(
        "host", os.environ.get("_".join([server, "host"])))
    params['database'] = config.get(
        "database", os.environ.get("_".join([server, "database"])))
    if account in config:
        config = config[account]
    params['user'] = config.get("username", os.environ.get(
        "_".join([server, account, "username"])))
    params['password'] = config.get("password", os.environ.get(
        "_".join([server, account, "password"])))
    params['query'] = query
    params['outfile'] = outfile
    if header:
        params['header'] = ''
    else:
        params['header'] = 't'
    return """/usr/local/bin/vsql
    -h {host}
    -d {database}
    -U {user}
    -w '{password}'
    -F $'|'
    -A{header}
    -c "{query}" |
    gzip -c > data/raw/{outfile}""".format(**params)
|
4d1c18f7dfde4ec7ef91d9f9b14c924591653741
| 23,560
|
def gcd(a, b):
    """Euclid's greatest common divisor algorithm."""
    if abs(a) < abs(b):
        return gcd(b, a)
    while abs(b) > 0:
        _, r = divmod(a, b)
        a, b = b, r
    return a
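For example:
assert gcd(54, 24) == 6
assert gcd(24, 54) == 6  # the smaller first argument triggers the reordering recursion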
|
f0b023ad587c896d99ff196f9bb5a7491b529106
| 23,561
|
def autocomplete_if_release_report(actions, objects, field_name='user'):
    """
    Returns value of the first item in the list objects of the field_name
    if release_report is actions.
    Args:
        actions: Transition action list
        objects: Django models objects
        field_name: String of name
    Returns:
        String value from object
    """
    try:
        obj = objects[0]
    except IndexError:
        return None
    value = getattr(obj, field_name, None)
    if value:
        for action in actions:
            if action.__name__ == 'release_report':
                return str(value.pk)
    return None
|
b7966f72c174a7aabb943d7a714744b2d076a29a
| 23,562
|
import asyncio
def sync_version(async_coroutine):
    """ Decorates asyncio coroutine in order to make it synchronous.
    Args:
        async_coroutine: asyncio coroutine to wrap.
    Returns:
        Synchronous version of the method.
    """
    def sync(*args, **kwargs):
        event_loop = asyncio.get_event_loop()
        result = event_loop.run_until_complete(async_coroutine(*args, **kwargs))
        return result
    return sync
|
b8a6e034186faa080c02c21c2433678634b1f155
| 23,563
|
def c3_merge(bases):
    """ Merge together the list of base classes into the mro that will be
    created using the C3 linearisation algorithm
    """
    # Protect against empty base class lists (although this should never happen
    # because everyone derives from object *right*?)
    if not bases:
        return []
    mro = []
    # The input to c3 is the linearisation of each base class and the list of
    # bases itself
    to_merge = [b.mro() for b in bases] + [bases]
    # Non-empty lists evaluate to True, so the while loop here goes until all
    # lists are exhausted, which is the endpoint of the c3 algorithm
    while to_merge:
        # First, we have to find the first 'good' head.
        # A good head is the head of a list that does not appear in the tails of
        # any of the other lists
        try:
            head = next(l[0] for l in to_merge if not any(l[0] in l2[1:] for l2 in to_merge))
        except StopIteration:
            raise TypeError(
                "Failed to calculate MRO - cannot order classes {0}".format(
                    ", ".join([l[0].__name__ for l in to_merge])))
        # append it to the mro and remove it from the heads of any list
        mro.append(head)
        to_merge = [l for l in (l2[1:] if l2[0] == head else l2 for l2 in to_merge) if l]
    return mro
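A quick diamond-inheritance check against Python's own MRO computation:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
# c3_merge linearises the bases, so it should match D's MRO minus D itself.
assert c3_merge([B, C]) == list(D.mro())[1:]  # [B, C, A, object]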
|
01b4ef9f0477d2a663328e512eda6ce60a73b299
| 23,565
|
def arithmetic_mean(X):
    r"""Computes the arithmetic mean of the sequence `X`.
    Let:
    * `n = len(X)`.
    * `u` denote the arithmetic mean of `X`.
    .. math::
        u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
    """
    return sum(X) / len(X)
|
cf6f2300442afe961e96a5f10943393cf071eb5b
| 23,566
|
import os
def read_configuration(key, path=None, default=None, single_config=False, fallback_to_env=True):
    """
    Read configuration from a file, Docker config or secret or from the environment variables.
    :param key: the configuration key
    :param path: the path of the configuration file (regular file or Docker config or secret)
    :param default: the default value when not found elsewhere (default: `None`)
    :param single_config: treat the configuration file as containing the full configuration,
        otherwise the file is expected to be a '=' separated key-value list line by line
        (default: `False`)
    :param fallback_to_env: look for the configuration key in the environment variables
        if not found elsewhere (default: `True`)
    """
    if path and os.path.exists(path):
        with open(path, 'r') as config_file:
            if single_config:
                return config_file.read()
            for line in config_file:
                if line.startswith('%s=' % key):
                    return line.split('=', 1)[1].strip()
    if fallback_to_env and key in os.environ:
        return os.environ[key]
    return default
|
cdafd406add20d2883299916c969b5682414d483
| 23,567
|
def get_lat_array(bw, bw_array, lat_array):
    """
    Returns the latency for measured bandwidth using bandwidth-latency dependency.
    @params:
        bw        - Required : measured bandwidth (Float)
        bw_array  - Required : array of measured bandwidths for bw-lat dependency (List of floats)
        lat_array - Required : array of measured latencies for bw-lat dependency (List of floats)
    """
    if bw > bw_array[len(bw_array) - 1]:
        return lat_array[len(bw_array) - 1]
    i = 0
    while bw > bw_array[i]:
        i += 1
        if i == len(bw_array):
            return 0
    if i == 0:
        return lat_array[0]
    else:
        bw_percent = (bw - bw_array[i - 1]) / (bw_array[i] - bw_array[i - 1])
        latency = lat_array[i - 1] + bw_percent * (lat_array[i] - lat_array[i - 1])
        return latency
|
d92c63f8f3a1cb96220a4c13b377244522c32d6a
| 23,568
|
import csv
def getDataRows(file_name):
    """
    Read a CSV file and return a list of row dicts
    """
    rows = []
    with open(file_name) as fo:
        reader = csv.DictReader(fo)
        for i in reader:
            rows.append(i)
    return rows
|
48c88dbecb40f28ab7f6c95c9e9f795dabd1a7f5
| 23,569
|
def get_quoted_name_for_wlst(name):
    """
    Return a wlst required string for a name value in format ('<name>')
    :param name: to represent in the formatted string
    :return: formatted string
    """
    result = name
    if name is not None and '/' in name:
        result = '(' + name + ')'
    return result
|
1ead7ae2a0b5c1d3dc3d5fc1e8e07a51697597b4
| 23,570
|
import torch
def common_args_to_opts(common_args):
    """Parse common_args into a dict of Python objects."""
    opts = dict(
        # device=torch.device(common_args.device),
        device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    )
    return opts
|
40d1e1b9f71670e31c5d4e9cb58e0ef56091b34c
| 23,571
|
def is_excluded_File(File, config, platform):
    """ param File is a <File> node """
    # <File RelativePath="...">
    #   <FileConfiguration
    #     Name="MTI_static_release_vi16|Win32"
    #     ExcludedFromBuild="true"
    #   />
    for FileConf in File.findall('./FileConfiguration'):
        cond = FileConf.attrib['Name']  # required attrib
        if 'ExcludedFromBuild' not in FileConf.attrib:
            continue  # not excluded
        excl = FileConf.attrib['ExcludedFromBuild']
        if excl != 'true':
            continue  # not excluded
        (exclude_config, exclude_platform) = cond.split('|')
        if exclude_config == config and exclude_platform == platform:
            return True
    return False
|
89649c829d98c79d166d78473f2f7926ae0e7627
| 23,572
|
import os
def read_sql_user_name():
    """
    read SQL user name of this tool
    return SQL user name
    """
    file_path = os.path.expanduser("~/replica.my.cnf")
    with open(file_path, 'r') as f:
        lines = f.readlines()
    user_name_line = next(line for line in lines if line.strip().startswith('user = '))
    user_name = user_name_line.split('=')[-1].strip()
    return user_name
|
2ce154397e7ea381211405618d713b34e30fd540
| 23,574
|
import torch
def load_models_from_ckp(path_checkpoint, model):
    """
    Function to load values in checkpoint file.
    :param path_checkpoint: path to ckp file
    :param model: model for which to load the weights
    :return:
    """
    if path_checkpoint is not None:
        dict_ckp = torch.load(path_checkpoint, map_location=torch.device('cpu'))
        print("Loaded model from: {}".format(path_checkpoint))
        for key in dict_ckp:
            print("{}".format(key))
        model.load_state_dict(dict_ckp)
        return True
    return False
|
65ea0e1be5f2b392cdc6792fa79ff912dd51f926
| 23,575
|
def get_reverse_bits(bytes_array):
    """
    Reverse all bits in arbitrary-length bytes array
    """
    num_bytes = len(bytes_array)
    formatstring = "{0:0%db}" % (num_bytes * 8)
    bit_str = formatstring.format(int.from_bytes(bytes_array, byteorder='big'))
    return int(bit_str[::-1], 2).to_bytes(num_bytes, byteorder='big')
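Bit reversal is an involution, which gives an easy round-trip check:
data = bytes([0b10110000, 0b00000001])
flipped = get_reverse_bits(data)
print(flipped.hex())  # '800d'
assert get_reverse_bits(flipped) == data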
|
c4c64624a9fab5d9c564b8f781885a47984f1eaf
| 23,577
|
def inspect_strategy(inspector, opponent):
    """Inspects the strategy of an opponent.
    Simulate one round of play with an opponent, unless the opponent has
    an inspection countermeasure.
    Parameters
    ----------
    inspector: Player
        The player doing the inspecting
    opponent: Player
        The player being inspected
    Returns
    -------
    Action
        The action that would be taken by the opponent.
    """
    if hasattr(opponent, "foil_strategy_inspection"):
        return opponent.foil_strategy_inspection()
    else:
        return opponent.strategy(inspector)
|
318f5d23f74981b84abbe89cce7c3fc315156975
| 23,578
|
def get_reg_dict(kwd_df):
    """
    Create a dictionary of domains. All regexes belonging to a domain are joined into one regex.
    Parameters
    ----------
    kwd_df: DataFrame
        dataframe with the columns `domain` and `regex`
    Returns
    -------
    dict
        dictionary with the domain as key and a "joined domain regex" as value
    """
    reg_dict = dict()
    for domain in kwd_df.domain.unique():
        reg_dict[domain] = '|'.join(kwd_df.query("domain == @domain").regex)
    return reg_dict
|
2fb3e86e3f77329d88731f9d6743eafcc999133d
| 23,579
|
def regex_ignore_case(term_values):
    """
    turn items in list "term_values" into regexes with ignore case
    """
    output = []
    for item in term_values:
        output.append(r'(?i)' + item)
    return output
|
dc9fd3cb9e54896bacb7e2a296d5583a16aaf3ec
| 23,580
|
import glob
import re
def get_calcium_stack_lenghts(folder):
    """
    Function to extract calcium stack lengths from the imageJ macro files associated to the stacks.
    params:
        - folder: path of the folder containing the IJ macro files
    return:
        - list of stack lengths
    """
    record_lenghts = []
    pattern_nFrame = r".*number=(\d*) .*"
    for fn in glob.glob(folder + "/*.txt"):
        with open(fn) as f:
            line = f.readline()
        record_lenghts.append(int(re.findall(pattern_nFrame, line)[0]))
    return record_lenghts
|
a9985bc6427ac31e7e1e3d941c227ac302af9206
| 23,581
|
def retry_if_value_error(exception):
    """
    Helper to let retry know whether to re-run
    :param exception: Type of exception received
    :return: <bool> True if test failed with ValueError
    """
    return isinstance(exception, ValueError)
|
a4f184da177fa26dee55f8f7ed76e3c7b8dbfc87
| 23,582
|
from typing import Tuple
def get_happiest_and_saddest(emotions: list) -> Tuple[int, int]:
    """
    Get happiest and saddest index
    :param emotions: list of lists containing emotions likelihood
    :return: happiest_tweet_index, saddest_tweet_index
    """
    happiest_item = max(emotions, key=lambda e: e[2])  # Based on joy
    saddest_item = max(emotions, key=lambda e: e[4])  # Based on sadness
    return emotions.index(happiest_item), emotions.index(saddest_item)
|
c31b9d4d8908f49a03c909f6ba1716c430bbd30f
| 23,583
|
def _ensure_str(name):
    """
    Ensure that an index / column name is a str (python 3); otherwise they
    may be np.string dtype. Non-string dtypes are passed through unchanged.
    https://github.com/pandas-dev/pandas/issues/13492
    """
    if isinstance(name, str):
        name = str(name)
    return name
|
309eb8e0ac756a4449f7f3cb2ca8ce9372a7f547
| 23,586
|
import os
def parseName(fpath):
    """Extract the file name from the file path.
    Args:
        fpath (str): File path.
    Returns:
        str: File name.
    """
    return os.path.basename(fpath)
|
e060b309ecdf8d1eea7358f864ce37419c164097
| 23,590
|
def _find_slice_interval(f, r, x, u, D, w=1.):
    """Given a point u between 0 and f(x), returns an approximated interval
    under f(x) at height u.
    """
    a = x - r * w
    b = x + (1 - r) * w
    if a < D[0]:
        a = D[0]
    else:
        while f(a) > u:
            a -= w
            if a < D[0]:
                a = D[0]
                break
    if b > D[1]:
        b = D[1]
    else:
        while f(b) > u:
            b += w
            if b > D[1]:
                b = D[1]
                break
    return a, b
|
aeaede0846e0569512b6ffc19c09c6aa88b42ac0
| 23,592
|
def zip_with_index(rdd):
    """
    Alternate version of Spark's zipWithIndex that eagerly returns count.
    """
    starts = [0]
    if rdd.getNumPartitions() > 1:
        nums = rdd.mapPartitions(lambda it: [sum(1 for _ in it)]).collect()
        count = sum(nums)
        for i in range(len(nums) - 1):
            starts.append(starts[-1] + nums[i])
    else:
        count = rdd.count()
    def func(k, it):
        for i, v in enumerate(it, starts[k]):
            yield v, i
    return count, rdd.mapPartitionsWithIndex(func)
|
14666f7436b16a0c292375e18d635a5f57dccf54
| 23,593
|
def result_contains_node_bindings(result, bindings: dict[str, list[str]]):
    """Check that the result object has all bindings provided (qg_id->kg_id).
    KPs that are returning a (proper) subclass of an entity allowed by the qnode
    may use the optional `qnode_id` field to indicate the associated superclass.
    """
    for qg_id, kg_ids in bindings.items():
        if not any(nb.id in kg_ids for nb in result.node_bindings[qg_id]):
            return False
    return True
|
617f33a769a0b7c4048e31743c4c3ca1b2c815de
| 23,594
|
def has_gendered_nouns(doc):
    """
    Doc-level spaCy attribute getter, which returns True if
    any Token with a NOUN pos_ tag is of "m" or "f" gender.
    """
    noun_genders = [token._.gender for token in doc if token.pos_ in ["NOUN", "PROPN"]]
    has_gendered_noun = any([g in ["m", "f"] for g in noun_genders])
    return has_gendered_noun
|
dbf28f9d43ce5bcd5cb1b391f14f35076d193975
| 23,595
|
import string
def password_is_valid(password: str) -> bool:
    """
    @param password
    @return: password is valid
    """
    if len(password) < 8:
        return False
    if not any(c in password for c in "!@#$%^&*"):
        return False
    if not any(c in password for c in string.ascii_lowercase):
        return False
    if not any(c in password for c in string.ascii_uppercase):
        return False
    if not any(c in password for c in string.digits):
        return False
    return True
|
24f1e98ba6ed58ba69a1d28162a13ecbd31bf3b4
| 23,596
|
import sys
import argparse
import os
def parse_args(filepath=__file__, source=sys.argv, custom_commands=[]):
    """
    This function will parse command line arguments, displaying help and
    exiting if an argument is invalid. Returns command, hostname, port and config.
    """
    global original_args
    original_args = sys.argv[:]
    defaults = {
        'myself': source.pop(0),
        'hostname': 'localhost',
        'port': 9000,
        'conf': 'app/config/settings.py'
    }
    if len(source) == 0:
        source.append(defaults['myself'])
    parser = argparse.ArgumentParser(
        description=('Example: {myself} -h {hostname} '
                     '-p {port} -c config/devel.py start').format(**defaults),
        conflict_handler='resolve')
    parser.add_argument('command', choices=[
        'start', 'stop', 'restart', 'status', 'runserver'] + custom_commands)
    parser.add_argument('-h', '--hostname', default=defaults['hostname'])
    parser.add_argument('-p', '--port', type=int, default=defaults['port'])
    parser.add_argument(
        '-c', '--config', required=False, default=defaults['conf'], type=argparse.FileType(),
        help='config must match the location of a module containing '
             'decanter required configuration items, i.e. app/config/settings.py')
    args = parser.parse_args(source)
    # 'type=argparse.FileType()' confirms the existence of the file,
    # but it also opens it, so close it again here.
    args.config.close()
    args.config = os.path.relpath(os.path.realpath(args.config.name),
                                  os.path.dirname(os.path.realpath(filepath)))
    return args
|
3db9277c71eeda08a25092e1d5367bd78b67032b
| 23,597
|
def map_to_int_range(values, target_min=1, target_max=10):
    """Maps a list into the integer range from target_min to target_max
    Pass a list of floats, returns the list as ints
    The 2 lists are zippable"""
    integer_values = []
    values_ordered = sorted(values)
    min_value = float(values_ordered[0])
    max_value = float(values_ordered[-1])
    if min_value == max_value:
        # convert to integer and clamp between min and max
        integer_value = int(min_value)
        integer_value = max(target_min, integer_value)
        integer_value = min(integer_value, target_max)
        return [integer_value for v in values]
    target_range = (target_max - target_min)
    float_range = (max_value - min_value)
    for value in values:
        value = target_min + int(target_range * ((value - min_value) / float_range))
        integer_values.append(value)
    return integer_values
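For instance, spreading four floats over the default 1..10 range maps the minimum to target_min and the maximum to target_max:
print(map_to_int_range([0.0, 2.5, 5.0, 10.0]))  # [1, 3, 5, 10]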
|
524f87a74de9e1879a9894accecab10a54be6651
| 23,598
|
import string
def is_not_alphanumeric_string(secret: str) -> bool:
    """
    This assumes that secrets should have at least ONE letter in them.
    This helps avoid clear false positives, like `*****`.
    """
    return not bool(set(string.ascii_letters) & set(secret))
|
a4fcb3ca718d40b02125d444601aea0f4b2966e4
| 23,599
|
def retrieveSampleAnnotationsFromCondensedSdrf(condensedSdrfStr):
    """
    >>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("")
    >>> len(diseases)
    0
    >>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tcell line\\t5637\\thttp://www.ebi.ac.uk/efo/EFO_0002096")
    >>> len(diseases) + len(tissues) + len(crossRefs)
    0
    >>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tdisease\\tbladder carcinoma\\thttp://www.ebi.ac.uk/efo/EFO_0000292")
    >>> "bladder carcinoma" in diseases
    True
    >>> "EFO_0000292" in crossRefs
    True
    >>> tissues
    set()
    >>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-513\\t\\tERR030881\\tfactor\\torganism part\\tadrenal\\thttp://purl.obolibrary.org/obo/UBERON_0002369")
    >>> "adrenal" in tissues
    True
    >>> "UBERON_0002369" in crossRefs
    True
    >>> diseases
    set()
    """
    diseases, tissues, crossRefs = (set([]), set([]), set([]))
    for row in condensedSdrfStr.split("\n"):
        arr = row.strip().split("\t")
        if len(arr) > 4 and arr[3] == "factor":
            if arr[4].lower() == "organism part":
                tissues.add(arr[5].strip())
                if len(arr) > 6:
                    crossRefs.add(arr[6].split("/")[-1].strip())
            elif arr[4].lower() == "disease":
                diseases.add(arr[5].strip())
                if len(arr) > 6:
                    crossRefs.add(arr[6].split("/")[-1].strip())
    return (diseases, tissues, crossRefs)
|
cfada3e870e0ec1b6790381aa7bb966e6ecf8f41
| 23,601
|
def get_column_names():
    """
    :return: Column names of Data.csv
    """
    feature_names_of_players = [
        "PLAYER_NAME", "MIN", "FGM", "FGA", "FG_PCT", "FG3M", "FG3A", "FG3_PCT", "FTM", "FTA",
        "FT_PCT", "OREB", "DREB", "REB", "AST", "STL", "BLK", "TO", "PF", "PTS", "PLUS_MINUS"]
    feature_names_of_matchups = [
        "gameId", "teamAbbr", "opptAbbr", "rslt", "teamMin", "teamPTS", "teamPTS1", "teamPTS2",
        "teamPTS3", "teamPTS4", "opptPTS", "opptPTS1", "opptPTS2", "opptPTS3", "opptPTS4",
        "teamFGM", "teamFGA", "teamFG", "team3PM", "team3PA", "team3PCT", "teamFTM", "teamFTA",
        "teamFTC", "teamORB", "teamDRB", "teamREB", "teamAST", "teamSTL",
        "teamBLK", "teamTO", "teamPF", "team2P", "teamTS", "teamEFG", "teamPPS", "teamFIC",
        "teamFIC40", "teamOrtg", "teamDrtg", "teamPlay",
        "opptMin", "opptFGM", "opptFGA", "opptFG", "oppt3PM", "oppt3PA", "oppt3PCT", "opptFTM",
        "opptFTA", "opptFTC", "opptORB", "opptDRB", "opptREB", "opptAST", "opptSTL",
        "opptBLK", "opptTO", "opptPF", "oppt2P", "opptTS", "opptEFG", "opptPPS", "opptFIC",
        "opptFIC40", "opptOrtg", "opptDrtg", "opptPlay", "poss", "pace"]
    team_features = [
        "gameId", "teamAbbr", "opptAbbr", "rslt", "teamMin", "teamPTS", "teamPTS1", "teamPTS2",
        "teamPTS3", "teamPTS4", "opptPTS", "opptPTS1", "opptPTS2", "opptPTS3", "opptPTS4",
        "teamFGM", "teamFGA", "teamFG", "team3PM", "team3PA", "team3PCT", "teamFTM", "teamFTA",
        "teamFTC", "teamORB", "teamDRB", "teamREB", "teamAST", "teamSTL",
        "teamBLK", "teamTO", "teamPF", "team2P", "teamTS", "teamEFG", "teamPPS", "teamFIC",
        "teamFIC40", "teamOrtg", "teamDrtg", "teamPlay"]
    team_features = team_features + feature_names_of_players * 11
    oppt_features = [
        "opptMin", "opptFGM", "opptFGA", "opptFG", "oppt3PM", "oppt3PA", "oppt3PCT", "opptFTM",
        "opptFTA", "opptFTC", "opptORB", "opptDRB", "opptREB", "opptAST", "opptSTL",
        "opptBLK", "opptTO", "opptPF", "oppt2P", "opptTS", "opptEFG", "opptPPS", "opptFIC",
        "opptFIC40", "opptOrtg", "opptDrtg", "opptPlay"]
    oppt_features = oppt_features + feature_names_of_players * 11
    last_features = ["poss", "LM_totalPoint", "LM_dayOffset", "pace"]
    feature_names_of_matchups = team_features + oppt_features + last_features
    return feature_names_of_matchups
|
03a084961b828a1c2042d2a0531719762e118550
| 23,603
|
import argparse
def checkArgs():
    """Check the command line arguments."""
    parser = argparse.ArgumentParser(
        description='Check the command line arguments.')
    parser.add_argument('-i', '--elementsToInclude',
                        help='The list of element names that will correspond '
                             'to the modules to be added to the instance.',
                        required=True)
    parser.add_argument('-a', '--appName', help='This apps name.',
                        required=False)
    parser.add_argument('-e', '--environment',
                        help='The environment the app is running in.',
                        required=False)
    parser.add_argument('-c', '--configFile',
                        help='Config file that holds variables that define '
                             'paths on the local system.',
                        required=False)
    parser.add_argument('-p', '--profile',
                        help='This is the AWS profile that needs to be used.',
                        required=False)
    parser.add_argument('-o', '--organization',
                        help='This is the AWS organization that needs to be used.',
                        required=False)
    parser.add_argument('-r', '--region',
                        help='This is the AWS region that needs to be used.',
                        required=True)
    parser.add_argument('--suffix',
                        help='This is the defined suffix for this instance.',
                        required=False)
    parser.add_argument('--stack',
                        help='This is the name of the stack to use.',
                        required=False)
    parser.add_argument('--stackDir',
                        help='This is the path to where the stack is.',
                        required=False)
    parser.add_argument('--privateIP',
                        help='The private IP of this instance.',
                        nargs='?',
                        required=False)
    parser.add_argument('--postgresqlVersion',
                        help='The postgresql version number.',
                        nargs='?',
                        required=False)
    parser.add_argument('--combinedWebWorker',
                        help='The flag to say if this instance is a shared '
                             'web and worker on the same instance.',
                        nargs='?',
                        required=False)
    parser.add_argument('--scratchVolume',
                        help='Signifies that there needs to be a scratch '
                             'volume created.',
                        nargs='?',
                        required=False)
    parser.add_argument('-t', '--test',
                        help='Will run the script but will not actually '
                             'execute the shell commands. Think of this as a '
                             'dry run or a run to be used with a testing suite',
                        action="store_true",
                        required=False)
    args, unknown = parser.parse_known_args()
    retArgs = {}
    if args.elementsToInclude:
        retElements = [x.strip() for x in args.elementsToInclude.split(',')]
        retArgs["ELEMENTS_TO_INCLUDE"] = retElements
    if args.configFile:
        retArgs["CONFIG_FILE"] = args.configFile
    if args.organization:
        retArgs["ORGANIZATION"] = args.organization
    if args.profile:
        retArgs["PROFILE"] = args.profile  # was overwriting ORGANIZATION by mistake
    if args.region:
        retArgs["REGION"] = args.region
    if args.suffix:
        retArgs["SUFFIX"] = args.suffix
    if args.stack:
        retArgs["STACK"] = args.stack
    if args.stackDir:
        retArgs["STACK_DIR"] = args.stackDir
    if args.appName:
        retArgs["APP_NAME"] = args.appName
    if args.environment:
        retArgs["ENV"] = args.environment
    if args.postgresqlVersion:
        retArgs["PGVERSION"] = args.postgresqlVersion
    if args.combinedWebWorker:
        retArgs["COMBINED_WEB_WORKER"] = args.combinedWebWorker
    if args.scratchVolume:
        retArgs["CREATE_SCRATCH_VOLUME"] = args.scratchVolume
    if args.privateIP:
        retArgs["PRIVATE_IP"] = args.privateIP
    retTest = ""
    if args.test:
        retTest = args.test
    return (retArgs, retTest)
|
4081966d518ae17cc33b20890232a39cf13999ee
| 23,604
|
def _strip_prefix(s, prefix):
    """A helper to strip the prefix from the string if present"""
    return s[len(prefix):] if s and s.startswith(prefix) else s
|
8438b6e8c3b7e478fe93ede3433adaa9a22a739e
| 23,605
|