| content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k) |
|---|---|---|
def dummy(xy_in, params, invert=False):
"""
"""
return xy_in
|
74e6986c37bd1367dbc152e77b5cef2097bf8e14
| 698,956
|
import random
def pick_sites(grid):
"""
Randomly pick two adjacent sites on a grid.
@type grid: Grid object
@param grid: grid
"""
# Pick the first site
site1 = (random.randrange(grid.height), random.randrange(grid.width))
# Pick an adjacent site
site2 = random.choice(grid.cells[site1][0])
return (site1, site2)
|
18ed6a17aae292a23cec4a66dab98b290c0c5992
| 698,957
|
from typing import Dict
from typing import Set
from typing import Any
def unique(domains: Dict[str, Set[Any]]) -> bool:
""" Each variable has exactly one value """
return all((len(domain) == 1
for domain in domains.values()))
|
64551d938135b1a288ca97d480abe4282eddeefd
| 698,958
|
from typing import Optional
def parse_dot_notation(input: str) -> tuple[str, Optional[tuple[str, ...]]]:
""" Parse dot-notation
Example:
parse_dot_notation('a') #-> 'a', None
        parse_dot_notation('a.b.c') #-> 'a', ('b', 'c')
"""
name, _, sub_path_str = input.partition('.')
sub_path = tuple(sub_path_str.split('.')) if sub_path_str else None
return name, sub_path
|
06650868fb773b41b97a839b0d423cc8f3cd4a85
| 698,959
|
def createURIString(valueString, delimiter, vocab):
"""This function takes a delimiter separted string of values and returns a string
in which every of these values is prefixed with the specified vocab URI.
>>> createURIString('nl;fr;de', ';', 'http://id.loc.gov/vocabulary/languages/')
'http://id.loc.gov/vocabulary/languages/nl;http://id.loc.gov/vocabulary/languages/fr;http://id.loc.gov/vocabulary/languages/de'
An empty input string results in an empty output string
>>> createURIString('', ';', 'http://id.loc.gov/vocabulary/languages/')
''
Only a delimiter results in an empty string
>>> createURIString(';', ';', 'http://id.loc.gov/vocabulary/languages/')
''
"""
uris = []
urisString = ""
values = valueString.split(delimiter)
if len(values) > 1:
for v in values:
if len(v) > 0:
uris.append(vocab + v)
        # join with the same delimiter that was used to split the input
        urisString = delimiter.join(uris)
elif len(values) == 1:
if len(values[0]) > 0:
urisString = vocab + valueString
else:
urisString = ''
return urisString
|
6fb6898b1531b5741dd890452c9ddd9e4db6f205
| 698,960
|
import requests
from bs4 import BeautifulSoup
def get_page(url, **kwargs):
"""Pulls in the HTML from a URL and returns the results as a BeautifulSoupt object.
Parameters
----------
url : str
The URL to scrape
Returns
-------
soup : bs4.BeautifulSoup
The BeautifulSoup representation of the webpage
"""
response = requests.get(url, **kwargs)
if response.status_code != 200:
raise RuntimeError(
f"Response from {url} failed with status code " "{response.status_code}"
)
else:
return BeautifulSoup(response.text, "lxml")
|
392c83be8b24bdeb27cf482c9df72c30b4b945dc
| 698,961
|
def parse_float(string:str) -> float:
"""
    Parse the numeric characters of a string into a float.
    -> string: str => a string of chars, e.g. "abc12a.a3"
    ----
    => float: the digits and decimal point found in the string, e.g. 12.3
"""
if not isinstance(string, str):
raise TypeError('string must be a string')
return float("".join([i for i in string.strip() if i.isnumeric() or i == "."]))
|
305c945979d4eb880d53207a9a1466f46cb7d981
| 698,963
|
from pathlib import Path
import json
def read_jupyter_as_json(filepath: Path) -> dict:
"""
Read in rendered notebook-- read in the JSON representation that is 'under
the hood'
:param filepath: path to jupyter notebook.
"""
with open(filepath, "r") as fout:
contents = fout.read()
return json.loads(contents)
|
d91344b1bddcd0e1078effe6dd7947f7e04ea6af
| 698,964
|
def _to_db_str(sequential_list):
"""Convert a list or tuple object to a string of database format."""
entry_list = []
for _entry in sequential_list:
        # Only text values need to be converted for now. More types can be
        # added in the future as needed.
if isinstance(_entry, str):
entry_list.append("u'%s'" % _entry)
else:
entry_list.append(str(_entry))
return "(%s)" % ", ".join(entry_list)
|
42a4e008964c0accb3e596dc75859641e999a0f4
| 698,965
|
import glob
def get_file_sets(root_path, resnet_version, dlc_network_shuffle= 'shuffle2',dlc_network_iteration = '500000', filtered=False):
""" function intended to fetch the proper network file sets for a given session. A session directory
may have multiple predictions from unique DLC networks saved to a folder. This requires us to dictate which
network's predictions to use
Attributes
------------
    root_path: str, path of root experimental directory
    resnet_version: str, network version of DLC to use
    dlc_network_shuffle: str, shuffle identifier used to select a network's prediction files
    dlc_network_iteration: str, training iteration used to select a network's prediction files
    filtered: bool, flag to filter DLC predictions
Returns
-------------
cam1_list: list containing cam1 DLC predictions for a specified network
cam2_list: list containing cam2 DLC predictions for a specified network
cam3_list: list containing cam3 DLC predictions for a specified network
"""
cam1_list = []
cam2_list = []
cam3_list = []
all_files = glob.glob(root_path, recursive=True)
    iteration_files = [k for k in all_files if dlc_network_iteration in k]
    nf = [k for k in iteration_files if dlc_network_shuffle in k]
file_list = 0
for file in nf:
if 'cam3' in file:
files = file.rsplit('/', 1)[1]
names = str(files.rsplit('_cam3', 1)[0])
            file_list = [f for f in nf if names in f]
for s in file_list:
if "cam1" in s:
cam1_list.append(s)
if "cam2" in s:
cam2_list.append(s)
if "cam3" in s:
cam3_list.append(s)
print('Total Number of 3-D reconstructable DLC predictions found for this rat is ' + str(len(cam1_list)))
if file_list:
print('Total Number of files in the block is ' + str(len(file_list)))
else:
cam1_list, cam2_list, cam3_list = 0, 0, 0
        print('No 2-D DeepLabCut prediction files found for this experimental block for the given network. Please re-run the script DLC.py on this rat\'s block.')
return cam1_list, cam2_list, cam3_list
|
d7920667545dece6d6c4fccc6075a14c8eb53837
| 698,966
|
def histogram(s):
"""to check s in string"""
d={}
for c in s:
d[c]=1+d.get(c,0)
return d
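# Added usage sketch (illustrative only, not part of the original snippet):
# histogram("banana")  # -> {'b': 1, 'a': 3, 'n': 2}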
|
4b643a05f97ae0829235b1296e37ee73ad8807f4
| 698,967
|
def _to_spdx(lic):
"""
we are munging this stuff from conda-build
d_license = {'agpl3': ['AGPL-3', 'AGPL (>= 3)', 'AGPL',
'GNU Affero General Public License'],
'artistic2': ['Artistic-2.0', 'Artistic License 2.0'],
'gpl2': ['GPL-2', 'GPL (>= 2)', 'GNU General Public License (>= 2)'],
'gpl3': ['GPL-3', 'GPL (>= 3)', 'GNU General Public License (>= 3)',
'GPL', 'GNU General Public License'],
'lgpl2': ['LGPL-2', 'LGPL (>= 2)'],
'lgpl21': ['LGPL-2.1', 'LGPL (>= 2.1)'],
'lgpl3': ['LGPL-3', 'LGPL (>= 3)', 'LGPL',
'GNU Lesser General Public License'],
'bsd2': ['BSD_2_clause', 'BSD_2_Clause', 'BSD 2-clause License'],
'bsd3': ['BSD_3_clause', 'BSD_3_Clause', 'BSD 3-clause License'],
'mit': ['MIT'],
}
"""
r_to_spdx = {
"AGPL-3": "AGPL-3.0-only",
"AGPL (>= 3)": "AGPL-3.0-or-later",
"Artistic License 2.0": "Artistic-2.0",
"GPL-2": "GPL-2.0-only",
"GPL (>= 2)": "GPL-2.0-or-later",
"GNU General Public License (>= 2)": "GPL-2.0-or-later",
"GPL-3": "GPL-3.0-only",
"GPL (>= 3)": "GPL-3.0-or-later",
"GNU General Public License (>= 3)": "GPL-3.0-or-later",
"LGPL-2": "LGPL-2.0-only",
"LGPL (>= 2)": "LGPL-2.0-or-later",
"LGPL-2.1": "LGPL-2.1-only",
"LGPL (>= 2.1)": "LGPL-2.1-or-later",
"LGPL-3": "LGPL-3.0-only",
"LGPL (>= 3)": "LGPL-3.0-or-later",
"BSD_2_clause": "BSD-2-Clause",
"BSD_2_Clause": "BSD-2-Clause",
"BSD 2-clause License": "BSD-2-Clause",
"BSD_3_clause": "BSD-3-Clause",
"BSD_3_Clause": "BSD-3-Clause",
"BSD 3-clause License": "BSD-3-Clause",
"Apache License 2.0": "Apache-2.0",
"CC BY-SA 4.0": "CC-BY-SA-4.0",
"Apache License (== 2.0)": "Apache-2.0",
"FreeBSD": "BSD-2-Clause-FreeBSD",
"Apache License (>= 2.0)": "Apache-2.0",
"CC0": "CC0-1.0",
"MIT License": "MIT",
"CeCILL-2": "CECILL-2.0",
"CC BY-NC-SA 4.0": "CC-BY-NC-SA-4.0",
"CC BY 4.0": "CC-BY-4.0",
}
return r_to_spdx.get(lic, lic)
|
66aaa5fa8428ec680ab347b9a33ed7f03f18281c
| 698,968
|
import numpy
def plane_fit(coordinates, center=None):
"""
    Fit an n-dimensional plane to a series of coordinates and return the
    normal to that plane.
:param coordinates: coordinates to fit a plane to
:type coordinates: :numpy:ndarray
    :param center: point on the plane used as origin for the normal;
        the geometric center by default.
:type center: :numpy:ndarray
:return: normal to the fitted plane
:rtype: :numpy:ndarray
"""
if center is None:
center = coordinates.mean(axis=0)
x = coordinates.T - center[:, None]
return numpy.linalg.svd(x)[0][:, -1]
|
4922b42c7c8a6c4faa7227386396ebc93bc1bafb
| 698,969
|
def linear_scale_between(x1: float, x2: float, y1: float, y2: float, x: float, *, clamp=True, easing_func=None) -> float:
"""Standard linear interpolation function, optionally clamped (between y1 and y2)"""
pct = (x - x1) / (x2 - x1)
if easing_func:
pct = easing_func(pct)
    if clamp:
        # relies on a module-level clamp() helper, looked up via globals()
        # because the boolean parameter shadows the name
        pct = globals()['clamp'](pct)
return y1 + pct * (y2 - y1)
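# Added usage sketch (illustrative only; the module-level clamp() helper shown here is an
# assumption, since the function looks one up via globals()):
# def clamp(v, lo=0.0, hi=1.0):
#     return max(lo, min(hi, v))
# linear_scale_between(0, 10, 0, 100, 5)   # -> 50.0
# linear_scale_between(0, 10, 0, 100, 15)  # -> 100.0 (pct clamped to 1.0)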
|
f488346a4fe07b82f1281b646c3a39171b5fe757
| 698,970
|
def make_dict_from_config(path, config_file):
"""
Function to make a dict from a configuration file
Parameters
---------------
path: str
path dir to the config file
config_file: str
config file name
Returns
----------
    dict with the config file info
"""
    # open and load the file here
    with open('{}/{}'.format(path, config_file), 'r') as ffile:
        line = ffile.read().splitlines()
# process the result
params = {}
for i, ll in enumerate(line):
if ll != '' and ll[0] != '#':
spla = ll.split('#')
lspl = spla[0].split(' ')
lspl = ' '.join(lspl).split()
n = len(lspl)
keym = ''
lim = n-2
for io, keya in enumerate([lspl[i] for i in range(lim)]):
keym += keya
if io != lim-1:
keym += '_'
params[keym] = (lspl[n-1], lspl[n-2], spla[1])
return params
|
fd928dde7bb9ea3d360237c34bc7c46f9f26b07a
| 698,971
|
def extract_domain(email_address):
"""
Given an email address, extract the domain name from it. This is done by finding the @ and
    then slicing the email address and returning everything found after the @. If no @ is found
then the entire email address string is returned.
:param email_address:
:return:
"""
email_address = email_address.lower()
# figure out the domain from the email address
try:
return email_address[email_address.index(u'@') + 1:]
except ValueError:
# no @ found, just use the whole string
return email_address
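# Added usage sketch (illustrative only, not part of the original snippet):
# extract_domain('User@Example.COM')  # -> 'example.com'
# extract_domain('no-at-sign')        # -> 'no-at-sign' (whole string when no @ is present)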
|
3e09c9cef431c09d126d8de6854990dc9351ef0d
| 698,972
|
def get_sweep_info(pul, pgf, group_idx, series_idx):
"""
Find the associated record in the StimTree from the PulseTree
"""
pul_series = pul["ch"][group_idx]["ch"][series_idx]
num_sweeps_in_recorded_data = pul_series["hd"]["SeNumberSweeps"]
pul_sweep = pul_series["ch"][0]
stim_idx = pul_sweep["hd"]["SwStimCount"] - 1
stim_sweep = pgf["ch"][stim_idx]
return stim_sweep, pul_sweep, num_sweeps_in_recorded_data
|
ebb2f125f9dbf45f80e088d63314a6fd10069a06
| 698,973
|
def check_solution(model, solution):
"""
    Helper function. If solution is None, attempts to get it from the model.
:param model:
:param solution:
:return:
"""
if solution is None:
try:
solution = model.solution
except AttributeError:
raise AttributeError('If not providing a solution object, please '
'provide a model with an embedded solution '
'(call model.solve())')
return solution
|
a03ee5e2033ee99caa0fd761f9d78d3d597a906b
| 698,974
|
def degTodms(ideg):
"""
Converts degrees to degrees:minutes:seconds
    :param ideg: object's coordinate in degrees
:type ideg: float
:return: degrees:minutes:seconds
:rtype: string
"""
if (ideg < 0):
s = -1
else:
s = 1
ideg = abs(ideg)
deg = int(ideg) + 0.
m = 60. * (ideg - deg)
minutes = int(m) + 0.
seconds = 60. * (m - minutes)
if s < 0:
dms = "-%02d:%02d:%06.3f" % (deg, minutes, seconds)
else:
dms = "%02d:%02d:%06.3f" % (deg, minutes, seconds)
return dms
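# Added worked example (illustrative only, not part of the original snippet):
# degTodms(-12.3456)  # -> '-12:20:44.160'  (0.3456 deg = 20.736 min; 0.736 min = 44.16 s)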
|
169cf4a89e7a2bd8526cf32acd1e88ec62d0237f
| 698,975
|
def build_command(input_device, rates, bits, buffers, sox_effects):
"""
:param input_device: device index e.g. 2
:param rates: [input, output] e.g. [48000, 48000]
:param bits: [input, output] e.g. [16, 16]
:param buffers: [input, sox] e.g. [512, 1024]
:param sox_effects: [left, right] e.g. [['equalizer', '80', '5q', '-6'], ['equalizer', '10000', '2q', '3']]
:return str: command
"""
rec = ['pipeq-recorder', str(input_device), str(rates[0]), str(bits[0]), str(buffers[0])]
# rec = ['cat', 'test.pcm']
tmp = "L1=$(mktemp -u);R1=$(mktemp -u);L2=$(mktemp -u);R2=$(mktemp -u);" \
"mkfifo $L1 $L2 $R1 $R2;" \
"trap 'rm -f $L1 $L2 $R1 $R2' EXIT;"
s2l = "sh -c " \
"'sox --buffer "+str(buffers[1])+"" \
" -t raw -b "+str(bits[0])+" -e signed -c 2 -r "+str(rates[0])+" $0" \
" -t raw -b "+str(bits[0])+" -e signed -c 1 -r "+str(rates[0])+" $1" \
" remix 1 "+' '.join(sox_effects[0])+"'" \
" $L1 $L2"
s2r = "sh -c " \
"'sox --buffer "+str(buffers[1])+"" \
" -t raw -b "+str(bits[0])+" -e signed -c 2 -r "+str(rates[0])+" $0" \
" -t raw -b "+str(bits[0])+" -e signed -c 1 -r "+str(rates[0])+" $1" \
" remix 2 "+' '.join(sox_effects[1])+"'" \
" $R1 $R2"
rec = "sh -c '"+' '.join(rec)+" | tee $0 > $1' $L1 $R1"
mix = "sox --buffer "+str(buffers[1])+" -M" \
" -t raw -b "+str(bits[0])+" -e signed -c 1 -r "+str(rates[0])+" $L2" \
" -t raw -b "+str(bits[0])+" -e signed -c 1 -r "+str(rates[0])+" $R2" \
" -t raw -b "+str(bits[1])+" -e signed -c 2 -r "+str(rates[1])+" -"
return tmp + s2l + ' & ' + s2r + ' & ' + rec + ' & ' + mix
|
8c17f55ef8ff3d0abc8ffbdbe85a2d98306a6b37
| 698,976
|
import argparse
def parse_arguments():
"""
Parse command line arguments
:return: Parsed arguments
"""
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--modeldir",
help="Folder the .tflite file is located.",
default="../tflite_model/")
parser.add_argument("--graph",
help="Name of the .tflite file.",
default="palm_detection_without_custom_op.tflite")
parser.add_argument("--labels",
help="Name of the labelmap file.",
default="palm_detection_labelmap.txt")
parser.add_argument("--min_conf",
help="Minimum confidence threshold for displaying detected hand palm.",
type=float,
default=0.7)
parser.add_argument("--input_filename",
help="Full filename of input file to process. Support formats: mp4, mp3, jpg, png",
required=True)
parsed_args = parser.parse_args()
return parsed_args
|
c93178c7101fadfd1c6d42c3986e9815129ceeb6
| 698,977
|
def prepare_results(cursor_description, rows):
"""
Generate result in JSON format with an entry consisting of key value pairs.
:param cursor_description: a tuple with query result columns
:param rows: list of returned sql query values
:return: dictionary
"""
if rows is None or len(rows) == 0:
return {"entries": None}
# List of column names from SQL result to use as dictionary keys
dt_column_keys = [column[0] for column in cursor_description]
# Build dictionary: key-value pairs consisting of column name - row value
entries_data_list = []
for row in rows:
entries_data_list.append(dict(zip(dt_column_keys, row)))
entries = {"entries": [entry for entry in entries_data_list]}
return entries
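# Added usage sketch (illustrative only; the tuples below mimic a DB-API cursor.description):
# prepare_results((('id',), ('name',)), [(1, 'a'), (2, 'b')])
# -> {'entries': [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]}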
|
8fcc48f0a732a200c65d27817349a0de1a5f1172
| 698,978
|
import os
def process_workspace_list(arguments, config, calling_dir = os.getcwd()):
"""
Processes a list of arguments from command line and resolves it to
a list of workspaces.
Parameters
----------
arguments : list of str
command line arguments to resolve to workspaces
    config : Config
        configuration object providing workspace and alias information
calling_dir : str
used to determine the default workspace if not explicitly specified
Returns
-------
list of str
a list of workspaces the arguments resolve to
"""
def resolve_default():
if len(config.workspaces.keys()) == 1:
# Default to the single workspace that exists...
return [w for w in config.workspaces.keys()]
else:
# Check if we can get a default based on what directory we are calling from :)
abs_call_dir = os.path.abspath(calling_dir)
def_results = []
for w in config.workspaces:
config_dir = os.path.abspath(config.workspaces[w]["workspace_root"])
rel_path = os.path.relpath(abs_call_dir, config_dir)
if ".." not in rel_path:
def_results.append(w)
return def_results
def resolve_all():
return [k for k in config.workspaces.keys()]
initial_workspaces = []
processed_aliases = []
final_workspaces = []
resolvable_aliases = {"all": resolve_all, "default": resolve_default}
def process_extension(workspaces, source=""):
for ws in workspaces:
if ws in initial_workspaces:
continue
if ws in processed_aliases:
print("warn: arrived at alias [{}] again from [{}], possible loop or redundant args.".format(ws, source))
continue
if ws not in config.aliases and ws not in config.workspaces and ws not in resolvable_aliases:
raise Exception("Error: unable to resolve [{}] to a worksapce or alias.".format(ws))
initial_workspaces.append(ws)
if len(arguments) <= 0:
initial_workspaces.append("default")
else:
process_extension(arguments, "<arguments>")
while len(initial_workspaces) > 0:
ws_now = initial_workspaces.pop(0)
if ws_now in config.aliases:
process_extension(config.aliases[ws_now]["to"], ws_now)
processed_aliases.append(ws_now)
elif ws_now in resolvable_aliases:
process_extension(resolvable_aliases[ws_now](), ws_now)
processed_aliases.append(ws_now)
else:
# We will add the ws to the final workspaces
if ws_now not in final_workspaces:
final_workspaces.append(ws_now)
return final_workspaces
|
d87672722a3f6ee568580efaa98cdafa8271a311
| 698,979
|
import torch
def _rescale(dat, mn_out=0, mx_out=511):
""" Rescales image intensities between mn_out and mx_out.
"""
dtype = dat.dtype
device = dat.device
dat[(dat == dat.min()) | ~torch.isfinite(dat) | (dat == dat.max())] = 0
# Make scaling to set image intensities between mn_out and mx_out
mn = torch.tensor([dat.min(), 1], dtype=dtype, device=device)[None, ...]
mx = torch.tensor([dat.max(), 1], dtype=dtype, device=device)[None, ...]
sf = torch.cat((mn, mx), dim=0)
    # Tensor.solve is the legacy (pre-1.9) PyTorch API; newer releases use torch.linalg.solve
    sf = torch.tensor([mn_out, mx_out], dtype=dtype, device=device)[..., None].solve(sf)[0].squeeze()
# Rescale
dat = dat*sf[0] + sf[1]
# Clamp
dat = dat.clamp_min(mn_out).clamp_max(mx_out)
return dat
|
3b502094e22fe97fadfeb4ff987774385442c52e
| 698,980
|
import pkg_resources
def walk(module_name, dirname, topdown=True):
"""
Copy of :func:`os.walk`, please refer to its doc. The only difference is that we walk in a package_resource
instead of a plain directory.
:type module_name: basestring
:param module_name: module to search in
:type dirname: basestring
:param dirname: base directory
:type topdown: bool
:param topdown: if True, perform a topdown search.
"""
    def rec_walk(root):
        """
        Recursively list subdirectories and filenames from the root.
        :param root: the root path
        :type root: basestring
        """
        dirnames = []
        filenames = []
        for name in pkg_resources.resource_listdir(module_name, root):
            # noinspection PyUnresolvedReferences
            fullname = root + "/" + name
            if pkg_resources.resource_isdir(module_name, fullname):
                dirnames.append(name)
            else:
                filenames.append(name)
        if topdown:
            yield root, dirnames, filenames
        for name in dirnames:
            # noinspection PyUnresolvedReferences
            for values in rec_walk(root + "/" + name):
                yield values
        if not topdown:
            # bottom-up: yield the root only after all of its subdirectories
            yield root, dirnames, filenames
    return rec_walk(dirname)
|
8a001a4673b9a123f4c4260f2226eff0c360b698
| 698,981
|
def get_flat_params(parameter_groups):
"""
:param parameter_groups:
:return: List of parameters pulled out of parameter groups
"""
parameter_list = []
for parameter_group in parameter_groups:
parameter_list += parameter_group['params']
return parameter_list
|
ef11030018341635594d07f56a23dd9aa3c1244e
| 698,982
|
def read_name_hash(read_name):
"""Takes a read name from an input bam and shortens & obfuscates it"""
return str(abs(hash(read_name)) % 10**9)
|
bc1e048925f0a0e710481656f0b90e4c290c2341
| 698,983
|
def _get_updated_parsed(job_item):
"""
    :param job_item: feed entry object with a ``published`` datetime attribute
    :return: the published time as a time.struct_time
"""
return job_item.published.timetuple()
|
a1377d3adcbae990727abed8ecdabfb9fb2d610b
| 698,984
|
import argparse
def getArguments():
"""Arguments for script."""
parser = argparse.ArgumentParser()
parser.add_argument("--name", default="sensorapp",
help="name of the application")
parser.add_argument("--app-port", type=int, default="8081",
help="port to run application server")
parser.add_argument("--host", default="localhost",
help="hostname of the Beetle server")
parser.add_argument("--port", "-p", type=int, default=3002,
help="port the server is runnng on")
parser.add_argument("--cert", "-c", type=str,
help="client certificate")
parser.add_argument("--key", "-k", type=str,
help="private key for client certificate")
parser.add_argument("--rootca", "-r",
help="root CA certificate")
parser.add_argument("--nocerts", "-x", action="store_true",
help="disable verification and use of client certificates")
return parser.parse_args()
|
3e9ce78bcf459a5f956b988265d0cc39834d5e41
| 698,985
|
def pressure_from_altitude(y):
"""
Calculate standard atmospheric pressure based on an altitude in m. The
basic formula can be found many places. For instance, Munson, Young, and
Okiishi, 'Fundamentals of Fluid Mechanics', p. 51.
Enter: y: altitude in m.
Exit: p: pressure in N/m/m.
"""
p0 = 101325 # Pa, standard pressure at sea level
L = 0.0065 # K/m, temperature lapse rate
T0 = 288.15 # K, reference temperature at sea level
    g = 9.80665  # m/s/s, standard gravity at sea level
# I've used the more authoritative values from CIPM-2007 for these constants
M = 0.02896546 # kg/mol, molar mass of dry air, from CIPM-2007
R = 8.314472 # J/(mol*K), universal gas constant, from CIPM-2007
# For exceptionally high altitudes, treat this as nearly zero.
if y >= T0 / L - 1:
y = T0 / L - 1
p = p0*(1-L*y/T0)**(g*M/(R*L))
return p
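# Added worked example (illustrative only, not part of the original snippet):
# pressure_from_altitude(0)     # -> 101325.0 (sea level)
# pressure_from_altitude(1000)  # -> roughly 8.99e4 Pa (standard atmosphere at 1 km)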
|
b8f7d42b28d3448ac1a0f62079a0ae4ed672f9e9
| 698,986
|
import pandas as pd
def _lsa_events_converter(events_file):
"""Make a model where each trial has its own regressor using least squares
all (LSA)
Parameters
----------
events_file : str
File that contains all events from the bold run
    Returns
    -------
events : DataFrame
A DataFrame in which each trial has its own trial_type
"""
events = pd.read_csv(events_file, sep='\t')
events['original_trial_type'] = events['trial_type']
for cond, cond_df in events.groupby('trial_type'):
cond_idx = cond_df.index
for i_trial, trial_idx in enumerate(cond_idx):
trial_name = '{0}_{1:04d}'.format(cond, i_trial+1)
events.loc[trial_idx, 'trial_type'] = trial_name
return events
|
4cc216bdf01f67dbefbd31005f0398cd217aa8a5
| 698,987
|
def get_dataset_location(bq):
"""Parse payload for dataset location"""
# Default BQ location to EU, as that's where we prefer our datasets
location = "EU"
# ... but the payload knows best
if "location" in bq:
location = bq["location"]
return location
|
071a5258a72b3a3a248c6e44b99b7ccc2890d3f5
| 698,989
|
def get_fws_and_tasks(workflow, fw_name_constraint=None, task_name_constraint=None):
"""
    Helper method: given a workflow, returns the fw_ids and task_ids that match name
constraints. Used in developing multiple powerups.
Args:
workflow (Workflow): Workflow
fw_name_constraint (str): a constraint on the FW name
task_name_constraint (str): a constraint on the task name
Returns:
a list of tuples of the form (fw_id, task_id) of the RunVasp-type tasks
"""
fws_and_tasks = []
for idx_fw, fw in enumerate(workflow.fws):
if fw_name_constraint is None or fw_name_constraint in fw.name:
for idx_t, t in enumerate(fw.tasks):
if task_name_constraint is None or task_name_constraint in str(t):
fws_and_tasks.append((idx_fw, idx_t))
return fws_and_tasks
|
28f4f2cdcc58b942ee3e0631c21bf2a3a052db35
| 698,990
|
def _get_tags(ws_info):
"""Get the tags relevant to search from the ws_info metadata"""
metadata = ws_info[-1]
if metadata.get('searchtags'):
if isinstance(metadata['searchtags'], list):
return metadata['searchtags']
else:
return [metadata['searchtags']]
else:
return []
|
f6922f92913446284545aa73cb2b8cd7139749e8
| 698,991
|
import inspect
import sys
def get_module_classes(module_name):
""" Get all the classes from a module """
clsmembers = inspect.getmembers(sys.modules[module_name], inspect.isclass)
return clsmembers
|
8049c8a5c0277c86e8c334c4e158eaa90aaa0992
| 698,992
|
def _str_bool(v):
"""convert a string rep of yes or true to a boolean True, all else to False"""
if (type(v) is str and v.lower() in ['yes', 'true']) or \
(type(v) is bool and bool(v)):
return True
return False
|
ba4663c402b94b00edb4b1a97a4ba4ed755caf85
| 698,993
|
def get_fixed_params():
""" Parameters that currently cannot be searched during HPO """
fixed_params = {'batch_size': 512, # The size of example chunks to predict on.
'n_cont_embeddings': 0, # How many continuous feature embeddings to use.
'norm_class_name': 'LayerNorm', # What kind of normalization to use on continuous features.
'column_embedding': True, # If True, 1/(n_shared_embs)th of every embedding will be reserved for a learned parameter that's common to all embeddings.
#'shared_embedding': False,
#'n_shared_embs': 8,
'orig_emb_resid': False, # If True, concatenate the original embeddings on top of the feature embeddings in the Transformer layers.
'one_hot_embeddings': False, # If True, one-hot encode variables whose cardinality is < max_emb_dim.
'drop_whole_embeddings': False, # If True, dropout pretends the embedding was a missing value. If false, dropout sets embed features to 0
'max_emb_dim': 8, # Maximum allowable amount of embeddings.
'base_exp_decay': 0.95, # Rate of exponential decay for learning rate, used during finetuning.
'encoders': {'CATEGORICAL': 'CategoricalOrdinalEnc', # How to "encode"(vectorize) each column type.
'DATETIME' : 'DatetimeOrdinalEnc',
'LATLONG' : 'LatLongQuantileOrdinalEnc',
'SCALAR' : 'ScalarQuantileOrdinalEnc',
'TEXT' : 'TextSummaryScalarEnc'},
'aug_mask_prob' : 0.4, # What percentage of values to apply augmentation to.
'num_augs' : 1, # Number of augmentations to add.
'pretext': 'BERTPretext', # What pretext to use when performing pretraining/semi-supervised learning.
'n_cont_features': 8, # How many continuous features to concatenate onto the categorical features
'fix_attention': False, # If True, use the categorical embeddings in the transformer architecture.
'epochs': 200, # How many epochs to train on with labeled data.
'pretrain_epochs': 200, # How many epochs to pretrain on with unlabeled data.
'epochs_wo_improve': 30, # How many epochs to continue running without improving on metric. aka "Early Stopping Patience"
'num_workers': 16, # How many workers to use for torch DataLoader.
'max_columns': 500, # Maximum number of columns TabTransformer will accept as input. This is to combat huge memory requirements/errors.
}
return fixed_params
|
186e786f96f2a81c88423fdcd27bf28aef97b511
| 698,995
|
def to_env_dict(config):
"""Convert configuration object to a flat dictionary."""
entries = {}
if config.has_value():
return {config.__to_env_var__(): config.node['value']}
if config.has_default():
return {config.__to_env_var__(): config.node['default']}
for k in config.node:
entries.update(to_env_dict(config[k]))
return entries
|
4c46f365e7b53d79d25dea7d062f9d985d30877d
| 698,996
|
import base64
import json
def encode_base64_json(data):
"""
Encode dict-like data into a base64 encoded JSON string.
This can be used to get dict-like data into HTTP headers / envvar.
"""
return base64.b64encode(bytes(json.dumps(data), 'utf-8'))
|
08b9d8568a59717173adf00658aad03bddb8df14
| 698,997
|
def conjugate_row(row, K):
"""
Returns the conjugate of a row element-wise
Examples
========
>>> from sympy.matrices.densetools import conjugate_row
>>> from sympy import ZZ
>>> a = [ZZ(3), ZZ(2), ZZ(6)]
>>> conjugate_row(a, ZZ)
[3, 2, 6]
"""
result = []
for r in row:
conj = getattr(r, "conjugate", None)
if conj is not None:
conjrow = conj()
else:
conjrow = r
result.append(conjrow)
return result
|
c4c5ccb03513c32e3bc9866ef6c3a370e70d01d6
| 698,998
|
import os
import sys
def get_proj_incdirs(proj_dir):
"""
This function finds the include directories
"""
proj_incdir = os.environ.get("PROJ_INCDIR")
incdirs = []
if proj_incdir is None:
if os.path.exists(os.path.join(proj_dir, "include")):
incdirs.append(os.path.join(proj_dir, "include"))
else:
sys.exit("ERROR: PROJ_INCDIR dir not found. Please set PROJ_INCDIR.")
else:
incdirs.append(proj_incdir)
return incdirs
|
f39728b8454efeef1e6729d05f4e6dd03110a90d
| 698,999
|
import os
def make_absolute_path(files, root):
"""Make paths absolute."""
for role, path in files.items():
files[role] = os.path.join(root, path)
return files
|
49970ca1c7d6eb4d2480fbe73093c40aec1400d6
| 699,000
|
import torch
def random_mask_tokens(args, inputs, tokenizer):
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
if len(inputs.shape) == 1:
inputs = inputs.unsqueeze(0)
labels = inputs.clone()
input_mask = (~inputs.eq(0)).to(torch.float)
masking_prob = 0.15
# Consider padding
masked_indices = torch.bernoulli(input_mask * masking_prob).bool()
labels[~masked_indices] = -1
indices_replaced = torch.bernoulli(input_mask * 0.8).bool() & masked_indices
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
indices_random = torch.bernoulli(input_mask * 0.5).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return inputs, labels
|
5522b3aeedad605eb2645a3c8938315ff59b0b7c
| 699,001
|
import math
def edgeweight_properties(graph, weight_label="weight"):
"""
Calculates properties of edge weights.
Parameters
----------
graph: nx.Graph
Graph to calculate the properties from
    weight_label: str
        name of the edge attribute that stores the weight
Returns
-------
max_weight: number
Maximum weights of an edge
min_weight: number
Minimum weight of an edge
num_of_zero_weights: int
Number of edges with zero weight
"""
max_weight = -math.inf
min_weight = math.inf
num_of_zero_weights = 0
for u, v, d in graph.edges(data=True):
weight = d[weight_label]
if weight > max_weight:
max_weight = weight
if weight < min_weight:
min_weight = weight
if weight == 0:
num_of_zero_weights += 1
return max_weight, min_weight, num_of_zero_weights
|
a22857d844c2235859c004bfac897b3b5c80feee
| 699,002
|
def timedelta_to_seconds(td):
"""
Converts a timedelta to total seconds.
(This is built-in in Python 2.7)
"""
# we ignore microseconds for this
if not td:
return None
return td.seconds + td.days * 24 * 3600
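# Added usage sketch (illustrative only, not part of the original snippet):
# from datetime import timedelta
# timedelta_to_seconds(timedelta(days=1, seconds=30))  # -> 86430
# timedelta_to_seconds(None)                           # -> None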
|
ef4ebd88581d8a2a1f64b9f940afbe22da8f55bc
| 699,003
|
import torch
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = torch.sum(losses)
if num_present == 0:
return 0 * total_loss
else:
return torch.div(total_loss, num_present)
|
4174dead8e2fc582633589713e076871a0631de1
| 699,004
|
def _feet_to_alt_units(alt_units: str) -> float:
"""helper method"""
if alt_units == 'm':
factor = 0.3048
elif alt_units == 'ft':
factor = 1.
else:
raise RuntimeError(f'alt_units={alt_units} is not valid; use [ft, m]')
return factor
|
95d5208741fdff275211b9c33a901dedaa65186e
| 699,005
|
def get_proper_state(job, state):
"""
Return a proper job state to send to server.
This function should only return 'starting', 'running', 'finished', 'holding' or 'failed'.
If the internal job.serverstate is not yet set, it means it is the first server update, ie 'starting' should be
sent.
:param job: job object.
:param state: internal pilot state (string).
:return: valid server state (string).
"""
if job.serverstate in ('finished', 'failed'):
pass
elif job.serverstate == "" and state != "finished" and state != "failed":
job.serverstate = 'starting'
elif state in ('finished', 'failed', 'holding'):
job.serverstate = state
else:
job.serverstate = 'running'
return job.serverstate
|
ba54c1f4eee99055e73099bc381ccbb69da4b183
| 699,006
|
def are_collinear(p1, p2, p3, tolerance=0.5):
"""return True if 3 points are collinear.
tolerance value will decide whether lines are collinear; may need
to adjust it based on the XY tolerance value used for feature class"""
x1, y1 = p1[0], p1[1]
x2, y2 = p2[0], p2[1]
x3, y3 = p3[0], p3[1]
res = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
    if -tolerance <= res <= tolerance:
        return True
    return False
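# Added usage sketch (illustrative only, not part of the original snippet):
# are_collinear((0, 0), (1, 1), (2, 2))  # -> True  (res == 0)
# are_collinear((0, 0), (1, 1), (2, 5))  # -> False (res == 3, outside tolerance)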
|
337065b80e1cc1810be263bccd9c8ae66c258ed3
| 699,007
|
def transformResults(threadCounts, values, function):
"""Apply the function to all of the measurements"""
res = {}
for bm in list(values.keys()):
res[bm] = []
for (nThreads, v) in zip(threadCounts, values[bm]):
            res[bm].append(None if v is None else function(v, nThreads))
return res
|
1fe7944479b045bebd403eeb888fe0c00b012291
| 699,008
|
import math
def coriolis_freq(lat):
"""Compute coriolis frequency.
https://en.wikipedia.org/wiki/Earth%27s_rotation#Angular_speed
    omega = (7.2921150 ± 0.0000001) x 10^-5 radians per second
    :param lat: Latitude in degrees.
"""
omega = 7.2921150e-5
theta = math.radians(lat)
f = 2 * omega * math.sin(theta)
return f
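# Added worked example (illustrative only, not part of the original snippet):
# coriolis_freq(45.0)  # -> about 1.03e-4 rad/s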
|
a5f40ef6b3862746905ab8e3cd4e77f1fdc1d484
| 699,009
|
def _msearch_success(response):
"""Return true if all requests in a multi search request succeeded
Parameters
----------
response : requests.models.Response
Returns
-------
bool
"""
parsed = response.json()
if 'responses' not in parsed:
return False
for result in parsed['responses']:
if result['status'] != 200:
return False
return True
|
acdac4408464120fdeb7f20a06f07aac6ca3809f
| 699,010
|
import json
def get_team_port(duthost, pc):
"""
Dump teamd info
Args:
duthost: DUT host object
pc: PortChannel name
"""
dut_team_cfg = duthost.shell("teamdctl {} config dump".format(pc))['stdout']
    dut_team_port = list(json.loads(dut_team_cfg)['ports'].keys())
return dut_team_port[0]
|
f60c18d1a4f8514ec0ee042a820eaec3902bfc24
| 699,012
|
from pathlib import Path
def _get_city(path, root):
"""Extract the city name of standard traffic4cast like data structure path
Args:
path (str): Traffic4cast path: [root + city + city_test + filename]
root (str): Root directory
Returns:
        str: city name extracted from the path
"""
city_path = Path(path.replace(root, ''))
city_name = city_path.parts[1]
return city_name
|
322d43c1c1f43d8d59f21a67c804fc343955816d
| 699,013
|
def reverse(graph):
"""replace all arcs (u, v) by arcs (v, u) in a graph"""
rev_graph = [[] for node in graph]
for node, _ in enumerate(graph):
for neighbor in graph[node]:
rev_graph[neighbor].append(node)
return rev_graph
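# Added usage sketch (illustrative only, not part of the original snippet):
# adjacency lists for the cycle 0 -> 1 -> 2 -> 0
# reverse([[1], [2], [0]])  # -> [[2], [0], [1]]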
|
5b1b0281df529676e90ba4d27d1ab554d4a50537
| 699,015
|
def conjugado (num):
"""Funcion que retorna el conjugado de un numero imaginario
(list 1D) -> list 1D"""
num1 = num[1] * -1
return (num[0], num1)
|
af1dfc52b5a8967ff7399f369c8ef7bf9dd2cc17
| 699,016
|
def _get_config_value_for_remote(ctx, remote, config, key):
"""
Look through config, and attempt to determine the "best" value to use
for a given key. For example, given::
config = {
'all':
{'branch': 'master'},
'branch': 'next'
}
_get_config_value_for_remote(ctx, remote, config, 'branch')
would return 'master'.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:param key: the name of the value to retrieve
"""
roles = ctx.cluster.remotes[remote] if ctx else None
if 'all' in config:
return config['all'].get(key)
elif roles:
for role in roles:
if role in config and key in config[role]:
return config[role].get(key)
return config.get(key)
|
30452cde0dcb1f09763a5b10550f59d649067b08
| 699,017
|
import random
def propose_any_node_flip(partition):
"""Flip a random node (not necessarily on the boundary) to a random part
"""
node = random.choice(tuple(partition.graph))
newpart = random.choice(tuple(partition.parts))
return partition.flip({node: newpart})
|
1ba9d747b92b707cad34c820abec9325913aca55
| 699,018
|
import string
def extract_words(text):
"""Return the words in a tweet, not including punctuation.
>>> extract_words('anything else.....not my job')
['anything', 'else', 'not', 'my', 'job']
>>> extract_words('i love my job. #winning')
['i', 'love', 'my', 'job', 'winning']
>>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
>>> extract_words("paperclips! they're so awesome, cool, & useful!")
['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
"""
s = ""
c = ''
for i in text:
if i not in string.ascii_letters:
i = ' '
s += i
return s.split()
|
cc0b7dbc548696ed74b48dec57b386fe38adfa41
| 699,019
|
def mods_to_step_size(mods):
"""
Convert a set of modifier keys to a step size.
:param mods: Modifier keys.
:type mods: :class:`tuple`[:class:`str`]
:return: Step size, by name.
:rtype: :class:`str`
"""
if "alt" in mods:
return "fine"
elif "shift" in mods:
return "coarse"
return "medium"
|
3c93fd11a8b5b0fad5cc26a35872e0616ebf7231
| 699,020
|
from typing import Optional
def contains_symbol(text: str, symbols: Optional[str] = None) -> bool:
"""If text contains a symbol in symbols, return True."""
if symbols is None:
for character in text:
if character.isascii() and (not character.isalnum()):
return True
return False
else:
for character in text:
if character in symbols:
return True
return False
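# Added usage sketch (illustrative only, not part of the original snippet):
# contains_symbol("hello!")               # -> True  ('!' is ASCII and not alphanumeric)
# contains_symbol("hello", symbols="#@")  # -> False (no listed symbol appears)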
|
22c6a090c93e81969fb1f4fbcb8bb88ce1bf5714
| 699,021
|
import re
def process(text, pattern = r'(<\/?[a-zA-Z]*>)|(\n)'):
"""
process - process SGML-styled text into preferred text
Inputs:
- text : raw text
- pattern : matching pattern to remove SGML tags
Outputs:
- text : processed text
"""
# remove SGML tags
text = re.sub(pattern = pattern, repl = '', string = text)
return text
|
87f16159e015df0341f654d0f46fe5334cd11a8d
| 699,022
|
import yaml
def read_parameter_file(parameter_file_path: str) -> dict:
"""
Reads the parameters from a yaml file into a dictionary.
Parameters
----------
parameter_file_path: Path to a parameter file.
Returns
-------
    params: Dictionary containing the parameters defined in the provided yaml file
"""
with open(parameter_file_path, 'r') as f:
params = yaml.safe_load(f)
return params
|
5f203bf596c2b1c39f14cea1e99e0c33c2f97ce2
| 699,023
|
import hashlib
def make_hash(to_hash: str) -> str:
""" Return a hash of to_hash. """
new_hash = hashlib.md5()
new_hash.update(to_hash.encode("utf-8"))
return str(new_hash.hexdigest())
|
7e800f7942df23256373c221428e5c24b65cabee
| 699,024
|
import os
import pkgutil
def find_document_issuers(issuers_dir):
"""
Given a path to an issuers directory, return a list of all the document issuer
names that are available.
"""
document_issuers_dir = os.path.join(issuers_dir, "documents")
return [
name
for _, name, is_pkg in pkgutil.iter_modules([document_issuers_dir])
if not is_pkg and not name.startswith("_")
]
|
1d177445628163d1e1b9833beb90af5a381021ca
| 699,025
|
def resolution(liste,point,n=3):
"""fonction permettant de savoir si les coordonnées d'un point sont trop proches des coordonnées du dernier point d'une liste"""
if len(liste)==0 :
        #if the list is empty
return True
else :
point_precedent=liste[-1]
if abs(point_precedent[0]-point[0])<=n and abs(point_precedent[1]-point[1])<=n :
            #if the point is too close to the previous point
return False
else :
            #if the point is far enough from the previous point
return True
|
401a3972b09f5959e7c7face45611539469e7a8f
| 699,027
|
def isDoor(case):
"""Teste si la case est une porte"""
if "special_type" in case.keys():
return case['special_type'] == "door"
else:
return False
|
54a8242a8139e98867a0b3cad8597c8fe36ca685
| 699,028
|
def spatial_scenario_selection(dataframe_1,
dataframe_2,
dataframe_1_columns,
dataframe_2_columns,
):
"""Intersect Polygons to collect attributes
Parameters
- dataframe_1 - First polygon dataframe
- dataframe_2 - Second polygon dataframe
- dataframe_1_columns - First polygon dataframe columns to collect
- dataframe_2_columns - Second polygon dataframe columns to collect
Outputs
data_dictionary - Dictionary of intersection attributes:
"""
intersection_dictionary = []
# create spatial index
dataframe_1_sindex = dataframe_1.sindex
total_values = len(dataframe_2.index)
for values in dataframe_2.itertuples():
intersected_polys = dataframe_1.iloc[list(
dataframe_1_sindex.intersection(values.geometry.bounds))]
for intersected_values in intersected_polys.itertuples():
if (
intersected_values.geometry.intersects(values.geometry) is True
) and (
values.geometry.is_valid is True
) and (
intersected_values.geometry.is_valid is True
):
dataframe_1_dictionary = dict([(v,getattr(intersected_values,v)) for v in dataframe_1_columns])
dataframe_2_dictionary = dict([(v,getattr(values,v)) for v in dataframe_2_columns])
geometry_dictionary = {"geometry":values.geometry.intersection(intersected_values.geometry)}
intersection_dictionary.append({**dataframe_1_dictionary, **dataframe_2_dictionary,**geometry_dictionary})
print (f"* Done with Index {values.Index} out of {total_values}")
return intersection_dictionary
|
7c7d380d141244543d9a0a840ead456996eda006
| 699,030
|
def _beam_fit_fn_3(z, z0, Theta):
"""Fitting function for z0 and Theta."""
return (Theta*(z-z0))**2
|
76955c6464ae7a33927986d146e9000e9a45c12b
| 699,031
|
def stations_by_river(stations):
"""Returns a dictionary containing rivers (keys), and the stations on each river (values)"""
rivers = {}
for station in stations:
# only add the river if station.river has been set
river = station.river
if river is not None:
# add the station to the river key in the dictionary
# if the key is not in the dictionary, add it
if river in rivers:
rivers[river].append(station)
else:
rivers[river] = [station]
return rivers
|
7feef52d4d5c14109807e1c2e559f206b420c5cc
| 699,032
|
def json_citation_for_ij(query, score, doi):
"""
    Because we are parsing the PDF, we cannot ensure the validity of each subfield obtained from the parser (CERMINE), i.e. title, journal, volume, authors, etc.
Instead, we join all the information obtained from the parser in plain text.
This plain text is the same used in the field query.bibliographic in crossref.
If the crossref query (see query_crossref_with_citation_list) gives a first match with a high score (see score_threshold) containing a DOI, we store that DOI.
If the score is low, or the match doesn't have a doi, then our doi is not populated, or null.
The output keys are:
unstructured,
score,
doi,
The idea is that if we store a DOI is because we are pretty sure it's the right one. If not, we just store the unstructured text and the score.
"""
out = {}
out["unstructured"] = query
if doi:
out["doi"] = doi
if score:
out["score"] = score
return out
|
1f226db5420b61cd40b88015ec727ebd84a08138
| 699,033
|
def find_combination(value, stream):
"""
>>> find_combination(127, [35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309, 576])
62
"""
x = 0
y = 2
while True:
total = sum(stream[x:y])
if total < value:
y += 1
elif total > value:
x += 1
else:
return min(stream[x:y]) + max(stream[x:y])
|
17a61c6918ce78f283bc1b641edb4c235746d428
| 699,034
|
def data_pattern(data):
"""
"""
data = list(data.items())
data.sort(key=lambda x: x[1])
data.sort(key=lambda row:
[[r[i] for n, r in data].count(x)
for i, x in enumerate(row[1])])
for c in range(len(data[0][1])):
values = {row[c]: i for i, (name, row) in enumerate(data)}
for name, row in data:
row[c] = values[row[c]]
return tuple(zip(*data))
|
f3805ab18f92e60e858638f2ec60e660e1819c60
| 699,035
|
def _range_checker(ip_check, first, last):
"""
Tests whether an ip address is within the bounds of the first and last address.
:param ip_check: The ip to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
"""
    return first <= ip_check <= last
|
924e617374fdbc4cabc28cb18e63c45192da3b4c
| 699,037
|
def crr_m(ksigma, msf, crr_m7p5):
"""Cyclic resistance ratio corrected for m_w and confining stress"""
return crr_m7p5 * ksigma * msf
|
76b271ac72b29ec583aa8a23aeb22e8dbf248365
| 699,039
|
import re
def concatenate_lines(lines, pattern):
""" concatenate lines
>>> import re
>>> pattern = re.compile(r"(^\s+)(.+?)")
>>> lines = [u"a", u" b", u" c", u" d", u"e"]
>>> concatenate_lines(lines, pattern)
[u'a', u' b c d', u'e']
>>> lines = [u"a", u" b", u" c", u" d", u"e"]
>>> concatenate_lines(lines, pattern)
[u'a', u' b', u' c d', u'e']
>>> lines = [u"a", u" b", u" c", u" d", u"e"]
>>> concatenate_lines(lines, pattern)
[u'a', u' b', u' c', u' d', u'e']
>>> lines = [u"a", u" b", u"c", u" d", u" e"]
>>> concatenate_lines(lines, pattern)
[u'a', u' b', u'c', u' d e']
>>> lines = [u"a", u" b", u" c", u" d", u" e"]
>>> concatenate_lines(lines, pattern)
[u'a', u' b c d', u' e']
>>> pattern = re.compile(r"(^\s*)(.+?)")
>>> lines = [u"a", u"b", u"c", u" d", u"e"]
>>> concatenate_lines(lines, pattern)
[u'a b c', u' d', u'e']
"""
_lines, prev_prefix = [], None
for line in lines:
match = re.search(pattern, line)
if match:
prefix, text = match.groups()
if prev_prefix == prefix:
_lines[-1] = u"{0} {1}".format(_lines[-1].rstrip(), text)
else:
_lines.append(line)
prev_prefix = prefix
else:
_lines.append(line)
prev_prefix = None
return _lines
|
eb21d40771d16d33754aceab106770c8047a83fa
| 699,040
|
from bs4 import BeautifulSoup
def parse_words(html):
"""解析 html 源码,返回汉字列表"""
soup = BeautifulSoup(html)
a_list = soup.find_all('a', attrs={'target': '_blank'})
return [x.text for x in a_list if x.text]
|
629ccc8be01699ea747061c3340772569109d109
| 699,041
|
def translate_status(sbd_status):
"""Translates the sbd status to fencing status.
Key arguments:
sbd_status -- status to translate (string)
Return Value:
status -- fencing status (string)
"""
status = "UNKNOWN"
# Currently we only accept "clear" to be marked as online. Eventually we
# should also check against "test"
online_status = ["clear"]
offline_status = ["reset", "off"]
if any(online_status_element in sbd_status \
for online_status_element in online_status):
status = "on"
if any(offline_status_element in sbd_status \
for offline_status_element in offline_status):
status = "off"
return status
|
2274ff63aea9249cecacf455598a57fba1245ace
| 699,042
|
import math
def entropy(data):
"""
Calculate informational entropy.
"""
entropy = 0.0
frequency = {}
for instance in data:
p_instance = int(round(instance/5) * 5)
if p_instance in frequency:
frequency[p_instance] += 1
else:
frequency[p_instance] = 1
for freq in frequency.values():
entropy += (-freq/len(data)) * math.log(float(freq)/len(data), 2)
return entropy
|
4b96c229e4cc0318a764990569d2951003447a72
| 699,044
|
def plot_scatter(ax, prng, nb_samples=100):
"""Scatter plot."""
for mu, sigma, marker in [(-0.5, 0.75, "o"), (0.75, 1.0, "s")]:
x, y = prng.normal(loc=mu, scale=sigma, size=(2, nb_samples))
ax.plot(x, y, ls="none", marker=marker)
ax.set_xlabel("X-label")
ax.set_title("Axes title")
return ax
|
02f23849ed4f2dded875866027eead00ebd1f0dc
| 699,045
|
def s2c_stereographic(sph):
"""
Stereographic projection from the sphere to the plane.
"""
u = sph[..., 0]
v = sph[..., 1]
w = sph[..., 2]
return (u + 1j*v)/(1+w)
|
1aff6cf6accd6bb26c647f014dc964404e84b979
| 699,046
|
def text(node):
"""
Get all the text of an Etree node
Returns
-------
str
"""
return ''.join(node.itertext())
|
f5000a6220da74059230a499dc2b48057e5c4ada
| 699,047
|
from typing import Union
from typing import Callable
def fully_qualified_name(thing: Union[type, Callable]) -> str:
"""Construct the fully qualified name of a type."""
return thing.__module__ + '.' + thing.__qualname__
|
eacbffdcda78fa38667af0b9d7bc0c53c2fbdb1f
| 699,048
|
def remove_whitespace(text):
# type: (str) -> str
"""strips all white-space from a string"""
if text is None:
return ""
return "".join(text.split())
|
747538de63b11e49d498b2f4ccb8286975019ec8
| 699,049
|
import re
def address_line1(a):
"""Return only the main part of a multipart address
Warnings:
- Only works on canonicalized addresses. Call canonicalize() first.
- Returns addresses that are incorrect!
- Only use the output of this function for fuzzy matching.
>>> address_line1('1910 south magnolia avenue suite 101, los angeles, ca 90007')
'1910 magnolia, los angeles, ca 90007'
>>> a = '9201 W. Sunset Blvd., Suite 812\\nWest Hollywood, Ca. 90069'
>>> address_line1(canonicalize(a))
'9201 sunset, west hollywood, ca. 90069'
>>> a = '45104 10th St W Ste A, Lancaster, CA 93534'
>>> address_line1(canonicalize(a))
'45104 10th, lancaster, ca 93534'
"""
a = re.sub(r",? suite [0-9a-z]+,", ",", a)
s = a.split(", ", 1)
if len(s) == 2:
address = s[0]
address = re.sub(r" east$", r"", address)
address = re.sub(r"^east ", r"", address)
address = re.sub(r" east ", r" ", address)
address = re.sub(r" west$", r"", address)
address = re.sub(r"^west ", r"", address)
address = re.sub(r" west ", r" ", address)
address = re.sub(r" north$", r"", address)
address = re.sub(r"^north ", r"", address)
address = re.sub(r" north ", r" ", address)
address = re.sub(r" south$", r"", address)
address = re.sub(r"^south ", r"", address)
address = re.sub(r" south ", r" ", address)
a = f"{address}, {s[1]}"
a = a.replace(" avenue,", ",")
a = a.replace(" boulevard,", ",")
a = a.replace(" center,", ",")
a = a.replace(" estates,", ",")
a = a.replace(" parkway,", ",")
a = a.replace(" road,", ",")
a = a.replace(" route,", ",")
a = a.replace(" suite,", ",")
a = a.replace(" street,", ",")
a = a.replace(" way,", ",")
return a
|
453e68ed2889e899ef339def772191c32f7997bc
| 699,050
|
from typing import Dict
def _invert(mapping: Dict) -> Dict:
"""Invert dictionary {k: v} -> {v: k}."""
return {target: source for source, target in mapping.items()}
|
013894d56e95a5df273a5bed6a7acc9c49c18d10
| 699,051
|
def _options_handler(request):
"""Request handler for OPTIONS requests
This is a request handler suitable for return from
_get_handler_for_request. It returns a 200 and an empty body.
Args:
request (twisted.web.http.Request):
Returns:
Tuple[int, dict]: http code, response body.
"""
return 200, {}
|
319b083565551f2512a71bb671dd76cfd76063dc
| 699,052
|
import os
def GetEbuildPathsFromSymLinkPaths(symlinks):
"""Reads the symlink(s) to get the ebuild path(s) to the package(s).
Args:
symlinks: A list of absolute path symlink/symlinks that point
to the package's ebuild.
Returns:
A dictionary where the key is the absolute path of the symlink and the value
is the absolute path to the ebuild that was read from the symlink.
Raises:
ValueError: Invalid symlink(s) were provided.
"""
# A dictionary that holds:
# key: absolute symlink path
# value: absolute ebuild path
resolved_paths = {}
# Iterate through each symlink.
#
# For each symlink, check that it is a valid symlink,
# and then construct the ebuild path, and
# then add the ebuild path to the dict.
for cur_symlink in symlinks:
if not os.path.islink(cur_symlink):
raise ValueError('Invalid symlink provided: %s' % cur_symlink)
# Construct the absolute path to the ebuild.
ebuild_path = os.path.realpath(cur_symlink)
if cur_symlink not in resolved_paths:
resolved_paths[cur_symlink] = ebuild_path
return resolved_paths
|
22ae73b3939ec608a9392853031961f9e22c3234
| 699,053
|
def kind(event):
"""
Finds the type of an event
:param event: the event
:return: the type of the event
"""
return event.type
|
68f0170eac9fc06f954542769dcd0d4ef974e725
| 699,054
|
def select(*_):
"""
Always return None
"""
return None
|
9f29559a50440143d9e3fe46be766590516f1fc4
| 699,055
|
import struct
import os
def genSerial():
"""
Generate a (hopefully) unique integer usable as an SSL certificate serial.
"""
return abs(struct.unpack('!l', os.urandom(4))[0])
|
bafc3d8145ff876816bbd224fee0850934ab4c31
| 699,056
|
def modular_power(a, n, p):
"""
    Compute a ** n % p
if n == 0:
return 1
elif n == 1:
return a % p
temp = a * a % p
if n & 1:
return a % p * modular_power(temp, n // 2, p) % p
else:
return (modular_power(temp, n // 2, p)) % p
    Source: https://blog.csdn.net/qq_36921652/article/details/79368299
"""
return pow(a, n, p)
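# Added worked example (illustrative only, not part of the original snippet):
# modular_power(2, 10, 1000)  # -> 24  (2**10 == 1024; 1024 % 1000 == 24)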
|
f9f234ec6532e13fcd9d393163933967ae6e7da7
| 699,057
|
def is_correct_type(item, expected_type):
"""Function for check if a given item has the excpected type.
returns True if the expected type matches the item, False if not
Parameters:
-item: the piece of data we are going to check.
-expected_type: the expected data type for item.
"""
    if item is not None:  # if we are not dealing with a missing value
if expected_type in {int, float, (int, float)}:
try:
                if (float(item)*10%10) == 0:  # if the number has no fractional part
if expected_type in {int, (int, float)}:
return True
elif expected_type == float:
return False
elif expected_type in {float, (int, float)}:
return True
            except ValueError:  # This means that the value was neither an integer nor a float
return False
elif expected_type == bool and item.strip().upper() in {'TRUE', 'FALSE'}:
return True
elif expected_type == str and item.strip().upper() not in {'TRUE', 'FALSE'}:
try:
float(item)
return False
            except ValueError:  # This means that we could not convert the item to float, which means it is not a numeric value.
return True
else:
return False
|
37d23e2c7dc1e630d8ead99fde09250196bd664e
| 699,058
|
def is_person(possible_person:dict):
"""Helper for getting party ID"""
return not possible_person.get('value',{}).get('entityRepresentation',{}).get('value',{}).get('personOtherIdentification') is None
|
66a193cc491296fc94840c684a7066d5f1541e54
| 699,059
|
import sys
def python_lt(major, minor=None, micro=None):
"""Returns true if the python version is less than the given major.minor.patch version."""
if sys.version_info.major < major:
return True
elif sys.version_info.major == major:
if minor is not None:
if sys.version_info.minor < minor:
return True
elif sys.version_info.minor == minor:
if micro is not None:
if sys.version_info.micro < micro:
return True
return False
|
24634c7565078a5fc4aa1f0e8f27ecb650e1dccb
| 699,060
|
def cells_number(rc):
"""
Calculates number of cells in each frame.
    Intended for use on 'cells' or subsets of 'cells' tables.
"""
    # note: DataFrame.sort() was removed in modern pandas; sort_values() is the current API
    return rc[['frame', 'cell_id']].groupby('frame').agg(len).reset_index().sort_values('frame')['cell_id'].values
|
91725cc352de6a1aa31cf4f82301ed8de6e11bb4
| 699,061
|
def to_cuda(data):
"""
Move an object to CUDA.
This function works recursively on lists and dicts, moving the values
inside to cuda.
Args:
data (list, tuple, dict, torch.Tensor, torch.nn.Module):
The data you'd like to move to the GPU. If there's a pytorch tensor or
model in data (e.g. in a list or as values in a dictionary) this
function will move them all to CUDA and return something that matches
the input in structure.
Returns:
list, tuple, dict, torch.Tensor, torch.nn.Module:
Data of the same type / structure as the input.
"""
# the base case: if this is not a type we recognise, return it
return data
|
28186b60bd7509d3df76646fbf41babe1a4f76e4
| 699,062
|
import numpy
def coordinates(n):
"""
Generate a 1D array with length n,
which spans [-0.5,0.5] with 0 at position n/2.
See also docs for numpy.mgrid.
:param n: length of array to be generated
:return: 1D numpy array
"""
n2 = n // 2
if n % 2 == 0:
return numpy.mgrid[-n2:n2] / n
return numpy.mgrid[-n2 : n2 + 1] / n
|
1371601f991432f1bad0a82fb741f050d9346d4e
| 699,063
|
def max_wkend_patterns_init(M):
"""
Compute maximum number of weekend worked patterns
:param M:
:return:
"""
max_patterns = 2 ** (2 * M.n_weeks.value)
return max_patterns
|
bc3cc554c858d80bbe5f33c3deaf87465ddfe657
| 699,064
|
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
    if s and s[-1] == '\n':
indented += '\n'
return indented
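# Added usage sketch (illustrative only, not part of the original snippet):
# indent("a\nb\n")           # -> '    a\n    b\n'
# indent("a\nb", shift=2)    # -> '        a\n        b'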
|
34ab969f133429959463903fe7e86e18ee644f20
| 699,066