| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def add_units_to_query(df, udict=None):
    """
    Attach or convert units on the columns of a table-like query result.

    ``udict`` maps column names to desired units; names not present in
    ``df.colnames`` are skipped. If ``udict`` is None the table is returned
    unchanged.
    """
    if udict is None:
        return df
    for k, u in udict.items():
        if k not in df.colnames:
            continue
        try:
            df[k].unit
        except Exception as e:
            print(e)
            setattr(df[k], 'unit', u)
        else:
            df[k] *= u / df[k].unit  # TODO: do this conversion in-place
    return df
| 5,344,600
|
def update_lr(it_lr, alg, test_losses, lr_info=None):
"""Update learning rate according to an algorithm."""
if lr_info is None:
lr_info = {}
if alg == 'seung':
threshold = 10
if 'change' not in lr_info.keys():
lr_info['change'] = 0
if lr_info['change'] >= 4:
return it_lr, lr_info
# Smooth test_losses then check to see if they are still decreasing
if len(test_losses) > threshold:
smooth_test = signal.savgol_filter(np.asarray(test_losses), 3, 2)
check_test = np.all(np.diff(smooth_test)[-threshold:] < 0)
if check_test:
it_lr = it_lr / 2.
lr_info['change'] += 1
return it_lr, lr_info
elif alg is None or alg == '' or alg == 'none':
return it_lr, lr_info
else:
raise NotImplementedError('No routine for: %s' % alg)
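# A minimal usage sketch (assumes `from scipy import signal` and
# `import numpy as np`, which the body above relies on): with enough
# monotonically decreasing test losses, the 'seung' rule halves the rate.
losses = [1.0 - 0.01 * i for i in range(20)]
new_lr, info = update_lr(1e-3, 'seung', losses)
print(new_lr, info)   # 0.0005 {'change': 1}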
| 5,344,601
|
def ocr_page_image(
doc_path,
page_num,
lang,
**kwargs
):
"""
    OCR a single-image document (jpg, jpeg or png).
    On success returns an ``mglib.path.PagePath`` instance.
"""
logger.debug("OCR image (jpeg, jpg, png) document")
page_path = PagePath(
document_path=doc_path,
page_num=page_num,
step=Step(1),
# jpeg, jpg, png are 1 page documents
page_count=1
)
notify_pre_page_ocr(
page_path,
page_num=page_num,
lang=lang,
file_name=doc_path.file_name,
**kwargs
)
# resize and eventually convert (png -> jpg)
resize_img(
page_path,
media_root=settings.MEDIA_ROOT
)
extract_txt(
page_path,
lang=lang,
media_root=settings.MEDIA_ROOT
)
notify_txt_ready(
page_path,
page_num=page_num,
lang=lang,
file_name=doc_path.file_name,
**kwargs
)
# First quickly generate preview images
for step in Steps():
page_path.step = step
resize_img(
page_path,
media_root=settings.MEDIA_ROOT
)
# reset page's step
page_path.step = Step(1)
# Now OCR each image
for step in Steps():
if not step.is_thumbnail:
extract_hocr(
page_path,
lang=lang,
media_root=settings.MEDIA_ROOT
)
notify_hocr_ready(
page_path,
page_num=page_num,
lang=lang,
# step as integer number
step=step.current,
file_name=doc_path.file_name,
**kwargs
)
return page_path
| 5,344,602
|
def factors(n):
"""
return set of divisors of a number
"""
step = 2 if n%2 else 1
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(sqrt(n))+1, step) if n % i == 0)))
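# Usage sketch (the body above assumes `from functools import reduce`
# and `from math import sqrt`); set iteration order may vary.
print(factors(28))   # {1, 2, 4, 7, 14, 28}
print(factors(15))   # {1, 3, 5, 15}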
| 5,344,603
|
def round_decimal(x, digits=0):
    """Round a float to ``digits`` decimal places using ROUND_HALF_UP.
    Parameters
    ----------
    x : float
        Value to round.
    digits : int
        Number of decimal places to keep.
    Returns
    ----------
    int if ``digits`` is 0, otherwise float
    """
    x = decimal.Decimal(str(x))
    if digits == 0:
        return int(x.quantize(decimal.Decimal("1"), rounding='ROUND_HALF_UP'))
    # Both branches of the original if/else built the same exponent string.
    string = '1e' + str(-1 * digits)
    return float(x.quantize(decimal.Decimal(string), rounding='ROUND_HALF_UP'))
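# Usage sketch: unlike the built-in round(), which is affected by binary
# float representation (round(2.675, 2) == 2.67), ties are rounded up on
# the decimal string representation.
print(round_decimal(2.675, 2))   # 2.68
print(round_decimal(2.5))        # 3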
| 5,344,604
|
def basic_gn_stem(model, data, **kwargs):
"""Add a basic ResNet stem (using GN)"""
dim = 64
p = model.ConvGN(
data, 'conv1', 3, dim, 7, group_gn=get_group_gn(dim), pad=3, stride=2
)
p = model.Relu(p, p)
p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride=2)
return p, dim
| 5,344,605
|
def log_scale(start, end, num):
    """Simple wrapper to generate a list of numbers equally spaced in log space
    Parameters
    ----------
    start: float
        Initial number
    end: float
        Final number
    num: int
        Number of points in the list
    Returns
    -------
    list: 1d array
        Numbers spanning start to end, equally spaced in log space
    """
return np.logspace(np.log10(start), np.log10(end), num = num)
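# Usage sketch (assumes `import numpy as np`):
print(log_scale(1, 1000, 4))   # -> [1., 10., 100., 1000.]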
| 5,344,606
|
def test_all_nodes_masters():
"""
    Set a list of nodes with random masters/slaves config and it should be possible
    to iterate over all of them.
"""
n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.1", "port": 7001}])
n.initialize()
nodes = [node for node in n.nodes.values() if node['server_type'] == 'master']
for node in n.all_masters():
assert node in nodes
| 5,344,607
|
def init_test_subdirs(test_name):
"""Create all necessary test sub-directories if they don't already exist"""
dirs_to_create = ['input', 'logs', 'output', 'src', 'test_files', 'work']
for dd in dirs_to_create:
adir = TEST+"tests/"+test_name+'/'+dd
if not os.path.exists(adir):
os.mkdir(adir)
# create the two files that are imported before and after running the gear in run.py
files_to_create = ['start.py', 'finish.py']
for ff in files_to_create:
fromfile = TEST+'tests/test/src/'+ff
tofile = TEST+"tests/"+test_name+'/src/'+ff
if not os.path.exists(tofile):
shutil.copy(fromfile,tofile)
    msg = 'Created directories: ' + ' '.join(d for d in dirs_to_create)
print(msg)
LOG.info(msg)
print('\nHere is what is inside "' + test_name + '"')
sp.run(['ls',TEST + "tests/" + test_name])
| 5,344,608
|
def measure_xtran_params(neutral_point, transformation):
"""
Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a determined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
    :return: rotx, roty, rotz : Matrix 4x4 floats, rotation matrices around x, y and z
    :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
    # r2c_xtran / c2r_xtran (full robot<->camera transformations) are computed below but currently commented out
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
rotxyz = np.dot(np.dot(rotz, roty), rotx) # determine transformation matrix after rotate sequently around x, y, z
neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
# c2r_xtran = np.linalg.inv(r2c_xtran)
return rotx, roty, rotz, tranl
| 5,344,609
|
def getPrefix(routetbl, peer_logical):
""" FUNCTION TO GET THE PREFIX """
for route in routetbl:
if route.via == peer_logical:
return route.name
else:
pass
| 5,344,610
|
def Logger_log(level, msg):
"""
Logger.log(level, msg)
logs a message to the log.
:param int level: the level to log at.
:param str msg: the message to log.
"""
return _roadrunner.Logger_log(level, msg)
| 5,344,611
|
def setup(bot):
"""Entry point for loading cogs. Required for all cogs"""
bot.add_cog(ConnectFour(bot))
| 5,344,612
|
def p4_system(cmd):
"""Specifically invoke p4 as the system command. """
real_cmd = p4_build_cmd(cmd)
expand = not isinstance(real_cmd, list)
retcode = subprocess.call(real_cmd, shell=expand)
if retcode:
raise CalledProcessError(retcode, real_cmd)
| 5,344,613
|
def obj_test(**field_tests: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
"""Return a lambda that tests for dict with string keys and a particular type for each key"""
def test(dat: typing.Any) -> bool:
type_test(dict)(dat)
dom_test = type_test(str)
for dom, rng in dat.items():
dom_test(dom)
if dom not in field_tests:
continue
rng_test = field_tests[dom]
rng_test(rng)
missing = set(field_tests.keys()) - set(dat.keys())
if missing:
raise Exception(f"{dat!r} lacks fields {missing}")
return True
return test
| 5,344,614
|
def dump_yaml(file_path, data):
""" Writes data to a YAML file and replaces its contents"""
with open(file_path, 'w+') as usernames_yaml:
yaml.dump(data, usernames_yaml)
| 5,344,615
|
def hist2D(x, y, xbins, ybins, **kwargs):
""" Create a 2 dimensional pdf vias numpy histogram2d"""
H, xedg, yedg = np.histogram2d(x=x, y=y, bins=[xbins,ybins], density=True, **kwargs)
xcen = (xedg[:-1] + xedg[1:]) / 2
ycen = (yedg[:-1] + yedg[1:]) / 2
return xcen, ycen, H
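# Usage sketch with toy data; the bin centres and the normalised density
# matrix are ready for e.g. plt.pcolormesh.
import numpy as np
x = np.random.normal(size=1000)
y = np.random.normal(size=1000)
xc, yc, H = hist2D(x, y, xbins=20, ybins=20)
print(xc.shape, yc.shape, H.shape)   # (20,) (20,) (20, 20)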
| 5,344,616
|
def generate_ngram_dict(filename, tuple_length):
"""Generate a dict with ngrams as key following words as value
:param filename: Filename to read from.
:param tuple_length: The length of the ngram keys
:return: Dict of the form {ngram: [next_words], ... }
"""
def file_words(file_pointer):
"""Generator for words in a file"""
for line in file_pointer:
for word in line.split():
yield word
ngrams = defaultdict(lambda: set())
with open(filename, 'r') as fp:
word_list = []
for word in file_words(fp):
if len(word_list) < tuple_length:
word_list.append(word)
continue
ngrams[tuple(word_list)].add(word)
word_list = word_list[1:] + [word]
return {key: tuple(val) for key, val in ngrams.items()}
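# Usage sketch with a tiny hypothetical corpus (the body above assumes
# `from collections import defaultdict`); the order inside the value
# tuples is not guaranteed because they come from sets.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("the cat sat on the cat mat")
    corpus_path = tmp.name
print(generate_ngram_dict(corpus_path, 2))
# e.g. {('the', 'cat'): ('sat', 'mat'), ('cat', 'sat'): ('on',),
#       ('sat', 'on'): ('the',), ('on', 'the'): ('cat',)}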
| 5,344,617
|
def pytest_addoption(parser):
"""Add pytest-bdd options."""
add_bdd_ini(parser)
cucumber_json.add_options(parser)
generation.add_options(parser)
gherkin_terminal_reporter.add_options(parser)
| 5,344,618
|
def get_end_point(centerline, offset=0):
"""
Get last point(s) of the centerline(s)
Args:
centerline (vtkPolyData): Centerline(s)
offset (int): Number of points from the end point to be selected
Returns:
        centerline_end_points (list): Points corresponding to the end of each centerline.
"""
centerline_end_points = []
for i in range(centerline.GetNumberOfLines()):
line = extract_single_line(centerline, i)
centerline_end_points.append(line.GetPoint(line.GetNumberOfPoints() - 1 - offset))
return centerline_end_points
| 5,344,619
|
def set_env_vars(args):
"""
From the user's input, set the right environmental
variables to run the container.
"""
# Get the parameters from the command line.
reference_data_path = args.reference_data_path
test_data_path = args.test_data_path
results_dir = args.results_dir
# If they are empty, try to get them from the Python file.
param_file = args.parameters
if not reference_data_path:
reference_data_path = get_parameter_from_file(param_file, 'reference_data_path')
if not test_data_path:
test_data_path = get_parameter_from_file(param_file, 'test_data_path')
if not results_dir:
results_dir = get_parameter_from_file(param_file, 'results_dir')
# Need to make sure paths are valid before actually setting the environmental variables.
if not os.path.exists(reference_data_path):
msg = '{} does not exist.'.format(reference_data_path)
raise IOError(msg)
if not os.path.exists(test_data_path):
msg = '{} does not exist.'.format(test_data_path)
raise IOError(msg)
if not os.path.exists(results_dir):
os.makedirs(results_dir, 0o775)
# Make the paths absolute.
# Docker needs this.
reference_data_path = os.path.abspath(reference_data_path)
test_data_path = os.path.abspath(test_data_path)
results_dir = os.path.abspath(results_dir)
# Then set them as the environmental variables.
os.environ['REFERENCE_DATA_PATH'] = reference_data_path
os.environ['TEST_DATA_PATH'] = test_data_path
os.environ['RESULTS_DIR'] = results_dir
| 5,344,620
|
def gen_group_tests(mod_name):
"""mod_name is the back-end script name without the.py extension.
There must be a gen_test_cases() function in each module."""
mod = importlib.import_module(mod_name)
mod.gen_test_cases()
| 5,344,621
|
def test_MultiLocus_offsets_even_inverted():
"""Offsets are assigned to the nearest locus."""
multi_locus = MultiLocus([(1, 3), (7, 9)], True)
invariant(
multi_locus.to_position, 5, multi_locus.to_coordinate, (1, 2, 0))
invariant(
multi_locus.to_position, 4, multi_locus.to_coordinate, (2, -2, 0))
| 5,344,622
|
def test_custom_name(name, container, expected):
"""
When name or container arguments are given, builder should use them to
greet.
"""
builder = HelloHTML(name=name, container=container)
assert builder.greet() == expected
| 5,344,623
|
def random_train_test_split(df, train_frac, random_seed=None):
"""
    This function shuffles the data based on the seed and then splits the dataframe into train and test sets, which are converted to their list-of-vector representations.
    Args:
        df (DataFrame): The dataframe used to generate the train and test split.
        train_frac (float): The fraction in the range 0.0 to 1.0 that should be used for training.
        random_seed (int, optional): The seed for randomising. Defaults to None, which means a seed is chosen at random.
    Returns:
        tuple: The lists of vectors for the train and test data frames respectively.
"""
if random_seed is not None:
df = df.sample(frac=1, random_state=random_seed)
else:
df = df.sample(frac=1)
split_point = int(len(df.index) * train_frac)
train = to_vector(df[:split_point])
test = to_vector(df[split_point:])
return train, test
| 5,344,624
|
def test_parser_record_str_input(change_dir, clean_output):
"""Verify the hook call works successfully."""
o2 = tackle(
'.',
no_input=True,
record=clean_output,
)
with open(clean_output) as f:
record_output = yaml.load(f)
assert 'stuff' in o2
assert 'stuff' in record_output
| 5,344,625
|
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots, UBmatrix=None,
B0matrix=IDENTITYMATRIX,
offset=0,
pureRotation=0,
labXMAS=0,
verbose=0,
pixelsize=0.079,
dim=(2048, 2048),
kf_direction="Z>0"):
"""
must: len(varying_parameter_values)=len(varying_parameter_indices)
DATA_Q: array of all 3 elements miller indices
nspots: indices of selected spots of DATA_Q
    UBmatrix: orientation matrix UB used as q = UB * B0 * Q; if None, identity is assumed
    WARNING: All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat
    returns: X, Y pixel positions, theta angles and the orientation matrix used
"""
# selecting nspots of DATA_Q
# print "DATA_Q in calc_XY_pixelpositions", DATA_Q
# print "nspots", nspots
# print "len(DATA_Q)", len(DATA_Q)
DATAQ = np.take(DATA_Q, nspots, axis=0)
trQ = np.transpose(DATAQ) # np.array(Hs, Ks,Ls) for further computations
# print "DATAQ in xy_from_Quat", DATAQ
if UBmatrix is not None:
R = UBmatrix
# q = UB * B0 * Q
trQ = np.dot(np.dot(R, B0matrix), trQ)
# results are qx,qy,qz
    else:
        print("I DON'T LIKE INITROT == None")
        print("this must mean that INITROT = Identity ?...")
        R = np.eye(3)  # fall back to identity so R is defined for the return below
Qrot = trQ # lattice rotation due to quaternion
Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0)) # norms of Q vectors
twthe, chi = F2TC.from_qunit_to_twchi(Qrot / Qrotn, labXMAS=labXMAS)
# print "twthe, chi", twthe, chi
if verbose:
print("tDATA_Q", np.transpose(DATA_Q))
print("Qrot", Qrot)
print("Qrotn", Qrotn)
print("Qrot/Qrotn", Qrot / Qrotn)
print("twthe,chi", twthe, chi)
X, Y, theta = F2TC.calc_xycam_from2thetachi(
twthe,
chi,
calibration_parameters,
offset=offset,
verbose=0,
pixelsize=pixelsize,
kf_direction=kf_direction)
return X, Y, theta, R
| 5,344,626
|
def check_tx_trytes_length(trytes):
"""
Checks if trytes are exactly one transaction in length.
"""
if len(trytes) != TransactionTrytes.LEN:
raise with_context(
exc=ValueError('Trytes must be {len} trytes long.'.format(
len= TransactionTrytes.LEN
)),
context={
'trytes': trytes,
},
)
| 5,344,627
|
def load_csv(path):
"""
Function for importing data from csv. Function uses weka implementation
of CSVLoader.
:param path: input file
:return: weka arff data
"""
args, _sufix = csv_loader_parser()
loader = Loader(classname='weka.core.converters.CSVLoader',
options=args_to_weka_options(args, _sufix))
return loader.load_file(path)
| 5,344,628
|
def from_stream(stream, storage, form):
"""Reverses to_stream, returning data"""
if storage == "pure-plain":
assert isinstance(stream, str)
if isinstance(stream, str):
txt = stream
else:
assert not stream.startswith(MAGIC_SEAMLESS)
assert not stream.startswith(MAGIC_NUMPY)
txt = stream.decode("utf-8")
result = json.loads(txt)
return result
elif storage == "pure-binary":
b = BytesIO(stream)
arr0 = np.load(b, allow_pickle=False)
if arr0.ndim == 0 and arr0.dtype.char != "S":
arr = np.frombuffer(arr0,arr0.dtype)
return arr[0]
else:
return arr0
assert stream.startswith(MAGIC_SEAMLESS)
l = len(MAGIC_SEAMLESS)
s1 = stream[l:l+8]
s2 = stream[l+8:l+16]
len_jsons = np.frombuffer(s1, dtype=np.uint64).tolist()[0]
buffersize = np.frombuffer(s2, dtype=np.uint64).tolist()[0]
assert len(stream) == l + 16 + len_jsons + buffersize
bytes_jsons = stream[l+16:l+16+len_jsons]
jsons = json.loads(bytes_jsons.decode("utf-8"))
bytebuffer = stream[l+16+len_jsons:]
buffer = np.frombuffer(bytebuffer,dtype=np.uint8)
data = _from_stream(
None, storage, form,
jsons, buffer
)
return data
| 5,344,629
|
def draw_random_graph(i):
"""
    Draw a random graph with 2**i nodes
    and edge probability p = 2*i/(2**i).
"""
g_random = nx.gnp_random_graph(2**i,2*i/(2**i))
nx.draw(g_random,node_size=20)
plt.savefig("./random_graph.svg")
plt.close()
# plt.show()
| 5,344,630
|
def volume100():
"""Function for setting volume."""
os.system('vol 100')
return render_template('fmberryremote.html', GENRES=GENRES, choosenStation=STATION)
| 5,344,631
|
def UniformExploration(j, state):
"""Fake player j that always targets all arms."""
return list(np.arange(state.K))
| 5,344,632
|
def deduction_limits(data):
"""
Apply limits on itemized deductions
"""
# Split charitable contributions into cash and non-cash using ratio in PUF
cash = 0.82013
non_cash = 1. - cash
data['e19800'] = data['CHARITABLE'] * cash
data['e20100'] = data['CHARITABLE'] * non_cash
# Apply student loan interest deduction limit
data['e03210'] = np.where(data.SLINT > 2500, 2500, data.SLINT)
# Apply IRA contribution limit
deductable_ira = np.where(data.AGE >= 50,
np.where(data.ADJIRA > 6500, 6500, data.ADJIRA),
np.where(data.ADJIRA > 5500, 5500, data.ADJIRA))
data['e03150'] = deductable_ira
return data
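# A toy illustration (assumes `import pandas as pd` and `import numpy as np`);
# the nested np.where acts as an age-dependent cap on the IRA deduction.
import pandas as pd
toy = pd.DataFrame({'CHARITABLE': [1000.0], 'SLINT': [3000.0],
                    'AGE': [55], 'ADJIRA': [7000.0]})
out = deduction_limits(toy)
# e19800 ~= 820.13, e20100 ~= 179.87, e03210 == 2500.0, e03150 == 6500.0
print(out[['e19800', 'e20100', 'e03210', 'e03150']])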
| 5,344,633
|
def get_args() -> argparse.Namespace:
"""Setup the argument parser
Returns:
argparse.Namespace:
"""
parser = argparse.ArgumentParser(
description='A template for python projects.',
add_help=False)
# Required Args
required_args = parser.add_argument_group('Required Arguments')
config_file_params = {
'type': argparse.FileType('r'),
'required': True,
'help': "The configuration yml file"
}
required_args.add_argument('-c', '--config-file', **config_file_params)
required_args.add_argument('-l', '--log', required=True, help="Name of the output log file")
# Optional args
optional_args = parser.add_argument_group('Optional Arguments')
optional_args.add_argument('-m', '--run-mode', choices=['run_mode_1', 'run_mode_2', 'run_mode_3'],
default='run_mode_1',
help='Description of the run modes')
optional_args.add_argument('-d', '--debug', action='store_true',
help='Enables the debug log messages')
optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit")
return parser.parse_args()
| 5,344,634
|
def cli(source, destination, fm_type, root, directives):
"""A flavor-agnostic extension framework for Markdown.
Reads from <SOURCE> and writes to <DESTINATION>.
If <SOURCE> is a single file, it will be converted to Markdown and written
to <DESTINATION> (default: stdout).
If <SOURCE> is a directory, all child Markdata files will be converted to
Markdown and written to <DESTINATION> (default: overwrite <SOURCE>).
"""
loaded = {}
if directives:
loaded = {
pkg: getattr(finder.find_module(pkg).load_module(pkg), "main")
for finder, pkg, _ in pkgutil.iter_modules(path=[directives])
}
src_p = pathlib.Path(source)
src_d = src_p.is_dir()
if src_d and destination:
shutil.copytree(source, destination)
src_p = pathlib.Path(destination)
files = src_p.glob("**/*.md") if src_d else [src_p]
for src in files:
with src.open() as f:
markdown = markdata(f, loaded, fm_type, root)
if src_d:
# We're working on a directory.
with src.open("w+") as f:
f.write(markdown)
elif destination:
# We were given a single-file destination.
with open(destination, "w+") as f:
f.write(markdown)
else:
# stdin (single file default).
click.echo(markdown)
| 5,344,635
|
def select_interacting(num_mtx, bin_mtx, labels):
"""
    Auxiliary function for fit_msa_models.
Used for fitting the models in hard EM; selects observations with a hidden
variable value of 1.
"""
if labels is None:
# This is the case when initializing the models
return num_mtx, bin_mtx, labels
else:
# This is the case inside the EM loop
labels = np.asarray(labels)
idxs = np.where(np.asarray(labels) == 1)[0]
int_num = np.take(num_mtx, idxs, axis=0)
int_bin = np.take(bin_mtx, idxs, axis=0)
weights = np.take(labels, idxs)
return int_num, int_bin, weights
| 5,344,636
|
def _GenDiscoveryDoc(service_class_names, doc_format,
output_path, hostname=None,
application_path=None):
"""Write discovery documents generated from a cloud service to file.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
doc_format: The requested format for the discovery doc. (rest|rpc)
output_path: The directory to output the discovery docs to.
hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Raises:
ServerRequestException: If fetching the generated discovery doc fails.
Returns:
A list of discovery doc filenames.
"""
output_files = []
service_configs = GenApiConfig(service_class_names, hostname=hostname,
application_path=application_path)
for api_name_version, config in service_configs.iteritems():
discovery_doc = _FetchDiscoveryDoc(config, doc_format)
discovery_name = api_name_version + '.discovery'
output_files.append(_WriteFile(output_path, discovery_name, discovery_doc))
return output_files
| 5,344,637
|
def after_all(context):
"""
Closing the driver
:param context:
:return:
"""
context.driver.quit()
| 5,344,638
|
def nitestr(nite,sep=''):
"""
Convert an ephem.Date object to a nite string.
Parameters:
-----------
nite : ephem.Date object
sep : Output separator
Returns:
--------
nitestr : String representation of the nite
"""
import dateutil.parser
    if isinstance(nite, str):
nite = dateutil.parser.parse(nite)
nite = ephem.Date(nite)
strtuple = nite.tuple()[:3]
nitestr = '{:4d}{sep}{:02d}{sep}{:02d}'
nitestr = nitestr.format(*strtuple,sep=sep)
return nitestr
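# Usage sketch (assumes `import ephem` and python-dateutil are installed):
print(nitestr('2024/01/10', sep='-'))              # '2024-01-10'
print(nitestr(ephem.Date('2024/01/10 03:00:00')))  # '20240110'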
| 5,344,639
|
def passrotPat():
"""Test passive rotation of a pattern. There are two sub-requests possible
and two input configs: full 3D output of 2D-cuts with input, single-pol
pattern or dual-pol.
"""
def doTrack():
#(thetas, phis) = pntsonsphere.cut_az(0.*math.pi/2) #Good for some tests.
(thetas, phis) = pntsonsphere.cut_theta(10.0/180*math.pi)
if type(ant) is not DualPolElem:
E_ths, E_phs = ant.getFFalong(1.0, (thetas, phis))
tvecfun.plotAntPat2D(thetas, E_ths, E_phs, freq=0.5)
else:
freq = 30e6
jones = ant.getJonesAlong([freq], (thetas, phis))
j00 = jones[..., 0, 0].squeeze()
j01 = jones[..., 0, 1].squeeze()
tvecfun.plotAntPat2D(phis, j00, j01, freq)
j10 = jones[..., 1, 0].squeeze()
j11 = jones[..., 1, 1].squeeze()
tvecfun.plotAntPat2D(phis, j10, j11, freq)
def do3D():
cutphis = numpy.arange(0, 2*math.pi, .2)
nrLngs = len(cutphis)
dims = (100, nrLngs)
THETA = numpy.zeros(dims)
PHI = numpy.zeros(dims)
E_TH = numpy.zeros(dims, dtype=complex)
E_PH = numpy.zeros(dims, dtype=complex)
for (cutNr, cutphi) in enumerate(cutphis):
(thetas, phis) = pntsonsphere.cut_theta(cutphi)
if type(ant) is DualPolElem:
freq = 30e6
jones = ant.getJonesAlong([freq], (thetas, phis))
j00 = jones[..., 0, 0].squeeze()
j01 = jones[..., 0, 1].squeeze()
j10 = jones[..., 1, 0].squeeze()
j11 = jones[..., 1, 1].squeeze()
# select y antenna
E_ths = j10
E_phs = j11
vfname = 'Hamaker-Arts'
else:
freq = 0.0
E_ths, E_phs = ant.getFFalong(freq, (thetas, phis))
vfname = 'E-dipole y-directed'
THETA[:, cutNr] = thetas
PHI[:, cutNr] = phis
E_TH[:, cutNr] = E_ths
E_PH[:, cutNr] = E_phs
tvecfun.plotvfonsph(THETA, PHI, E_TH, E_PH, freq=freq,
projection='equirectangular', vfname=vfname)
# Get a simple linear dipole along y.
singpol = False
if singpol:
ant = gen_simp_RadFarField()
# ant = antpat.theoreticalantennas.max_gain_pat(4)[0]
else:
dpath = dreambeam.__path__[0]+'/telescopes/LOFAR/data/'
ha = HamakerPolarimeter(pickle.load(open(dpath+'HA_LOFAR_elresp_LBA.p',
'rb')))
ant = DualPolElem(ha)
rotang = 1.*math.pi/4.
rotmat = pntsonsphere.rot3Dmat(0.0, 0.0*math.pi/2, 1.5*math.pi/2)
# Rotate the antenna 90 deg.
print(rotmat)
ant.rotateframe(rotmat)
# Choose between next 2 lines:
# doTrack()
do3D()
| 5,344,640
|
def register():
"""Register user"""
form = RegistrationForm()
if form.validate_on_submit():
# only allow 1 user on locally hosted version
if len(User.query.all()) == 0:
# add user to database
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, password=hashed_password)
db.session.add(user)
db.session.commit()
# create user directory
user_dir = os.path.join(OUTPUT_DIR, user.username)
if not os.path.exists(user_dir):
os.makedirs(user_dir)
# create user src directory
src_dir = os.path.join(user_dir, 'src')
if not os.path.exists(src_dir):
os.makedirs(src_dir)
# create user exfiltrated files directory
files_dir = os.path.join(user_dir, 'files')
if not os.path.exists(files_dir):
os.makedirs(files_dir)
# initialize c2 session storage
server.c2.sessions[user.username] = {}
# notify user and redirect to login
flash("You have successfully registered!", 'info')
logout_user()
return redirect(url_for('users.login'))
else:
flash("User already exists on this server.", 'danger')
return render_template("register.html", form=form, title="Register")
| 5,344,641
|
def test_black_tool_plugin_parse_invalid():
"""Verify that we can parse the normal output of black."""
btp = setup_black_tool_plugin()
output = "invalid text"
issues = btp.parse_output(output)
assert not issues
| 5,344,642
|
def dump_tuple(tup):
"""
Dump a tuple to a string of fg,bg,attr (optional)
"""
return ','.join(str(i) for i in tup)
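# Usage sketch: serialise an (fg, bg, attr) style tuple to a comma string.
print(dump_tuple((255, 0, 'bold')))   # 255,0,bold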
| 5,344,643
|
def test_tas_account_filter_later_qtr_award_financial():
""" Ensure the fiscal year and quarter filter is working, later quarter - award_financial"""
# Create FederalAccount models
fed_acct1 = mommy.make("accounts.FederalAccount")
fed_acct2 = mommy.make("accounts.FederalAccount")
# Create Program Activities
prog1 = mommy.make("references.RefProgramActivity", program_activity_code="0001")
prog2 = mommy.make("references.RefProgramActivity", program_activity_code="0002")
# Create Object Classes
obj_cls1 = mommy.make("references.ObjectClass", object_class="001")
obj_cls2 = mommy.make("references.ObjectClass", object_class="002")
# Create TAS models
tas1 = mommy.make("accounts.TreasuryAppropriationAccount", federal_account=fed_acct1, tas_rendering_label="1")
tas2 = mommy.make("accounts.TreasuryAppropriationAccount", federal_account=fed_acct2, tas_rendering_label="2")
# Create file A models
mommy.make(
"awards.FinancialAccountsByAwards",
treasury_account=tas1,
reporting_period_start="1699-10-01",
reporting_period_end="1699-12-31",
program_activity=prog1,
object_class=obj_cls1,
)
mommy.make(
"awards.FinancialAccountsByAwards",
treasury_account=tas2,
reporting_period_start="1700-04-01",
reporting_period_end="1700-06-30",
program_activity=prog2,
object_class=obj_cls2,
)
queryset = account_download_filter(
"award_financial", FinancialAccountsByAwards, {"fy": 1700, "quarter": 3}, "treasury_account"
)
assert queryset.count() == 2
| 5,344,644
|
def get_single_endpoint(name):
"""
TODO - Add docstring
"""
class EndpointWithID(Resource):
def get(self, pid):
# TODO - Add return
pass
# TODO - Add `get.__doc__`
EndpointWithID.__name__ = name
return EndpointWithID
| 5,344,645
|
def generate_fermi_question(cfg, logratio, filter_single_number_lhs=True):
"""
Generates one Fermi question.
Args:
cfg: Expression config
logratio: Log ratio standard deviation (for RHS)
        filter_single_number_lhs: Whether to exclude an lhs consisting of a single
            numerical term.
        Note: lhs values greater than 10**15 are rejected, and the rhs is rounded
            to 3 significant figures.
"""
done = False
rhs_limit = 10**15
while not done:
lhs = generate_expression(cfg)
L = ne.evaluate(lhs['numerical'])
if L > rhs_limit:
continue
if filter_single_number_lhs:
if len(lhs['quantity_ids']) == 0 and lhs['length'] <= 1:
continue
# Always sample the rhs from a
# lognormal with a larger variance the larger the number is
if L == 0:
R = 0
while R == 0: # Loop until we get an R != L
R = int(np.random.normal(0, 1))
else:
R = L
while R == L: # Loop until we hit an R != L
# Now we set the variance of the log RHS so that it
# grows as the quantity gets bigger
sd = 0.1 + log10(abs(L)) * 0.065 + log10(abs(L))**2 * 0.0042
R_raw = sample_lognormal(L, sd)
# Then round to 3 sf
if R_raw != 0:
R = int(round(R_raw, -int(floor(log10(abs(R_raw)))) + 2))
else:
R = 0
assert R != L
try:
R = ne.evaluate(str(R))
done = True
except:
pass
question = lhs['expression'] + ' < ' + "{:,}".format(int(R))
numerical = lhs['numerical'] + ' < ' + str(R)
fermi_question = FermiQuestion(lhs['length'], question, numerical, lhs['estimation_difficulty'],
lhs['quantity_ids'], lhs['categories'], lhs['quantity_strings'])
return fermi_question
| 5,344,646
|
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict | None, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
| 5,344,647
|
def Packet_genReadUserTag(errorDetectionMode, buffer, size):
"""Packet_genReadUserTag(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t"""
return _libvncxx.Packet_genReadUserTag(errorDetectionMode, buffer, size)
| 5,344,648
|
def prefetch(tensor_dict, capacity):
"""Creates a prefetch queue for tensors.
Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
dequeue op that evaluates to a tensor_dict. This function is useful in
prefetching preprocessed tensors so that the data is readily available for
consumers.
Example input pipeline when you don't need batching:
----------------------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
tensor_dict = prefetch_queue.dequeue()
outputs = Model(tensor_dict)
...
----------------------------------------------------
For input pipelines with batching, refer to core/batcher.py
Args:
tensor_dict: a dictionary of tensors to prefetch.
capacity: the size of the prefetch queue.
Returns:
a FIFO prefetcher queue
"""
names = tensor_dict.keys()
dtypes = [t.dtype for t in tensor_dict.values()]
shapes = [t.get_shape() for t in tensor_dict.values()]
prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
shapes=shapes,
names=names,
name='prefetch_queue')
enqueue_op = prefetch_queue.enqueue(tensor_dict)
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
prefetch_queue, [enqueue_op]))
tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name,
capacity),
tf.to_float(prefetch_queue.size()) * (1. / capacity))
return prefetch_queue
| 5,344,649
|
def get_total_shares():
"""
Returns a list of total shares (all, attending, in person, represented) for all voting principles.
"""
total_shares = {
'heads': [0, 0, 0, 0] # [all, attending, in person, represented]
}
principle_ids = VotingPrinciple.objects.values_list('id', flat=True)
for principle_id in principle_ids:
total_shares[principle_id] = [Decimal(0), Decimal(0), Decimal(0), Decimal(0)]
# Query delegates.
delegates = User.objects.filter(groups=2).select_related('votingproxy', 'keypad').prefetch_related('shares')
shares_exists = VotingShare.objects.exists()
for delegate in delegates:
# Exclude delegates without shares -- who may only serve as proxies.
if shares_exists and delegate.shares.count() == 0:
continue
total_shares['heads'][0] += 1
# Find the authorized voter.
auth_voter = find_authorized_voter(delegate)
# If auth_voter is delegate himself set index to 2 (in person) else 3 (represented).
i = 2 if auth_voter == delegate else 3
attending = auth_voter is not None and auth_voter.is_present
if config['voting_enable_votecollector']:
attending = attending and hasattr(auth_voter, 'keypad')
if attending:
total_shares['heads'][i] += 1
# Add shares to total.
for share in delegate.shares.all():
total_shares[share.principle_id][0] += share.shares
if attending:
total_shares[share.principle_id][i] += share.shares
for k in total_shares.keys():
total_shares[k][1] = total_shares[k][2] + total_shares[k][3]
return total_shares
| 5,344,650
|
def test_multi_cpu_sample_splitting(data_input, models_from_data, num_cpus):
"""
Tests simulator's _determine_num_cpu_samples() by ensuring that all samples
will be used and that the difference in number of samples between processes
is never greater than one.
"""
total_samples = 100
sample_sizes = np.zeros(num_cpus)
sim = MLMCSimulator(models=models_from_data, data=data_input)
for cpu_rank in range(num_cpus):
sim._num_cpus = num_cpus
sim._cpu_rank = cpu_rank
sample_sizes[cpu_rank] = sim._determine_num_cpu_samples(total_samples)
# Test that all samples will be utilized.
assert np.sum(sample_sizes) == total_samples
# Test that there is never more than a difference of one sample
# between processes.
assert np.max(sample_sizes) - np.min(sample_sizes) <= 1
| 5,344,651
|
def base_test_version(dl_class):
"""
Args:
dl_class (type): subclass of Dataloader.
"""
if isinstance(dl_class, str):
dl_class = Dataloader.load_class(dl_class)
assert hasattr(dl_class, '_version')
version = dl_class._version
version_path = str(version_dir / '{}_v{}.jsonl'.format(dl_class.__name__, version))
version_info = load_version_info(version_path)
assert version_info
for dic in version_info:
assert 'hash_value' in dic
assert 'args' in dic
assert 'kwargs' in dic
hash_value = dic['hash_value']
args = dic['args']
kwargs = dic['kwargs']
dl = dl_class(*args, **kwargs)
assert hash_value == dl.hash_value
| 5,344,652
|
def gen_check_box_idx():
""" Generate a list containing the coordinate of three
finder patterns in QR-code
Args:
None
Returns:
idx_check_box: a list containing the coordinate each pixel
of the three finder patterns
"""
idx_check_box = []
for i in range(7):
idx_check_box.append((0, i))
idx_check_box.append((6, i))
idx_check_box.append((30, i))
idx_check_box.append((36, i))
idx_check_box.append((0, 30+i))
idx_check_box.append((6, 30+i))
for i in range(1, 6):
idx_check_box.append((i, 0))
idx_check_box.append((i, 6))
idx_check_box.append((i, 30))
idx_check_box.append((i, 36))
idx_check_box.append((30+i, 0))
idx_check_box.append((30+i, 6))
for i in range(3):
for j in range(3):
idx_check_box.append((2+i, 2+j))
idx_check_box.append((32+i, 2+j))
idx_check_box.append((2+i, 32+j))
return idx_check_box
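# Quick sanity check: each of the three finder patterns contributes a 7x7
# outer ring (24 cells) plus a 3x3 core (9 cells), i.e. 3 * 33 = 99 pixels.
print(len(gen_check_box_idx()))   # 99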
| 5,344,653
|
async def test_template_with_unavailable_entities(hass, states, start_ha):
"""Test unavailability with value_template."""
_verify(hass, states[0], states[1], states[2], states[3], states[4], None)
| 5,344,654
|
def preprocess_mc_parameters(n_rv, dict_safir_file_param, index_column='index'):
"""
NAME: preprocess_mc_parameters
AUTHOR: Ian Fu
DATE: 18 Oct 2018
DESCRIPTION:
Takes a dictionary object with each item represents a safir input variable, distributed or static, distributed
input parameter must be a dictionary object describing a distribution (see usage).
PARAMETERS:
:param n_rv: int, number of random samples for distributed parameters
    :param dict_safir_file_param: dict, safir input (problem definition) file parameterised variable names
    :param index_column: str, name of the index column of the returned DataFrame object
    :return pf_params: DataFrame with n_rv rows and a column for each item in dict_safir_file_param
USAGE:
"""
# declare containers
dict_result_params_static = dict() # container for storing static parameters
dict_result_params_dist = dict() # container for storing distributed random parameters
# populate static parameters and extract
for key_, each_param in dict_safir_file_param.items():
if isinstance(each_param, dict):
dict_result_params_dist[key_] = each_param
else:
if isinstance(each_param, list):
if len(each_param) == n_rv:
dict_result_params_dist[key_] = each_param
else:
dict_result_params_static[key_] = [each_param] * n_rv
# make distribution random parameters
dict_result_params_dist = preprocess_safir_mc_parameters(n_rv, dict_result_params_dist)
# merge random distributed and static parameters
dict_result_params = {**dict_result_params_static, **dict_result_params_dist}
# make pandas.Dataframe
if index_column not in dict_result_params:
dict_result_params[index_column] = list(range(n_rv))
pf_params = pandas.DataFrame(dict_result_params)
pf_params.set_index(index_column, inplace=True)
return pf_params
| 5,344,655
|
def test_no_existing_transaction(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment_account.save()
invoice = factory_invoice(payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.find_active_by_invoice_id(invoice.id)
assert transaction is None
| 5,344,656
|
def AdjustColour(color, percent, alpha=wx.ALPHA_OPAQUE):
""" Brighten/Darken input colour by percent and adjust alpha
channel if needed. Returns the modified color.
@param color: color object to adjust
@param percent: percent to adjust +(brighten) or -(darken)
@keyword alpha: amount to adjust alpha channel
"""
radj, gadj, badj = [ int(val * (abs(percent) / 100.0))
for val in color.Get() ]
if percent < 0:
radj, gadj, badj = [ val * -1 for val in [radj, gadj, badj] ]
else:
radj, gadj, badj = [ val or 255 for val in [radj, gadj, badj] ]
red = min(color.Red() + radj, 255)
green = min(color.Green() + gadj, 255)
blue = min(color.Blue() + badj, 255)
return wx.Colour(red, green, blue, alpha)
| 5,344,657
|
def db_fill_tables(source_path: str, models: list = export_models_ac) -> None:
"""
Consecutively execute converters and send data to the database
! The execution order is important for at least the following data types:
Type -> Word -> Definition,
because the conversion of definitions depends on existing words,
and the conversion of words depends on existing types
:param source_path:
:param models:
:return:
"""
log.info("Start to fill tables with dictionary data")
ac_session = session()
for model in models:
model_name = model.__name__
url = f"{source_path}{model.import_file_name}"
data = download_dictionary_file(url, model_name)
log.info("Start to process %s objects", model_name)
objects = [model(**model.import_(item)) for item in data]
log.info("Total number of %s objects - %s", model_name, len(objects))
log.info("Add %s objects to Database", model_name)
ac_session.bulk_save_objects(objects)
log.debug("Commit Database changes")
ac_session.commit()
log.info("Finish to process %s objects\n", model_name)
ac_session.close()
log.info("Finish to fill tables with dictionary data\n")
| 5,344,658
|
def getdates(startdate, utc_to_local, enddate=None):
"""
Generate '~astropy.tot_time.Time' objects corresponding to 16:00:00 local tot_time on evenings of first and last
nights of scheduling period.
Parameters
----------
startdate : str or None
Start date (eg. 'YYYY-MM-DD'). If None, defaults to current date.
enddate : str or None
End date (eg. 'YYYY-MM-DD'). If None, defaults to day after start date.
utc_to_local : '~astropy.unit' hours
Time difference between utc and local tot_time.
Returns
-------
start : '~astropy.tot_time.core.Time'
UTC corresponding to 16:00 local tot_time on first night
end : '~astropy.tot_time.core.Time'
UTC corresponding to 16:00 local tot_time on last night
"""
if startdate is None:
current_utc = Time.now()
start = Time(str((current_utc + utc_to_local).iso)[0:10] + ' 16:00:00.00') - utc_to_local
else:
try:
start = Time(startdate + ' 16:00:00.00') - utc_to_local
except ValueError as e:
print(e)
raise ValueError('\"{}\" not a valid date. Expected string of the form \'YYYY-MM-DD\''.format(startdate))
if enddate is None: # default number of observation nights is 1
return start, None
else:
try:
end = Time(enddate + ' 16:00:00.00') - utc_to_local
diff = int((end - start).value) # difference between startdate and enddate
if diff <= 0:
raise ValueError('End date \"{}\" occurs before or on start date.'.format(enddate))
except ValueError as e:
print(e)
raise ValueError('\"{}\" not a valid date. '
'Must be after start date and of the form \'YYYY-MM-DD\''.format(enddate))
start.format = 'jd'
end.format = 'jd'
return start, end
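# Usage sketch (assumes `from astropy.time import Time` and
# `import astropy.units as u`): a site 10 hours behind UTC, with start and
# end dates two days apart.
import astropy.units as u
start, end = getdates('2024-01-10', utc_to_local=-10 * u.hour, enddate='2024-01-12')
# `start` is the UTC instant of 16:00 local on 2024-01-10 (02:00 UTC the next
# day); `end` is the corresponding instant two days later; both in JD format.
print(start.iso, end.iso)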
| 5,344,659
|
def resolve_fix_versions(
service: VulnerabilityService,
result: Dict[Dependency, List[VulnerabilityResult]],
state: AuditState = AuditState(),
) -> Iterator[FixVersion]:
"""
    Resolves a mapping of dependencies to known vulnerabilities into a series of fix versions without
    known vulnerabilities.
"""
for (dep, vulns) in result.items():
if dep.is_skipped():
continue
if not vulns:
continue
dep = cast(ResolvedDependency, dep)
try:
version = _resolve_fix_version(service, dep, vulns, state)
yield ResolvedFixVersion(dep, version)
except FixResolutionImpossible as fri:
skip_reason = str(fri)
logger.debug(skip_reason)
yield SkippedFixVersion(dep, skip_reason)
| 5,344,660
|
def alerts_matcher(base_name, pattern, alerter, second_order_resolution_hours):
"""
Get a list of all the metrics that would match an ALERTS pattern
:param base_name: The metric name
:param pattern: the ALERT pattern
    :param alerter: the alerter name e.g. smtp, syslog, hipchat, pagerduty
:param second_order_resolution_hours: (optional) The number of hours that
Mirage should surface the metric timeseries for
:type base_name: str
:type pattern: str
:type alerter: str
:type second_order_resolution_hours: int
:return: matched_by
:rtype: str
('metric3', 'hipchat', 600),
# Log all anomalies to syslog
('stats.', 'syslog', 1),
# Wildcard namespaces can be used as well
    ('metric4.thing.*.requests', 'smtp', 900),
    # However beware of wildcards as the above wildcard should really be
    ('metric4.thing\..*.\.requests', 'smtp', 900),
.. todo: This fully
"""
logger.info('matching metric to ALERTS pattern :: %s - %s' % (base_name, pattern))
# alert = ('stats_counts\..*', 'smtp', 3600, 168)
# alert = ('.*\.mysql\..*', 'smtp', 7200, 168)
alert = (pattern, alerter, second_order_resolution_hours)
ALERT_MATCH_PATTERN = alert[0]
new_base_name = base_name.replace('metrics.', '', 1)
METRIC_PATTERN = new_base_name
pattern_match = False
matched_by = 'not matched'
try:
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
matched_by = 'regex'
# pattern_match = True
print('matched_by %s' % matched_by)
except:
pattern_match = False
print('not matched by regex')
if not pattern_match:
print('not matched by regex')
if alert[0] in base_name:
pattern_match = True
matched_by = 'substring'
print('%s' % matched_by)
else:
print('not matched in substring')
if not pattern_match:
print('not matched by %s in %s' % (base_name, alert[0]))
if base_name == alert[0]:
pattern_match = True
matched_by = 'absolute_match'
print('%s' % matched_by)
else:
print('not matched in substring')
return matched_by
| 5,344,661
|
def multitask_bed_generation(
target_beds_file,
feature_size=1000,
merge_overlap=200,
out_prefix="features",
chrom_lengths_file=None,
db_act_file=None,
db_bed=None,
ignore_auxiliary=False,
no_db_activity=False,
ignore_y=False,
):
"""Merge multiple bed files to select sample sequence regions with at least one
peak.
This function outputs a .bed file in the specified directory containing seven
columns: chromosome, sequence start, sequence end, name, score, strand, and indexes
of experiments that have a peak in this region.
Parameters
----------
target_beds_file: str
Location of the sample file containing experiment label and their corresponding
file locations. Should be a two column text file, first row contains label,
second row contain directory for the .bed/.bed.gz file.
feature_size: int, optional
Length of the sequence region per sample in output. Default to 1000.
merge_overlap: int, optional
After adjusting peaks into feature_size, if two peak regions overlaps more than
this amount, they will be re-centered and merged into a single sample. Defaults
to 200.
    out_prefix: str, optional
        Location and naming of the output bed file. Defaults to 'features.bed'.
    chrom_lengths_file: str, optional
        Location of the chrom.sizes file. Defaults to None.
    db_act_file: str, optional
        Location of the existing database activity table. Defaults to None.
    db_bed: str, optional
        Location of the existing database .bed file. Defaults to None.
    ignore_auxiliary: bool, optional
        Ignore auxiliary chromosomes. Defaults to False.
    no_db_activity: bool, optional
        Whether to pass along the activities of the database sequences. Defaults to
        False.
    ignore_y: bool, optional
        Ignore Y chromosome features. Defaults to False.
Returns
-------
None
Examples
--------
>>> multitask_bed_generation(
example_file,chrom_lengths_file='/data/hg38.chrom.size',
feature_size=1000,merge_overlap=200,out_prefix='/data/multitask.bed')
"""
if not target_beds_file:
raise Exception(
"Must provide file labeling the targets and providing BED file paths."
)
# determine whether we'll add to an existing DB
db_targets = []
db_add = False
if db_bed is not None:
db_add = True
if not no_db_activity:
if db_act_file is None:
raise ValueError(
"Must provide both activity table or specify -n if you want to add"
" to an existing database"
)
else:
# read db target names
db_act_in = open(db_act_file)
db_targets = db_act_in.readline().strip().split("\t")
db_act_in.close()
# read in targets and assign them indexes into the db
target_beds = []
target_dbi = []
for line in open(target_beds_file):
a = line.split()
if len(a) != 2:
print(a)
print(
"Each row of the target BEDS file must contain a label and BED file"
" separated by whitespace",
file=sys.stderr,
)
sys.exit(1)
target_dbi.append(len(db_targets))
db_targets.append(a[0])
target_beds.append(a[1])
# read in chromosome lengths
chrom_lengths = {}
if chrom_lengths_file is not None:
chrom_lengths = {}
for line in open(chrom_lengths_file):
a = line.split()
chrom_lengths[a[0]] = int(a[1])
else:
print(
"Warning: chromosome lengths not provided, so regions near ends may be"
" incorrect.",
file=sys.stderr,
)
#################################################################
# print peaks to chromosome-specific files
#################################################################
chrom_files = {}
chrom_outs = {}
peak_beds = target_beds
if db_add:
peak_beds.append(db_bed)
for bi in range(len(peak_beds)):
if peak_beds[bi][-3:] == ".gz":
peak_bed_in = gzip.open(peak_beds[bi], "rt")
else:
peak_bed_in = open(peak_beds[bi])
for line in peak_bed_in:
if not line.startswith("#"):
a = line.split("\t")
a[-1] = a[-1].rstrip()
# hash by chrom/strand
chrom = a[0]
strand = "+"
if len(a) > 5 and a[5] in "+-":
strand = a[5]
chrom_key = (chrom, strand)
# adjust coordinates to midpoint
start = int(a[1])
end = int(a[2])
mid = int(_find_midpoint(start, end))
a[1] = str(mid)
a[2] = str(mid + 1)
# open chromosome file
if chrom_key not in chrom_outs:
chrom_files[chrom_key] = "%s_%s_%s.bed" % (
out_prefix,
chrom,
strand,
)
chrom_outs[chrom_key] = open(chrom_files[chrom_key], "w")
# if it's the db bed
if db_add and bi == len(peak_beds) - 1:
if no_db_activity:
# set activity to null
a[6] = "."
print("\t".join(a[:7]), file=chrom_outs[chrom_key])
# print >> chrom_outs[chrom_key], '\t'.join(a[:7])
else:
                        print(line, end="", file=chrom_outs[chrom_key])
# print >> chrom_outs[chrom_key], line,
# if it's a new bed
else:
# specify the target index
while len(a) < 7:
a.append("")
a[5] = strand
a[6] = str(target_dbi[bi])
print("\t".join(a[:7]), file=chrom_outs[chrom_key])
# print >> chrom_outs[chrom_key], '\t'.join(a[:7])
peak_bed_in.close()
# close chromosome-specific files
for chrom_key in chrom_outs:
chrom_outs[chrom_key].close()
# ignore Y
if ignore_y:
for orient in "+-":
chrom_key = ("chrY", orient)
if chrom_key in chrom_files:
print("Ignoring chrY %s" % orient, file=sys.stderr)
# print >> sys.stderr, 'Ignoring chrY %s' % orient
os.remove(chrom_files[chrom_key])
del chrom_files[chrom_key]
# ignore auxiliary
if ignore_auxiliary:
# TODO: \d appears to be an invalid escape sequence. And re.compile will escape
# \d anyway to \\d.
primary_re = re.compile("chr\\d+$")
for chrom_key in chrom_files.keys():
chrom, strand = chrom_key
primary_m = primary_re.match(chrom)
if not primary_m and chrom != "chrX":
print("Ignoring %s %s" % (chrom, strand), file=sys.stderr)
# print >> sys.stderr, 'Ignoring %s %s' % (chrom,strand)
os.remove(chrom_files[chrom_key])
del chrom_files[chrom_key]
#################################################################
# sort chromosome-specific files
#################################################################
for chrom_key in chrom_files:
chrom, strand = chrom_key
chrom_sbed = "%s_%s_%s_sort.bed" % (out_prefix, chrom, strand)
sort_cmd = "sortBed -i %s > %s" % (chrom_files[chrom_key], chrom_sbed)
subprocess.call(sort_cmd, shell=True)
os.remove(chrom_files[chrom_key])
chrom_files[chrom_key] = chrom_sbed
#################################################################
# parse chromosome-specific files
#################################################################
final_bed_out = open("%s.bed" % out_prefix, "w")
for chrom_key in chrom_files:
chrom, strand = chrom_key
open_peaks = []
for line in open(chrom_files[chrom_key], "rt"):
a = line.split("\t")
a[-1] = a[-1].rstrip()
# construct Peak
peak_start = int(a[1])
peak_end = int(a[2])
peak_act = _activity_set(a[6])
peak = Peak(peak_start, peak_end, peak_act)
peak.extend(feature_size, chrom_lengths.get(chrom, None))
if len(open_peaks) == 0:
# initialize open peak
open_end = peak.end
open_peaks = [peak]
else:
# operate on exiting open peak
# if beyond existing open peak
if open_end - merge_overlap <= peak.start:
# close open peak
mpeaks = _merge_peaks(
open_peaks,
feature_size,
merge_overlap,
chrom_lengths.get(chrom, None),
)
# print to file
for mpeak in mpeaks:
print(mpeak.bed_str(chrom, strand), file=final_bed_out)
# print >> final_bed_out, mpeak.bed_str(chrom, strand)
# initialize open peak
open_end = peak.end
open_peaks = [peak]
else:
# extend open peak
open_peaks.append(peak)
open_end = max(open_end, peak.end)
if len(open_peaks) > 0:
# close open peak
mpeaks = _merge_peaks(
open_peaks, feature_size, merge_overlap, chrom_lengths.get(chrom, None)
)
# print to file
for mpeak in mpeaks:
print(mpeak.bed_str(chrom, strand), file=final_bed_out)
# print >> final_bed_out, mpeak.bed_str(chrom, strand)
final_bed_out.close()
# clean
for chrom_key in chrom_files:
os.remove(chrom_files[chrom_key])
#################################################################
# construct/update activity table
#################################################################
final_act_out = open("%s_act.txt" % out_prefix, "w")
# print header
cols = [""] + db_targets
print("\t".join(cols), file=final_act_out)
# print >> final_act_out, '\t'.join(cols)
# print sequences
for line in open("%s.bed" % out_prefix):
a = line.rstrip().split("\t")
# index peak
peak_id = "%s:%s-%s(%s)" % (a[0], a[1], a[2], a[5])
# construct full activity vector
peak_act = [0] * len(db_targets)
for ai in a[6].split(","):
if ai != ".":
peak_act[int(ai)] = 1
# print line
cols = [peak_id] + peak_act
print("\t".join([str(c) for c in cols]), file=final_act_out)
# print >> final_act_out, '\t'.join([str(c) for c in cols])
final_act_out.close()
| 5,344,662
|
def get_dataframe_tail(n):
""" Returns last n rows of the DataFrame"""
return dataset.tail(n)
| 5,344,663
|
def json_formatter(result, verbose=False, indent=4, offset=0):
"""Format result as json."""
string = json.dumps(result, indent=indent)
string = string.replace("\n", "\n" + " "*offset)
return string
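# Usage sketch: every line after the first is shifted right by `offset`
# spaces so the JSON block stays aligned inside wider, indented output.
block = json_formatter({"name": "demo", "ok": True}, indent=2, offset=4)
print(block)   # nested lines ('"name": ...', '"ok": ...', '}') gain 4 leading spaces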
| 5,344,664
|
def run_test():
"""
Test run for checking a dataset.
"""
load_data()
| 5,344,665
|
def delete_dkim(_domaine):
""" Generate DKIM"""
cmd_dkim = "/opt/zimbra/libexec/zmdkimkeyutil -r -d " + _domaine
try:
print(os.system(cmd_dkim))
print("\n[ok] Delete DKIM\n")
except os.error as e:
print(e)
| 5,344,666
|
def rem():
"""Removes an item of arbitrary depth from the stack"""
try:
x = stack.pop()
del( stack[ int( x ) ] )
except:
print "Error: stack underflow!"
| 5,344,667
|
def simple_http_get(url, port=80, headers=None):
"""Simple interface to make an HTTP GET request
Return the entire request (line,headers,body) as raw bytes
"""
client_socket = create_async_client_socket((url, port))
calling_session = Reactor.get_instance().get_current_session()
@types.coroutine
def send_request(request_bytes):
def send_request_inner(session: Session):
try:
# TODO - maybe we can't send the entire request at once.
# there might be a reason why both .send and .sendall exist
result = session.socket.sendall(request_bytes)
except BrokenPipeError as err:
session.close()
return
# The result is None...whatever!
Reactor.get_instance().make_progress(session, result, IOIntention.read)
none = yield send_request_inner
return none
@types.coroutine
def receive_response(none):
def receive_response_inner(session: Session):
# TODO - so... can we just "read a line"?
# isn't the line MAYBE chunked, and we have to yield control,
# and wait for the socket to be readable again?
# Weird stuff happening here!
# Some sites send me a '\n' first
# Well I guess I should skip that
result_part = session.file.readline()
result_buffer = result_part
while not result_part:
result_part = session.file.readline()
result_buffer += result_part
Reactor.get_instance().make_progress(session, result_buffer, IOIntention.none)
res = yield receive_response_inner
return res
async def make_http_request(s: Session):
# TODO - make the request using the proper path, not just /
raw_request = (
b"GET / HTTP/1.1\r\n"
b"User-Agent: guy-creating-http-server-sorry-for-spam\r\n"
b"Reach-me-at: vlad.george.ardelean@gmail.com\r\n"
b"\r\n"
)
none = await send_request(raw_request)
response = await receive_response(none)
# see the `response` here? We're basically throwing that to line marked `3mfn5gwf`,
# as the result. The generator for the original session which wanted to make an HTTP
# call paused on line marked `3mfn5gwf`. We then set a callback on another socket on
# line marked `b4g9a`. The callback is this function. When this function completes
# and we have our result, we basically restart the previous generator with that result
Reactor.get_instance().make_progress(calling_session, response, IOIntention.none)
Reactor.get_instance().add_client_socket_and_callback(client_socket, make_http_request) # line:b4g9a
# `yield` works very well, BUT, I think that's just by accident.
# Sure, the `make_http_request` function will trigger progress, and make line 3mfn5gwf get
# the result and continue, but what if the socket in the initial session triggers first?
# ...then, the reactor will call the `yield`ed value, and this would blow up, because
# that result is None. That is why we still return a noop, which doesn't do any progress
# but is required for respecting the expectations of the reactor.
result = yield noop # line: 3mfn5gwf
return result
| 5,344,668
|
def _gumbel_softmax_sample(logits, temp=1, eps=1e-20):
"""
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
dims = logits.dim()
gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.data.new())
y = logits + Variable(gumbel_noise)
return F.softmax(y / temp, dims - 1)
| 5,344,669
|
def test_cell_response(tmpdir):
"""Test CellResponse object."""
# Round-trip test
spike_times = [[2.3456, 7.89], [4.2812, 93.2]]
spike_gids = [[1, 3], [5, 7]]
spike_types = [['L2_pyramidal', 'L2_basket'],
['L5_pyramidal', 'L5_basket']]
tstart, tstop, fs = 0.1, 98.4, 1000.
sim_times = np.arange(tstart, tstop, 1 / fs)
gid_ranges = {'L2_pyramidal': range(1, 2), 'L2_basket': range(3, 4),
'L5_pyramidal': range(5, 6), 'L5_basket': range(7, 8)}
cell_response = CellResponse(spike_times=spike_times,
spike_gids=spike_gids,
spike_types=spike_types,
times=sim_times)
cell_response.plot_spikes_hist(show=False)
cell_response.write(tmpdir.join('spk_%d.txt'))
assert cell_response == read_spikes(tmpdir.join('spk_*.txt'))
assert ("CellResponse | 2 simulation trials" in repr(cell_response))
# reset clears all recorded variables, but leaves simulation time intact
assert len(cell_response.times) == len(sim_times)
sim_attributes = ['_spike_times', '_spike_gids', '_spike_types',
'_vsoma', '_isoma']
    net_attributes = ['_times', '_cell_type_names']  # `Network.__init__` creates these
    # Check that we always know which response attributes are simulated;
    # see #291 for discussion. The objective is to keep cell_response small.
assert list(cell_response.__dict__.keys()) == \
sim_attributes + net_attributes
# Test recovery of empty spike files
empty_spike = CellResponse(spike_times=[[], []], spike_gids=[[], []],
spike_types=[[], []])
empty_spike.write(tmpdir.join('empty_spk_%d.txt'))
empty_spike.write(tmpdir.join('empty_spk.txt'))
empty_spike.write(tmpdir.join('empty_spk_{0}.txt'))
assert empty_spike == read_spikes(tmpdir.join('empty_spk_*.txt'))
assert ("CellResponse | 2 simulation trials" in repr(empty_spike))
with pytest.raises(TypeError,
match="spike_times should be a list of lists"):
cell_response = CellResponse(spike_times=([2.3456, 7.89],
[4.2812, 93.2]),
spike_gids=spike_gids,
spike_types=spike_types)
with pytest.raises(TypeError,
match="spike_times should be a list of lists"):
cell_response = CellResponse(spike_times=[1, 2], spike_gids=spike_gids,
spike_types=spike_types)
with pytest.raises(ValueError, match="spike times, gids, and types should "
"be lists of the same length"):
cell_response = CellResponse(spike_times=[[2.3456, 7.89]],
spike_gids=spike_gids,
spike_types=spike_types)
cell_response = CellResponse(spike_times=spike_times,
spike_gids=spike_gids,
spike_types=spike_types)
with pytest.raises(TypeError, match="indices must be int, slice, or "
"array-like, not str"):
cell_response['1']
with pytest.raises(TypeError, match="indices must be int, slice, or "
"array-like, not float"):
cell_response[1.0]
with pytest.raises(ValueError, match="ndarray cannot exceed 1 dimension"):
cell_response[np.array([[1, 2], [3, 4]])]
with pytest.raises(TypeError, match="gids must be of dtype int, "
"not float64"):
cell_response[np.array([1, 2, 3.0])]
with pytest.raises(TypeError, match="gids must be of dtype int, "
"not float64"):
cell_response[[0, 1, 2, 2.0]]
with pytest.raises(TypeError, match="spike_types should be str, "
"list, dict, or None"):
cell_response.plot_spikes_hist(spike_types=1, show=False)
with pytest.raises(TypeError, match=r"spike_types\[ev\] must be a list\. "
r"Got int\."):
cell_response.plot_spikes_hist(spike_types={'ev': 1}, show=False)
with pytest.raises(ValueError, match=r"Elements of spike_types must map to"
r" mutually exclusive input types\. L2_basket is found"
r" more than once\."):
cell_response.plot_spikes_hist(spike_types={'ev':
['L2_basket', 'L2_b']},
show=False)
with pytest.raises(ValueError, match="No input types found for ABC"):
cell_response.plot_spikes_hist(spike_types='ABC', show=False)
with pytest.raises(ValueError, match="tstart and tstop must be of type "
"int or float"):
cell_response.mean_rates(tstart=0.1, tstop='ABC',
gid_ranges=gid_ranges)
with pytest.raises(ValueError, match="tstop must be greater than tstart"):
cell_response.mean_rates(tstart=0.1, tstop=-1.0, gid_ranges=gid_ranges)
with pytest.raises(ValueError, match="Invalid mean_type. Valid "
"arguments include 'all', 'trial', or 'cell'."):
cell_response.mean_rates(tstart=tstart, tstop=tstop,
gid_ranges=gid_ranges, mean_type='ABC')
test_rate = (1 / (tstop - tstart)) * 1000
assert cell_response.mean_rates(tstart, tstop, gid_ranges) == {
'L5_pyramidal': test_rate / 2,
'L5_basket': test_rate / 2,
'L2_pyramidal': test_rate / 2,
'L2_basket': test_rate / 2}
assert cell_response.mean_rates(tstart, tstop, gid_ranges,
mean_type='trial') == {
'L5_pyramidal': [0.0, test_rate],
'L5_basket': [0.0, test_rate],
'L2_pyramidal': [test_rate, 0.0],
'L2_basket': [test_rate, 0.0]}
assert cell_response.mean_rates(tstart, tstop, gid_ranges,
mean_type='cell') == {
'L5_pyramidal': [[0.0], [test_rate]],
'L5_basket': [[0.0], [test_rate]],
'L2_pyramidal': [[test_rate], [0.0]],
'L2_basket': [[test_rate], [0.0]]}
# Write spike file with no 'types' column
for fname in sorted(glob(str(tmpdir.join('spk_*.txt')))):
times_gids_only = np.loadtxt(fname, dtype=str)[:, (0, 1)]
np.savetxt(fname, times_gids_only, delimiter='\t', fmt='%s')
# Check that spike_types are updated according to gid_ranges
cell_response = read_spikes(tmpdir.join('spk_*.txt'),
gid_ranges=gid_ranges)
assert cell_response.spike_types == spike_types
# Check for gid_ranges errors
with pytest.raises(ValueError, match="gid_ranges must be provided if "
"spike types are unspecified in the file "):
cell_response = read_spikes(tmpdir.join('spk_*.txt'))
with pytest.raises(ValueError, match="gid_ranges should contain only "
"disjoint sets of gid values"):
gid_ranges = {'L2_pyramidal': range(3), 'L2_basket': range(2, 4),
'L5_pyramidal': range(4, 6), 'L5_basket': range(6, 8)}
cell_response = read_spikes(tmpdir.join('spk_*.txt'),
gid_ranges=gid_ranges)
| 5,344,670
|
def interpolate_points(variable, variable_name, old_r, old_theta, new_r, new_theta):
"""Interpolate the old grid onto the new grid."""
grid = griddata(
(old_r, old_theta), variable, (new_r, new_theta), method=INTERPOLATION_LEVEL, fill_value=-1
)
n_error = 0
for i, element in enumerate(grid):
if element == -1:
n_error += 1
grid[i] = griddata(
(old_r, old_theta), variable, (new_r[i], new_theta[i]), method="nearest"
)
if VERBOSE:
print(f"{variable_name} interpolation problem for at r = {new_r[i]} theta = {np.rad2deg(new_theta[i])}")
if n_error:
print(f"There were {n_error} interpolation errors for {variable_name}")
return grid
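
# Hypothetical usage sketch. INTERPOLATION_LEVEL and VERBOSE are module-level settings
# assumed to be defined elsewhere (e.g. "linear" and False); the toy polar grids below
# are made up purely for illustration.
import numpy as np

old_r = np.repeat(np.linspace(1.0, 10.0, 20), 10)
old_theta = np.tile(np.linspace(0.0, np.pi / 2, 10), 20)
density = np.exp(-old_r) * np.cos(old_theta)
new_r = np.repeat(np.linspace(1.5, 9.5, 15), 15)
new_theta = np.tile(np.linspace(0.0, np.pi / 2, 15), 15)
regridded = interpolate_points(density, "density", old_r, old_theta, new_r, new_theta)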
| 5,344,671
|
def attitude(request):
"""
View configuration for discussion step, where we will ask the user for her attitude towards a statement.
Route: /discuss/{slug}/attitude/{position_id}
:param request: request of the web server
:return: dictionary
"""
LOG.debug("View attitude: %s", request.matchdict)
emit_participation(request)
db_statement = request.validated['statement']
db_issue = request.validated['issue']
db_user = request.validated['user']
history_handler.save_and_set_cookie(request, db_user, db_issue)
session_history = request.validated.get('session_history')
prepared_discussion = discussion.attitude(db_issue, db_user, db_statement, session_history, request.path)
modify_discussion_url(prepared_discussion)
modify_discussion_bubbles(prepared_discussion, request.registry)
rdict = prepare_request_dict(request)
append_extras_dict(prepared_discussion, rdict, request.authenticated_userid, False)
return prepared_discussion
| 5,344,672
|
def currentProgram():
"""currentProgram page."""
return render_template(
"currentProgram-index.j2.html",
title="currentProgram",
subtitle="Demonstration of Flask blueprints in action.",
template="currentProgram-template",
currentProgram=getCurrentProgr(),
timeStarted=timeStarted,
)
| 5,344,673
|
def checkIsMember(request):
"""
    Looks up a business registration number to check whether the business is
    already registered as a Popbill (linked) member.
- https://docs.popbill.com/statement/python/api#CheckIsMember
"""
try:
        # Business registration number to look up (10 digits, without '-')
targetCorpNum = "1234567890"
response = statementService.checkIsMember(targetCorpNum)
return render(request, 'response.html', {'code': response.code, 'message': response.message})
except PopbillException as PE:
return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
| 5,344,674
|
def f1_score(y_true: List[List[str]], y_pred: List[List[str]],
*,
average: Optional[str] = 'micro',
suffix: bool = False,
mode: Optional[str] = None,
sample_weight: Optional[List[int]] = None,
zero_division: str = 'warn',
scheme: Optional[Type[Token]] = None,
partial_match: bool = False):
"""Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
average : string, [None, 'micro' (default), 'macro', 'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
mode : str, [None (default), `strict`].
if ``None``, the score is compatible with conlleval.pl. Otherwise,
the score is calculated strictly.
scheme : Token, [IOB2, IOE2, IOBES]
suffix : bool, False by default.
partial_match : bool, False by default.
Returns:
score : float or array of float, shape = [n_unique_labels].
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred, average='micro')
0.6666666666666666
>>> f1_score(y_true, y_pred, average='macro')
0.75
>>> f1_score(y_true, y_pred, average='weighted')
0.6666666666666666
>>> f1_score(y_true, y_pred, average=None)
array([0.5, 1. ])
"""
if mode == 'strict' and scheme:
_, _, f, _ = precision_recall_fscore_support_v1(y_true, y_pred,
average=average,
warn_for=('f-score',),
beta=1,
sample_weight=sample_weight,
zero_division=zero_division,
scheme=scheme,
suffix=suffix
)
else:
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
average=average,
warn_for=('f-score',),
beta=1,
sample_weight=sample_weight,
zero_division=zero_division,
suffix=suffix,
partial_match=partial_match)
return f
| 5,344,675
|
def test_txt_to_triplets():
"""Test txt_to_triplets function"""
data_file = './tests/data.txt'
u_col = 0
i_col = 1
r_col = 2
sep = '\t'
triplet_data = Reader.read_uir_triplets(data_file, u_col, i_col, r_col, sep, skip_lines=0)
assert len(triplet_data) == 10
assert triplet_data[4][2] == 3
assert triplet_data[6][1] == '478'
assert triplet_data[8][0] == '543'
try:
Reader.read_uir_triplets(data_file, 10)
except IndexError:
assert True
| 5,344,676
|
def main():
""" One time script to simply update the business types in Historic Duns instead of reloading from the source """
sess = GlobalDB.db().session
for historic_duns in sess.query(HistoricDUNS).all():
historic_duns.business_types = [DUNS_BUSINESS_TYPE_DICT[type_code]
for type_code in historic_duns.business_types_codes
if type_code in DUNS_BUSINESS_TYPE_DICT]
sess.commit()
sess.close()
logger.info("Updating historical DUNS complete")
| 5,344,677
|
def show_batch(ds: tf.data.Dataset,
classes: list,
rescale: bool = False,
size: tuple = (10, 10),
title: str = None):
"""
Function to show a batch of images including labels from tf.data object
Args:
ds: a (batched) tf.data.Dataset
classes: a list of all classes (in order of one-hot-encoding)
        rescale: boolean indicating whether to multiply image values by 255
size: tuple giving plot size
title: plot title
Returns:
matplotlib.pyplot
"""
plt.figure(figsize=size)
# Take on batch from dataset and iterate over image-label-combination
for image, label in ds.take(1):
image_array = image.numpy()
        # Undo the preprocess_input scaling so the images can be plotted
image_array += 1.0
image_array /= 2.0
label_array = label.numpy()
batch_size = image_array.shape[0]
for idx in range(batch_size):
label = classes[np.argmax(label_array[idx])]
            ax = plt.subplot(int(np.ceil(batch_size / 4)), 4, idx + 1)
if rescale:
plt.imshow(image_array[idx] * 255)
else:
plt.imshow(image_array[idx])
plt.title(label + ' ' + str(image_array[idx].shape), fontsize=10)
plt.axis('off')
if title is not None:
plt.suptitle(title)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
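
# Hypothetical usage sketch: build a tiny batched tf.data.Dataset of random "images"
# scaled to [-1, 1] (matching the undo-scaling above) with one-hot labels. Class names
# and shapes are made up.
import numpy as np
import tensorflow as tf

images = np.random.uniform(-1.0, 1.0, size=(8, 64, 64, 3)).astype("float32")
labels = tf.one_hot(np.random.randint(0, 3, size=8), depth=3)
demo_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(8)
show_batch(demo_ds, classes=["cat", "dog", "bird"], size=(8, 8), title="demo batch")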
| 5,344,678
|
def calc_proportion_identical(lst: Any) -> float:
"""
Returns a value between 0 and 1 for the uniformity of the values
in LST, i.e. higher if they're all the same.
"""
def count_most_common(lst):
"""
Find the most common item in LST, and count how many times it occurs.
"""
# Counter(['a', 'b', 'a']).most_common(2) -> [
# ('a', 2),
# ('b', 1),
# ]
# so this gives the count of the most common (in this case 2 occurrences of 'a')
return Counter(lst).most_common(1)[0][1]
most_common = count_most_common(lst)
if most_common == 1:
return 0
else:
return most_common / len(lst)
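
# A few hand-checked examples of the behaviour described above (hypothetical usage,
# not part of the original module):
assert calc_proportion_identical(['a', 'a', 'b']) == 2 / 3      # most common item covers 2 of 3 slots
assert calc_proportion_identical(['a', 'b', 'c']) == 0          # every item unique -> 0, not 1/3
assert calc_proportion_identical(['x', 'x', 'x', 'x']) == 1.0   # fully uniform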
| 5,344,679
|
def _rolling_mad(arr, window):
"""Rolling window MAD outlier detection on 1d array."""
outliers = []
for i in range(window, len(arr)):
cur = arr[(i - window) : i]
med, cur_mad = _mad(cur)
cur_out = cur > (med + cur_mad * 3)
idx = list(np.arange((i - window), i)[cur_out])
outliers += idx
outliers = list(set(outliers))
# turn index into boolean
bool_outliers = np.zeros(arr.shape[0], dtype=bool)
bool_outliers[outliers] = True
return bool_outliers
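
# Hedged sketch of the `_mad` helper assumed above (it is defined elsewhere in this
# module): it is expected to return the median and the median absolute deviation of a
# 1d array. Whether the MAD is scaled (e.g. by 1.4826) is an assumption left open here.
import numpy as np

def _mad_sketch(arr):
    med = np.median(arr)
    return med, np.median(np.abs(arr - med))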
| 5,344,680
|
def test_setup_chunk_task_no_chunks(mock_import_chunk, batch, valid_user):
"""Assert that if a batch has no chunks, nothing happens."""
tasks.setup_chunk_task(batch, "PUBLISHED", valid_user.username)
mock_import_chunk.delay.assert_not_called()
| 5,344,681
|
def compute_mean_and_cov(embeds, labels):
"""Computes class-specific means and shared covariance matrix of given embedding.
The computation follows Eq (1) in [1].
Args:
embeds: An np.array of size [n_train_sample, n_dim], where n_train_sample is
the sample size of training set, n_dim is the dimension of the embedding.
labels: An np.array of size [n_train_sample, ]
Returns:
mean_list: A list of len n_class, and the i-th element is an np.array of
      size [n_dim, ] corresponding to the mean of the fitted Gaussian distribution
for the i-th class.
    cov: The shared covariance matrix of the size [n_dim, n_dim].
"""
n_dim = embeds.shape[1]
n_class = int(np.max(labels)) + 1
mean_list = []
cov = np.zeros((n_dim, n_dim))
for class_id in range(n_class):
data = embeds[labels == class_id]
data_mean = np.mean(data, axis=0)
cov += np.dot((data - data_mean).T, (data - data_mean))
mean_list.append(data_mean)
cov = cov / len(labels)
return mean_list, cov
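
# Hypothetical usage sketch with synthetic embeddings (shapes and class count made up):
import numpy as np

rng = np.random.default_rng(0)
demo_embeds = rng.normal(size=(200, 16))       # 200 samples, 16-dim embedding
demo_labels = np.repeat(np.arange(4), 50)      # 4 classes, 50 samples each
class_means, shared_cov = compute_mean_and_cov(demo_embeds, demo_labels)
assert len(class_means) == 4 and shared_cov.shape == (16, 16)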
| 5,344,682
|
def stackedensemble_multinomial_test():
"""This test check the following (for multinomial regression):
1) That H2OStackedEnsembleEstimator executes w/o errors on a 3-model manually constructed ensemble.
2) That .predict() works on a stack.
3) That .model_performance() works on a stack.
4) That test performance is better on ensemble vs the base learners.
5) That the validation_frame arg on H2OStackedEnsembleEstimator works correctly.
"""
df = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/test.csv.gz"))
y = "C785"
x = list(range(784))
df[y] = df[y].asfactor()
train = df[0:5000,:]
test = df[5000:10000,:]
# Number of CV folds (to generate level-one data for stacking)
nfolds = 2
# train and cross-validate a GBM
my_gbm = H2OGradientBoostingEstimator(distribution="multinomial",
nfolds=nfolds,
ntrees=10,
fold_assignment="Modulo",
keep_cross_validation_predictions=True,
seed=1)
my_gbm.train(x=x, y=y, training_frame=train)
# evaluate the performance
perf_gbm_train = my_gbm.model_performance()
perf_gbm_test = my_gbm.model_performance(test_data=test)
print("GBM training performance: ")
print(perf_gbm_train)
print("GBM test performance: ")
print(perf_gbm_test)
# train and cross-validate a RF
my_rf = H2ORandomForestEstimator(ntrees=10,
nfolds=nfolds,
fold_assignment="Modulo",
keep_cross_validation_predictions=True,
seed=1)
my_rf.train(x=x, y=y, training_frame=train)
# evaluate performance
perf_rf_train = my_rf.model_performance()
perf_rf_test = my_rf.model_performance(test_data=test)
print("RF training performance: ")
print(perf_rf_train)
print("RF test performance: ")
print(perf_rf_test)
# Train and cross-validate an extremely-randomized RF
my_xrf = H2ORandomForestEstimator(ntrees=10,
nfolds=nfolds,
histogram_type="Random",
fold_assignment="Modulo",
keep_cross_validation_predictions=True,
seed=1)
my_xrf.train(x=x, y=y, training_frame=train)
# evaluate performance
perf_xrf_train = my_xrf.model_performance()
perf_xrf_test = my_xrf.model_performance(test_data=test)
print("XRF training performance: ")
print(perf_xrf_train)
print("XRF test performance: ")
print(perf_xrf_test)
    # Train a stacked ensemble using the GBM and the two RF models above
stack = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id, my_xrf.model_id])
stack.train(x=x, y=y, training_frame=train, validation_frame=test) # also test that validation_frame is working
assert type(stack) == h2o.estimators.stackedensemble.H2OStackedEnsembleEstimator
assert stack.type == "classifier"
# Check that prediction works
pred = stack.predict(test_data=test)
print(pred)
assert pred.nrow == test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(test.nrow)
    assert pred.ncol == 11, "expected pred.ncol to be equal to 11 but it was equal to " + str(pred.ncol)
# Evaluate ensemble performance
perf_stack_train = stack.model_performance()
assert type(perf_stack_train) == h2o.model.metrics_base.H2OMultinomialModelMetrics
perf_stack_valid = stack.model_performance(valid=True)
assert type(perf_stack_valid) == h2o.model.metrics_base.H2OMultinomialModelMetrics
perf_stack_test = stack.model_performance(test_data=test)
assert type(perf_stack_test) == h2o.model.metrics_base.H2OMultinomialModelMetrics
# Check that stack perf is better (smaller) than the best (smaller) base learner perf:
# Test Mean Per Class Error for each base learner
baselearner_best_mean_per_class_error_test = min(perf_gbm_test.mean_per_class_error(), \
perf_rf_test.mean_per_class_error(), \
perf_xrf_test.mean_per_class_error())
stack_mean_per_class_error_test = perf_stack_test.mean_per_class_error()
print("Best Base-learner Test Mean Per Class Error: {0}".format(baselearner_best_mean_per_class_error_test))
print("Ensemble Test Mean Per Class Error: {0}".format(stack_mean_per_class_error_test))
    assert stack_mean_per_class_error_test <= baselearner_best_mean_per_class_error_test, \
        "expected stack_mean_per_class_error_test to be less than or equal to " \
        "baselearner_best_mean_per_class_error_test, but it wasn't: " \
        "baselearner_best_mean_per_class_error_test = " + \
        str(baselearner_best_mean_per_class_error_test) + \
        ", stack_mean_per_class_error_test = " + \
        str(stack_mean_per_class_error_test)
    # Check that passing `test` as a validation_frame produces the same metric as stack.model_performance(test);
    # since the metrics objects are not identical, we just check that the mean per-class error matches
perf_stack_validation_frame = stack.model_performance(valid=True)
    assert stack_mean_per_class_error_test == perf_stack_validation_frame.mean_per_class_error(), \
        "expected stack_mean_per_class_error_test to be the same as " \
        "perf_stack_validation_frame.mean_per_class_error(), but it wasn't: " \
        "perf_stack_validation_frame.mean_per_class_error() = " + \
        str(perf_stack_validation_frame.mean_per_class_error()) + \
        ", stack_mean_per_class_error_test = " + \
        str(stack_mean_per_class_error_test)
| 5,344,683
|
def wcenergy(seq: str, temperature: float, negate: bool = False) -> float:
"""Return the wc energy of seq binding to its complement."""
loop_energies = calculate_loop_energies_dict(temperature, negate)
return sum(loop_energies[seq[i:i + 2]] for i in range(len(seq) - 1))
| 5,344,684
|
def getDirectoriesInDir(directory):
"""
Returns all the directories in the specified directory.
"""
directories = {}
for d in os.listdir(directory):
path = os.path.join(directory, d)
if os.path.isdir(path):
directories[d] = path
return directories
| 5,344,685
|
def upload_csv():
"""
Upload csv file
"""
upload_csv_form = UploadCSVForm()
if upload_csv_form.validate_on_submit():
file = upload_csv_form.csv.data
ClassCheck.process_csv_file(file)
flash('CSV file uploaded!', 'success')
return redirect('/')
| 5,344,686
|
async def clean(request: Request) -> RedirectResponse:
"""Access this view (GET "/clean") to remove all session contents."""
request.session.clear()
return RedirectResponse("/")
| 5,344,687
|
def svd(A):
"""
Singular Value Decomposition
Parameters
----------
A: af.Array
A 2 dimensional arrayfire array.
Returns
-------
(U,S,Vt): tuple of af.Arrays
- U - A unitary matrix
- S - An array containing the elements of diagonal matrix
- Vt - A unitary matrix
Note
----
- The original matrix `A` is preserved and additional storage space is required for decomposition.
- If the original matrix `A` need not be preserved, use `svd_inplace` instead.
- The original matrix `A` can be reconstructed using the outputs in the following manner.
>>> Smat = af.diag(S, 0, False)
>>> A_recon = af.matmul(af.matmul(U, Smat), Vt)
"""
U = Array()
S = Array()
Vt = Array()
safe_call(backend.get().af_svd(c_pointer(U.arr), c_pointer(S.arr), c_pointer(Vt.arr), A.arr))
return U, S, Vt
| 5,344,688
|
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
| 5,344,689
|
def check_analyzed_dependency(context, package, version):
"""Check for the existence of analyzed dependency for given package."""
jsondata = context.response.json()
assert jsondata is not None
path = "result/0/user_stack_info/analyzed_dependencies"
analyzed_dependencies = get_value_using_path(jsondata, path)
assert analyzed_dependencies is not None
for analyzed_dependency in analyzed_dependencies:
if analyzed_dependency["name"] == package \
and analyzed_dependency["version"] == version:
break
else:
raise Exception('Package {package} with version {version} not found'.
format(package=package, version=version))
| 5,344,690
|
def csr_matrix_multiply(S, x): # noqa
"""Multiplies a :class:`scipy.sparse.csr_matrix` S by an object-array vector x.
"""
h, w = S.shape
import numpy
result = numpy.empty_like(x)
for i in range(h):
result[i] = sum(S.data[idx]*x[S.indices[idx]] # noqa pylint:disable=unsupported-assignment-operation
for idx in range(S.indptr[i], S.indptr[i+1]))
return result
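
# Hypothetical usage sketch. The helper exists for object-dtype vectors; here it is
# exercised with ordinary floats boxed in an object array just to show the calling
# convention and the hand-checked result.
import numpy as np
import scipy.sparse as sp

S_demo = sp.csr_matrix(np.array([[2.0, 0.0], [1.0, 3.0]]))
x_demo = np.array([0.5, 2.0], dtype=object)
print(csr_matrix_multiply(S_demo, x_demo))   # [1.0 6.5]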
| 5,344,691
|
def calculate_sem_IoU(pred_np, seg_np, num_classes):
"""Calculate the Intersection Over Union of the predicted classes and the ground truth
Args:
pred_np (array_like): List of predicted class labels
seg_np (array_like): List of ground truth labels
num_classes (int): Number of classes in the dataset
"""
I_all = np.zeros(num_classes)
U_all = np.zeros(num_classes)
for sem_idx in range(len(seg_np)):
for sem in range(num_classes):
I = np.sum(np.logical_and(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
U = np.sum(np.logical_or(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
I_all[sem] += I
U_all[sem] += U
return I_all / U_all
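
# Hypothetical usage sketch with two tiny hand-labelled "point clouds":
import numpy as np

demo_preds = [np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0])]
demo_truth = [np.array([0, 1, 1, 1]), np.array([1, 1, 1, 0])]
print(calculate_sem_IoU(demo_preds, demo_truth, num_classes=2))   # -> [0.5, 0.66666667]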
| 5,344,692
|
def create_mask(imaging, fileID, cleanup=2):
"""separate function to create masks which can be loaded in computrs with problem at masking images"""
imaging_folder = os.path.join(ROOTDIR, FILEDIR, 'preprocessed')
masks_folder = os.path.join(ROOTDIR, FILEDIR, 'masks')
for idx, image in enumerate(imaging):
filename_save = os.path.join(masks_folder, 'mask' + fileID[idx] + '.nii.gz')
if not os.path.isfile(filename_save):
mask = ants.get_mask(ants.image_read(os.path.join(imaging_folder, image)), cleanup=cleanup)
ants.image_write(mask, filename=filename_save)
| 5,344,693
|
def test_remove_with_list_and_set():
"""Testing remove with exclude as a set or list"""
assert remove("example", ['e', 'x']) == "ampl"
assert remove("example", set(['e', 'x'])) == "ampl"
| 5,344,694
|
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
Will generate errors if the matrices are singular: user must handle this
through his own regularization schemes.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
regularization parameter to use _only in the case of matrices
bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-1]
if nb_channels == 1:
# scalar case
invM = 1.0/(M+eps)
elif nb_channels == 2:
# two channels case: analytical expression
det = (
M[..., 0, 0]*M[..., 1, 1] -
M[..., 0, 1]*M[..., 1, 0])
invDet = 1.0/(det)
invM = np.empty_like(M)
invM[..., 0, 0] = invDet*M[..., 1, 1]
invM[..., 1, 0] = -invDet*M[..., 1, 0]
invM[..., 0, 1] = -invDet*M[..., 0, 1]
invM[..., 1, 1] = invDet*M[..., 0, 0]
else:
# general case : no use of analytical expression (slow!)
invM = np.linalg.pinv(M, eps)
return invM
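
# Hypothetical usage sketch: invert a stack of random, comfortably invertible 2x2
# matrices and compare against numpy's general-purpose inverse.
import numpy as np

rng = np.random.default_rng(1)
M_demo = rng.normal(size=(5, 7, 2, 2))
M_demo = M_demo @ np.swapaxes(M_demo, -1, -2) + 0.1 * np.eye(2)   # make them positive definite
invM_demo = _invert(M_demo, eps=1e-10)
assert np.allclose(invM_demo, np.linalg.inv(M_demo))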
| 5,344,695
|
def read_densecsv_to_anndata(ds_file: Path):
"""Reads a dense text file in csv format into the AnnData format."""
return read_densemat_to_anndata(ds_file, sep=",")
| 5,344,696
|
def get_original_date_jpeg(filepath):
"""
returns the DateTimeOriginal/DateTimeDigitized exif data from the given jpeg file
"""
try:
with Image.open(filepath) as image:
# NOTE: using old "private" method because new public method
# doesn't include this tag. It does include 306 "DateTime"
# though, but "DateTime" might differ from "DateTimeOriginal"
# pylint: disable-next=protected-access
date_created = image._getexif().get(TAG_DATETIME_ORIGINAL)
if not date_created:
date_created = image._getexif().get(TAG_DATETIME_DIGITIZED)
if date_created:
# pylint: disable-next=protected-access
date_created += "." + image._getexif().get(
TAG_SUBSECTIME_ORIGINAL, ""
).zfill(3)
except (UnidentifiedImageError, AttributeError):
logger.debug("unable to parse '%s'", filepath)
return None
if date_created:
date_created = parse_jpeg_date(date_created)
return date_created
| 5,344,697
|
def _singleton(name):
"""Returns a singleton object which represents itself as `name` when printed,
but is only comparable (via identity) to itself."""
return type(name, (), {'__repr__': lambda self: name})()
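
# Hypothetical usage sketch: a sentinel distinct from None and from any user value,
# handy for "argument not supplied" defaults.
_MISSING = _singleton('MISSING')

def lookup(mapping, key, default=_MISSING):
    if key in mapping:
        return mapping[key]
    if default is _MISSING:
        raise KeyError(key)
    return default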
| 5,344,698
|
def get_zone(*, zone_name: str):
""" Get zone with given zone name.
Args:
zone_name: zone name, e.g. "haomingyin.com"
Returns:
json: zone details
"""
params = dict(name=zone_name)
zones = _get("zones", params=params)
if not zones:
raise CloudflareAPIError(f"Unable to fetch zone {zone_name}")
return zones[0]
| 5,344,699
|