| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
from datetime import datetime
import calendar
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
def create_calendar(year=None, month=None):
"""
Create an inline keyboard with the provided year and month
"""
    now = datetime.now()
if year is None:
year = now.year
if month is None:
month = now.month
data_ignore = create_calendar_callback_data("IGNORE", year, month, 0)
keyboard = []
# First row - Month and Year
row = [
InlineKeyboardButton(
calendar.month_name[month] + " " + str(year), callback_data=data_ignore
)
]
keyboard.append(row)
# Second row - Week Days
row = []
for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
row.append(InlineKeyboardButton(day, callback_data=data_ignore))
keyboard.append(row)
my_calendar = calendar.monthcalendar(year, month)
for week in my_calendar:
row = []
for day in week:
if day == 0:
row.append(InlineKeyboardButton(" ", callback_data=data_ignore))
else:
row.append(
InlineKeyboardButton(
str(day),
callback_data=create_calendar_callback_data(
"DAY", year, month, day
),
)
)
keyboard.append(row)
# Last row - Buttons
row = [
InlineKeyboardButton(
"<",
callback_data=create_calendar_callback_data("PREV-MONTH", year, month, day),
),
InlineKeyboardButton(" ", callback_data=data_ignore),
InlineKeyboardButton(
">",
callback_data=create_calendar_callback_data("NEXT-MONTH", year, month, day),
),
]
keyboard.append(row)
return InlineKeyboardMarkup(keyboard)
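# Minimal usage sketch (not part of the original snippet): sending the calendar
# from a python-telegram-bot handler; the "start" handler name and the
# surrounding dispatcher wiring are assumed.
def start(update, context):
    update.message.reply_text(
        "Please pick a date:",
        reply_markup=create_calendar())  # defaults to the current month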
|
edd7dd0245bbcb4269eaa4767cf16eb382db29e8
| 3,642,600
|
import imageio
from skimage import transform as trf  # assumed source of the "trf" alias
def read_image(link, size):
    """ Read image at link and resize it to the given size
    Usage:
    image = read_image(link, size)
Input variables:
link: path to image
size: output size of image
Output variables:
image: read and resized image
"""
image = imageio.imread(link)
image = trf.resize(image, size)
return image
|
8465fc3ef8f6d8829da251cc924b442a4b7f3d07
| 3,642,601
|
def make_send_data(application_preset):
"""Generate the data to send to the protocol."""
if application_preset == 'none':
# data = bytes([i for i in range(32)])
# data = bytes([i for i in range(54)]) # for teensy 4.0
data = bytes([i for i in range(54)]) # for teensy lc & micro
data_kwargs = {}
else:
if application_preset == 'pubsub':
# (data, schema) = (tuple(i for i in range(185 - 18)), 0x60) # for due and teensy 4.0
(data, schema) = (tuple(i for i in range(50 - 24)), 0x60) # for teensy lc & micro
# (data, schema) = (tuple(i for i in range(50 - 18)), 0x60) # for uno
data = (b'echo', data)
# data = (b'copy', data)
# data = (b'reply', data)
# data = (b'ping', data)
# (data, schema) = ((b'blink', True), 0x02)
# (data, schema) = ((b'blink', False), 0x02)
# (data, schema) = ((b'prefix', ('world!', 'Hello, ')), 0x00)
elif application_preset == 'minimal':
# (data, schema) = (tuple(i for i in range(185 - 18)), 0x60) # for due and teensy 4.0
(data, schema) = (tuple(i for i in range(50 - 24)), 0x60) # for teensy lc & micro
# (data, schema) = (tuple(i for i in range(50 - 18)), 0x60) # for uno
(data, schema) = (('hello', True, None, 0.125, b'\x00\x01\x02\x03\x04') + data, 0x00)
# (data, schema) = (('hello', 123, 456, 789), 0x00)
else:
raise NotImplementedError('Unsupported protocol configuration!')
data_kwargs = {
'schema': schema,
'format': SERIALIZATION_FORMATS[('binary', 'dynamic', 'msgpack')],
'type': DATA_TYPES[('presentation', 'document')]
}
return (data, data_kwargs)
|
2b04a0bf977e44c3bd2a2aa6df0834ad458364bc
| 3,642,602
|
def dp4a(x_scope="local", y_scope="local", z_scope="local", dtypes=("int8", "int8")):
"""
Int8 dot product reduced by every 4 elements using __dp4a
Parameters
----------
x_scope : str, optional
The storage scope of buffer for lhs
y_scope : str, optional
The storage scope of buffer for rhs
z_scope : str, optional
The storage scope of buffer for result
dtypes: tuple of strs, optional
The dtype of x and y
Returns
-------
intrin : TensorIntrin
The dp4a TensorIntrin that can be used in tensorizing schedule.
"""
n = 4 # dp4a requires operands packed by 4
result_dtype = "int32" if dtypes[1] == "int8" else "uint32"
x = te.placeholder((n,), name="x", dtype=dtypes[0])
y = te.placeholder((n,), name="y", dtype=dtypes[1])
k = te.reduce_axis((0, n), name="rc")
z = te.compute(
(1,), lambda i: te.sum(x[k].astype(result_dtype) * y[k].astype(result_dtype), axis=[k])
)
def _intrin_func(ins, outs):
def _instr(index):
xx, yy = ins
zz = outs[0]
zz_dtype = zz.dtype
if index == 1:
return zz.vstore(0, tvm.tir.const(0, zz_dtype))
ib = tvm.tir.ir_builder.create()
vec_x_dtype = "int8x4" if xx.dtype == "int8" else "uint8x4"
vec_y_dtype = "int8x4" if yy.dtype == "int8" else "uint8x4"
vec_x = xx.vload(0, dtype=vec_x_dtype)
vec_y = yy.vload(0, dtype=vec_y_dtype)
prev_z = 0 if index == 0 else zz.vload(0)
if is_target("rocm"):
# TODO(masahi): Here we are assuming that we are compiling for gfx10 or later
# We can refine the specification for dot product on rocm if needed later.
# We can just use "llvm.amdgcn.udot4" for u8u8u32, but it is not tested.
assert (
                    dtypes[0] == "int8" and dtypes[1] == "int8"
), "u8u8u32 dot product for rocm not supported yet"
new_z = tvm.tir.call_llvm_pure_intrin(
zz_dtype,
"llvm.amdgcn.sdot4",
tvm.tir.const(4, "uint32"),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_x),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_y),
prev_z,
True,
)
else:
new_z = tvm.tir.call_pure_extern(zz_dtype, "__dp4a", vec_x, vec_y, prev_z)
ib.emit(zz.vstore(0, new_z))
return ib.get()
return _instr(0), _instr(1), _instr(2) # body, reset, update
default_buffer_params = {"data_alignment": 4, "offset_factor": 1}
scopes = {x: x_scope, y: y_scope, z: z_scope}
binds = {
t: tvm.tir.decl_buffer(
t.shape, t.dtype, t.op.name, scope=scopes[t], **default_buffer_params
)
for t in [x, y, z]
}
return te.decl_tensor_intrin(
z.op, _intrin_func, binds=binds, default_buffer_params=default_buffer_params
)
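# Hedged usage sketch (not from the original source): within a CUDA int8
# schedule, the returned TensorIntrin is applied to the packed inner reduction
# axis; the schedule "s", stage "conv" and axis "rci" are assumed placeholders.
s[conv].tensorize(rci, dp4a("shared", "shared", "local"))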
|
7d384867b9854c880288dd6ad2be6400d861282a
| 3,642,603
|
def fileexists(filename):
"""Replacement method for os.stat."""
try:
f = open( filename, 'r' )
f.close()
return True
    except (IOError, OSError):
        pass
return False
|
126460a04e7a8faf7517cb46c480670f5a067b1a
| 3,642,604
|
def test_knowledge_graph_init(graph_mutation_client, graph_mutation_responses):
"""Test knowldge graph client initialization."""
return graph_mutation_client.named_types
|
932a425e4bbdc301ef223e0f91936ecacf3bd5aa
| 3,642,605
|
import numpy as np
def read_camera_matrix(filename):
"""
Read camera matrix from text file exported by PhotoScan
"""
with open(filename, 'r') as f:
s = f.read()
s = s.split(',')
s = [x.strip('\Matrix([[') for x in s]
s = [x.strip(']])') for x in s]
s = [x.strip('[') for x in s]
s = [x.strip(']') for x in s]
s = [x.strip('\n [') for x in s]
M = np.array([float(x) for x in s])
return M.reshape((4,4))
|
be01c60d3c8940250c578a67de2c06110fb403e4
| 3,642,606
|
def find_child_joints(model, joint_name):
""" Find all the joints parented to the given joint. """
joint_id = joint_name_to_index(model)
link_id = link_name_to_index(model)
# FIXME : Add exception to catch invalid joint names
joint = model.joints[joint_id[joint_name]]
clink = joint.child
return [
j.name for j in model.joints if j.parent == clink
]
|
35deab9fca062e90547cf6a550f9e14d654f0462
| 3,642,607
|
import pathlib
def stem(path: str) -> str:
"""returns the stem of a path (path without parent directory and without extension)
    e.g.
j.sals.fs.stem("/tmp/tmp-5383p1GOmMOOwvfi.tpl") -> 'tmp-5383p1GOmMOOwvfi'
Args:
path (str): path we want to get its stem
Returns:
str: path without parent directory and without extension
"""
return pathlib.Path(path).stem
|
ec7507becb31bda7662122668b490148ca15d347
| 3,642,608
|
import json
import re
def fluent_text(field, schema):
"""
Accept multilingual text input in the following forms
and convert to a json string for storage:
1. a multilingual dict, eg.
{"en": "Text", "fr": "texte"}
2. a JSON encoded version of a multilingual dict, for
compatibility with old ways of loading data, eg.
'{"en": "Text", "fr": "texte"}'
3. separate fields per language (for form submissions):
fieldname-en = "Text"
fieldname-fr = "texte"
When using this validator in a ckanext-scheming schema setting
"required" to true will make all form languages required to
pass validation.
"""
# combining scheming required checks and fluent field processing
# into a single validator makes this validator more complicated,
# but should be easier for fluent users and eliminates quite a
# bit of duplication in handling the different types of input
required_langs = []
alternate_langs = {}
if field and field.get('required'):
required_langs = fluent_form_languages(field, schema=schema)
alternate_langs = fluent_alternate_languages(field, schema=schema)
def validator(key, data, errors, context):
# just in case there was an error before our validator,
# bail out here because our errors won't be useful
if errors[key]:
return
value = data[key]
# 1 or 2. dict or JSON encoded string
if value is not missing:
if isinstance(value, basestring):
try:
value = json.loads(value)
except ValueError:
errors[key].append(_('Failed to decode JSON string'))
return
except UnicodeDecodeError:
errors[key].append(_('Invalid encoding for JSON string'))
return
if not isinstance(value, dict):
errors[key].append(_('expecting JSON object'))
return
for lang, text in value.iteritems():
try:
m = re.match(BCP_47_LANGUAGE, lang)
except TypeError:
errors[key].append(_('invalid type for language code: %r')
% lang)
continue
if not m:
errors[key].append(_('invalid language code: "%s"') % lang)
continue
if not isinstance(text, basestring):
errors[key].append(_('invalid type for "%s" value') % lang)
continue
if isinstance(text, str):
try:
value[lang] = text.decode('utf-8')
except UnicodeDecodeError:
                        errors[key].append(
                            _('invalid encoding for "%s" value') % lang)
for lang in required_langs:
if value.get(lang) or any(
value.get(l) for l in alternate_langs.get(lang, [])):
continue
errors[key].append(_('Required language "%s" missing') % lang)
if not errors[key]:
data[key] = json.dumps(value)
return
# 3. separate fields
output = {}
prefix = key[-1] + '-'
extras = data.get(key[:-1] + ('__extras',), {})
for name, text in extras.iteritems():
if not name.startswith(prefix):
continue
lang = name.split('-', 1)[1]
m = re.match(BCP_47_LANGUAGE, lang)
if not m:
errors[name] = [_('invalid language code: "%s"') % lang]
output = None
continue
if output is not None:
output[lang] = text
for lang in required_langs:
if extras.get(prefix + lang) or any(
extras.get(prefix + l) for l in alternate_langs.get(lang, [])):
continue
errors[key[:-1] + (key[-1] + '-' + lang,)] = [_('Missing value')]
output = None
if output is None:
return
for lang in output:
del extras[prefix + lang]
data[key] = json.dumps(output)
return validator
|
e07b8f7d83500bd79662f36df5ff2a4cf42af092
| 3,642,609
|
def palette_color_brewer_q_Set3(reverse=False):
"""Generate set3 Brewer palette of a given size ... interpolate as needed ... best for discrete mapping
Args:
reverse: order the colors backward as compared to standard Brewer palette
Returns:
lambda: generates a list of colors
See Also:
:meth:`gen_node_color_map`, :meth:`gen_edge_color_map`
"""
return lambda value_count: _palette_color_brewer(value_count, colorbrewer.Set3, reverse)
|
38355f8b9f7a69ad7c7f4951d324e9b8bbd233bb
| 3,642,610
|
from magmap.io import export_rois
import os
def process_file(path, proc_mode, series=None, subimg_offset=None,
subimg_size=None, roi_offset=None, roi_size=None):
"""Processes a single image file non-interactively.
Assumes that the image has already been set up.
Args:
path (str): Path to image from which MagellanMapper-style paths will
be generated.
proc_mode (str): Processing mode, which should be a key in
:class:`config.ProcessTypes`, case-insensitive.
series (int): Image series number; defaults to None.
subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
defaults to None.
subimg_size (List[int]): Sub-image size as (z,y,x) to load;
defaults to None.
roi_offset (List[int]): Region of interest offset as (x, y, z) to
process; defaults to None.
roi_size (List[int]): Region of interest size of region to process,
given as (x, y, z); defaults to None.
Returns:
Tuple of stats from processing, or None if no stats, and
text feedback from the processing, or None if no feedback.
"""
# PROCESS BY TYPE
stats = None
fdbk = None
filename_base = importer.filename_to_base(path, series)
proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
print("{}\n".format("-" * 80))
if proc_type is config.ProcessTypes.LOAD:
# loading completed
return None, None
elif proc_type is config.ProcessTypes.LOAD:
# already imported so does nothing
print("imported {}, will exit".format(path))
elif proc_type is config.ProcessTypes.EXPORT_ROIS:
# export ROIs; assumes that info_proc was already loaded to
# give smaller region from which smaller ROIs from the truth DB
# will be extracted
db = config.db if config.truth_db is None else config.truth_db
export_rois.export_rois(
db, config.image5d, config.channel, filename_base,
config.plot_labels[config.PlotLabels.PADDING],
config.unit_factor, config.truth_db_mode,
os.path.basename(config.filename))
elif proc_type is config.ProcessTypes.TRANSFORM:
# transpose, rescale, and/or resize whole large image
transformer.transpose_img(
path, series, plane=config.plane,
rescale=config.transform[config.Transforms.RESCALE],
target_size=config.roi_size)
elif proc_type in (
config.ProcessTypes.EXTRACT, config.ProcessTypes.ANIMATED):
# generate animated GIF or extract single plane
export_stack.stack_to_img(
config.filenames, roi_offset, roi_size, series, subimg_offset,
subimg_size, proc_type is config.ProcessTypes.ANIMATED,
config.suffix)
elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
# export blobs to CSV file
export_rois.blobs_to_csv(config.blobs.blobs, filename_base)
elif proc_type in (
config.ProcessTypes.DETECT, config.ProcessTypes.DETECT_COLOC):
# detect blobs in the full image, +/- co-localization
coloc = proc_type is config.ProcessTypes.DETECT_COLOC
stats, fdbk, _ = stack_detect.detect_blobs_stack(
filename_base, subimg_offset, subimg_size, coloc)
elif proc_type is config.ProcessTypes.COLOC_MATCH:
if config.blobs is not None and config.blobs.blobs is not None:
# colocalize blobs in separate channels by matching blobs
shape = (config.image5d.shape[1:] if subimg_size is None
else subimg_size)
matches = colocalizer.StackColocalizer.colocalize_stack(
shape, config.blobs.blobs)
# insert matches into database
colocalizer.insert_matches(config.db, matches)
else:
print("No blobs loaded to colocalize, skipping")
elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
config.ProcessTypes.EXPORT_PLANES_CHANNELS):
# export each plane as a separate image file
export_stack.export_planes(
config.image5d, config.savefig, config.channel,
proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)
elif proc_type is config.ProcessTypes.EXPORT_RAW:
# export the main image as a raw data file
out_path = libmag.combine_paths(config.filename, ".raw", sep="")
libmag.backup_file(out_path)
np_io.write_raw_file(config.image5d, out_path)
elif proc_type is config.ProcessTypes.PREPROCESS:
# pre-process a whole image and save to file
# TODO: consider chunking option for larger images
profile = config.get_roi_profile(0)
out_path = config.prefix
if not out_path:
out_path = libmag.insert_before_ext(config.filename, "_preproc")
transformer.preprocess_img(
config.image5d, profile["preprocess"], config.channel, out_path)
return stats, fdbk
|
980c30c822119a788e9a0da142b68c0a67506be1
| 3,642,611
|
def get_loss_function(identifier):
"""
Gets the loss function from `identifier`.
:param identifier: the identifier
:type identifier: str or dict[str, str or dict]
:raise ValueError: if the function is not found
:return: the function
:rtype: function
"""
return _get(identifier, loss_functions, tensorflow.keras.losses.get,
name_only=True)
|
060a14f21332ad47273febeb7d4a8347a2fd3957
| 3,642,612
|
def find_defender(ships, side):
"""Crude method to find something approximating the best target when attacking"""
enemies = [x for x in ships if x['side'] != side and x['hp'] > 0]
if not enemies:
return None
# shoot already wounded enemies first
wounded = [x for x in enemies if x['hp'] < x['size']]
if wounded:
found = ships.index(wounded[0])
return found
# shoot boarders in priority (?)
boarding = [x for x in enemies if 'Boarding' in x['name']]
if boarding:
found = ships.index(boarding[0])
return found
# shoot 1 hp ships
hp_1 = [x for x in enemies if x['size'] == 1]
if hp_1:
found = ships.index(hp_1[0])
return found
# shoot 2 hp ships
hp_2 = [x for x in enemies if x['size'] == 2]
if hp_2:
found = ships.index(hp_2[0])
return found
# otherwise just shoot the first one (??!)
found = ships.index(enemies[0])
return found
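# Illustrative call (not from the original source), using toy ship dicts with
# the same field names the function expects:
ships = [
    {'name': 'Frigate', 'side': 0, 'hp': 3, 'size': 3},
    {'name': 'Boarding Craft', 'side': 1, 'hp': 2, 'size': 2},
    {'name': 'Scout', 'side': 1, 'hp': 1, 'size': 1},
]
find_defender(ships, side=0)  # -> 1: the enemy boarding craft is targeted first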
|
4c58ec01abae1f59ced47e61e257abe7f8923aea
| 3,642,613
|
import os
def get_parameters(image_path):
"""
Parses the image path to dictionary
:param str image_path: image path
:rtype dict
"""
image_directory = os.path.dirname(image_path)
image_filename = os.path.basename(image_path)
image_name = image_filename.split('.')[0]
image_extension = image_filename.split('.')[-1]
return {
'directory': image_directory,
'extension': image_extension,
'name': image_name,
'filename': image_filename,
'path': image_path
}
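# Example (not part of the original snippet):
params = get_parameters("/data/images/cat.png")
# params == {'directory': '/data/images', 'extension': 'png', 'name': 'cat',
#            'filename': 'cat.png', 'path': '/data/images/cat.png'}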
|
cffef64001c81fd3459d49239b1ce863c546b6ca
| 3,642,614
|
import csv
import numpy as np
import pandas as pd
from shapely.geometry import Polygon
def Create_clump_summaries(feature_file, simplify_threshold):
    """ Employs the pandas and Shapely libraries to simplify polygons by eliminating almost
colinear vertices. Generates two data structures: First - sort_clump_df: Panda Dataframe
containing the longest line in the simplified and unsimplified polygon, polygon area,
polygon number by order listed in feature_file. The Dataframe is sorted by
length of longest edge of simplified polygon. Second - polygon_dict which
is a dictionary with numpy arrays representing normalized polygons """
i= 0
clump_dict = {'clump':[],'area':[], 'max_line':[], 'max_line_simplify':[]}
polygon_dict ={}
with open(feature_file) as input_file2:
reader = csv.DictReader(input_file2)
for row1 in reader:
row1_polygon = row1['Polygon']
row1_polygon = row1_polygon[1:len(row1_polygon)-1]
row1_polygon_list = row1_polygon.split(':')
row1_polygon_list = [float(x) for x in row1_polygon_list]
even_pts = row1_polygon_list[0:len(row1_polygon_list)-1:2]
odd_pts = row1_polygon_list[1:len(row1_polygon_list):2]
row1_tuples = list(zip(even_pts,odd_pts))
# clump represents the polygon representing each clump
clump = Polygon(row1_tuples)
# Invoke Shapely to generate simplified polygon
clump2 = clump.simplify(simplify_threshold)
# Obtain points defining polygon, compute length of edges
npclump = np.array(clump.exterior)
npclump_shift = np.roll(npclump,1,axis=0)
diff_clump = npclump_shift - npclump
l2_clump = np.sqrt(((diff_clump**2).sum(axis=1)))
max_l2_clump = l2_clump.max()
npclump2 = np.array(clump2.exterior)
npclump2_shift = np.roll(npclump2,1,axis=0)
diff_clump2 = npclump2_shift - npclump2
l2_clump2 = np.sqrt(((diff_clump2**2).sum(axis=1)))
max_l2_clump2 = l2_clump2.max()
clump_dict['clump'].append(i)
clump_dict['max_line'].append(max_l2_clump)
clump_dict['area'].append(clump.area)
clump_dict['max_line_simplify'].append(max_l2_clump2)
# shift x and y polygon axis
polygon_dict[i] = npclump2 - npclump2.min(axis=0)
print('\n number', i, '\n area',clump.area, 'clump max line',max_l2_clump, 'simplified clump max line', max_l2_clump2)
i +=1
num_clumps = i-1
clump_df = pd.DataFrame(clump_dict)
sort_clump_df = clump_df.sort_values(by='max_line_simplify',ascending = False)
sort_clump_df.reset_index(inplace=True)
return sort_clump_df,polygon_dict, num_clumps
|
3b846760d78ad44630de0005e70e26f00b44b7d4
| 3,642,615
|
from sympy import S
from sympy.geometry.point import Point
def norm(point):
"""Returns the Euclidean norm of a point from origin.
Parameters
==========
point: This denotes a point in the dimensional space.
Examples
========
>>> from sympy.integrals.intpoly import norm
>>> from sympy.geometry.point import Point
>>> norm(Point(2, 7))
sqrt(53)
"""
half = S(1)/2
if isinstance(point, tuple):
return (point[0] ** 2 + point[1] ** 2) ** half
elif isinstance(point, Point):
return (point.x ** 2 + point.y ** 2) ** half
elif isinstance(point, dict):
return sum(i**2 for i in point.values()) ** half
|
8e341bb7e623d2ef7a998b91a15e8a6735403860
| 3,642,616
|
def read_reducing():
"""Return gas resistance for reducing gases.
Eg hydrogen, carbon monoxide
"""
setup()
return read_all().reducing
|
3e0d78fc999909d29c469f3399e845df542e1e68
| 3,642,617
|
def access_rights_to_metax(data):
"""
Cherry pick access right data from the frontend form data and make it comply with Metax schema.
Arguments:
data {object} -- The whole object sent from the frontend.
Returns:
        object -- Object containing an access rights object that complies with the Metax schema.
"""
access_rights = {}
if "license" in data:
access_rights["license"] = []
if "identifier" in data["license"] and data["license"]["identifier"] != 'other':
license_object = {}
license_object["identifier"] = data["license"]["identifier"]
access_rights["license"].append(license_object)
elif "otherLicenseUrl" in data:
license_object = {}
license_object["license"] = data["otherLicenseUrl"]
access_rights["license"].append(license_object)
if "accessType" in data:
access_rights["access_type"] = {}
access_rights["access_type"]["identifier"] = data["accessType"]["url"]
if data["accessType"]["url"] != access_type["OPEN"]:
access_rights["restriction_grounds"] = []
access_rights["restriction_grounds"].append({"identifier": data["restrictionGrounds"]})
if data["accessType"]["url"] == access_type["EMBARGO"] and "embargoDate" in data:
access_rights["available"] = data["embargoDate"]
return access_rights
|
f66167028d86b5af4f15149bd21ab52fab9c3ba4
| 3,642,618
|
def read_toml_file(input_file, config_name = None, confpaths = [".", TCFHOME + "/" + "config"]):
"""
    Function to read a toml file and return its parsed content
    Parameters:
    - input_file is any toml file which needs to be read
    - config_name is a particular configuration entry to pull
    - confpaths is the list of directories in which to search for the toml file
"""
conffiles = [input_file]
config = pconfig.parse_configuration_files(conffiles, confpaths)
if config_name is None:
return config
else :
result = config.get(config_name)
if result is None:
logger.error("%s is missing in toml file %s", config_name, input_file )
return None
else :
return result
|
9f0e05dfd556f06ed0323e6484e804f7813c626c
| 3,642,619
|
def and_sum (phrase):
"""Returns TRUE iff every element in <phrase> is TRUE"""
for x in phrase:
if not x:
return False
return True
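# Equivalent behaviour to the built-in all() (example, not from the original):
and_sum([1, True, "x"])   # True
and_sum([1, 0, "x"])      # False, same as all([1, 0, "x"])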
|
d65953c5811aedef0a7c76cd3191aba8236f02fa
| 3,642,620
|
import pandas as pd
def load_data(filename: str):
"""
    Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
    Response vector (prices) and design matrix, returned as a
    Tuple[Series, DataFrame]
"""
house_data = pd.read_csv(filename)
# drop corrupted data
house_data.dropna(inplace=True)
house_data.drop_duplicates(inplace=True)
# remove the location and id columns
house_data.drop(columns=['lat', 'long', 'id'], inplace=True)
# remove negative and illegal values
for column in ['price', 'sqft_lot', 'sqft_lot15', 'floors', 'yr_built']:
house_data = house_data[house_data[column] > 0]
for column in [('waterfront', range(2)), ('view', range(5)),
('condition', range(1, 6)), ('grade', range(1, 14))]:
house_data = house_data[house_data[column[0]].isin(column[1])]
# categorize the columns 'zipcode', 'date', 'yr_built', 'yr_renovated'
house_data['zipcode'] = house_data['zipcode'].astype(str).str[:3]
house_data = pd.get_dummies(house_data, columns=['zipcode'],
prefix='zipcode_area')
house_data['date'] = house_data['date'].str[:4]
house_data = pd.get_dummies(house_data, columns=['date'])
house_data['yr_built'] = house_data['yr_built'].astype(str).str[:2]
house_data = pd.get_dummies(house_data, columns=['yr_built'])
house_data['yr_renovated'] = house_data['yr_renovated'].astype(str).str[:2]
house_data = pd.get_dummies(house_data, columns=['yr_renovated'])
# is_basement flag
house_data['is_basement'] = (house_data['sqft_basement'] >= 1).astype(int)
return house_data['price'], house_data.drop(columns=['price'])
|
a01179d470262bdc77e9d5417c1bd00415e4ccbd
| 3,642,621
|
import numpy as np
def eccanom(M, e):
"""Finds eccentric anomaly from mean anomaly and eccentricity
This method uses algorithm 2 from Vallado to find the eccentric anomaly
from mean anomaly and eccentricity.
Args:
M (float or ndarray):
mean anomaly
e (float or ndarray):
eccentricity (eccentricity may be a scalar if M is given as
            an array, but otherwise must match the size of M).
Returns:
E (float or ndarray):
eccentric anomaly
"""
# make sure M and e are of the correct format.
# if 1 value provided for e, array must match size of M
M = np.array(M).astype(float)
if not M.shape:
M = np.array([M])
e = np.array(e).astype(float)
if not e.shape:
e = np.array([e] * len(M))
assert e.shape == M.shape, "Incompatible inputs."
assert np.all((e >= 0) & (e < 1)), "e defined outside [0,1)"
# initial values for E
E = M / (1 - e)
mask = e * E ** 2 > 6 * (1 - e)
E[mask] = (6 * M[mask] / e[mask]) ** (1. / 3)
# Newton-Raphson setup
tolerance = np.finfo(float).eps * 4.01
numIter = 0
maxIter = 200
err = 1.
while err > tolerance and numIter < maxIter:
E = E - (M - E + e * np.sin(E)) / (e * np.cos(E) - 1)
err = np.max(abs(M - (E - e * np.sin(E))))
numIter += 1
if numIter == maxIter:
raise Exception("eccanom failed to converge. Final error of %e" % err)
return E
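# Brief usage sketch (not from the original source):
M = np.array([0.5, 1.0, 2.0])   # mean anomalies in radians
E = eccanom(M, 0.1)             # eccentric anomalies for eccentricity 0.1
# each E satisfies Kepler's equation M = E - e*sin(E) to within the tolerance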
|
433ee8d5f6c247626316d953ecf35a9805b70390
| 3,642,622
|
import cv2
import numpy as np
def transform_frame(frame: np.ndarray,
                    transform: AffineTransformation,
                    rotate: bool = False,
                    center_crop: bool = False) -> np.ndarray:
""" Perform affine transformation of a single image-frame.
Parameters
----------
frame : array
Image frame.
transform : AffineTransformation
Delta-x, -y, -angle to use for transformation.
rotate : bool
If True, rotation will be used otherwise only translation.
center_crop : bool
If True, the center of the image will be cropped out by a fixed margin to remove border artifacts.
Returns
-------
array
"""
dx, dy, da = transform
height, width = frame.shape[:2]
# Reconstruct transformation matrix accordingly to new values
transformation_matrix = np.zeros((2, 3), np.float32)
if rotate:
transformation_matrix[0, 0] = np.cos(da)
transformation_matrix[0, 1] = -np.sin(da)
transformation_matrix[1, 0] = np.sin(da)
transformation_matrix[1, 1] = np.cos(da)
else:
transformation_matrix[0, 0] = 1
transformation_matrix[0, 1] = 0
transformation_matrix[1, 0] = 0
transformation_matrix[1, 1] = 1
transformation_matrix[0, 2] = dx
transformation_matrix[1, 2] = dy
    # Apply affine warping to the given frame
stabilized_frame = cv2.warpAffine(frame, transformation_matrix, (width, height), flags=cv2.INTER_NEAREST)
if center_crop:
stabilized_frame = _scale_around_center(stabilized_frame)
return stabilized_frame
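# Minimal usage sketch (not from the original source); the transform is passed
# as a plain (dx, dy, dangle) tuple, which is exactly what the function unpacks:
frame = np.zeros((100, 200), dtype=np.uint8)
shifted = transform_frame(frame, (5.0, 3.0, 0.0))  # translate 5 px in x, 3 px in y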
|
7e29b2a0abf62e1388c6715cfb30958046f1ff18
| 3,642,623
|
import numpy as np
import talib
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real
|
0494e2cd6fedfc6b8ea8880808090f481c1128bc
| 3,642,624
|
def get_miner_pool_by_owner_id():
"""
存储提供者详情
:return:
"""
owner_id = request.form.get("owner_id")
data = MinerService.get_miner_pool_by_no(owner_id)
return response_json(data)
|
aa9fb36843d8999ac428d492463432de096628fe
| 3,642,625
|
def notFound(e):
"""View for 404 page."""
return render_template('content/notfound.jinja.html'), 404
|
d1bb53d22351ecbceda60ea28bdf8e7b688ad77d
| 3,642,626
|
from datetime import datetime
def get_stay(admission_date, exit_date):
"""Method to get exit date."""
try:
if not exit_date:
exit_date = datetime.now().date()
no_days = exit_date - admission_date
# Get More
years = ((no_days.total_seconds()) / (365.242 * 24 * 3600))
years_int = int(years)
months = (years - years_int) * 12
months_int = int(months)
days = (months - months_int) * (365.242 / 12)
days_int = int(days)
years_val = '' if years_int == 0 else '%s years ' % (years_int)
mon_check = years_int > 0 and months_int > 0
months_val = '%s months ' % (months_int) if mon_check else ''
pds = '%s%s%s days' % (years_val, months_val, days_int)
except Exception as e:
print('Error calculating exit - %s' % str(e))
return None
else:
return pds
|
bba3aa63884608500793c9f97c0c6e0e5d9e69ef
| 3,642,627
|
import numpy as np
def compress(X, halve, g=0, indices=None):
"""Returns Compress coreset of size 2^g sqrt(n) or, if indices is not None,
of size 2^g sqrt(len(indices)) as row indices into X
Args:
X: Input sequence of sample points with shape (n, d)
      halve: Algorithm that takes as input a set of points and returns the indices of a subset of points with cardinality half of the input set
g: Oversampling parameter, a nonnegative integer
indices: If None, compresses X and returns coreset of size 2^g sqrt(n);
otherwise, compresses X[indices] and returns coreset of size
2^g sqrt(len(indices))
"""
# Check if indices is None in which case it sets it to range(size(X))
if indices is None:
indices = np.arange(X.shape[0], dtype=int)
# If the number of input points matches the target coreset size, we're done
if len(indices) == 4**g:
return indices
else:
# Partition the set input indices into four disjoint sets
partition_set = np.array_split(indices,4)
# Initialize an array to hold outputs of recursive calls
compress_outputs = []
for x in partition_set:
# Recursively call compress on the four subsets and
# add its output to the list
compress_output = compress(X, halve, g, indices = x)
compress_outputs.append(compress_output)
# Merge outputs of the recursive calls to compress
combined_compress_output = np.concatenate(compress_outputs)
# Run halving on the combined output
indices_into_combined = halve(X[combined_compress_output])
# Return indices into the original input set X
return combined_compress_output[indices_into_combined]
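# Toy usage sketch (not from the original source): a stand-in random halving
# rule; a real application would plug in kernel halving here.
def halve_random(points):
    n = points.shape[0]
    return np.random.choice(n, n // 2, replace=False)

X = np.random.randn(256, 2)
coreset = compress(X, halve_random, g=0)  # indices of a coreset of size sqrt(256) = 16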
|
34503bf6602ef4577811c6fe449ae6109fa25007
| 3,642,628
|
def filter_objects(objs, labels, none_val=0):
"""Keep objects specified by label list"""
out = objs.copy()
all_labels = set(nonzero_unique(out))
labels = set(labels)
remove_labels = all_labels - labels
for l in remove_labels:
remove_object(out, l)
return out
|
8fccf936a5e74e274a2df5c182764e346e39dafb
| 3,642,629
|
def get_index_freq(freqs, fmin, fmax):
"""Get the indices of the freq between fmin and fmax in freqs
"""
f_index_min, f_index_max = -1, 0
for freq in freqs:
if freq <= fmin:
f_index_min += 1
if freq <= fmax:
f_index_max += 1
# Just check if f_index_max is not out of bound
f_index_max = min(len(freqs) - 1, f_index_max)
f_index_min = max(0, f_index_min)
return f_index_min, f_index_max
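# Example (not part of the original snippet):
freqs = [1, 2, 3, 4, 5, 6, 7, 8]
get_index_freq(freqs, fmin=3, fmax=6)  # -> (2, 6); freqs[2:6] covers the 3-6 band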
|
f3e014626d763f18ce6b661cabeb244bfabe9782
| 3,642,630
|
def get_gene_mod(gene_id, marker_id):
"""Retrieves a GeneMod model if the gene / marker pair already exists,
or creates a new one
"""
if gene_id in ("None", None):
gene_id = 0
if marker_id in ("None", None):
marker_id = 0
gene_mod = GeneMod.query.filter_by(gene_id=gene_id, marker_id=marker_id).first()
if gene_mod:
log.info("Found gene_mod for gene %s and marker %s", gene_id, marker_id)
return gene_mod
gene = Gene.get_by_id(gene_id)
marker = Marker.get_by_id(marker_id)
if not (gene or marker):
return None
gene_label = gene.label if gene else ""
gene_id = gene.bioportal_id if gene else ""
marker_label = marker.label if marker else ""
marker_id = marker.bioportal_id if marker else ""
user_id = gene.user_id if gene else marker.user_id
group_id = gene.group_id if gene else marker.group_id
label = f"{gene_label}-{marker_label}"
bioportal_id = f"{gene_id}-{marker_id}"
gene_mod = GeneMod(
label=label,
bioportal_id=bioportal_id,
user_id=user_id,
group_id=group_id,
)
if gene:
gene_mod.update(gene=gene, gene_id=gene_id, commit=False)
if marker:
gene_mod.update(marker=marker, marker_id=marker_id, commit=False)
gene_mod.save()
return gene_mod
|
26f29cbaa33c6fb7d40626152977c583fa5bcf57
| 3,642,631
|
def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
dtype=tf.float32, name='t3f_random_matrix'):
"""Generate a random TT-matrix of the given shape with given mean and stddev.
Entries of the generated matrix (in the full format) will be iid and satisfy
E[x_{i1i2..id}] = mean, Var[x_{i1i2..id}] = stddev^2, but the distribution is
in fact not Gaussian.
In the current implementation only mean 0 is supported. To get
a random_matrix with specified mean but tt_rank greater by 1 you can call
x = t3f.random_matrix(shape, tt_rank, stddev=stddev)
x = mean * t3f.ones_like(x) + x
Args:
shape: 2d array, shape[0] is the shape of the matrix row-index,
shape[1] is the shape of the column index.
shape[0] and shape[1] should have the same number of elements (d)
Also supports omitting one of the dimensions for vectors, e.g.
random_matrix([[2, 2, 2], None])
and
random_matrix([None, [2, 2, 2]])
will create an 8-element column and row vectors correspondingly.
tt_rank: a number or a (d+1)-element array with ranks.
mean: a number, the desired mean for the distribution of entries.
stddev: a number, the desired standard deviation for the distribution of
entries.
dtype: [tf.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing a TT-matrix of size
np.prod(shape[0]) x np.prod(shape[1])
"""
# TODO: good distribution to init training.
# In case the shape is immutable.
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
var = np.prod(tt_rank)
# Empirically entries of a TT tensor with cores initialized from N(0, 1)
# will have variances np.prod(tt_rank) and mean 0.
# We scale each TT-core to obtain the desired stddev
cr_exponent = -1.0 / (2 * num_dims)
var = np.prod(tt_rank ** cr_exponent)
core_stddev = stddev ** (1.0 / num_dims) * var
with tf.name_scope(name):
tt = matrix_with_random_cores(shape, tt_rank=tt_rank, stddev=core_stddev,
dtype=dtype)
if np.abs(mean) < 1e-8:
return tt
else:
raise NotImplementedError('non-zero mean is not supported yet')
|
01f81199a71966ce379d2acc01d237ec47d346ec
| 3,642,632
|
def triangulate_polylines(polylines, holePts, lowQuality = False, maxArea = 0.01):
"""
Convenience function for triangulating a polygonal region using the `triangle` library.
Parameters
----------
polylines
List of point lists, each defining a closed polygon (with coinciding
first and last points) to triangulate.
holePts
A single point within each polygonal region that should be interpreted
as a hole. These regions will be omitted from the output triangulation.
lowQuality
Prohibit the insertion of any Steiner points, creating a low-quality
        triangulation that can be used for traversal/topological queries.
maxArea
Area threshold for refining triangles; ignored if lowQuality is True.
Returns
-------
V, F
Indexed face set representation of the output triangle mesh.
"""
lV, lE = mesh_operations.mergedMesh([mesh_operations.polylineToLineMesh(p) for p in polylines])
omitQualityFlag, flags = False, ""
if lowQuality: omitQualityFlag, flags = True, "YYS0"
V, F, markers = triangulation.triangulate(lV, lE, holePts=holePts, triArea=maxArea, omitQualityFlag=omitQualityFlag, flags=flags)
return V, F
|
e2ae0eb5339d51d02eb1d26302fc18229ff85f83
| 3,642,633
|
import numpy as np
def inclination(x, y, z, u, v, w):
"""Compute value of inclination, I.
Args:
x (float): x-component of position
y (float): y-component of position
z (float): z-component of position
u (float): x-component of velocity
v (float): y-component of velocity
w (float): z-component of velocity
Returns:
float: inclination, I
"""
my_hz = x*v-y*u
my_h = np.sqrt( (y*w-z*v)**2 + (z*u-x*w)**2 + (x*v-y*u)**2 )
return np.arccos(my_hz/my_h)
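# Quick checks (not part of the original snippet):
inclination(1.0, 0.0, 0.0, 0.0, 1.0, 0.0)  # orbit in the x-y plane -> 0.0
inclination(1.0, 0.0, 0.0, 0.0, 0.0, 1.0)  # polar orbit in the x-z plane -> ~1.5708 (pi/2)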
|
bf85358fc6f002cdb4e0d47e6af0a93b8f9e4024
| 3,642,634
|
def OctahedralGraph():
"""
Return an Octahedral graph (with 6 nodes).
The regular octahedron is an 8-sided polyhedron with triangular faces. The
octahedral graph corresponds to the connectivity of the vertices of the
    octahedron. It is the line graph of the tetrahedral graph. The octahedral graph is
symmetric, so the spring-layout algorithm will be very effective for
display.
PLOTTING: The Octahedral graph should be viewed in 3 dimensions. We choose
to use a planar embedding of the graph. We hope to add rotatable,
    3-dimensional viewing in the future. In such a case, an argument will be
added to select the desired layout.
EXAMPLES:
Construct and show an Octahedral graph::
sage: g = graphs.OctahedralGraph()
sage: g.show() # long time
    Create several octahedral graphs in a Sage graphics array. They will be drawn
differently due to the use of the spring-layout algorithm::
sage: g = []
sage: j = []
sage: for i in range(9):
....: k = graphs.OctahedralGraph()
....: g.append(k)
sage: for i in range(3):
....: n = []
....: for m in range(3):
....: n.append(g[3*i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
"""
adj = {0: [1, 2, 3, 4], 1: [2, 3, 5], 2: [4, 5], 3: [4, 5], 4: [5]}
G = Graph(adj, format='dict_of_lists', name="Octahedron")
G._circle_embedding([0, 1, 2], radius=5, angle=pi/2)
G._circle_embedding([4, 3, 5], radius=1, angle=pi/6)
return G
|
d2abce73e747890f992301bb4814cfa52e55bce4
| 3,642,635
|
import numpy as np
def peak_detect(y, delta, x=None):
""" Find local maxima in y.
Args:
y (array): intensity data in which to look for peaks
delta (float): a point is considered a maximum peak if it has the maximal value, and was preceded (to the left) by a value lower by DELTA.
x (array, optional): correspond x-axis
    Returns:
        *array*: Nx2 array of detected maxima; column 0 holds the peak positions
        (indices, or the x-values of the peaks if the x arg was passed) and
        column 1 holds the corresponding y values.
References:
Converted from MATLAB script at http://billauer.co.il/peakdet.html.
"""
maxtab = []
mintab = []
if x is None:
x = np.arange(len(y))
y = np.asarray(y)
mn, mx = np.Inf, -np.Inf
mnpos, mxpos = np.NaN, np.NaN
lookformax = True
for i in np.arange(len(y)):
this = y[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return np.array(maxtab)
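# Brief usage sketch (not from the original source):
x = np.linspace(0, 4 * np.pi, 200)
peaks = peak_detect(np.sin(x), delta=0.5, x=x)
# peaks[:, 0] -> x-positions near pi/2 and 5*pi/2; peaks[:, 1] -> heights ~1.0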
|
def69cfcea7ccaed931fd764eab88de9fbb8773a
| 3,642,636
|
def quadruplet_fixated_egomotion( filename ):
"""
Given a filename that contains 4 different point-view combos, parse the filename
and return the pair-wise camera pose.
Parameters:
-----------
filename: a filename in the specific format.
Returns:
-----------
egomotion: a numpy array of length 36 (6x6).
        (a concatenation of 6 six-DOF relative camera pose vectors)
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 4 :
raise ValueError("quadruplet first view prediction with list shorter than 4")
# perm = random.shuffle(range(4))
# full_paths = [full_paths[i] for i in perm]
poses = []
for i in range(3):
for j in range(i+1, 4):
pose = calculate_relative_camera_pose(full_paths[i], full_paths[j])
poses.append(pose)
poses = np.hstack(poses)
return poses
|
5b40e8cfac7362c361ebe9119e5e4753c8e03978
| 3,642,637
|
def moshinsky(state1,state2,stater,statec,state,type):
"""
calculates the moshinsky coefficients used to transform between the two-particle and relative-center of mass frames.
w1 x w2->w
wr x wc->w
type can be either "SU3" or "SO3"
if type=="SU3":
state1,state2,stater,statec,state are simply SU3State class
if type=="SO3" then state1 etc are (n1,L1) where N1=2*n1+L1 and w1=SU3State(N1,0)
state=L
"""
mosh=0
if type=="SU3":
#check SU3 coupling
if u3.mult(state1,state2,state)*u3.mult(stater,statec,state)!=0:
mosh=mosh_SU3(state1,state2,stater,statec,state)
if type=="SO3":
#check that angular momentum coupling is allowed and that N1+N2=Nr+Nc
(n1,L1)=state1
(n2,L2)=state2
(nr,Lr)=stater
(nc,Lc)=statec
L=state
if (so3.mult(L1,L2,L)!=0) and (so3.mult(Lc,Lr,L)!=0) and ((2*n1+L1+2*n2+L2)==(2*nr+Lr+2*nc+Lc)):
mosh=mosh_SO3((n1,L1),(n2,L2),(nr,Lr),(nc,Lc),L)
return mosh
|
ee53cc25f2723b95179e1e190d94536de18f6321
| 3,642,638
|
def test_lie_algebra_nqubits_check():
"""Test that we warn if the system is too big."""
@qml.qnode(qml.device("default.qubit", wires=5))
def circuit():
qml.RY(0.5, wires=0)
return qml.expval(qml.Hamiltonian(coeffs=[-1.0], observables=[qml.PauliX(0)]))
with pytest.warns(UserWarning, match="The exact Riemannian gradient is exponentially"):
LieAlgebraOptimizer(circuit=circuit, stepsize=0.001)
|
b4e296eabab9dc1fd250d391e8c12d2b2f12c59c
| 3,642,639
|
def get_elastic_apartments_not_for_sale():
"""
Get apartments not for sale but with published flags
"""
s_obj = (
ApartmentDocument.search()
.filter("term", publish_on_oikotie=True)
.filter("term", publish_on_etuovi=True)
.filter("term", apartment_state_of_sale__keyword=ApartmentStateOfSale.RESERVED)
)
s_obj.execute()
scan = s_obj.scan()
uuids = []
for hit in scan:
uuids.append(hit.uuid)
return uuids
|
080afdfa61f40f966ad498b3e292224bbfac262d
| 3,642,640
|
def promptYesNoCancel(prompt, prefix=''):
"""Simple Yes/No/Cancel prompt
:param prompt: string, message to the user for selecting a menu entry
:param prefix: string, text to print before the menu entry to format the display
:returns: string, menu entry text
"""
menu = [
{'index': 1, 'text': 'Yes', 'type': 'YES'},
{'index': 2, 'text': 'No', 'type': 'NO'},
{'index': 3, 'text': 'Cancel', 'type': 'EXIT'}
]
return promptSimple(menu, prompt, prefix)
|
bf71ac635ef83ce370350bd352a7e0d619a956c7
| 3,642,641
|
import random
import numpy as np
def gradcheck_naive(f, x):
"""
Implements a manual gradient check: this functions is used as a helper function in many places
- f should be a function that takes a single argument and outputs the cost and its gradients
- x is the point (numpy array) to check the gradient at
"""
rndstate = random.getstate()
random.setstate(rndstate)
fx, grad = f(x) # Evaluate function value at original point
h = 1e-4
# Iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# modifying x[ix] with h defined above to compute numerical gradients
# make sure you call random.setstate(rndstate) before calling f(x) each time, this will make it
# possible to test cost functions with built in randomness later
oldvalue = x[ix]
x[ix] = oldvalue + h
random.setstate(rndstate)
fxh, _ = f(x)
x[ix] = oldvalue - h
random.setstate(rndstate)
fxnh, _ = f(x)
numgrad = ((fxh - fxnh) / 2.0) / h
x[ix] = oldvalue
# Compare gradients
reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
if reldiff > 1e-5:
print "Gradient check failed."
print "First gradient error found at index %s" % str(ix)
print "Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)
return False
it.iternext() # Step to next dimension
print "Gradient check passed!"
return True
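# Sanity-check sketch (not part of the original snippet); uses the classic
# quadratic cost sum(x^2), whose gradient is 2x:
quad = lambda x: (np.sum(x ** 2), x * 2)
gradcheck_naive(quad, np.array(123.456))      # scalar test
gradcheck_naive(quad, np.random.randn(3,))    # 1-D test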
|
c99f687ae8fad8ea890119e014294879f97fce33
| 3,642,642
|
def cal_mae_loss(logits, gts, reduction):
"""
    :param logits: (N,C,H,W) logits predicted by the model.
:param gts: (N,1,H,W) ground truths.
:param reduction: specifies how all element-level loss is handled.
:return: mae loss
"""
probs = logits.sigmoid()
loss = (probs - gts).abs()
return reduce_loss(loss, reduction)
|
6cc813552c46c4c6ea92fb1295478e8770cb255c
| 3,642,643
|
def divideByFirstColumn(matrix):
"""This function devide a matrix by its first column to resolve
wrong intemsity problems"""
result = (matrix.T / matrix.sum(axis=1)).T
return result
|
348bbaa1a3c16a42be90978a0fcc65b1a7daf557
| 3,642,644
|
import numpy as np
def loglikehood_coefficient(n_items, X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
Parameters
----------
n_items: int
Number of items in the model.
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from scikits.crab.metrics.pairwise import loglikehood_coefficient
>>> X = [['a', 'b', 'c', 'd'], ['e', 'f','g', 'h']]
>>> # distance between rows of X
>>> n_items = 7
>>> loglikehood_coefficient(n_items,X, X)
array([[ 1., 0.],
[ 0., 1.]])
>>> n_items = 8
>>> loglikehood_coefficient(n_items, X, [['a', 'b', 'c', 'k']])
array([[ 0.67668852],
[ 0. ]])
References
----------
See http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.5962 and
http://tdunning.blogspot.com/2008/03/surprise-and-coincidence.html.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
def safeLog(d):
if d <= 0.0:
return 0.0
else:
return np.log(d)
def logL(p, k, n):
return k * safeLog(p) + (n - k) * safeLog(1.0 - p)
def twoLogLambda(k1, k2, n1, n2):
p = (k1 + k2) / (n1 + n2)
return 2.0 * (logL(k1 / n1, k1, n1) + logL(k2 / n2, k2, n2)
- logL(p, k1, n1) - logL(p, k2, n2))
if X is Y:
X = Y = np.asanyarray(X)
else:
X = np.asanyarray(X)
Y = np.asanyarray(Y)
result = []
# TODO: Check if it is possible to optimize this function
i = 0
for arrayX in X:
result.append([])
for arrayY in Y:
XY = np.intersect1d(arrayX, arrayY)
if XY.size == 0:
result[i].append(0.0)
else:
nX = arrayX.size
nY = arrayY.size
if (nX - XY.size == 0) or (n_items - nY) == 0:
result[i].append(1.0)
else:
logLikelihood = twoLogLambda(float(XY.size),
float(nX - XY.size),
float(nY),
float(n_items - nY))
result[i].append(1.0 - 1.0 / (1.0 + float(logLikelihood)))
result[i] = np.asanyarray(result[i])
i += 1
return np.asanyarray(result)
|
220090c944c9e9c6b97fb8a576aa416ec0493c26
| 3,642,645
|
def get_archive_by_path(db, vault_name, path, retrieve_subpath_archs=False):
"""
Will attempt to find the most recent version of an archive representing a given path.
If retrieve_subpath_archs is True, then will also retrieve latest versions of archives representing
subdirs of the path.
:param path: The path whose contents we want to retrieve, relative to the top_dir that was backed up.
:param retrieve_subpath_archs: If True, will return a list of all of the archives of subdirectories below the `path`
in the directory tree
:return: archive, list
"""
if not retrieve_subpath_archs:
        return get_most_recent_version_of_archive(db, vault_name, path)
else:
# When trying to find subdirectories, the daft assumption that we make is that the 'path' of the archive will
# start with `path` and be longer than `path`. It'll work for now, but seems inelegant...
path_list = get_list_of_paths_in_vault(db, vault_name)
subdir_list = []
while len(path_list):
cur_path = path_list.pop()
if cur_path.startswith(path) and len(cur_path) >= len(path):
subdir_list.append(cur_path)
arch_list = []
for subdir in subdir_list:
arch = get_most_recent_version_of_archive(db, vault_name, subdir)
if arch: arch_list.append(arch)
return arch_list
|
af30d33e90ef1b509a75f1c96a85b0457d454b71
| 3,642,646
|
import random
def superpixel_colors(
num_pix:int = 1536,
schema:str = 'rgb',
interleave:int = 1,
stroke:str = '',
) -> list:
"""
Generate color (attribute) list for superpixel SVG paths
Parameters
----------
num_pix : int
Number of super pixels to account for (default = 1536)
schema : str
Either of 'rgb' or 'random'
interleave : int
RGB interleave value (default = 1)
stroke : str
String that is inserted into ever attribute at the end, e.g.
to account for a stroke, such as 'stroke="#808080"'. Please
note that the entire tag=value (pairs) must be given!
Returns
-------
colors : list
List of attributes suitable for superpixel_outlines (SVG)
"""
colors = [''] * num_pix
if not schema in ['random', 'rgb']:
raise ValueError('invalid schema requested.')
if schema == 'rgb':
if stroke:
for idx in range(num_pix):
val = interleave * idx
colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}" {3:s}'.format(
val % 256, (val // 256) % 256, (val // 65536) % 256, stroke)
else:
for idx in range(num_pix):
val = interleave * idx
colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}"'.format(
val % 256, (val // 256) % 256, (val // 65536) % 256)
else:
# IMPORT DONE HERE TO SAVE TIME AT MODULE INIT
if stroke:
for idx in range(num_pix):
                colors[idx] = 'fill="#{0:06x}" {1:s}'.format(
random.randrange(16777216), stroke)
else:
for idx in range(num_pix):
colors[idx] = 'fill="#{0:06x}"'.format(
random.randrange(16777216))
return colors
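# Illustrative call (not from the original source): RGB-coded fills for 4
# superpixels with a grey stroke attribute appended.
superpixel_colors(num_pix=4, schema='rgb', stroke='stroke="#808080"')
# -> ['fill="#000000" stroke="#808080"', 'fill="#010000" stroke="#808080"', ...]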
|
7a574b48dff30126052c2acd5d06e01a9f4a9af0
| 3,642,647
|
from typing import List
import torchquantum as tq
def build_module_op_list(m: tq.QuantumModule, x=None) -> List:
"""
serialize all operations in the module and generate a list with
[{'name': RX, 'has_params': True, 'trainable': True, 'wires': [0],
n_wires: 1, 'params': [array([[0.01]])]}]
so that an identity module can be reconstructed
The module needs to have static support
"""
m.static_off()
m.static_on(wires_per_block=None)
m.is_graph_top = False
# forward to register all modules and parameters
if x is None:
m.forward(q_device=None)
else:
m.forward(q_device=None, x=x)
m.is_graph_top = True
m.graph.build_flat_module_list()
module_list = m.graph.flat_module_list
m.static_off()
op_list = []
for module in module_list:
if module.params is not None:
if module.params.shape[0] > 1:
# more than one param, so it is from classical input with
# batch mode
assert not module.has_params
params = None
else:
# has quantum params, batch has to be 1
params = module.params[0].data.cpu().numpy()
else:
params = None
op_list.append({
'name': module.name.lower(),
'has_params': module.has_params,
'trainable': module.trainable,
'wires': module.wires,
'n_wires': module.n_wires,
'params': params
})
return op_list
|
e14bbfe8122e05d0b65e93246494fa1414bcdf1b
| 3,642,648
|
import argparse
def _get_args():
""" Parses the command line arguments and returns them. """
parser = argparse.ArgumentParser(description=__doc__)
# Argument for the mode of execution (human or random):
parser.add_argument(
"--mode", "-m",
type=str,
default="human",
choices=["human", 'random'],
help="The execution mode for the game.",
)
return parser.parse_args()
|
fdccca9d6ba518d7b3c1732070667b0b82018fc5
| 3,642,649
|
import json
def file_to_dict(filename, data):
"""Converts JSON file to dict
:param filename: filename
:param data: string
:return: dict object
"""
try:
try:
json_data = to_json(data)
return json.loads(json_data)
except Exception as _:
return json.loads(data)
except Exception as error:
logger.error("Failed to parse s3 file {}, error: {}".format(filename, str(error)))
raise ValueError("Unable to load JSON file {} error: {}".format(filename, str(error)))
|
3d6ff02246081dce83097545b5290dbbeedddab7
| 3,642,650
|
from datetime import datetime
def reorganizeArray(id_A, id_B, gids_tuple, unordered_coordinates):
"""
From a tuple of genome ID's representing a coordinate array's structure (gidI, gidJ).
    and the corresponding coordinate np-array (n x 6) of design [[locI1, locJ1, fidI1, fidJ1, Kn, Ks],
...
[locIn, locJn, fidIn, fidJn, Kn, Ks]]
* Both of these are returned by getPointCoords - as "ids" and "coords", respectively.
Restructures to represent an (A, B) order (swaps columns 0,1 and 2,3 if A=J and B=I, else unchanged)
:param id_A: GID of genome to appear first
:param id_B: GID of genome to appear second
:param gids_tuple: Ordered tuple of GIDs in original coordinate array (returned by getPointCoords)
:param unordered_coordinates: coordinate (ordered or unordered) to map order to (returned by getPointCoords)
:return: coordinate object with [A_loc, B_loc, A_fid, B_fid] order enforced.
"""
reorg_start = datetime.now()
if id_A == gids_tuple[0] and id_B == gids_tuple[1]:
ordered_coordinates = unordered_coordinates
elif id_A == gids_tuple[1] and id_B == gids_tuple[0]:
ordered_coordinates = unordered_coordinates[:,[1,0,3,2,4,5]]
else:
ordered_coordinates = None
print "ERROR: reorganizeArray (%s, %s)" % (str(id_A), str(id_B))
exit()
reorg_end = datetime.now()
print("Coordinates Reorganization Complete (%s)" % str(reorg_end-reorg_start))
print("--> (%s,%s) to (%s,%s)" % (gids_tuple[0], gids_tuple[1], id_A, id_B))
return ordered_coordinates
|
c4ac1add615f21e4238ad3cd65e1fb9797c02b27
| 3,642,651
|
def compile_model_output(i,j,files,model):
""" compiles the model variables over severl files into a single array at a j,i grid point.
Model can be "Operational", "Operational_old", "GEM".
returns wind speed, wind direction, time,pressure, temperature, solar radiation, thermal radiation and humidity.
"""
wind=[]; direc=[]; t=[]; pr=[]; sol=[]; the=[]; pre=[]; tem=[]; qr=[];
for f in files:
G = nc.Dataset(f)
u = G.variables['u_wind'][:,j,i]; v=G.variables['v_wind'][:,j,i];
pr.append(G.variables['atmpres'][:,j,i]); sol.append(G.variables['solar'][:,j,i]);
qr.append(G.variables['qair'][:,j,i]); the.append(G.variables['therm_rad'][:,j,i]);
pre.append(G.variables['precip'][:,j,i]);
tem.append(G.variables['tair'][:,j,i])
speed = np.sqrt(u**2 + v**2)
wind.append(speed)
d = np.arctan2(v, u)
d = np.rad2deg(d + (d<0)*2*np.pi);
direc.append(d)
ts=G.variables['time_counter']
if model =='GEM':
torig = nc_tools.time_origin(G)
elif model =='Operational' or model=='Operational_old':
torig = datetime.datetime(1970,1,1) #there is no time_origin attriubte in OP files, so I hard coded this
for ind in np.arange(ts.shape[0]):
t.append((torig + datetime.timedelta(seconds=ts[ind])).datetime)
    wind = np.array(wind).reshape(len(files)*24,)
    direc = np.array(direc, 'double').reshape(len(files)*24,)
    t = np.array(t).reshape(len(files)*24,)
    pr = np.array(pr).reshape(len(files)*24,)
    tem = np.array(tem).reshape(len(files)*24,)
    sol = np.array(sol).reshape(len(files)*24,)
    the = np.array(the).reshape(len(files)*24,)
    qr = np.array(qr).reshape(len(files)*24,)
    pre = np.array(pre).reshape(len(files)*24,)
return wind, direc, t, pr, tem, sol, the, qr, pre
|
d42b3b8b4222b1ac15d1f8d7643c994efec0bb5e
| 3,642,652
|
import librosa
def read_audio(path, Fs=None, mono=False):
"""Read an audio file into a np.ndarray.
Args:
path (str): Path to audio file
Fs (scalar): Resample audio to given sampling rate. Use native sampling rate if None. (Default value = None)
mono (bool): Convert multi-channel file to mono. (Default value = False)
Returns:
x (np.ndarray): Waveform signal
Fs (scalar): Sampling rate
"""
return librosa.load(path, sr=Fs, mono=mono)
|
6b3f88ae00b1d9dab8016b33cc5a2d7c58d5b87e
| 3,642,653
|
import os
import re
import sys
def energies_from_mbe_log(filename):
"""Monomer dimer energies from log file."""
monomers, dimers, trimers, tetramers = {}, {}, {}, {}
hf, os_, ss_ = True, False, False
mons, dims, tris, tets = True, False, False, False
energies = False
def storeEnergy(dict_, key, energy):
"""Store energy in given dict depending on whether HF, OS or SS."""
energy = float(energy)
if hf:
dict_[key] = {'hf': energy, 'os': np.nan, 'ss': np.nan}
elif os_:
dict_[key]['os'] = energy
elif ss_:
dict_[key]['ss'] = energy
return dict_
dir, File = os.path.split(filename)
dir = dir or "."
lines = eof(dir+'/', File, 0.15)
for line in lines:
if not line.strip():
continue
elif '-----ENERGIES OF MONOMERS------' in line:
energies = True
tets = False
tris = False
dims = False
mons = True
elif not energies:
continue
elif 'Final E(HF) =' in line:
break
elif 'DIMER ENERGY CORRECTION' in line:
dims = True
mons = False
elif 'TRIMER ENERGY CORRECTION' in line:
dims = False
tris = True
elif 'TETRAMER ENERGY CORRECTION' in line:
tris = False
tets = True
elif 'RI-MP2 OS energies***' in line:
ss_ = False
os_ = True
hf = False
elif 'RI-MP2 SS energies***' in line:
ss_ = True
os_ = False
hf = False
elif 'ID' in line:
if 'RIJ' in line:
rij = True
else:
rij = False
# ENERGIES
else:
# IF ENERGIES IN LINE
if re.search('^[0-9]', line) or line.startswith('('):
if mons:
if rij:
spl_line = line.split()
if len(spl_line) == 3:
id, e, rij = spl_line
elif len(spl_line) == 2:
id, hold = spl_line
e = hold[:-1]
rij = hold[-1]
else:
sys.exit("Unexpected number of items in split line")
else:
id, e = line.split()
monomers = storeEnergy(monomers, id, e)
elif dims:
if rij:
spl_line = line.split()
if len(spl_line) == 4:
id1, id2, e, rij = spl_line
elif len(spl_line) == 3:
id1, id2, hold = spl_line
e = hold[:-1]
rij = hold[-1]
else:
sys.exit("Unexpected number of items in split line")
else:
id1, id2, e = line.split()
key = keyName(id1, id2)
dimers = storeEnergy(dimers, key, e)
elif tris:
if rij:
spl_line = line.split()
if len(spl_line) == 5:
id1, id2, id3, e, rij = spl_line
elif len(spl_line) == 4:
id1, id2, id3, hold = spl_line
e = hold[:-1]
rij = hold[-1]
else:
id1, id2, id3, e = line.split()
key = keyName(id1, id2, id3)
trimers = storeEnergy(trimers, key, e)
elif tets:
                    if rij:
                        spl_line = line.split()
                        if len(spl_line) == 6:
                            id1, id2, id3, id4, e, rij = spl_line
                        elif len(spl_line) == 5:
                            id1, id2, id3, id4, hold = spl_line
                            e = hold[:-1]
                            rij = hold[-1]
else:
id1, id2, id3, id4, e = line.split()
key = keyName(id1, id2, id3, id4)
tetramers = storeEnergy(tetramers, key, e)
return monomers, dimers, trimers, tetramers
|
ef7fdae5580a490423b42714d6f1b5c3a8485b09
| 3,642,654
|
from numpy import array
def build_varied_y_node_mesh(osi, xs, ys, zs=None, active=None):
"""
    Creates an array of nodes that are arranged in vertical lines but vary in height.
    The mesh has len(xs)=len(ys) nodes in the x-direction and len(ys[0]) in the y-direction.
If zs is not None then has len(zs) in the z-direction.
Parameters
----------
osi
xs
ys
zs
active
Returns
-------
np.array
axis-0 = x-direction
axis-1 = y-direction
axis-2 = z # not included if len(zs)=1 or zs=None
"""
# axis-0 = x # unless x or y are singular
# axis-1 = y
# axis-2 = z # not included if len(zs)=1 or
if not hasattr(zs, '__len__'):
zs = [zs]
sn = []
for xx in range(len(xs)):
sn.append([])
for yy in range(len(ys[xx])):
if len(zs) == 1:
if active is None or active[xx][yy]:
if osi.ndm == 2:
pms = [osi, xs[xx], ys[xx][yy]]
else:
pms = [osi, xs[xx], ys[xx][yy], zs[0]]
sn[xx].append(Node(*pms))
else:
sn[xx].append(None)
else:
sn[xx].append([])
for zz in range(len(zs)):
# Establish left and right nodes
if active is None or active[xx][yy][zz]:
sn[xx][yy].append(Node(osi, xs[xx], ys[xx][yy], zs[zz]))
else:
sn[xx][yy].append(None)
# if len(zs) == 1:
# return sn[0]
return array(sn)
|
878386f1b815840d198de106f57e11883aec3d1c
| 3,642,655
|
def self_quarantine_end_10():
"""
Real Name: b'self quarantine end 10'
Original Eqn: b'50'
Units: b'Day'
Limits: (None, None)
Type: constant
b''
"""
return 50
|
bb5fa131866d337460a37237b711d8c21588250d
| 3,642,656
|
import analysis as an
import phasecurves as pc
from astropy.io import fits as pyfits
import pyfits
import numpy as np
def spexsxd_scatter_model(dat, halfwid=48, xlims=[470, 1024], ylims=[800, 1024], full_output=False, itime=None):
"""Model the scattered light seen in SpeX/SXD K-band frames.
:INPUTS:
dat : str or numpy array
filename of raw SXD frame to be corrected, or a Numpy array
containing its data.
:OPTIONS:
halfwid : int
half-width of the spectral orders. Experience shows this is
approximately 48 pixels. This value is not fit!
xlims : list of length 2
minimum and maximum x-pixel values to use in the fitting
ylims : list of length 2
minimum and maximum y-pixel values to use in the fitting
full_output : bool
whether to output only model, or the tuple (model, fits, chisq, nbad)
itime : float
integration time, in seconds, with which to scale the initial
guesses
:OUTPUT:
scatter_model : numpy array
Model of the scattered light component, for subtraction or saving.
OR:
scatter_model, fits, chis, nbad
:REQUIREMENTS:
:doc:`pyfits`, :doc:`numpy`, :doc:`fit_atmo`, :doc:`analysis`, :doc:`phasecurves`
:TO_DO_LIST:
I could stand to be more clever in modeling the scattered light
components -- perhaps fitting for the width, or at least
allowing the width to be non-integer.
"""
# 2011-11-10 11:10 IJMC: Created
    try:
        from astropy.io import fits as pyfits
    except ImportError:
        import pyfits
############################################################
# Define some helper functions:
############################################################
def tophat(param, x):
"""Grey-pixel tophat function with set width
param: [cen_pix, amplitude, background]
x : must be array of ints, arange(0, size-1)
returns the model."""
# 2011-11-09 21:37 IJMC: Created
intpix, fracpix = int(param[0]), param[0] % 1
th = param[1] * ((-halfwid <= (x - intpix)) * ((x - intpix) < halfwid))
# th = * th.astype(float)
if (intpix >= halfwid) and ((intpix - halfwid) < x.size):
th[intpix - halfwid] = param[1]*(1. - fracpix)
if (intpix < (x.size - halfwid)) and ((intpix + halfwid) >= 0):
th[intpix + halfwid] = param[1]*fracpix
return th + param[2]
def tophat2g(param, x, p0prior=None):
"""Grey-pixel double-tophat plus gaussian
param: [cen_pix1, amplitude1, cen_pix2, amplitude2, g_area, g_sigma, g_center, background]
x : must be ints, arange(0, size-1)
returns the model.""" # 2011-11-09 21:37 IJMC: Created
#th12 =
#th2 =
#gauss =
# if p0prior is not None:
# penalty =
return tophat([param[0], param[1], 0], x) + \
tophat([param[2], param[3], 0], x) + \
gaussian(param[4:7], x) + param[7]
############################################################
# Parse inputs
############################################################
halfwid = int(halfwid)
if isinstance(dat, np.ndarray):
if itime is None:
itime = 1.
else:
if itime is None:
try:
itime = pyfits.getval(dat, 'ITIME')
except:
itime = 1.
dat = pyfits.getdata(dat)
nx, ny = dat.shape
scatter_model = np.zeros((nx, ny), dtype=float)
chis, fits, nbad = [], [], []
iivals = np.arange(xlims[1]-1, xlims[0], -1, dtype=int)
position_offset = 850 - ylims[0]
est_coefs = np.array([ -5.02509772e-05, 2.97212397e-01, -7.65702234e+01])
estimated_position = np.polyval(est_coefs, iivals) + position_offset
estimated_error = 0.5
# to hold scattered light position fixed, rather than fitting for
# that position, uncomment the following line:
#holdfixed = [0]
holdfixed = None
############################################################
# Start fitting
############################################################
for jj, ii in enumerate(iivals):
col = dat[ylims[0]:ylims[1], ii]
ecol = np.ones(col.size, dtype=float)
x = np.arange(col.size, dtype=float)
if len(fits)==0:
all_guess = [175 + position_offset, 7*itime, \
70 + position_offset, 7*itime, \
250*itime, 5, 89 + position_offset, 50]
else:
all_guess = fits[-1]
all_guess[0] = estimated_position[jj]
model_all = tophat2g(all_guess, x)
res = (model_all - col)
badpix = np.abs(res) > (4*an.stdr(res, nsigma=4))
ecol[badpix] += 1e9
fit = an.fmin(pc.errfunc, all_guess, args=(tophat2g, x, col, 1./ecol**2), full_output=True, maxiter=1e4, maxfun=1e4, disp=False, kw=dict(testfinite=False), holdfixed=holdfixed)
best_params = fit[0].copy()
res = tophat2g(best_params, x) - col
badpix = np.abs(res) > (4*an.stdr(res, nsigma=4))
badpix[((np.abs(np.abs(x - best_params[0]) - 48.)) < 2) + \
((np.abs(np.abs(x - best_params[2]) - 48.)) < 2)] = False
badpix += (np.abs(res) > (20*an.stdr(res, nsigma=4)))
ecol = np.ones(col.size, dtype=float)
ecol[badpix] += 1e9
best_chisq = pc.errfunc(best_params, tophat2g, x, col, 1./ecol**2)
# Make sure you didn't converge on the wrong model:
for this_offset in ([-2, 0, 2]):
this_guess = fit[0].copy()
this_guess[2] += this_offset
this_guess[0] = estimated_position[jj]
#pc.errfunc(this_guess, tophat2g, x, col, 1./ecol**2)
this_fit = an.fmin(pc.errfunc, this_guess, args=(tophat2g, x, col, 1./ecol**2), full_output=True, maxiter=1e4, maxfun=1e4, disp=False, kw=dict(testfinite=False), holdfixed=holdfixed)
#print this_offset1, this_offset2, this_fit[1]
if this_fit[1] < best_chisq:
best_chisq = this_fit[1]
best_params = this_fit[0].copy()
fits.append(best_params)
chis.append(best_chisq)
nbad.append(badpix.sum())
mod2 = tophat2g(best_params, x)
scatter_model[ylims[0]:ylims[1], ii] = tophat(list(best_params[0:2])+[0], x)
if full_output:
return scatter_model, fits, chis, nbad
else:
return scatter_model
|
e4653a62f7d98a253034fd2908d4231ca23cff8b
| 3,642,657
|
from neurolab.core import Net  # assumed import; this snippet appears to come from the neurolab package
from neurolab import error, init, layer, train, trans  # assumed imports
def newff(minmax, size, transf=None):
"""
Create multilayer perceptron
:Parameters:
minmax: list of list, the outer list is the number of input neurons,
inner lists must contain 2 elements: min and max
Range of input value
size: the length of list equal to the number of layers except input layer,
the element of the list is the neuron number for corresponding layer
Contains the number of neurons for each layer
transf: list (default TanSig)
List of activation function for each layer
:Returns:
net: Net
:Example:
>>> # create neural net with 2 inputs
>>> # input range for each input is [-0.5, 0.5]
>>> # 3 neurons for hidden layer, 1 neuron for output
>>> # 2 layers including hidden layer and output layer
>>> net = newff([[-0.5, 0.5], [-0.5, 0.5]], [3, 1])
>>> net.ci
2
>>> net.co
1
>>> len(net.layers)
2
"""
net_ci = len(minmax)
net_co = size[-1]
if transf is None:
transf = [trans.TanSig()] * len(size)
assert len(transf) == len(size)
layers = []
for i, nn in enumerate(size):
layer_ci = size[i - 1] if i > 0 else net_ci
l = layer.Perceptron(layer_ci, nn, transf[i])
l.initf = init.initnw
layers.append(l)
connect = [[i - 1] for i in range(len(layers) + 1)]
net = Net(minmax, net_co, layers, connect, train.train_bfgs, error.SSE())
return net
|
b488298fea98877cd7097374576722ea4b24e9c3
| 3,642,658
|
import importlib
def write(*args, package="gw", file_format="dat", **kwargs):
"""Read in a results file.
Parameters
----------
args: tuple
all args are passed to write function
package: str
the package you wish to use
file_format: str
the file format you wish to use. Default None. If None, the read
function loops through all possible options
kwargs: dict
all kwargs passed to write function
"""
def _import(package, file_format):
"""Import format module with importlib
"""
return importlib.import_module(
"pesummary.{}.file.formats.{}".format(package, file_format)
)
def _write(module, file_format, args, kwargs):
"""Execute the write method
"""
return getattr(module, "write_{}".format(file_format))(*args, **kwargs)
if file_format == "h5":
file_format = "hdf5"
try:
module = _import(package, file_format)
return _write(module, file_format, args, kwargs)
except (ImportError, AttributeError, ModuleNotFoundError):
module = _import("core", file_format)
return _write(module, file_format, args, kwargs)
|
b7246e035f13b60fc8047abd768fae4bb1937600
| 3,642,659
|
def photo(el, dict_class, img_with_alt, base_url=''):
"""Find an implied photo property
Args:
el (bs4.element.Tag): a DOM element
dict_class: a python class used as a dictionary (set by the Parser object)
img_with_alt: a flag to enable experimental parsing of alt attribute with img (set by the Parser object)
base_url (string): the base URL to use, to reconcile relative URLs
Returns:
string or dictionary: the implied photo value or implied photo as a dictionary with alt value
"""
def get_photo_child(children):
"take a list of children and finds a valid child for photo property"
# if element has one image child use source if exists and img is
# not root class
poss_imgs = [c for c in children if c.name == 'img']
if len(poss_imgs) == 1:
poss_img = poss_imgs[0]
if not mf2_classes.root(poss_img.get('class', [])):
return poss_img
# if element has one object child use data if exists and object is
# not root class
poss_objs = [c for c in children if c.name == 'object']
if len(poss_objs) == 1:
poss_obj = poss_objs[0]
if not mf2_classes.root(poss_obj.get('class', [])):
return poss_obj
# if element is an img use source if exists
prop_value = get_img_src_alt(el, dict_class, img_with_alt, base_url)
if prop_value is not None:
return prop_value
# if element is an object use data if exists
prop_value = get_attr(el, "data", check_name="object")
if prop_value is not None:
return text_type(prop_value)
# find candidate child or grandchild
poss_child = None
children = list(get_children(el))
poss_child = get_photo_child(children)
    # if no possible child found then look for grandchild if there is only one child which is not an mf2 root
if poss_child is None and len(children) == 1 and not mf2_classes.root(children[0].get('class', [])):
grandchildren = list(get_children(children[0]))
poss_child = get_photo_child(grandchildren)
# if a possible child was found parse
if poss_child is not None:
# img get src
prop_value = get_img_src_alt(poss_child, dict_class, img_with_alt, base_url)
if prop_value is not None:
return prop_value
# object get data
prop_value = get_attr(poss_child, "data", check_name="object")
if prop_value is not None:
return text_type(prop_value)
|
304fea5700a8fcc2b95636265dbbc9c5c6dd1635
| 3,642,660
|
from bisect import bisect_left
def calc_TOF(t_pulse, t_signal):
"""Calculate TOF from pulse and signal time arrays."""
tof = []
idxs = [-1]
dbls = []
for t in t_signal:
idx = bisect_left(t_pulse, t)
if idx == len(t_pulse):
t_0 = t_pulse[-1]
else:
t_0 = t_pulse[idx - 1]
if idx == idxs[-1]:
dbls[-1] = 1
dbls.append(1)
else:
dbls.append(0)
idxs.append(idx)
        tof.append((t - t_0) / 1e6)  # convert from ps to us
return dbls, idxs, tof
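# Example (added, with illustrative values): pulse and signal times in picoseconds;
# each TOF is measured against the preceding pulse and reported in microseconds.
if __name__ == '__main__':
    example_pulses = [0, 1_000_000, 2_000_000]
    example_signals = [1_200_000, 1_300_000, 2_500_000]
    dbls, idxs, tof = calc_TOF(example_pulses, example_signals)
    print(tof)  # [0.2, 0.3, 0.5]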
|
b58b14396c85f297e6454195fba597635b5fb54d
| 3,642,661
|
def get_nodes_ips(node_subnets):
"""Get the IPs of the trunk ports associated to the deployment."""
trunk_ips = []
os_net = clients.get_network_client()
tags = CONF.neutron_defaults.resource_tags
if tags:
ports = os_net.ports(status='ACTIVE', tags=tags)
else:
        # NOTE(ltomasbo): if tags are not used, assume all the trunk ports are
# part of the kuryr deployment
ports = os_net.ports(status='ACTIVE')
for port in ports:
if (port.trunk_details and port.fixed_ips and
port.fixed_ips[0]['subnet_id'] in node_subnets):
trunk_ips.append(port.fixed_ips[0]['ip_address'])
return trunk_ips
|
5472d184a4483355c81679ec6df41bc16ca7b32e
| 3,642,662
|
def get_visualizations_info(exp_id, state_name, interaction_id):
"""Returns a list of visualization info. Each item in the list is a dict
with keys 'data' and 'options'.
Args:
exp_id: str. The ID of the exploration.
state_name: str. Name of the state.
interaction_id: str. The interaction type.
Returns:
list(dict). Each item in the list is a dict with keys representing
- 'id': str. The visualization ID.
- 'data': list(dict). A list of answer/frequency dicts.
- 'options': dict. The visualization options.
An example of the returned value may be:
[{'options': {'y_axis_label': 'Count', 'x_axis_label': 'Answer'},
'id': 'BarChart',
'data': [{u'frequency': 1, u'answer': 0}]}]
"""
if interaction_id is None:
return []
visualizations = interaction_registry.Registry.get_interaction_by_id(
interaction_id).answer_visualizations
calculation_ids = set([
visualization.calculation_id for visualization in visualizations])
calculation_ids_to_outputs = {}
for calculation_id in calculation_ids:
        # Don't show top unresolved answers calculation output in stats of
# exploration.
if calculation_id == 'TopNUnresolvedAnswersByFrequency':
continue
# This is None if the calculation job has not yet been run for this
# state.
calc_output_domain_object = _get_calc_output(
exp_id, state_name, calculation_id)
# If the calculation job has not yet been run for this state, we simply
# exclude the corresponding visualization results.
if calc_output_domain_object is None:
continue
# If the output was associated with a different interaction ID, skip the
# results. This filtering step is needed since the same calculation_id
# can be shared across multiple interaction types.
if calc_output_domain_object.interaction_id != interaction_id:
continue
calculation_ids_to_outputs[calculation_id] = (
calc_output_domain_object.calculation_output.to_raw_type())
return [{
'id': visualization.id,
'data': calculation_ids_to_outputs[visualization.calculation_id],
'options': visualization.options,
'addressed_info_is_supported': (
visualization.addressed_info_is_supported),
} for visualization in visualizations
if visualization.calculation_id in calculation_ids_to_outputs]
|
6ce3875be2244bcb3564d1ca006db62f56883a7f
| 3,642,663
|
def push(array, *items):
"""Push items onto the end of `array` and return modified `array`.
Args:
array (list): List to push to.
items (mixed): Items to append.
Returns:
list: Modified `array`.
Warning:
`array` is modified in place.
Example:
>>> array = [1, 2, 3]
>>> push(array, 4, 5, [6])
[1, 2, 3, 4, 5, [6]]
See Also:
- :func:`push` (main definition)
- :func:`append` (alias)
.. versionadded:: 2.2.0
"""
pyd.each(items, array.append)
return array
|
2c43c6b4c5691d7f40601cdb7747ecf6e063f778
| 3,642,664
|
import csv
def compute_list(commandline_argument):
"""
    Returns a list of booking or revenue data by opening the data file given
    as the first parameter
"""
# utf-8_sig
# Open booking CSV and read everything into memory
with open(commandline_argument, "r", encoding="shift_jis") as database:
data = csv.reader(database)
next(data)
list_data = list(data)
return list_data
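# Usage sketch (added): "bookings.csv" is a hypothetical Shift-JIS encoded CSV whose
# header row is skipped by the function.
# rows = compute_list("bookings.csv")
# print(rows[0])  # first data row as a list of strings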
|
115725f5ab35c04412a2fe4f982a72c6b2e4c297
| 3,642,665
|
import re
def calculate_saving(deal, item_prices):
"""
Parse the deal string and calculate how much money is saved
when this deal gets applied. Also returns deal requirement.
Args:
deal (str): deal information
item_prices (dict): {item: price}
Returns:
requirements (collections.Counter): items and quantity required to complete deal
eg. {'F': 3}
saving (int): total saving this deal gives
cost (int): cost of deal
"""
free_re = re.search(r'(\w+) get one ([^\n]+) free', deal)
if free_re:
# saving is value of free item
saving = item_prices[free_re.group(2)]
requirements = aggregate_requirements(free_re.groups())
quantity, item = parse_deal_code(free_re.group(1))
cost = get_cost(item_prices, item, quantity)
else:
# assuming for now that all other deals are just x-for
# saving is difference between deal price and quantity * base price
[(deal_code_quantity, deal_price)] = re.findall(r'(\w+) for (\w+)', deal)
deal_quantity, deal_item = parse_deal_code(deal_code_quantity)
saving = (deal_quantity * item_prices[deal_item]) - int(deal_price)
requirements = aggregate_requirements([deal_code_quantity])
cost = int(deal_price)
return requirements, saving, cost
|
b67e5c53551866a8b390eb49fa4bc29ff9d3261a
| 3,642,666
|
def fetchone_from_table(database, table, values_dict, returning):
"""
Constructs a generic fetchone database command from a generic table with provided table_column:value_dictionary mapping.
Mostly used for other helper functions.
:param database: Current active database connection.
:param table: Table to insert mapping into.
:param values_dict: A dictionary of table_column:value to ingest into the database.
:param returning: A single column, or list of columns, you want returned.
:return: The row in the database filtered on the column(s) defined.
"""
columns = list(values_dict.keys())
if type(returning) is not list and type(returning) is not tuple:
returning = [returning]
db = database.select(*returning).FROM(table).WHERE(Eq(columns[0], values_dict[columns[0]]))
for column in columns[1:]:
if values_dict[column] is not None:
db = db.AND(Eq(column, values_dict[column]))
else:
db = db.AND(IsNull(column))
return db.fetchone()
|
8b757bb5e9e7ba50a335a4917630543ef6989714
| 3,642,667
|
import os
def recover_buildpack(app_folder):
"""
Given the path to an app folder where an app was just built, return a
BuildPack object pointing to the dir for the buildpack used during the
build.
Relies on the builder.sh script storing the buildpack location in
/.buildpack inside the container.
"""
filepath = os.path.join(app_folder, '.buildpack')
with open(filepath) as f:
buildpack_picked = f.read()
buildpack_picked = buildpack_picked.lstrip('/')
buildpack_picked = buildpack_picked.rstrip('\n')
buildpack_picked = os.path.join(os.getcwd(), buildpack_picked)
return BuildPack(buildpack_picked)
|
e82f70b06caee2d275820136e58c2b5b5860f5d5
| 3,642,668
|
def ParseMultiCpuMask(cpu_mask):
"""Parse a multiple CPU mask definition and return the list of CPU IDs.
CPU mask format: colon-separated list of comma-separated list of CPU IDs
or dash-separated ID ranges, with optional "all" as CPU value
Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]
@type cpu_mask: str
@param cpu_mask: multiple CPU mask definition
@rtype: list of lists of int
@return: list of lists of CPU IDs
"""
if not cpu_mask:
return []
cpu_list = []
for range_def in cpu_mask.split(constants.CPU_PINNING_SEP):
if range_def == constants.CPU_PINNING_ALL:
cpu_list.append([constants.CPU_PINNING_ALL_VAL, ])
else:
# Uniquify and sort the list before adding
cpu_list.append(sorted(set(ParseCpuMask(range_def))))
return cpu_list
|
c114d931567ba3fdfc50b3eb7cd6cc7764bdd1d6
| 3,642,669
|
def _call(arg):
"""Shortcut for comparing call objects
"""
return _Call(((arg, ), ))
|
d7ec1e0c29b52f7d77daec3d2fa7c5f8ba6ea1f8
| 3,642,670
|
from typing import List
from graphql import DirectiveNode, GraphQLNamedType
def gather_directives(
type_object: GraphQLNamedType,
) -> List[DirectiveNode]:
"""Get all directive attached to a type."""
directives: List[DirectiveNode] = []
if hasattr(type_object, "extension_ast_nodes"):
if type_object.extension_ast_nodes:
for ast_node in type_object.extension_ast_nodes:
if ast_node.directives:
directives.extend(ast_node.directives)
if hasattr(type_object, "ast_node"):
if type_object.ast_node and type_object.ast_node.directives:
directives.extend(type_object.ast_node.directives)
return directives
|
211108bd35940e412e68e0b092ada050ce745c8e
| 3,642,671
|
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
def get_request(language=None):
"""
Returns a Request instance populated with cms specific attributes.
"""
request_factory = RequestFactory()
request = request_factory.get("/")
request.session = {}
request.LANGUAGE_CODE = language or settings.LANGUAGE_CODE
request.current_page = None
request.user = AnonymousUser()
return request
|
1ef86666693118ccd36f8d9818e70566e895cb13
| 3,642,672
|
from typing import cast
def validate_hash(value: str) -> bool:
"""
Validates a hash value.
"""
return cast(bool, HASH_RE.match(value))
|
c273e5287e2e0448c2e58173813d725f61f3210a
| 3,642,673
|
def string_to_digit(string, output):
"""Convert string to float/int if possible (designed to extract number
    from a price string, e.g. 250 EUR -> 250)
:argument string: string to convert
:type string: str
:argument output: output type
:type output: type
:returns float/int or None
"""
string = strip_space(string)
if not string[0].isdigit() and not string[1].isdigit():
return None
string_items = []
for index, item in enumerate(string):
if item.isdigit():
string_items.append(item)
else:
if item == ',':
string_items.append('.')
elif item == ' ' and string[index + 1].isdigit():
pass
elif not item.isdigit() and not string[index + 1].isdigit():
break
if '.' in string_items and output == int:
return int(float(''.join(string_items)))
return output(''.join(string_items))
|
dc83485e7ffa4548aee019b67f23ba6db385421d
| 3,642,674
|
from cmath import rect
from math import radians
import numpy as np
def calc_B_phasors(current, xp, yp, cable_array):
"""It calculates the phasors of the x and y components of the
magnetic induction field B in a given point for a given cable.
Given the input, the function rectifies the current phase
extracting respectively the real and imaginary part of it.
Then, both real and imaginary part of x and y components are
multiplied by a transfer function (dependent on the spatial
disposition of the cable in respect to the point of interest)
    resulting in the magnetic induction B phasor components of a
single cable.
Parameters
-------------------
current : int
Current (A) circulating inside the considered power line
(composed of a triad of cables)
xp, yp : float
Abscissa (m) and ordinate (m) of the point of interest where
the magnetic induction field B will be calculated at last
cable_array : numpy.ndarray
First column - Current phase belonging to the n-th cable under consideration
Second and third columns - Abscissa and ordinate of the n-th cable under consideration
Returns
-------------------
B_phasors_n : numpy.ndarray
Respectively the real and imaginary part (columns) of the
x and y components (rows) of the magnetic induction field B
produced by a single cable in a given point
Notes
-------------------
The current function implements the calculations present both in
[1]_"Norma Italiana CEI 106-11" formulas (5) and [2]_"Norma Italiana
CEI 211-4" formulas (16).
References
-------------------
..[1] Norma Italiana CEI 106-11, "Guide for the determination of
the respect widths for power lines and substations according to
DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables",
first edition, 2006-02.
..[2] Norma Italiana CEI 211-4, "Guide to calculation methods of
electric and magnetic fields generated by power-lines and electrical
substations", second edition, 2008-09.
"""
ph_n_rad = radians(cable_array[0])
I_complex = rect(current, ph_n_rad)
I_components = np.array([I_complex.real, I_complex.imag])
coef = (MU_ZERO / (2*PI)) / ((xp - cable_array[1])**2 + (yp - cable_array[2])**2)
transfer_fn_n = np.array([(cable_array[2] - yp) * coef, (xp - cable_array[1]) * coef]).reshape(2, 1)
B_phasors_n = I_components * transfer_fn_n
return B_phasors_n
|
66f696121f3a26a99e4d662b3680d21199bedcc1
| 3,642,675
|
def get_shortest_text_value(entry):
"""Given a JSON-LD entry, returns the text attribute that has the
shortest length.
Parameters
----------
entry: dict
        A JSON-LD entry parsed into a nested python dictionary via the json
module
Returns
-------
short_text: str
Of the text values, the shortest one
"""
text_attr = 'http://www.ontologyrepository.com/CommonCoreOntologies/has_text_value'
if text_attr in entry:
text_values = entry[text_attr]
text_values = [i['@value'] for i in text_values]
return get_shortest_string(text_values)
else:
return None
|
c63511a830f56c610230e0c7e208a926b0304b3c
| 3,642,676
|
from datetime import datetime, timedelta
from flask import current_app
import jwt
def generate_token(user_id, expires_in=3600):
"""Generate a JWT token.
:param user_id the user that will own the token
:param expires_on expiration time in seconds
"""
secret_key = current_app.config['JWT_SECRET_KEY']
return jwt.encode(
{'user_id': user_id,
'exp': datetime.utcnow() + timedelta(seconds=expires_in)},
secret_key, algorithm='HS256').decode('utf-8')
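# Usage sketch (added): assumes a Flask app with JWT_SECRET_KEY configured and
# PyJWT < 2.0 (where jwt.encode returns bytes, hence the .decode above).
# from flask import Flask
# app = Flask(__name__)
# app.config['JWT_SECRET_KEY'] = 'change-me'
# with app.app_context():
#     token = generate_token(user_id=42, expires_in=600)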
|
62a92d7903e941446dbef3097b12ba2adf9a1fd5
| 3,642,677
|
import codecs
def debom(s):
"""
此函数是去除字符串中bom字符,
由于此字符出现再文件的头位置
所以对csv 的header造成的影响, 甚至乱码
通过此函数可以避免这种情况
"""
boms = [ k for k in dir(codecs) if k.startswith('BOM') ]
for bom in boms:
s = s.replace(getattr(codecs, bom), '')
return s
|
4ac056a8ba93f00a0a31e3a447e62810c5b68687
| 3,642,678
|
import numpy as np
def rolling_window(array, window):
"""
apply a rolling window to a np.ndarray
:param array: (np.ndarray) the input Array
:param window: (int) length of the rolling window
:return: (np.ndarray) rolling window on the input array
"""
shape = array.shape[:-1] + (array.shape[-1] - window + 1, window)
strides = array.strides + (array.strides[-1],)
return np.lib.stride_tricks.as_strided(array, shape=shape, strides=strides)
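# Example (added): length-3 windows over a small array.
if __name__ == '__main__':
    windows = rolling_window(np.arange(6), 3)
    print(windows.shape)  # (4, 3); each row is a window shifted by one element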
|
cde677c20c6a4d5f096d2f36bae2fc1087b73ccf
| 3,642,679
|
from itertools import product
import numpy as np
def calculate_timezone_distance_matrix(df):
"""
Calculate timezone distance matrix from a given dataframe
"""
n_users = len(df)
timezone_df = df[['idx', 'timezone', 'second_timezone']]
timezone_df.loc[:, 'timezone'] = timezone_df.timezone.map(
lambda t: remove_text_parentheses(t).split(' ')[-1]
)
timezone_df.loc[:, 'second_timezone'] = timezone_df.second_timezone.map(
lambda t: remove_text_parentheses(t).split(' ')[-1].replace('me', ' ')
)
timezone_list = timezone_df.to_dict(orient='records')
D_tz = np.zeros((n_users, n_users))
for d1, d2 in product(timezone_list, timezone_list):
idx1, idx2, tz_dist = compute_tz_distance_dict(d1, d2)
D_tz[idx1, idx2] = tz_dist
return D_tz
|
d788ab8758d0300516ecde2749cd438597775440
| 3,642,680
|
import platform
def pyxstyle_path(x, venv_dir="venv"):
"""Calculate the path to py{x}style in the venv directory relative to project root."""
extension = ".exe" if platform.system() == "Windows" else ""
bin_dir = "Scripts" if platform.system() == "Windows" else "bin"
return [str(path_here / f"{venv_dir}/{bin_dir}/py{x}style{extension}")]
|
2eaf3686f20db6ca26b55dc5cf065e926d6fd5ef
| 3,642,681
|
def update_tc_junit_resultfile(tc_junit_obj, kw_junit_list, tc_timestamp):
"""loop through kw_junit object and attach keyword result to testcase
Arguments:
1. tc_junit_obj = target testcase
2. kw_junit_list = list of keyword junit objects
3. tc_timestamp = target testcase timestamp
"""
for master_tc in tc_junit_obj.root.iter('testcase'):
# make sure we are modifying the correct testcase
if master_tc.get('timestamp') == tc_timestamp:
for kw_junit_obj in kw_junit_list:
for tc_part in kw_junit_obj.root.iter('testcase'):
# make sure we are obtaining only the wanted keywords
if tc_part.get('timestamp') == tc_timestamp:
# add keyword element to testcase, add property result
# to properties, update count
for result in tc_part.find('properties').iter('property'):
if result.get('type') == "keyword":
master_tc.find('properties').append(result)
master_tc.attrib = update_attribute(master_tc.attrib, tc_part.attrib)
return tc_junit_obj
|
97c85b6b197a5e325c51bcb70d0f7e173f1feb1d
| 3,642,682
|
from OpenGL.error import GLError
def checkFramebufferStatus():
"""Utility method to check status and raise errors"""
status = glCheckFramebufferStatus( GL_FRAMEBUFFER )
if status == GL_FRAMEBUFFER_COMPLETE:
return True
description = None
for error_constant in [
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT,
GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT,
GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS,
GL_FRAMEBUFFER_INCOMPLETE_FORMATS,
GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER,
GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER,
GL_FRAMEBUFFER_UNSUPPORTED,
]:
if status == error_constant:
status = error_constant
description = str(status)
raise GLError(
err=status,
result=status,
baseOperation=glCheckFramebufferStatus,
description=description,
)
|
9a603cde0b4f9cda340f969eb8b8137cbf34fede
| 3,642,683
|
def load_sentences(filename):
"""give us a list of sentences where each sentence is a list of tokens.
Assumes the input file is one sentence per line, pre-tokenized."""
out = []
with open(filename) as infile:
for line in infile:
line = line.strip()
tokens = line.split()
out.append(tokens)
return out
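# Usage sketch (added): "corpus.txt" is a hypothetical file with one pre-tokenized
# sentence per line.
# sentences = load_sentences("corpus.txt")
# print(sentences[0])  # list of token strings for the first sentence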
|
6a4c458f9a0d9b17eaa38c38570dacc4c40e86c0
| 3,642,684
|
def imageSequenceRepr(files, strFormat='{pre}[{firstNum}:{lastNum}]{post}', forceRepr=False):
""" Takes a list of files and creates a string that represents the sequence.
Args:
files (list): A list of files in the image sequence.
format (str): Used to format the output. Uses str.format() command and requires the
keys [pre, firstNum, lastNum, post]. Defaults to '{pre}[{firstNum}:{lastNum}]{post}'
forceRepr (bool): If False and a single frame is provided, it will return just that frame.
If True and a single frame is provided, it will return a repr with that frame as the
firstNum and lastNum value. False by default.
Returns:
str: A string representation of the Image Sequence.
"""
if len(files) > 1 or (forceRepr and files):
match = imageSequenceInfo(files[0])
if match:
info = {}
for f in files:
frame = imageSequenceInfo(f)
if frame and frame.group('frame'):
frame = frame.group('frame')
info.update({int(frame):frame})
if info:
keys = sorted(info.keys())
low = info[keys[0]]
high = info[keys[-1]]
if forceRepr or low != high:
return strFormat.format(pre=match.group('pre'), firstNum=low, lastNum=high, post=match.group('post'))
if files:
return files[0]
return ''
|
9f8b63594eb86f54bf8d264b12bd006c2c50da20
| 3,642,685
|
import numpy as np
def farthest_point_sampling(D, k, random_init=True):
"""
Samples points using farthest point sampling
Parameters
-------------------------
D : (n,n) distance matrix between points
k : int - number of points to sample
random_init : Whether to sample the first point randomly or to
take the furthest away from all the other ones
Output
--------------------------
fps : (k,) array of indices of sampled points
"""
if random_init:
inds = [np.random.randint(D.shape[0])]
else:
inds = [np.argmax(D.sum(1))]
dists = D[inds]
for _ in range(k-1):
newid = np.argmax(dists)
inds.append(newid)
dists = np.minimum(dists,D[newid])
return np.asarray(inds)
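# Example (added): sample 5 well-spread indices from 100 random 2D points using a
# plain Euclidean distance matrix.
if __name__ == '__main__':
    pts = np.random.rand(100, 2)
    D = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    print(farthest_point_sampling(D, 5, random_init=False))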
|
d5ab5c3f30048cd16bf38bc73bf48f58f88ef383
| 3,642,686
|
def part2(lines):
"""
>>> part2(load_example(__file__, '9'))
982
"""
return run(lines, max)
|
c243792fc4b3e32a5bc41f5185cbbe1ef2d6b840
| 3,642,687
|
import os
import subprocess
import shlex
import pandas
def load_description():
"""
Reads pandas dataframe with name(description) and category for FG phenos
"""
description = "./description.txt"
if not os.path.isfile(description):
cmd = f"gsutil cp {pheno_description} {description}"
subprocess.call(shlex.split(cmd))
d = pandas.read_csv(description,delimiter='\t',encoding= 'unicode_escape',index_col = "phenocode").T
return d
|
91d2cb5bf2a4224ae912db59c417d2ca293edd35
| 3,642,688
|
def get_utility_function(args):
"""
Select the utility function.
:param args: the arguments for the program.
:return: the utility function (handler).
"""
if args.mode == 'entropy':
utility_function = compute_utility_scores_entropy
elif args.mode == 'entropyrev': # Reverse entropy method
utility_function = compute_utility_scores_entropyrev
elif args.mode == 'maxprivacy': # maximize privacy cost
utility_function = compute_utility_scores_privacy2
elif args.mode == 'gap':
utility_function = compute_utility_scores_gap
elif args.mode == 'greedy':
utility_function = compute_utility_scores_greedy
elif args.mode == 'deepfool':
utility_function = compute_utility_scores_deepfool
elif args.mode == 'random':
utility_function = compute_utility_scores_random
elif args.mode == "knockoff": # Knockoff Nets with Random querying
utility_function = compute_utility_scores_random
elif args.mode == "copycat": # CopyCat CNN
utility_function = compute_utility_scores_random
elif args.mode == 'jacobian' or args.mode == 'jacobiantr': # JBDA, JBDA-TR
utility_function = compute_utility_scores_random
elif args.mode == "inoutdist": # Potential attack (combine ID and OOD Data)
utility_function = compute_utility_scores_random
elif args.mode == "worstcase": # Attacker knows exact value of the privacy cost
utility_function = compute_utility_scores_privacy
elif args.mode == "worstcasepate": # Attacker knows exact value of the pate cost
utility_function = compute_utility_scores_pate
else:
raise Exception(f"Unknown query selection mode: {args.mode}.")
return utility_function
|
fc49a9f3456a187b6015c9fbbdaa4faba0dfc778
| 3,642,689
|
def convertRequestToStringWhichMayBeEmpty(paramName, source):
""" Handle strings which may be empty or contain "None".
Empty strings should be treated as "None". The "None" strings are from the timeSlicesValues
div on the runPage.
Args:
paramName (str): Name of the parameter in which we are interested in.
source (dict): Source of the information. Usually request.args or request.form.
This function is fairly similar to `convertRequestToPythonBool`.
"""
paramValue = source.get(paramName, None, type=str)
#logger.info("{0}: {1}".format(paramName, paramValue))
#if paramValue == "" or paramValue == "None" or paramValue == None:
# If we see "None", then we want to be certain that it is None!
# Otherwise, we will interpret an empty string as a None value!
if paramValue == "" or paramValue == "None":
paramValue = None
# To get an empty string, we need to explicitly select one with this contrived value.
# We need to do this because it is possible for the group selection pattern to be an empty string,
# but that is not equal to no hist being selected in a request.
if paramValue == "nonSubsystemEmptyString":
paramValue = ""
logger.info("{0}: {1}".format(paramName, paramValue))
return paramValue
|
d620197e60e15d48d450a9406cbe3f4ea61a64d8
| 3,642,690
|
import re
def find_am(list_tokens:list):
"""[summary]
Parameters
----------
list_tokens : list
list of tokens
Returns
-------
tuple(list,list)
matches tokens,
token indexes
"""
string = "START"+" ".join(list_tokens).lower()
match = re.findall(am_pattern, string)
if match:
matched_tokens = match[0].split()
indexes = list(range(len(matched_tokens)))
return matched_tokens, indexes
else:
return [], []
|
91663e71882764045d679b24c5cb793f1c192410
| 3,642,691
|
import os
import pandas as pd
def fetch_seq_assembly(mygroup, assembly_final_df, keyargs):
"""
Function that will fetch the genome from the taxid in group and
concat the assembly that is created by ngs
"""
keyargs['taxids'] = [str(taxid) for taxid in mygroup.TaxId.tolist()]
#ngd.download(**keyargs)
get_cmdline_ndg(**keyargs)
# Test if we download genomes else return assembly_final_df
if os.path.isfile(snakemake.output.assembly_output):
# Read the information about the assembly and concatenate with previous one
tmp_assembly = pd.read_table(snakemake.output.assembly_output)
# Remove the file
os.remove(snakemake.output.assembly_output)
return pd.concat([assembly_final_df, tmp_assembly])
else :
return assembly_final_df
|
8e289038d17b9e75ef0d608b63828c82048080cc
| 3,642,692
|
import networkx as nx
def calculate_min_cost_path(source_node: int, target_node: int, graph: nx.Graph) -> (list, int):
"""
Calculates the minimal cost path with respect to node-weights from terminal1 to terminal2 on the given graph by
converting the graph into a line graph (converting nodes to edges) and solving the respective shortest path problem
on the edges.
:param source_node: the source node from the given graph from which to calculate the min cost path to the target
:param target_node: the target node from the given graph
:param graph: the graph on which we want to find the min cost path from source to target with respect to
the node weights that are labelled with 'cost'
:return: the min cost path from source to target, and the cost of the path, with respect to the node costs
"""
line_graph = nx.line_graph(graph)
line_graph, source, target = adjust_line_graph(source_node, target_node, line_graph, graph)
path = nx.shortest_path(line_graph, source, target, weight="cost")
cost = calculate_path_cost_on_line_graph(path, line_graph)
path = convert_line_graph_path(path)
return path, cost
|
ad48d0930f010ffa04f142710bd7f3aafd932e76
| 3,642,693
|
def promote(name):
"""
Promotes a clone file system to no longer be dependent on its "origin"
snapshot.
.. note::
This makes it possible to destroy the file system that the
clone was created from. The clone parent-child dependency relationship
is reversed, so that the origin file system becomes a clone of the
specified file system.
The snapshot that was cloned, and any snapshots previous to this
snapshot, are now owned by the promoted clone. The space they use moves
from the origin file system to the promoted clone, so enough space must
be available to accommodate these snapshots. No new space is consumed
by this operation, but the space accounting is adjusted. The promoted
clone must not have any conflicting snapshot names of its own. The
rename subcommand can be used to rename any conflicting snapshots.
name : string
name of clone-filesystem
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.promote myzpool/myclone
"""
## Promote clone
res = __salt__["cmd.run_all"](
__utils__["zfs.zfs_command"](
command="promote",
target=name,
),
python_shell=False,
)
return __utils__["zfs.parse_command_result"](res, "promoted")
|
7d6ddd76525096c13f7ff4b1a07121d52e688b24
| 3,642,694
|
from rest_framework import status
from rest_framework.response import Response
def otp_verification(request):
"""Api view for verifying OTPs """
if request.method == 'POST':
serializer = OTPVerifySerializer(data = request.data)
if serializer.is_valid():
data = serializer.verify_otp(request)
return Response(data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
|
513dff831deca69a8087fe99d27f1f54108d4ae2
| 3,642,695
|
import os
import xml.etree.ElementTree as ET
from typing import MutableMapping, Union
def parse_tag_file(doc: ET.ElementTree) -> dict:
"""
Takes in an XML tree from a Doxygen tag file and returns a dictionary that looks something like:
.. code-block:: python
{'PolyVox': Entry(...),
'PolyVox::Array': Entry(...),
'PolyVox::Array1DDouble': Entry(...),
'PolyVox::Array1DFloat': Entry(...),
'PolyVox::Array1DInt16': Entry(...),
'QScriptContext::throwError': FunctionList(...),
'QScriptContext::toString': FunctionList(...)
}
Note the different form for functions. This is required to allow for 'overloading by argument type'.
:Parameters:
doc : xml.etree.ElementTree
The XML DOM object
:return: a dictionary mapping fully qualified symbols to files
"""
mapping = {} # type: MutableMapping[str, Union[Entry, FunctionList]]
function_list = [] # This is a list of function to be parsed and inserted into mapping at the end of the function.
for compound in doc.findall('./compound'):
compound_kind = compound.get('kind')
if compound_kind not in {'namespace', 'class', 'struct', 'file', 'define', 'group', 'page'}:
continue
compound_name = compound.findtext('name')
compound_filename = compound.findtext('filename')
# TODO The following is a hack bug fix I think
# Doxygen doesn't seem to include the file extension to <compound kind="file"><filename> entries
# If it's a 'file' type, check if it _does_ have an extension, if not append '.html'
if compound_kind in ('file', 'page') and not os.path.splitext(compound_filename)[1]:
compound_filename = compound_filename + '.html'
# If it's a compound we can simply add it
mapping[compound_name] = Entry(kind=compound_kind, file=compound_filename)
for member in compound.findall('member'):
# If the member doesn't have an <anchorfile> element, use the parent compounds <filename> instead
# This is the way it is in the qt.tag and is perhaps an artefact of old Doxygen
anchorfile = member.findtext('anchorfile') or compound_filename
member_symbol = compound_name + '::' + member.findtext('name')
member_kind = member.get('kind')
arglist_text = member.findtext('./arglist') # If it has an <arglist> then we assume it's a function. Empty <arglist> returns '', not None. Things like typedefs and enums can have empty arglists
if arglist_text and member_kind not in {'variable', 'typedef', 'enumeration'}:
function_list.append((member_symbol, arglist_text, member_kind, join(anchorfile, '#', member.findtext('anchor'))))
else:
mapping[member_symbol] = Entry(kind=member.get('kind'), file=join(anchorfile, '#', member.findtext('anchor')))
for member_symbol, arglist, kind, anchor_link in function_list:
try:
normalised_arglist = normalise(member_symbol + arglist)[1]
except ParseException as e:
print('Skipping %s %s%s. Error reported from parser was: %s' % (kind, member_symbol, arglist, e))
else:
if mapping.get(member_symbol) and isinstance(mapping[member_symbol], FunctionList):
mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
else:
mapping[member_symbol] = FunctionList()
mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
return mapping
|
9b35f07a63df87a3aa4f95d5ce87a813d2d73ea3
| 3,642,696
|
import numpy as np
from scipy.stats import kde
def hpd_grid(sample, alpha=0.05, roundto=2):
"""Calculate highest posterior density (HPD) of array for given alpha.
The HPD is the minimum width Bayesian credible interval (BCI).
The function works for multimodal distributions, returning more than one mode
Parameters
----------
sample : Numpy array or python list
An array containing MCMC samples
alpha : float
Desired probability of type I error (defaults to 0.05)
roundto: integer
Number of digits after the decimal point for the results
Returns
----------
hpd: array with the lower
"""
sample = np.asarray(sample)
sample = sample[~np.isnan(sample)]
# get upper and lower bounds
l = np.min(sample)
u = np.max(sample)
density = kde.gaussian_kde(sample)
x = np.linspace(l, u, 2000)
y = density.evaluate(x)
    #y = density.evaluate(x, l, u) waiting for PR to be accepted
xy_zipped = zip(x, y/np.sum(y))
xy = sorted(xy_zipped, key=lambda x: x[1], reverse=True)
xy_cum_sum = 0
hdv = []
for val in xy:
xy_cum_sum += val[1]
hdv.append(val[0])
if xy_cum_sum >= (1-alpha):
break
hdv.sort()
diff = (u-l)/20 # differences of 5%
hpd = []
hpd.append(round(min(hdv), roundto))
for i in range(1, len(hdv)):
if hdv[i]-hdv[i-1] >= diff:
hpd.append(round(hdv[i-1], roundto))
hpd.append(round(hdv[i], roundto))
hpd.append(round(max(hdv), roundto))
ite = iter(hpd)
hpd = list(zip(ite, ite))
modes = []
for value in hpd:
x_hpd = x[(x > value[0]) & (x < value[1])]
y_hpd = y[(x > value[0]) & (x < value[1])]
modes.append(round(x_hpd[np.argmax(y_hpd)], roundto))
return hpd, x, y, modes
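# Example (added): 95% HPD interval of a unimodal sample; for a standard normal draw
# the interval is roughly (-1.96, 1.96) with a single mode near 0.
if __name__ == '__main__':
    draws = np.random.normal(0, 1, 5000)
    hpd, x, y, modes = hpd_grid(draws, alpha=0.05)
    print(hpd, modes)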
|
ee3a24f056a038b5e9ec08875d37f39e0a8180f1
| 3,642,697
|
import sys
def get_internal_modules(key='exa'):
"""
Get a list of modules belonging to the given package.
Args:
key (str): Package or library name (e.g. "exa")
"""
key += '.'
return [v for k, v in sys.modules.items() if k.startswith(key)]
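# Example (added): list the already-imported submodules of a package.
if __name__ == '__main__':
    import json  # importing json also loads submodules such as json.decoder and json.scanner
    print(get_internal_modules('json'))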
|
d97618ba37ad403a74fc13a7587c6369fab540fb
| 3,642,698
|
import re
def emph_rule(phrase: str) -> Rule:
"""
Check if the phrase only ever appears with or without a surrounding \\emph{...}.
For example, "et al." can be spelled like "\\emph{et al.}" or "et al.", but it should be
consistent.
"""
regex = r"(?:\\emph\{)?"
regex += r"(?:" + re.escape(phrase) + r")"
regex += r"(?:\})?"
return Rule(name=phrase, regex=re.compile(regex))
|
a6eb0eb716a265876efd857dd51dc6106a24f35b
| 3,642,699
|