content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def zip_dir(dir_path, zip_path):
    """
    Compress every file under a directory tree into a ZIP archive,
    deleting each source file once it has been written to the archive.

    :param dir_path: path of the directory to compress
    :param zip_path: path of the resulting .zip file
    :return: None. Errors are printed and logged, not raised.
    """
    try:
        # Context manager guarantees the archive is finalized even if a
        # write fails part-way through (the original could leak the handle).
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_file:
            for root, dirnames, filenames in os.walk(dir_path):
                # Strip the root prefix so archive members are stored
                # relative to dir_path, not as absolute paths.
                arc_root = root.replace(dir_path, '')
                for filename in filenames:
                    src = os.path.join(root, filename)
                    zip_file.write(src, os.path.join(arc_root, filename))
                    # BUGFIX: remove the file that was actually archived.
                    # The original removed dir_path + '/' + filename, which
                    # fails (or deletes the wrong file) for entries living
                    # in subdirectories of dir_path.
                    os.remove(src)
    except Exception as e:
        print(e)
        logging.error(e)
def write_svg(
    output: TextIO,
    document: Document,
    page_size: Optional[Tuple[float, float]] = None,
    center: bool = False,
    source_string: str = "",
    layer_label_format: str = "%d",
    show_pen_up: bool = False,
    color_mode: str = "none",
    single_path: bool = False
) -> None:
    """Create a SVG from a :py:class:`Document` instance.

    If no page size is provided (or (0, 0) is passed), the SVG generated has bounds tightly
    fitted around the geometries. Otherwise the provided size (in pixel) is used. The width
    and height is capped to a minimum of 1 pixel.

    By default, no translation is applied on the geometry. If `center=True`, geometries are
    moved to the center of the page. No scaling or rotation is applied to geometries.

    Layers are named after `layer_label_format`, which may contain a C-style format specifier
    such as `%d` which will be replaced by the layer number.

    For previsualisation purposes, pen-up trajectories can be added to the SVG and path can
    be colored individually (``color_mode="path"``) or layer-by-layer (``color_mode="layer"``).

    Args:
        output: text-mode IO stream where SVG code will be written
        document: geometries to be written
        page_size: if provided, overrides document.page_size
        center: center geometries on page before export
        source_string: value of the `source` metadata
        layer_label_format: format string for layer label naming
        show_pen_up: add paths for the pen-up trajectories
        color_mode: "none" (no formatting), "layer" (one color per layer), "path" (one color
            per path)
        single_path: if true, we use svg:path elements to write monolithic lines. This is useful
            to speed up importing SVG file into InkScape and maybe other vector graphic software.
            If false, we use svg:line, svg:polyline and svg:polygon standard elements.
            Note: ``color_mode="path"`` cannot color individual paths in this mode because
            all lines of a layer are merged into one svg:path.
    """
    # compute bounds
    bounds = document.bounds()
    if bounds is None:
        # empty geometry, we provide fake bounds
        bounds = (0, 0, 1, 1)
    if page_size:
        size = page_size
        tight = page_size == (0.0, 0.0)
    elif document.page_size:
        size = document.page_size
        tight = False
    else:
        size = (bounds[2] - bounds[0], bounds[3] - bounds[1])
        tight = True
    if center:
        corrected_doc = copy.deepcopy(document)
        corrected_doc.translate(
            (size[0] - (bounds[2] - bounds[0])) / 2.0 - bounds[0],
            (size[1] - (bounds[3] - bounds[1])) / 2.0 - bounds[1],
        )
    elif tight:
        # tight fit: shift everything so the geometry's min corner sits at the origin
        corrected_doc = copy.deepcopy(document)
        corrected_doc.translate(-bounds[0], -bounds[1])
    else:
        corrected_doc = document
    # output SVG, width/height are capped to 1px
    capped_size = tuple(max(1, s) for s in size)
    size_cm = tuple(f"{round(s / UNITS['cm'], 8)}cm" for s in capped_size)
    dwg = svgwrite.Drawing(size=size_cm, profile="tiny", debug=False)
    inkscape = Inkscape(dwg)
    dwg.attribs.update(
        {
            "viewBox": f"0 0 {capped_size[0]} {capped_size[1]}",
            "xmlns:dc": "http://purl.org/dc/elements/1.1/",
            "xmlns:cc": "http://creativecommons.org/ns#",
            "xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        }
    )
    # add metadata (Dublin Core format/source/date inside an RDF envelope)
    metadata = ElementTree.Element("rdf:RDF")
    work = ElementTree.SubElement(metadata, "cc:Work")
    fmt = ElementTree.SubElement(work, "dc:format")
    fmt.text = "image/svg+xml"
    source = ElementTree.SubElement(work, "dc:source")
    source.text = source_string
    date = ElementTree.SubElement(work, "dc:date")
    date.text = datetime.datetime.now().isoformat()
    dwg.set_metadata(metadata)
    color_idx = 0
    if show_pen_up:
        # dedicated semi-transparent layer for pen-up travel moves
        group = inkscape.layer(label="% pen up trajectories")
        group.attribs["fill"] = "none"
        group.attribs["stroke"] = "black"
        group.attribs["style"] = "display:inline; stroke-opacity: 50%; stroke-width: 0.5"
        group.attribs["id"] = "pen_up_trajectories"
        for layer in corrected_doc.layers.values():
            for line in layer.pen_up_trajectories():
                if single_path:
                    group.add(
                        dwg.path(d='M{:1.3f},{:1.3f} {:1.3f},{:1.3f}'.format(line[0].real, line[0].imag, line[1].real, line[1].imag))
                    )
                else:
                    group.add(
                        dwg.line((line[0].real, line[0].imag), (line[-1].real, line[-1].imag))
                    )
        dwg.add(group)
    for layer_id in sorted(corrected_doc.layers.keys()):
        layer = corrected_doc.layers[layer_id]
        group = inkscape.layer(label=str(layer_label_format % layer_id))
        group.attribs["fill"] = "none"
        if color_mode == "layer":
            group.attribs["stroke"] = _COLORS[color_idx % len(_COLORS)]
            color_idx += 1
        else:
            group.attribs["stroke"] = "black"
        group.attribs["style"] = "display:inline"
        group.attribs["id"] = f"layer{layer_id}"
        monolithic_path = []
        for line in layer:
            if len(line) <= 1:
                # a single point cannot be rendered as a stroke
                continue
            if single_path:
                # accumulate path data; emitted as one svg:path after the loop
                if len(line) == 2:
                    monolithic_path.append('M{:1.3f},{:1.3f} {:1.3f},{:1.3f}'.format(line[0].real, line[0].imag, line[1].real, line[1].imag))
                elif line[0] == line[-1]:
                    # closed line: drop duplicated last point, close with Z
                    d = 'M{:1.3f},{:1.3f}'.format(line[0].real, line[0].imag)
                    for c in line[:-1]:
                        d += ' {:1.3f},{:1.3f}'.format(c.real, c.imag)
                    d += ' Z'
                    monolithic_path.append(d)
                else:
                    d = 'M{:1.3f},{:1.3f}'.format(line[0].real, line[0].imag)
                    for c in line:
                        d += ' {:1.3f},{:1.3f}'.format(c.real, c.imag)
                    monolithic_path.append(d)
            else:
                if len(line) == 2:
                    path = dwg.line((line[0].real, line[0].imag), (line[1].real, line[1].imag))
                elif line[0] == line[-1]:
                    path = dwg.polygon((c.real, c.imag) for c in line[:-1])
                else:
                    path = dwg.polyline((c.real, c.imag) for c in line)
                # BUGFIX: per-path coloring (and adding the element) only makes
                # sense when individual elements exist. The original code ran
                # this block unconditionally, so single_path=True together with
                # color_mode="path" referenced the unbound name `path` and
                # raised NameError (or silently recolored a stale element).
                if color_mode == "path":
                    path.attribs["stroke"] = _COLORS[color_idx % len(_COLORS)]
                    color_idx += 1
                group.add(path)
        if single_path:
            group.add(dwg.path(' '.join(monolithic_path)))
        dwg.add(group)
    dwg.write(output, pretty=True)
def get_elevation_data(lonlat, dem_path):
    """
    Get elevation data for a scene.

    :param lonlat:
        Coordinates of the scene center as a 2-tuple of floats.
        The name suggests (longitude, latitude) ordering, though the
        original docstring said "latitude, longitude" — verify against
        ``get_pixel``'s expectation before relying on the order.
    :type lonlat:
        float (2-tuple)

    :param dem_path:
        The directory in which the DEM ("DEM_one_deg.tif") can be found.
    :type dem_path:
        str

    :return:
        2-tuple of (elevation value scaled by 0.001, metadata dict with
        'data_source', 'url' and the keys returned by
        ``extract_ancillary_metadata``).
    :raises AncillaryError:
        If the coordinate falls outside the DEM raster (IndexError from
        ``get_pixel``).
    """
    datafile = pjoin(dem_path, "DEM_one_deg.tif")
    # Record the file as a file:// URL for provenance tracking.
    url = urlparse(datafile, scheme='file').geturl()
    try:
        data = get_pixel(datafile, lonlat) * 0.001  # scale to correct units
    except IndexError:
        raise AncillaryError("No Elevation data")
    metadata = {'data_source': 'Elevation',
                'url': url}
    # ancillary metadata tracking
    md = extract_ancillary_metadata(datafile)
    for key in md:
        metadata[key] = md[key]
    return data, metadata
def iso8601(dt=None, aware=False):
    """Return *dt* as an ISO 8601 datetime string.

    When *dt* is omitted, a "now" timestamp is generated: an aware UTC
    datetime if *aware* is true and the interpreter supports
    ``datetime.timezone`` (Python 3.2+), otherwise a naive ``utcnow()``.

    Output format: YYYY-MM-DDTHH:MM:SS.mmmmmm
    (equivalent to strftime '%Y-%m-%dT%H:%M:%S.%f').
    """
    if dt is not None:
        return dt.isoformat()
    can_be_aware = hasattr(datetime, "timezone")
    if aware and can_be_aware:
        stamp = datetime.datetime.now(datetime.timezone.utc)  # tz-aware
    else:
        stamp = datetime.datetime.utcnow()  # naive
    return stamp.isoformat()
def closeinstr(line, local_ns):
    """Close the specified instrument and remove it from all registries.

    :param line: instrument name (whitespace is stripped); empty -> no-op.
    :param local_ns: namespace dict (e.g. IPython user namespace) from
        which the instrument binding is also removed.

    Side effects: mutates the module-level ``__instruments__`` registry and
    the per-resource opened-address lists; removes ``name`` from *local_ns*.
    """
    name = line.strip()
    if not name:
        return
    logger.info('Closing {0}.'.format(name))
    if name not in __instruments__:
        print('Unknown instrument {0}.'.format(name))
        return
    inst = __instruments__[name]
    # Map each resource kind to its registry of currently-open addresses.
    list_resources = {'VISA':__opened_VISA__,'NIDAQ':__opened_NIDAQ__,'COM':__opened_COM__}
    l = list_resources.get(inst.__resource__,None)
    if l:
        l.remove(inst.__address__)
    try:
        inst.close()
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Close errors are still intentionally ignored
        # (best-effort cleanup).
        pass
    if name in local_ns:
        del local_ns[name]
    del __instruments__[name]
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
    """Filter text using XPath, regex keywords, and tag attributes.

    Keyword arguments:
    infile -- HTML or text content to parse (list)
    xpath -- an XPath expression (str)
    filter_words -- regex keywords (list)
    attributes -- HTML tag attributes (list)

    Return a list of strings of text.
    """
    # infiles collects lxml HtmlElement objects still needing text
    # extraction; text collects already-plain strings.
    infiles = []
    text = []
    if xpath is not None:
        # Narrow the document to the nodes selected by the XPath first.
        infile = parse_html(infile, xpath)
        if isinstance(infile, list):
            # NOTE(review): infile[0] raises IndexError when parse_html
            # returns an empty list — confirm parse_html never does.
            if isinstance(infile[0], lh.HtmlElement):
                infiles = list(infile)
            else:
                # Already plain strings; re-add line terminators.
                text = [line + '\n' for line in infile]
        elif isinstance(infile, lh.HtmlElement):
            infiles = [infile]
        else:
            text = [infile]
    else:
        infiles = [infile]
    if attributes is not None:
        # Normalize user-supplied attribute names, dropping empties.
        attributes = [clean_attr(x) for x in attributes]
        attributes = [x for x in attributes if x]
    else:
        attributes = ['text()']
    if not text:
        # Extract text from every element except scripts and styles.
        text_xpath = '//*[not(self::script) and not(self::style)]'
        for attr in attributes:
            for infile in infiles:
                if isinstance(infile, lh.HtmlElement):
                    new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
                else:
                    # re.split preserves delimiters place in the list
                    new_text = [x for x in re.split('(\n)', infile) if x]
                text += new_text
    if filter_words is not None:
        text = re_filter(text, filter_words)
    # Strip non-printable characters and drop blank lines.
    return [''.join(x for x in line if x in string.printable)
            for line in remove_whitespace(text) if line]
def say_date(tts):
    """
    Have the assistant speak today's date.

    Parameters: tts (TextToSpeech)
    """
    # Build the spoken sentence from today's ISO-formatted date.
    today_text = str(date.today())
    sentence = "We are the " + today_text + "."
    say(ASSISTANT_NAME, sentence, tts)
def _check_blacklist_members(rule_members=None, policy_members=None):
"""Blacklist: Check that policy members ARE NOT in rule members.
If a policy member is found in the rule members, add it to the
violating members.
Args:
rule_members (list): IamPolicyMembers allowed in the rule.
policy_members (list): IamPolicyMembers in the policy.
Return:
list: Policy members found in the blacklist (rule members).
"""
violating_members = [
policy_member
for policy_member in policy_members
for rule_member in rule_members
if rule_member.matches(policy_member)
]
return violating_members | 5,326,207 |
def test_title_contains_both_axes_figure(pt_line_plt):
    """Check title_contains tester for combined axes + figure titles."""
    # title_type="either" means each word may appear in the axes title
    # OR the figure (suptitle) title.
    pt_line_plt.assert_title_contains(
        ["My", "Figure", "Plot", "Title"], title_type="either"
    )
    # Close the figure so matplotlib state does not leak into other tests.
    plt.close()
def list_in_edges(node):
    """Yield (source, node) edges for every producer feeding *node*.

    Handles Tuple inputs via list_in_nodes.
    """
    assert isinstance(node, relay.Call)
    yield from ((producer, node) for producer in list_in_nodes(node))
def create_variable_weather(weather_data, original_epw_file,
                            columns: list = None, variation: tuple = None):
    """
    Create a new weather file adding gaussian noise to the original one.

    Parameters
    ----------
    weather_data : opyplus.WeatherData
        Opyplus object with the weather for the simulation
    original_epw_file : str
        Path to the original EPW file
    columns : list
        List of columns to be affected. Defaults to ['drybulb'].
    variation : tuple
        (mean, std) of the Gaussian noise. If None, nothing is created.

    Return
    ------
    str or None
        Name of the file created in the same location as the original one,
        or None when *variation* is None.
    """
    if variation is None:
        return None
    # BUGFIX: avoid a mutable default argument (was columns=['drybulb']);
    # resolve the default inside the function instead.
    if columns is None:
        columns = ['drybulb']
    # Get dataframe with weather series
    df = weather_data.get_weather_series()
    # Generate random noise: one column of samples per affected variable
    mu, std = variation
    noise = np.random.normal(mu, std, (df.shape[0], len(columns)))
    df[columns] += noise
    # Save new weather data under a name recording the noise parameters
    weather_data.set_weather_series(df)
    filename = original_epw_file.split('.epw')[0]
    filename += '_Random_%s_%s.epw' % (str(mu), str(std))
    weather_data.to_epw(filename)
    return filename
def transform(func, geom):
    """Applies `func` to all coordinates of `geom` and returns a new
    geometry of the same type from the transformed coordinates.

    `func` maps x, y, and optionally z to output xp, yp, zp. The input
    parameters may iterable types like lists or arrays or single values.
    The output shall be of the same type. Scalars in, scalars out.
    Lists in, lists out.

    For example, here is an identity function applicable to both types
    of input.

        def id_func(x, y, z=None):
            return tuple(filter(None, [x, y, z]))
        g2 = transform(id_func, g1)

    Using pyproj >= 2.1, this example will accurately project Shapely geometries:

        import pyproj
        wgs84 = pyproj.CRS('EPSG:4326')
        utm = pyproj.CRS('EPSG:32618')
        project = pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform
        g2 = transform(project, g1)

    Note that the always_xy kwarg is required here as Shapely geometries only support
    X,Y coordinate ordering.

    Lambda expressions such as the one in

        g2 = transform(lambda x, y, z=None: (x+1.0, y+1.0), g1)

    also satisfy the requirements for `func`.
    """
    # Empty geometries have no coordinates to transform.
    if geom.is_empty:
        return geom
    if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'):
        # First we try to apply func to x, y, z sequences. When func is
        # optimized for sequences, this is the fastest, though zipping
        # the results up to go back into the geometry constructors adds
        # extra cost.
        try:
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                # zip(*coords) unzips to per-axis sequences; the outer
                # zip(*...) re-zips func's per-axis output into points.
                return type(geom)(zip(*func(*zip(*geom.coords))))
            elif geom.type == 'Polygon':
                # Transform the exterior shell and each interior ring.
                shell = type(geom.exterior)(
                    zip(*func(*zip(*geom.exterior.coords))))
                holes = list(type(ring)(zip(*func(*zip(*ring.coords))))
                             for ring in geom.interiors)
                return type(geom)(shell, holes)
        # A func that assumes x, y, z are single values will likely raise a
        # TypeError, in which case we'll try again.
        except TypeError:
            # Fallback: call func once per coordinate tuple (scalar mode).
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                return type(geom)([func(*c) for c in geom.coords])
            elif geom.type == 'Polygon':
                shell = type(geom.exterior)(
                    [func(*c) for c in geom.exterior.coords])
                holes = list(type(ring)([func(*c) for c in ring.coords])
                             for ring in geom.interiors)
                return type(geom)(shell, holes)
    elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
        # Recurse over the parts and rebuild the collection.
        return type(geom)([transform(func, part) for part in geom.geoms])
    else:
        raise ValueError('Type %r not recognized' % geom.type)
def get_jit(policy_name, asc_location, resource_group_name):
    """Fetch a JIT network access policy from Azure Security Center.

    Args:
        policy_name: Policy name
        asc_location: Machine location
        resource_group_name: Resource name group

    Returns:
        dict: response body
    """
    # Assemble the ARM-relative request URL for the JIT policy resource.
    cmd_url = (
        f"/resourceGroups/{resource_group_name}"
        f"/providers/Microsoft.Security/locations/{asc_location}"
        f"/jitNetworkAccessPolicies/{policy_name}"
        f"?api-version={JIT_API_VERSION}"
    )
    return http_request("GET", cmd_url)
def rank_by_yield(df):
    """
    Rank phenotypes by yield only.

    Parameters
    ----------
    df : pd.DataFrame
        MAIZSIM yield output dataframe.
        df_sims or df_mature
    """
    # Aggregate mean ear dry matter per (cultivar, site).
    mx_mean = agg_sims(df, ['cvar', 'site'], 'mean', 'dm_ear')
    yield_means = pd.DataFrame(mx_mean)
    # Overall mean yield per phenotype across sites.
    yield_means['mean'] = yield_means.mean(axis=1)
    # Highest-yielding phenotype first.
    ranked = yield_means.sort_values(by=['mean'], axis=0, ascending=False)
    return list(ranked.index)
def k2lc(epic):
    """
    Load the K2 light curve for the given EPIC identifier.

    Returns the (x, y) arrays produced by process_data.
    """
    # The EPIC id is split into a 4-digit directory prefix and the rest.
    prefix, remainder = epic[:4], epic[4:]
    campaign = "01"
    directory = "data/c01/{0}00000/{1}".format(prefix, remainder)
    filename = "{0}/hlsp_everest_k2_llc_{1}-c{2}_{3}".format(
        directory, epic, campaign, "kepler_v1.0_lc.fits")
    x, y = process_data(filename)
    return x, y
def caclulateHospitalBedsAvailability():
    """
    Estimate hospital bed availability for severe COVID-19 cases.

    Assumes a fixed fraction (PERCENTAGE_HOSPITAL_BED_AVAILABILITY,
    nominally 35%) of total hospital beds is available, then subtracts the
    projected severe cases to obtain the bed surplus/deficit.

    Reads: sampleCaseData['totalHospitalBeds'] and the
    'severeCasesByRequestedTime' values already present in responseJSON.
    Writes: responseJSON['impact']['hospitalBedsByRequestedTime'] and
    responseJSON['severeImpact']['hospitalBedsByRequestedTime'].
    Returns nothing; the result is stored in the global responseJSON.
    """
    global sampleCaseData, responseJSON
    # update impact
    HOSPITAL_BEDS_AVAILABLE = sampleCaseData['totalHospitalBeds'] * \
        PERCENTAGE_HOSPITAL_BED_AVAILABILITY
    # math.trunc: bed counts are reported as whole numbers (may be negative
    # when severe cases exceed available beds).
    saveNormalHospitalBedAvailable = math.trunc(
        HOSPITAL_BEDS_AVAILABLE - responseJSON['impact']['severeCasesByRequestedTime'])
    responseJSON['impact']['hospitalBedsByRequestedTime'] = saveNormalHospitalBedAvailable
    # update severeImpact
    saveSevereHospitalBedAvailable = math.trunc(
        HOSPITAL_BEDS_AVAILABLE - responseJSON['severeImpact']['severeCasesByRequestedTime'])
    responseJSON['severeImpact']['hospitalBedsByRequestedTime'] = saveSevereHospitalBedAvailable
def multi_leave_topics(multileaver, user_id, time):
    """Multileaves a number of suggested topics for a user and returns
    the results as database-ready rows, or None when the user has no
    suggested topics."""
    topics = get_user_suggested_topics(user_id)
    if not topics:
        return None
    ranking, credit = multileaver.team_draft_multileave(topics)
    # Build (score, time, user_id, topic, system) rows for DB insertion;
    # earlier ranks get higher scores.
    return [
        (multileaver.ranking_length - rank, time, user_id, topic, system)
        for rank, (topic, system) in enumerate(zip(ranking, credit))
    ]
def generate_age(sex):
    """Generate the age of a person depending on its sex.

    Draws a uniform random value and inverts the empirical CDF
    (menecdf / womenecdf) by picking the nearest CDF entry.

    Parameters
    ----------
    sex : int
        Sex should be either 0 (men) or 1 (women).

    Raises
    ------
    ValueError
        If sex is not 0 or 1.

    Returns
    -------
    age : int
        Generated age of a person.
    """
    # Draw first so the RNG stream matches the original implementation.
    u = np.random.rand(1)
    if sex == 0:
        ecdf = menecdf
    elif sex == 1:
        ecdf = womenecdf
    else:
        raise ValueError("Sex should be either 0 (men) or 1 (women)")
    # Index of the CDF value closest to the uniform draw = sampled age.
    return ecdf.iloc[(ecdf - u).abs().argsort()[:1]].index.tolist()[0]
def _extract_text_Wikilink(node: mwparserfromhell.nodes.wikilink.Wikilink) -> str:
    """
    Extract the display text of a wikilink.

    Wikilinks come in 2 formats, thumbnails and actual links.
    In the case of thumbnails, if possible pull out the nested caption.

    Returns an empty string for captionless file/image links.
    """
    if node.title.startswith('File:') or node.title.startswith('Image:'):
        # Thumbnail/file link: the caption (if any) lives in node.text.
        if node.text is None:  # BUGFIX: identity check, not "== None"
            return ''
        else:
            # Drop thumbnail layout directives such as "thumb|…".
            return ''.join(filter(lambda x: 'thumb|' not in x, map(_extract_text, node.text.nodes)))
    else:
        # Regular link: render the title nodes.
        return ''.join(map(_extract_text, node.title.nodes))
def dict_zip(*dicts):
    """
    Zip a series of dicts that share the same keys: map each key to the
    list of its values across the input dicts, in argument order.

    Returns an empty dict when called with no arguments.

    Raises:
        KeyError: if any dict's key set differs from the first dict's.
    """
    if not dicts:
        # BUGFIX: dicts[0] raised IndexError when called with no arguments.
        return {}
    keyset = set(dicts[0])
    for d in dicts:
        if set(d) != keyset:
            # BUGFIX: message referenced "fold_dicts", a different function.
            raise KeyError(f"Mismatched keysets in dict_zip: {sorted(keyset)}, {sorted(set(d))}")
    return { key: [d[key] for d in dicts] for key in keyset }
def process_command_line():
    """
    Parse command line arguments

    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.

    Return a Namespace representing the argument list.
    """
    parser = argparse.ArgumentParser(
        prog='obflow_6_output',
        description='Run inpatient OB simulation output processor')

    # Required positional arguments
    parser.add_argument("output_path", type=str,
                        help="Destination Path for output summary files")
    parser.add_argument("suffix", type=str,
                        help="String to append to various summary filenames")

    # Optional flags and parameters
    parser.add_argument('--process_logs', dest='process_logs',
                        action='store_true')
    parser.add_argument("--stop_log_path", type=str, default=None,
                        help="Path containing stop logs")
    parser.add_argument("--occ_stats_path", type=str, default=None,
                        help="Path containing occ stats csvs")
    parser.add_argument("--run_time", type=float, default=None,
                        help="Simulation run time")
    parser.add_argument("--warmup_time", type=float, default=None,
                        help="Simulation warmup time")
    parser.add_argument('--include_inputs', dest='include_inputs',
                        action='store_true')
    parser.add_argument("--scenario_inputs_path", type=str, default=None,
                        help="Filename for scenario inputs")
    #parser.add_argument('--include_qng_approx', dest='include_qng_approx', action='store_true')

    # do the parsing
    return parser.parse_args()
def Ustagger_to_mass(U):
    """
    Convert data on the U-staggered grid to mass points.

    U holds the values on the left and right edges of each grid box; the
    mass-point value is the average of the two edges. Useful for converting
    U, XLAT_U, and XLONG_U to mass points. Difference between XLAT_U and
    XLAT is usually small, on order of 10e-5.

        (column_j + column_j+1) / 2 = masspoint_in_column

    Input:
        Ugrid with size (##, ##+1)
    Output:
        U on mass points with size (##, ##)
    """
    U = np.asarray(U)
    # Average every pair of adjacent columns in a single vectorized step.
    # Replaces the original per-column loop with np.column_stack, which
    # rebuilt the whole output array on each iteration (O(n^2) copies).
    return (U[:, :-1] + U[:, 1:]) / 2.
def _linear(args,
            output_size,
            bias,
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=initializer(),
            scope=None,
            reuse=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Args:
        args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        bias: boolean, whether to add a bias term or not.
        bias_initializer: starting value to initialize the bias
            (default is all zeros).
        kernel_initializer: starting value to initialize the weight.
        scope: variable scope in which the variables are created.
        reuse: whether to reuse existing variables in the scope.

    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
        ValueError: If some of the arguments has unspecified or wrong shape.
    """
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("'args' must be specified")
    if not nest.is_sequence(args):
        args = [args]
    # Calculate the total size of arguments on dimension 1.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
        if shape.ndims != 2:
            raise ValueError('linear is expecting 2D arguments: %s' % shapes)
        if shape[1].value is None:
            raise ValueError('linear expects shape[1] to be provided for shape %s, '
                             'but saw %s' % (shape, shape[1]))
        else:
            total_arg_size += shape[1].value
    dtype = [a.dtype for a in args][0]
    # Now to the computation.
    with tf.variable_scope(scope, reuse=reuse) as outer_scope:
        weights = tf.get_variable(
            name='linear_kernel',
            shape=[total_arg_size, output_size],
            dtype=dtype,
            regularizer=regularizer,
            # BUGFIX: the kernel_initializer parameter was silently ignored;
            # the module-level `initializer` function object was passed instead.
            initializer=kernel_initializer)
        if len(args) == 1:
            res = math_ops.matmul(args[0], weights)
        else:
            # Concatenate the inputs on dim 1 so one matmul covers all W[i].
            res = math_ops.matmul(array_ops.concat(args, axis=1), weights)
        if not bias:
            return res
        with tf.variable_scope(outer_scope) as inner_scope:
            inner_scope.set_partitioner(None)
            biases = tf.get_variable(
                name='linear_bias',
                shape=[output_size],
                dtype=dtype,
                regularizer=regularizer,
                # BUGFIX: bias_initializer was also ignored (same issue).
                initializer=bias_initializer)
        return nn_ops.bias_add(res, biases)
def build_fpn_mask_graph(rois, feature_maps, image_size, num_classes,
                         pool_size, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P1, P2, P3, P4, P5]. Each has a different resolution.
    image_size: Image size, passed through to ROIAlignLayer.
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = ROIAlignLayer([pool_size, pool_size],
                      name="roi_align_mask")((rois, image_size, feature_maps))
    # x [1, num_rois, 14, 14, 64]
    # Conv layers: four conv+BN+ReLU stages applied per ROI (TimeDistributed).
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv3")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn3')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv4")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn4')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # Upsample 2x, then project to one sigmoid mask per class.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name='mask')(x)
    # NOTE: removed a stray debug print of the output shape that polluted
    # stdout on every graph build.
    return x
def calculate_perf_counter_counter(previous, current, property_name):
    """
    PERF_COUNTER_COUNTER
    https://technet.microsoft.com/en-us/library/cc740048(v=ws.10).aspx

    Rate counter: (N1 - N0) / ((D1 - D0) / F) — the change in the raw
    count divided by the elapsed time in seconds (timestamps are in
    100 ns units, F converts them to seconds). Returns None when either
    sample's counter value is missing.
    """
    count_start = previous[property_name]
    count_end = current[property_name]
    time_start = previous["Timestamp_Sys100NS"]
    time_end = current["Timestamp_Sys100NS"]
    frequency = current["Frequency_Sys100NS"]
    if count_start is None or count_end is None:
        return
    elapsed_seconds = (time_end - time_start) / frequency
    return (count_end - count_start) / elapsed_seconds
def event_edit(request, id):
    """Edit form for a particular event.

    GET renders the edit form plus a large context of related objects
    (tweets, approvals, discussion, Vid.ly submissions, hit stats).
    POST validates and saves the event, synchronizes Vid.ly token
    protection when the privacy setting changed, then redirects to the
    events list.
    """
    event = get_object_or_404(Event, id=id)
    # can_edit_event returns an HttpResponse (e.g. redirect/403) when the
    # user may not edit; pass it straight through in that case.
    result = can_edit_event(event, request.user)
    if isinstance(result, http.HttpResponse):
        return result
    # Pick the form class according to the user's privilege level.
    if request.user.has_perm('main.change_event_others'):
        form_class = forms.EventEditForm
    elif request.user.has_perm('main.add_event_scheduled'):
        form_class = forms.EventExperiencedRequestForm
    else:
        form_class = forms.EventRequestForm
    curated_groups = (
        CuratedGroup.objects.filter(event=event).order_by('created')
    )
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=event)
        if form.is_valid():
            event = form.save(commit=False)
            _event_process(request, form, event)
            if not event.location:
                # Events without a location are stored with UTC times.
                event.start_time = event.start_time.replace(
                    tzinfo=timezone.utc
                )
            event.save()
            form.save_m2m()
            edit_url = reverse('manage:event_edit', args=(event.pk,))
            if is_privacy_vidly_mismatch(event):
                # We'll need to update the status of token protection
                # on Vid.ly for this event.
                try:
                    vidly.update_media_protection(
                        event.template_environment['tag'],
                        event.privacy != Event.PRIVACY_PUBLIC,
                    )
                    # Mirror the new protection flag on the most recent
                    # submission only.
                    submissions = VidlySubmission.objects.filter(
                        event=event,
                        tag=event.template_environment['tag'],
                    ).order_by('-submission_time')
                    for submission in submissions[:1]:
                        submission.token_protection = (
                            event.privacy != Event.PRIVACY_PUBLIC
                        )
                        submission.save()
                        break
                except vidly.VidlyUpdateError as x:
                    # Non-fatal: the event is saved, only Vid.ly is stale.
                    messages.error(
                        request,
                        'Video protect status could not be updated on '
                        'Vid.ly\n<code>%s</code>' % x
                    )
            messages.info(
                request,
                'Event "<a href=\"%s\">%s</a>" saved. [Edit again](%s)' % (
                    reverse('main:event', args=(event.slug,)),
                    event.title,
                    edit_url
                )
            )
            return redirect('manage:events')
    else:
        # GET: pre-populate curated group choices from existing rows.
        initial = {}
        initial['curated_groups'] = curated_groups.values_list(
            'name',
            flat=True
        )
        curated_groups_choices = [
            (x, x) for x in initial['curated_groups']
        ]
        form = form_class(
            instance=event,
            initial=initial,
            curated_groups_choices=curated_groups_choices,
        )
    context = {
        'form': form,
        'event': event,
        'suggested_event': None,
        'suggested_event_comments': None,
        'tweets': EventTweet.objects.filter(event=event).order_by('id'),
    }
    # If this event originated from a suggestion, surface it and its comments.
    try:
        suggested_event = SuggestedEvent.objects.get(accepted=event)
        context['suggested_event'] = suggested_event
        context['suggested_event_comments'] = (
            SuggestedEventComment.objects
            .filter(suggested_event=suggested_event)
            .select_related('user')
            .order_by('created')
        )
    except SuggestedEvent.DoesNotExist:
        pass
    context['is_vidly_event'] = False
    if event.template and 'Vid.ly' in event.template.name:
        context['is_vidly_event'] = True
        context['vidly_submissions'] = (
            VidlySubmission.objects
            .filter(event=event)
            .order_by('-submission_time')
        )
    # Is it stuck and won't auto-archive?
    # Heuristic: Vid.ly says "Finished" but no submission in the last
    # 15 minutes and the event is still pending.
    context['stuck_pending'] = False
    now = timezone.now()
    time_ago = now - datetime.timedelta(minutes=15)
    if (
        event.status == Event.STATUS_PENDING and
        event.template and
        'Vid.ly' in event.template.name and
        event.template_environment and  # can be None
        event.template_environment.get('tag') and
        not VidlySubmission.objects.filter(
            event=event,
            submission_time__gte=time_ago
        )
    ):
        tag = event.template_environment['tag']
        results = vidly.query(tag)
        status = results.get(tag, {}).get('Status')
        if status == 'Finished':
            context['stuck_pending'] = True
    try:
        discussion = Discussion.objects.get(event=event)
        context['discussion'] = discussion
        context['comments_count'] = Comment.objects.filter(event=event).count()
    except Discussion.DoesNotExist:
        context['discussion'] = None
    context['approvals'] = (
        Approval.objects
        .filter(event=event)
        .select_related('group')
    )
    context['chapters_count'] = Chapter.objects.filter(event=event).count()
    context['closed_captions'] = ClosedCaptions.objects.filter(event=event)
    try:
        context['assignment'] = EventAssignment.objects.get(event=event)
    except EventAssignment.DoesNotExist:
        context['assignment'] = None
    try:
        context['survey'] = Survey.objects.get(events=event)
    except Survey.DoesNotExist:
        context['survey'] = None
    # Aggregate archived and live hit counters across all stats rows.
    context['archived_hits'] = 0
    context['live_hits'] = 0
    for each in EventHitStats.objects.filter(event=event).values('total_hits'):
        context['archived_hits'] += each['total_hits']
    for each in EventLiveHits.objects.filter(event=event).values('total_hits'):
        context['live_hits'] += each['total_hits']
    context['count_event_uploads'] = Upload.objects.filter(event=event).count()
    context['vidly_tag_domains'] = None
    if (
        event.template and
        'Vid.ly' in event.template.name and
        event.template_environment and
        event.template_environment.get('tag')
    ):
        context['vidly_tag_domains'] = VidlyTagDomain.objects.filter(
            tag=event.template_environment['tag']
        )
    return render(request, 'manage/event_edit.html', context)
def migrator(from_: str, to_: str) -> Callable[[MigratorF], MigratorF]:
    """Decorate function as migrating settings from v `from_` to v `to_`.

    A migrator should mutate a `NapariSettings` model from schema version
    `from_` to schema version `to_` (in place).

    Parameters
    ----------
    from_ : str
        NapariSettings.schema_version version that this migrator expects as
        input
    to_ : str
        NapariSettings.schema_version version after this migrator has been
        executed.

    Returns
    -------
    Callable[ [MigratorF], MigratorF ]
        Decorator that registers the function in the migrator table and
        returns it unchanged.
    """

    def decorator(migrate_func: MigratorF) -> MigratorF:
        # Versions are parsed lazily, at decoration time.
        source_version = Version.parse(from_)
        target_version = Version.parse(to_)
        assert target_version >= source_version, 'Migrator must increase the version.'
        _MIGRATORS.append(Migrator(source_version, target_version, migrate_func))
        return migrate_func

    return decorator
def rawChipByLocation_query():
    """
    Get chips images by parcel id.
    Generates a series of extracted Sentinel-2 LEVEL2A segments of 128x128 (10m
    resolution bands) or 64x64 (20 m) pixels as list of full resolution GeoTIFFs
    ---
    tags:
      - rawChipByLocation
    responses:
      200:
        description: A JSON dictionary with date labels and
            relative URLs to cached GeoTIFFs.
    """
    # Disable static-file caching so regenerated chips are served fresh.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    # Start by getting the request IP address
    # (prefer the proxy-forwarded address when present).
    if request.environ.get('HTTP_X_FORWARDED_FOR') is None:
        rip = request.environ['REMOTE_ADDR']
    else:
        rip = request.environ['HTTP_X_FORWARDED_FOR']
    # Query-string parameters selecting the location, period and band.
    lon = request.args.get('lon')
    lat = request.args.get('lat')
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')
    band = request.args.get('band')
    # Optional parameters with defaults.
    if 'plevel' in request.args.keys():
        plevel = request.args.get('plevel')
    else:
        plevel = 'LEVEL2A'
    if 'chipsize' in request.args.keys():
        chipsize = request.args.get('chipsize')
    else:
        chipsize = '1280'
    # Per-request cache directory key; dots replaced so the id is path-safe.
    unique_id = f"dump/{rip}E{lon}N{lat}_{plevel}_{chipsize}_{band}".replace(
        '.', '_')
    data = image_requests.getRawChipByLocation(
        lon, lat, start_date, end_date, unique_id, band, chipsize, plevel)
    if data:
        # Serve the JSON manifest produced alongside the GeoTIFF chips.
        return send_from_directory(f"files/{unique_id}", 'dump.json')
    else:
        return json.dumps({})
def load_adni_longitudinal_av45_pet():
    """Returns paths of longitudinal ADNI AV45-PET scans plus phenotype data.

    Returns
    -------
    Bunch
        Fields: pet (file paths), dx_group, images (image IDs), ages,
        subjects, exam_codes, exam_dates, exam_codes2.
    """
    # get file paths and description
    (subjects,
     subject_paths,
     description) = _get_subjects_and_description(base_dir='ADNI_av45_pet',
                                                  prefix='I[0-9]*')
    # get pet files
    # BUG FIX: ``map(...)`` returns an iterator on Python 3 and has no
    # ``.tolist()`` attribute; build the list with a comprehension instead.
    # (The original also tracked per-subject offsets in an ``idx`` list that
    # was never used; it has been dropped.)
    pet_files = [_glob_subject_img(x, suffix='pet/wr*.nii', first_img=False)
                 for x in subject_paths]
    pet_files_all = []
    for pet_file in pet_files:
        pet_files_all.extend(pet_file)
    pet_files_all = np.array(pet_files_all)

    # Image IDs are encoded in the file name: last '_'-separated token,
    # extension stripped.
    images = np.array([os.path.split(pet_file)[-1].split('_')[-1][:-4]
                       for pet_file in pet_files_all])

    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    dx_group_all = np.array(df['DX_Group'])
    subjects_all = np.array(df['Subject_ID'])
    ages = np.array(df['Age'])
    exams = np.array(df['Study_Date'])
    exams = [datetime.strptime(e, '%m/%d/%Y').date() for e in exams]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    # NOTE(review): ``cachedir`` is the legacy joblib keyword (renamed to
    # ``location`` in joblib >= 0.12) -- confirm the pinned joblib version.
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridspet(subjects_all):
        # Map each subject PTID to its roster RID.
        return [_ptid_to_rid(s, roster) for s in subjects_all]
    rids = memory.cache(_get_ridspet)(subjects_all)

    def _get_examdatespet(rids):
        # Exam date (diagnosis code form) per subject, aligned with ``exams``.
        return [_get_dx(rids[i], dx, exams[i], viscode=None, return_code=True)
                for i in range(len(rids))]
    exam_dates = np.array(memory.cache(_get_examdatespet)(rids))

    def _get_viscodespet(rids):
        # Pair of visit codes per subject, looked up by exam date.
        return [_get_vcodes(rids[i], str(exam_dates[i]), dx)
                for i in range(len(rids))]
    viscodes = np.array(memory.cache(_get_viscodespet)(rids))
    if len(viscodes) > 0:
        vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]
    else:
        # No scans matched: nothing to report for visit codes.
        vcodes, vcodes2 = None, None

    return Bunch(pet=pet_files_all,
                 dx_group=dx_group_all,
                 images=images, ages=ages, subjects=subjects_all,
                 exam_codes=vcodes, exam_dates=exam_dates, exam_codes2=vcodes2)
def build_settings(
    tmp_path: Path,
    template: str,
    *,
    oidc_clients: Optional[List[OIDCClient]] = None,
    **settings: str,
) -> Path:
    """Generate a test Gafaelfawr settings file with secrets.

    Parameters
    ----------
    tmp_path : `pathlib.Path`
        The root of the temporary area.
    template : `str`
        Settings template to use.
    oidc_clients : List[`gafaelfawr.config.OIDCClient`] or `None`
        Configuration information for clients of the OpenID Connect server.
    **settings : `str`
        Any additional settings to add to the settings file.

    Returns
    -------
    settings_path : `pathlib.Path`
        The path of the settings file.
    """
    # Materialize every secret as a file and remember its path by name.
    secret_values = {
        "bootstrap": str(Token()).encode(),
        "session": Fernet.generate_key(),
        "issuer": _ISSUER_KEY.private_key_as_pem(),
        "influxdb": b"influx-secret",
        "github": b"github-secret",
        "oidc": b"oidc-secret",
    }
    secret_files = {
        name: store_secret(tmp_path, name, value)
        for name, value in secret_values.items()
    }

    oidc_path = tmp_path / "oidc.json"
    if oidc_clients:
        clients_data = [
            {"id": client.client_id, "secret": client.client_secret}
            for client in oidc_clients
        ]
        oidc_path.write_text(json.dumps(clients_data))

    settings_path = _build_settings_file(
        tmp_path,
        template,
        database_url=TEST_DATABASE_URL,
        bootstrap_token_file=secret_files["bootstrap"],
        session_secret_file=secret_files["session"],
        issuer_key_file=secret_files["issuer"],
        github_secret_file=secret_files["github"],
        oidc_secret_file=secret_files["oidc"],
        influxdb_secret_file=secret_files["influxdb"],
        oidc_server_secrets_file=oidc_path if oidc_clients else "",
    )

    # Append any extra key/value settings verbatim.
    if settings:
        with settings_path.open("a") as f:
            for key, value in settings.items():
                f.write(f"{key}: {value}\n")

    return settings_path
def test_add_trcvr_init_error():
    """Test error is raised if trcvr is the only input to init."""
    with pytest.raises(ValueError):
        DelaySpectrum(trcvr=9 * units.K)
def load_mnist(path, data_type='train'):
    """Load MNIST images and labels found under *path*.

    Expects the IDX files ``<data_type>-labels.idx1-ubyte`` and
    ``<data_type>-images.idx3-ubyte`` inside ``path``.

    :param path: directory containing the IDX files
    :param data_type: file-name prefix, e.g. ``'train'`` or ``'t10k'``
    :return: ``(images, labels)`` with images of shape (n, 784) and labels
        of shape (n, 1), both uint8
    """
    labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % data_type)
    images_path = os.path.join(path, '%s-images.idx3-ubyte' % data_type)

    with open(labels_path, 'rb') as lbpath:
        # Skip the 8-byte IDX header (magic number and item count).
        struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8).reshape(-1, 1)

    with open(images_path, 'rb') as imgpath:
        # Skip the 16-byte IDX header (magic, count, rows, cols).
        struct.unpack('>IIII', imgpath.read(16))
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)

    return images, labels
def view(file_name: str, driver: str = "eida50", open_tab: bool = True,
         palette: str = "Viridis",
         port: int = 1234):
    """
    FOREST Lite viewer

    A simplified interface to the FOREST Lite server tool
    """
    port = scan_port(port)
    if open_tab:
        # Launch the browser from a background thread so the server can
        # start in the foreground.
        browser_thread(f"http://localhost:{port}").start()
    settings_callback = get_settings(file_name, driver, palette)
    _main.app.dependency_overrides[config.get_settings] = settings_callback
    uvicorn.run(_main.app, port=port)
def get_actions_matching_arn(arn):
    """
    Given a user-supplied ARN, get a list of all actions that correspond to that ARN.

    Arguments:
        arn: A user-supplied arn
    Returns:
        List: A sorted list of all actions that can match it.
    """
    matched = set()
    for raw_arn in get_matching_raw_arns(arn):
        resource_type_name = get_resource_type_name_with_raw_arn(raw_arn)
        service_prefix = get_service_from_arn(raw_arn)
        service_prefix_data = get_service_prefix_data(service_prefix)
        for action_data in service_prefix_data["privileges"].values():
            for resource_data in action_data["resource_types"].values():
                # Strip the trailing '*' used to mark required resource types.
                resource_type = resource_data["resource_type"].strip("*")
                if resource_type.lower() == resource_type_name.lower():
                    matched.add(f"{service_prefix}:{action_data['privilege']}")
    # Sorted unique actions, same as the original dedupe-then-sort.
    return sorted(matched)
def path_splitter(path):
    """
    Split a path into its constituent parts.

    Might be better written as a recursive function.

    :param path: The path to split.
    :return: A list of the path's constituent parts.
    """
    res = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # We're done, this is an absolute path: head is the root.
            res.insert(0, head)
            break
        elif tail == path:
            # We're done, this is a relative path.
            # BUG FIX: the original inserted the (empty) head here, which
            # dropped the first path component; insert the component itself.
            res.insert(0, tail)
            break
        else:
            path = head
            res.insert(0, tail)
    return res
def _run_defp(mode,code,time_steps,error_model,decoder,error_probability,perm_rates,code_name,layout,measurement_error_probability,
              max_runs=None,max_failures=None,random_seed=None):
    """Implements run and run_ftp functions.

    Repeatedly samples error/decode cycles via ``_run_once_defp`` and
    accumulates success/failure counts, logical-failure-rate estimates and
    their standard errors into a ``runs_data`` dict, which is returned.

    Stops when ``max_runs`` runs have completed or ``max_failures`` failures
    have occurred (defaults to a single run when neither is given).
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'
    # derived defaults
    if max_runs is None and max_failures is None:
        max_runs = 1

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: code={},time_steps={},error_model={},decoder={},error_probability={},'
                     'measurement_error_probability={} max_runs={},max_failures={},random_seed={}.'
                     .format(code,time_steps,error_model,decoder,error_probability,
                             measurement_error_probability,max_runs,max_failures,random_seed))

    # NOTE(review): wall_time_start is recorded but 'wall_time' is never
    # updated below -- confirm whether timing should be filled in.
    wall_time_start = time.perf_counter()

    runs_data = {
        'code': code.label,
        'n_k_d': code.n_k_d,
        'time_steps': time_steps,
        'error_model': error_model.label,
        'decoder': decoder.label,
        'error_probability': error_probability,
        'measurement_error_probability': measurement_error_probability,
        'n_run': 0,
        'n_success': 0,
        'n_fail': 0,
        'n_logical_commutations': None,
        'custom_totals': None,
        'error_weight_total': 0,
        'error_weight_pvar': 0.0,
        'logical_failure_rate_samples': 0.0,
        'logical_failure_rate_samples_errorbar': 0.0,
        'coset_ps': 0.0,
        # FIX: the original literal listed 'logical_failure_rate_errorbar'
        # twice; the duplicate has been removed (a dict literal keeps only
        # the last occurrence, so behaviour and key order are unchanged).
        'logical_failure_rate_errorbar': 0.0,
        'logical_failure_rate': 0.0,
        'physical_error_rate': 0.0,
        'wall_time': 0.0,
    }

    # if random_seed is None,unpredictable entropy is pulled from the OS,which we log for reproducibility
    seed_sequence = np.random.SeedSequence(random_seed)
    logger.info('run: np.random.SeedSequence.entropy={}'.format(seed_sequence.entropy))
    rng = np.random.default_rng(seed_sequence)

    array_sum_keys = ('n_logical_commutations','custom_totals',)  # list of array sum keys
    array_val_keys = ('logical_commutations','custom_values',)  # list of array value keys
    error_weights = []  # list of error_weight from current run
    success_list = np.zeros(max_runs)
    max_coset_p_list = np.zeros(max_runs)
    coset_ps_list = np.zeros((max_runs,4))

    # Precompute the code deformation (permutation matrix/vector) once.
    perm_mat,perm_vec = deform_matsvecs(code,decoder,error_model,perm_rates,code_name,layout)

    while ((max_runs is None or runs_data['n_run'] < max_runs)
           and (max_failures is None or runs_data['n_fail'] < max_failures)):
        # run simulation
        data = _run_once_defp(mode,code,time_steps,error_model,decoder,error_probability,perm_rates,perm_mat,perm_vec,code_name,layout,
                              measurement_error_probability,rng)
        # increment run counts
        success_list[runs_data['n_run']] = data['success']
        max_coset_p_list[runs_data['n_run']] = data['max_coset_p']
        coset_ps_list[runs_data['n_run']] = data['coset_ps']
        runs_data['n_run'] += 1
        if data['success']:
            runs_data['n_success'] += 1
        else:
            runs_data['n_fail'] += 1
        # sum arrays
        for array_sum_key,array_val_key in zip(array_sum_keys,array_val_keys):
            array_sum = runs_data[array_sum_key]  # extract sum
            array_val = data[array_val_key]  # extract val
            if runs_data['n_run'] == 1 and array_val is not None:  # first run,so initialize sum,if val not None
                array_sum = np.zeros_like(array_val)
            if array_sum is None and array_val is None:  # both None
                array_sum = None
            elif (array_sum is None or array_val is None) or (array_sum.shape != array_val.shape):  # mismatch
                raise QecsimError(
                    'Mismatch between {} values to sum: {},{}'.format(array_val_key,array_sum,array_val))
            else:  # match,so sum
                array_sum = array_sum + array_val
            runs_data[array_sum_key] = array_sum  # update runs_data
        # append error weight
        error_weights.append(data['error_weight'])

    ##error bar in logical failure rate (standard error of the mean)
    runs_data['logical_failure_rate'] = 1 - max_coset_p_list.mean()
    runs_data['logical_failure_rate_errorbar'] = max_coset_p_list.std()/np.sqrt(max_runs)
    runs_data['logical_failure_rate_samples'] = 1 - success_list.mean()
    runs_data['logical_failure_rate_samples_errorbar'] = success_list.std()/np.sqrt(max_runs)
    runs_data['coset_ps_list'] = coset_ps_list
    return runs_data
def api_retry(func, task_id):
    """Retry an API call a fixed number of times.

    :param func: API function to call; must return ``(success, result)``.
    :param task_id: task id passed through to ``func``.
    :return: the final ``(success, result)`` pair.
    """
    succeeded, status_result = False, ""
    for _ in range(TRANSPORT_RETRY_TIMES):
        # Wait between attempts (also before the first one, as before).
        time.sleep(TRANSPORT_RETRY_INTERVAL)
        succeeded, status_result = func(task_id)
        if succeeded:
            break
    return succeeded, status_result
def _step_5(d, n_max, mp_max, Hwedge, Hv):
    """Recursively compute H^{m'−1, m}_{n}(β) for m'=−1,...,−n+1, m=−m',...,n using relation (50)
    resolved with respect to H^{m'−1, m}_{n}:
      d^{m'−1}_{n} H^{m'−1, m}_{n} = d^{m'}_{n} H^{m'+1, m}_{n}
                                     + d^{m−1}_{n} H^{m', m−1}_{n}
                                     − d^{m}_{n} H^{m', m+1}_{n}
    (where the last term drops out for m=n).
    NOTE: Although arxiv:1403.7698 specifies the loop over mp to start at -1, I
    find it necessary to start at 0, or there will be missing information. This
    also requires setting the (m',m)=(0,-1) components before beginning this loop.
    """
    if n_max > 0 and mp_max > 0:
        for n in range(0, n_max+1):
            for mp in range(0, -min(n, mp_max), -1):
                # m = -m', ..., n-1
                # The commented-out forms are the "natural" wedge indices from
                # the paper; they are shifted by one here so that each index
                # points at an already-computed neighbouring element.
                # i1 = WignerHindex(n, mp-1, -mp, mp_max)
                i1 = WignerHindex(n, mp-1, -mp+1, mp_max) - 1
                # i2 = WignerHindex(n, mp+1, -mp, mp_max)
                i2 = WignerHindex(n, mp+1, -mp+1, mp_max) - 1
                # i3 = WignerHindex(n, mp, -mp-1, mp_max)
                i3 = WignerHindex(n, mp, -mp, mp_max) - 1
                i4 = WignerHindex(n, mp, -mp+1, mp_max)
                # Indices into the d coefficient array for the four d factors
                # of relation (50).
                i5 = nm_index(n, mp-1)
                i6 = nm_index(n, mp)
                i7 = nm_index(n, -mp-1)
                i8 = nm_index(n, -mp)
                # Divide once; reused for every m in the inner loops.
                inverse_d5 = 1.0 / d[i5]
                d6 = d[i6]
                # First element (m = -m') is special: for mp == 0 the needed
                # neighbours live in the auxiliary Hv storage, not in Hwedge.
                for i in [0]:
                    d7 = d[i+i7]
                    d8 = d[i+i8]
                    if mp == 0:
                        Hv[i+nm_index(n, mp-1)] = inverse_d5 * (
                            d6 * Hv[i+nm_index(n, mp+1)]
                            + d7 * Hv[i+nm_index(n, mp)]
                            - d8 * Hwedge[i+i4]
                        )
                    else:
                        Hv[i+nm_index(n, mp-1)] = inverse_d5 * (
                            d6 * Hwedge[i+i2]
                            + d7 * Hv[i+nm_index(n, mp)]
                            - d8 * Hwedge[i+i4]
                        )
                # Interior elements m = -m'+1, ..., n-1: three-term recurrence
                # entirely within Hwedge.
                for i in range(1, n+mp):
                    d7 = d[i+i7]
                    d8 = d[i+i8]
                    Hwedge[i+i1] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        + d7 * Hwedge[i+i3]
                        - d8 * Hwedge[i+i4]
                    )
                # m = n  (last term of relation (50) drops out here)
                i = n+mp
                Hwedge[i+i1] = inverse_d5 * (
                    d6 * Hwedge[i+i2]
                    + d[i+i7] * Hwedge[i+i3]
                ) | 5,326,237 |
def test_bound_callables():
    """Test that we can use a callable as a bound value."""

    @magicgui(x={"bind": lambda x: 10})
    def func(x: int = 5):
        return x

    # While bound, the callable's value (10) overrides the default.
    assert func() == 10
    func.x.unbind()
    # After unbinding, the original default is used again.
    assert func() == 5
def delete_role(user_id: str, role_id: str):
    """ Removes a role from a user """
    # NOTE(review): these prints look like debug leftovers -- consider
    # replacing with proper logging (or removing) before production use.
    # Also note that no actual deletion appears to happen here; confirm
    # whether the removal logic is implemented elsewhere.
    print(user_id)
    print(role_id)
    # 204 No Content: success with an empty response body.
    return jsonify(), HTTPStatus.NO_CONTENT | 5,326,239 |
def remove_degenerate_seqs(
    alignment_obj: BaseAlignment, identity_limit: int, show_id_array: bool = False
) -> BaseAlignment:
    """
    Filter high similarity sequences from a list of Seq objects.

    Computes the pairwise percentage-identity matrix in parallel and keeps
    only sequences whose highest identity with any *other* sequence is below
    ``identity_limit``.

    :param alignment_obj: alignment whose ``seqs`` are filtered
    :param identity_limit: maximum allowed pairwise identity (percent)
    :param show_id_array: if True, print the identity matrix
    Returns: BaseAlignment
    """
    import multiprocessing as mp
    from functools import partial

    from jinfo.utils.percentage_identity import percentage_identity

    seq_list = alignment_obj.seqs
    identity_array = []
    filtered_seqs = []

    # FIX: use the pool as a context manager so worker processes are always
    # terminated (the original never called close()/join(), leaking the pool).
    with mp.Pool(mp.cpu_count()) as pool:
        for seq_obj in seq_list:
            id_partial = partial(percentage_identity, seq2=seq_obj)
            identity_array.append(pool.map(id_partial, seq_list))

    if show_id_array:
        print("Calculated alignment identity array:")
        for i, row in enumerate(identity_array):
            print(f"{seq_list[i].label}\t{row}")

    for i, row in enumerate(identity_array):
        row.remove(100)  # remove seq 100% match with itself
        if max(row) < float(identity_limit):
            filtered_seqs.append(seq_list[i])

    return BaseAlignment(filtered_seqs)
def api_add_resource():
    """
    The resource has multiple URLs and you can pass multiple URLs to the
    add_resource() method on the Api object. Each one will be routed to
    your Resource
    """
    for url_pattern in URLPATTERNS:
        try:
            resource = get_resource(url_pattern.target)
            endpoint = get_endpoint(url_pattern.url)
            API.add_resource(resource, url_pattern.url, endpoint=endpoint)
        except StopIteration:
            # get_resource/get_endpoint exhausted without a match.
            LOGGER.error('url resource not found: %s', url_pattern.url)
def get_domain(url):
    """ Get the domain from a URL.

    Parameters
    ----------
    url : string
        HTTP URL

    Returns
    -------
    domain : string
        domain of the URL, e.g. ``"http://example.com"``
    """
    parsed = urlparse(url)
    scheme = parsed.scheme if parsed.scheme else "http"
    netloc = parsed.netloc
    if not netloc and not parsed.scheme:
        # BUG FIX: for schemeless input like "example.com/path", urlparse
        # places the host in .path and the original returned just "http://".
        # Re-parse as a network-path reference ("//host/...") to get the host.
        netloc = urlparse("//" + url).netloc
    return scheme + "://" + netloc
def push_activations(activations, from_layer, to_layer):
    """Push activations from one model to another using prerecorded correlations"""
    # Whiten the activations with the source layer's inverse covariance...
    decorrelated = np.dot(layer_inverse_covariance(from_layer), activations.T).T
    # ...then re-colour them with the cross-layer covariance.
    return np.dot(decorrelated, layer_covariance(from_layer, to_layer))
def p_path_primary_0(p):
    """
    path_primary : iri
    """
    # NOTE: the docstring above is the grammar production consumed by the
    # parser generator (yacc/PLY style) -- it must not be edited as prose.
    # A bare IRI is passed through unchanged as the path_primary value.
    p[0] = p[1] | 5,326,244 |
def pid_calibration_allfills(mydir='/Users/kocolosk/data/run5/hist-by-fill'):
    """generates a PDF of triple-Gaussian fits for all fills, plus a histogram of pion means"""
    # NOTE(review): this is Python 2 code (print statements, raw_input) built
    # on PyROOT; it will not run under Python 3 without porting.
    allFiles = os.listdir(mydir)
    # One bin per input file; filled with the fitted pion-Gaussian mean.
    hfill = ROOT.TH1D('hfill','mean of pion Gaussian by RHIC Fill', len(allFiles), 0.5, len(allFiles)+0.5)
    ps = ROOT.TPostScript('pid.ps')
    c = ROOT.TCanvas('c','',100,100,600,800)
    pad = 1
    ## some cumulative stats
    nEntries = 0
    nTotalPions = 0
    nOldPions = 0
    nNewPions = 0
    nOldBg = 0
    nNewBg = 0
    nOldCounts = 0
    nNewCounts = 0
    myrecords = []  ## fname, pi mean, pi sigma
    counter = 0
    for fname in allFiles:
        if not fname.endswith('.root'): continue
        # Start a fresh 3x5 canvas page every 15 plots.
        if counter % 15 == 0:
            c.Update()
            ps.NewPage()
            c.Clear()
            c.Divide(3,5)
            pad = 1
        counter += 1
        c.cd(pad)
        print fname
        tfile = ROOT.TFile(os.path.join(mydir, fname))
        mgr = analysis.HistogramManager(tfile, ['nSigmaPion'])
        h = mgr.anyspin['alltrigs'].tracks_sum['nSigmaPion']
        # Fill number is encoded in the file name at [-14:-10].
        h.SetTitle('n#sigma(#pi) calibration for F%s' % (fname[-14:-10],))
        # Triple-Gaussian fit: combined fit plus pion/proton-kaon/electron parts.
        fit, pifit, pkfit, elefit = pid_calibration(h)
        mean = fit.GetParameter(1)
        error = fit.GetParError(1)
        sigma = fit.GetParameter(2)
        hfill.SetBinContent(counter+1, mean)
        hfill.SetBinError(counter+1, error)
        myrecords.append((fname, mean, sigma))
        pad += 1
        ## stats
        # "New" window is asymmetric around the fitted pion peak
        # (-1 sigma, +2 sigma); "old" window is the fixed cut (-1.0, 2.0).
        lowBound = pifit.GetParameter(1) - 1.0 * pifit.GetParameter(2)
        highBound = pifit.GetParameter(1) + 2.0 * pifit.GetParameter(2)
        binWidth = h.GetBinWidth(1)
        nEntries += h.GetEntries()
        # Integrals are divided by the bin width to convert area to counts.
        nTotalPions += pifit.Integral(-6.0, 6.0) / binWidth
        nOldPions += pifit.Integral(-1.0, 2.0) / binWidth
        nNewPions += pifit.Integral(lowBound, highBound) / binWidth
        nOldBg += (pkfit.Integral(-1.0, 2.0) + elefit.Integral(-1.0, 2.0)) / binWidth
        nNewBg += (pkfit.Integral(lowBound, highBound) + elefit.Integral(lowBound, highBound)) / binWidth
        nOldCounts += fit.Integral(-1.0, 2.0) / binWidth
        nNewCounts += fit.Integral(lowBound, highBound) / binWidth
    ps.Close()
    c = ROOT.TCanvas()
    hfill.GetYaxis().SetRangeUser(-0.5, 0.8)
    hfill.SetXTitle('fill index')
    hfill.Draw('e')
    for r in myrecords:
        print '%d : (% 1.6f, %1.6f),' % (int(r[0][13:17]), r[1], r[2])
    print 'Old Total Efficiency = %.4f' % (nOldPions/nTotalPions,)
    print 'New Total Efficiency = %.4f' % (nNewPions/nTotalPions,)
    print 'Old Background Fraction = %.4f' % (nOldBg/nOldCounts,)
    print 'New Background Fraction = %.4f' % (nNewBg/nNewCounts,)
    print 'Total Statistics Old = %.0f New %.0f' % (nOldCounts, nNewCounts)
    raw_input('press enter:') | 5,326,245 |
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
                    n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score using cross-validation
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be for example a list, or an array.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    groups : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    Returns
    -------
    scores : numpy.array, shape=(len(list(cv)), 2)
        Array of scores of the estimator for each run of the cross validation
        with their corresponding uncertainty.
    See Also
    ---------
    :func:`skpro.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    # To ensure multimetric format is not supported
    scorer = check_scoring(estimator, scoring=scoring)
    if n_jobs == 1:
        # If we are not multiprocessing it's possible to
        # use a wrapper function to retrieve the std values
        # (collected as a side effect in the closure below).
        test_scores = []
        def scoring_task(estimator, X, y):
            score, std = scorer(estimator, X, y, return_std=True)
            test_scores.append([score, std])
            return score
    else:
        # We allow multiprocessing by passing in two scoring functions.
        # That is far from ideal since we call the scorer twice,
        # so any improvement is welcome
        score_scorer = RetrievesScores(scorer, score=True, std=False)
        std_scorer = RetrievesScores(scorer, score=False, std=True)
        scoring_task = {'score': score_scorer, 'std': std_scorer}
    cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
                                scoring=scoring_task, cv=cv,
                                return_train_score=False,
                                n_jobs=n_jobs, verbose=verbose,
                                fit_params=fit_params,
                                pre_dispatch=pre_dispatch)
    if n_jobs == 1:
        # Scores/stds were accumulated by the closure, one row per fold.
        return np.array(test_scores)
    else:
        # Stack the two metric columns back into (n_folds, 2).
        return np.column_stack((cv_results['test_score'], cv_results['test_std'])) | 5,326,246 |
def test_empty():
    """ Empty relevancy """
    # An empty relevancy string should yield no adjustments at all.
    assert relevancy_to_adjust('') == []
def load_graph(N, M):
    """
    Builds an adjacency list representation of a graph with N vertices. Each
    graph[i][j] is the minimum length of an edge between vertice i and j.
    :rtype List[int, Dict[int, int]]
    """
    graph = [{} for _ in range(N)]
    for _ in range(M):
        x, y, r = read(int)
        # Convert 1-based input vertices to 0-based indices.
        x, y = x - 1, y - 1
        # Keep only the shortest edge seen between this pair of vertices.
        weight = min(r, graph[x].get(y, r))
        graph[x][y] = weight
        graph[y][x] = weight
    return graph
def quartile_range(arr):
    """
    Find out the Interquartile Range.

    :param arr: sorted sequence of values
    :return: tuple ``(Q1, IQR, Q3)`` -- lower quartile, interquartile
        range, upper quartile.
    """
    # BUG FIX: use floor division -- ``len(arr)/2`` is a float on Python 3
    # and floats are not valid slice indices.
    half = len(arr) // 2
    # Lower quartile: median of the lower half (excluding the middle
    # element when the length is odd).
    left = median(arr[:half])
    if len(arr) % 2 != 0:
        # odd length: skip the middle element for the upper half
        right = median(arr[half + 1:])
    else:
        # even length: upper half starts at the midpoint
        right = median(arr[half:])
    return left, abs(right - left), right
def border(surface, rect, data):
    """Fill with a border.

    Takes (border_width, border_colour, inner_colour). If inner_colour is omitted
    or width is 0, border colour is used for the whole tile.
    """
    if len(data) == 2:
        width, border_colour = 0, data[1]
    else:
        width, border_colour, inner_colour = data
    # Paint the whole tile in the border colour first.
    surface.fill(border_colour, rect)
    if width != 0:
        # Shrink by the border width on every side and fill the interior.
        shrink = width * 2
        surface.fill(inner_colour, Rect(rect).inflate(-shrink, -shrink))
def check_syntax(filename, raise_error=False):
    """Return True if syntax is okay."""
    with autopep8.open_with_encoding(filename) as input_file:
        try:
            # dont_inherit keeps this module's __future__ flags out of the check.
            compile(input_file.read(), '<string>', 'exec', dont_inherit=True)
        except (SyntaxError, TypeError, UnicodeDecodeError):
            if raise_error:
                raise
            return False
        return True
def test_process_image_pixels():
    """Check the example how to implement convolution given in the docstring"""
    from astropy.convolution import convolve as astropy_convolve

    def convolve(image, kernel):
        '''Convolve image with kernel'''
        from ..utils import process_image_pixels
        in_images = dict(image=np.asanyarray(image))
        kernel_array = np.asanyarray(kernel)
        out_images = dict(image=np.empty_like(image))

        def convolve_function(images, kernel):
            # Per-pixel operation: weighted sum over the kernel footprint.
            return dict(image=np.sum(images['image'] * kernel))

        process_image_pixels(in_images, kernel_array, out_images, convolve_function)
        return out_images['image']

    np.random.seed(0)
    image = np.random.random((7, 10))
    kernel = np.random.random((3, 5))
    # Reference result from astropy with zero-filled boundaries.
    assert_allclose(convolve(image, kernel),
                    astropy_convolve(image, kernel, boundary='fill'))
def im2vec(im, bsize, padsize=0):
    """
    Converts image to vector.
    Args:
        im: Input image to be converted to a vector.
        bsize: Size of block of im to be converted to vec. Must be 1x2 non-negative int array.
        padsize (optional, default=0): Must be non-negative integers in a 1x2 array. Amount of zeros padded on each
    Returns:
        v: Output vector.
        rows: Number of rows of im after bsize and padsize are applied (before final flattening to vector).
        cols: Number of cols of im after bsize and padsize are applied (before final flattening to vector).
    Raises:
        ValueError: if any element of padsize is negative.
    """
    # Broadcast scalar/1-element inputs to 1x2 int arrays.
    bsize = bsize + np.zeros((1, 2), dtype=int)[0]
    padsize = padsize + np.zeros((1, 2), dtype=int)[0]
    # BUG FIX: the original tested ``padsize.any() < 0`` (a bool compared to
    # 0), which is always False, so negative pads were never rejected.
    if (padsize < 0).any():
        raise ValueError("Pad size must not be negative")
    imsize = np.shape(im)
    # Effective block extent including padding.
    y = bsize[0] + padsize[0]
    x = bsize[1] + padsize[1]
    rows = math.floor((imsize[0] + padsize[0]) / y)
    cols = math.floor((imsize[1] + padsize[1]) / x)
    # Copy the image into a zero buffer that holds an integer number of blocks.
    t = np.zeros((y * rows, x * cols))
    imy = y * rows - padsize[0]
    imx = x * cols - padsize[1]
    t[:imy, :imx] = im[:imy, :imx]
    # Split into (y, x) blocks, gather blocks along the last axis...
    t = np.reshape(t, (y, rows, x, cols), order='F')
    t = np.reshape(np.transpose(t, [0, 2, 1, 3]), (y, x, rows * cols), order='F')
    # ...drop the padding rows/cols and flatten each block to a column.
    v = t[:bsize[0], :bsize[1], :rows * cols]
    v = np.reshape(v, (y * x, rows * cols), order='F')
    return [v, rows, cols]
async def test_enable_service_call(hass):
    """Test enable service call with no Pi-hole named."""
    mocked_hole = _create_mocked_hole()
    config = {
        pi_hole.DOMAIN: [
            {"host": "pi.hole1", "api_key": "1"},
            {"host": "pi.hole2", "name": "Custom", "api_key": "2"},
        ]
    }
    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        assert await async_setup_component(hass, pi_hole.DOMAIN, config)

        await hass.async_block_till_done()

        # Without a target name the service should fan out to every Pi-hole.
        await hass.services.async_call(
            pi_hole.DOMAIN, pi_hole.SERVICE_ENABLE, {}, blocking=True
        )
        await hass.async_block_till_done()
        assert mocked_hole.enable.call_count == 2
def mobilenetV2_block(
        input_layer,
        filters: int = 32,
        dropout_ratio: float = DEFAULT_DROPOUT_RATIO,
        use_batchnorm: bool = False,
        prefix: str = "mobilenetV2_",
        initializer=DEFAULT_KERNEL_INITIALIZER,
        regularizer=DEFAULT_KERNEL_REGULARIZER,
        channels_index: int = DEFAULT_CHANNEL_INDEX):
    """
    Build a MobileNetV2 bottleneck block with a residual connection.

    Expansion 1x1 conv -> 3x3 depthwise conv -> projection 1x1 conv back to
    the input's channel count, added to the input, with optional batch norm
    and dropout.

    :param input_layer: tensor the block is built on
    :param filters: channel count of the expansion convolution
    :param dropout_ratio: dropout rate in [0, 1], or None to disable
    :param use_batchnorm: add BatchNormalization after the conv stages
    :param prefix: name prefix for all layers in the block
    :param initializer: kernel initializer for the convolutions
    :param regularizer: kernel regularizer for the convolutions
    :param channels_index: axis holding the channel dimension
    :return: output tensor of the block
    """
    # --- argument checking (guard clauses)
    if input_layer is None:
        raise ValueError("input_layer cannot be empty")
    if filters <= 0:
        raise ValueError("Filters should be > 0")
    if dropout_ratio is not None:
        if dropout_ratio > 1.0 or dropout_ratio < 0.0:
            raise ValueError("Dropout ration must be [0, 1]")

    # The projection conv must restore the input's channel count so the
    # residual add is shape-compatible.
    previous_no_filters = K.int_shape(input_layer)[channels_index]

    # expansion: linear 1x1 convolution
    net = keras.layers.Conv2D(
        filters=filters,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="same",
        activation="linear",
        name=prefix + "conv0",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(input_layer)

    # depthwise 3x3 convolution with ReLU
    net = keras.layers.DepthwiseConv2D(
        depth_multiplier=1,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding="same",
        activation="relu",
        name=prefix + "conv1",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(net)
    if use_batchnorm:
        net = keras.layers.BatchNormalization(
            name=prefix + "batchnorm0")(net)

    # projection back to the input channel count
    net = keras.layers.Conv2D(
        filters=previous_no_filters,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="same",
        activation="relu",
        name=prefix + "conv2",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(net)
    if use_batchnorm:
        net = keras.layers.BatchNormalization(
            name=prefix + "batchnorm1")(net)

    # --- residual connection
    net = keras.layers.Add(name=prefix + "add")([net, input_layer])

    if dropout_ratio is not None and dropout_ratio > 0.0:
        net = keras.layers.Dropout(
            name=prefix + "dropout",
            rate=dropout_ratio)(net)

    return net
def test_regex():
    """Runs simple tests on the Regex filter."""
    filt = RegexFilter(pattern="foo")
    # Path -> expected match (re.search semantics: "foo" anywhere in the key).
    expectations = {
        "foo.py": True,
        "bar/foo.py": True,
        "bar/foo": True,
        "bar/baz": False,
        "baz": False,
        "oof": False,
    }
    cases = [
        (FileItem(key=path), expected) for path, expected in expectations.items()
    ]
    run_filter_tests(filt, cases)
def update_git_config_context_schemas(repository_record, job_result):
    """Refresh any config context schemas provided by this Git repository.

    Loads every file under ``config_context_schemas/`` (JSON or YAML, a dict
    or a list of dicts per file), imports each schema record, and finally
    deletes any schemas previously owned by this repository that were not
    re-imported.
    """
    config_context_schema_path = os.path.join(repository_record.filesystem_path, "config_context_schemas")
    if not os.path.isdir(config_context_schema_path):
        return

    managed_config_context_schemas = set()
    for file_name in os.listdir(config_context_schema_path):
        file_path = os.path.join(config_context_schema_path, file_name)
        if not os.path.isfile(file_path):
            continue
        job_result.log(
            f"Loading config context schema from `{file_name}`",
            # FIX: grouping was "config contexts" here but "config context
            # schemas" in the error path below; use the schemas grouping
            # consistently.
            grouping="config context schemas",
            logger=logger,
        )
        try:
            with open(file_path, "r") as fd:
                # The data file can be either JSON or YAML; since YAML is a superset of JSON, we can load it regardless
                try:
                    context_schema_data = yaml.safe_load(fd)
                except Exception as exc:
                    raise RuntimeError(f"Error in loading config context schema data from `{file_name}`: {exc}")

            # A file can contain one schema dict or a list thereof; normalize
            # to a list so a single code path handles both.
            if isinstance(context_schema_data, dict):
                records = [context_schema_data]
            elif isinstance(context_schema_data, list):
                records = context_schema_data
            else:
                records = None

            if records is None:
                raise RuntimeError(
                    f"Error in loading config context schema data from `{file_name}`: data must be a dict or list of dicts"
                )
            for context_schema in records:
                if not isinstance(context_schema, dict):
                    raise RuntimeError(
                        f"Error in loading config context schema data from `{file_name}`: data must be a dict or list of dicts"
                    )
                context_name = import_config_context_schema(context_schema, repository_record, job_result, logger)
                managed_config_context_schemas.add(context_name)
        except Exception as exc:
            job_result.log(
                str(exc),
                level_choice=LogLevelChoices.LOG_FAILURE,
                grouping="config context schemas",
                logger=logger,
            )
            job_result.save()

    # Delete any prior schemas that are owned by this repository but were not created/updated above
    delete_git_config_context_schemas(
        repository_record,
        job_result,
        preserve=managed_config_context_schemas,
    )
def _grid_archive():
    """Deterministically created GridArchive."""
    # The archive must be low-res enough that we can tell if the number of cells
    # is correct, yet high-res enough that we can see different colors.
    bounds = [(-1, 1), (-1, 1)]
    archive = GridArchive([10, 10], bounds, seed=42)
    archive.initialize(solution_dim=2)
    _add_uniform_sphere(archive, *bounds)
    return archive
def check_flush(hand):
    """Check whether the hand has a flush; returns a boolean.

    A flush means every card shares the first card's suit, i.e. filtering
    the hand by that suit keeps every card.
    """
    # Return the comparison directly instead of if/return True/return False.
    return len(hand) == len(hand.by_suit(hand[0].suit))
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch
    :param preds: prediction logits
    :param y: target labels
    :return: accuracy = percentage of correct predictions
    """
    # Threshold the logits at 0.5 probability: sigmoid then round.
    predicted_labels = torch.round(torch.sigmoid(preds))
    matches = (predicted_labels == y).float()
    return matches.sum() / len(matches)
def load_taxondump(idpath):
    """Importing the Acidobacteria taxon IDs"""
    # Each CSV row is (taxon_id, name); map name -> taxon_id.
    with open(idpath) as csvfile:
        return {row[1]: row[0] for row in csv.reader(csvfile, delimiter=',')}
def _to_numeric_range(cell):
"""
Translate an Excel cell (eg 'A1') into a (col, row) tuple indexed from zero.
e.g. 'A1' returns (0, 0)
"""
match = re.match("^\$?([A-Z]+)\$?(\d+)$", cell.upper())
if not match:
raise RuntimeError("'%s' is not a valid excel cell address" % cell)
col, row = match.groups()
# A = 1
col_digits = map(lambda c: ord(c) - ord("A") + 1, col)
col = 0
for digit in col_digits:
col = (col * 26) + digit
row = int(row) - 1
col = col - 1
return col, row | 5,326,262 |
def LUCroutDecompose(A):
    """
    LU decomposition via Crout's method, delegating to a native routine.

    :param A: square numpy ``matrix`` to decompose
    :return: tuple ``(L, U)`` of lower/upper triangular factors
    """
    assert A.shape[0] == A.shape[1] and type(A) is matrix, "'A' deve ser NxN."
    n = A.shape[0]
    # Output factors, filled in place by the C routine.
    L = zeros(A.shape)
    U = zeros(A.shape)
    lib.LUDec(n, byref(ctypeslib.as_ctypes(A)),
              byref(ctypeslib.as_ctypes(L)),
              byref(ctypeslib.as_ctypes(U)))
    return L, U
def protocol(recarr, design_type, *hrfs):
    """ Create an object that can evaluate the FIAC
    Subclass of formulae.Formula, but not necessary.
    Parameters
    ----------
    recarr : (N,) structured array
        with fields 'time' and 'event'
    design_type : str
        one of ['event', 'block']. Handles how the 'begin' term is
        handled. For 'block', the first event of each block is put in
        this group. For the 'event', only the first event is put in this
        group. The 'begin' events are convolved with hrf.glover.
    hrfs: symbolic HRFs
        Each event type ('SSt_SSp','SSt_DSp','DSt_SSp','DSt_DSp') is
        convolved with each of these HRFs in order.
    Returns
    -------
    f: Formula
        Formula for constructing design matrices.
    contrasts : dict
        Dictionary of the contrasts of the experiment.
    """
    event_types = np.unique(recarr['event'])
    N = recarr.size
    # 'block' designs drop the first event of every 6-event block into the
    # 'begin' group; 'event' designs drop only the very first event.
    if design_type == 'block':
        keep = np.not_equal((np.arange(N)) % 6, 0)
    else:
        keep = np.greater(np.arange(N), 0)
    # This first frame was used to model out a potentially
    # 'bad' first frame....
    _begin = recarr['time'][~keep]
    termdict = {}
    termdict['begin'] = utils.define('begin', utils.events(_begin, f=hrf.glover))
    # Low-frequency drift regressors; N_ROWS is a module-level constant.
    drift = formulae.natural_spline(utils.T,
                                    knots=[N_ROWS/2.+1.25],
                                    intercept=True)
    for i, t in enumerate(drift.terms):
        termdict['drift%d' % i] = t
    # After removing the first frame, keep the remaining
    # events and times
    times = recarr['time'][keep]
    events = recarr['event'][keep]
    # Now, specify the experimental conditions. This creates expressions named
    # SSt_SSp0, SSt_SSp1, etc. with one expression for each (eventtype, hrf)
    # pair
    for v in event_types:
        k = np.array([events[i] == v for i in range(times.shape[0])])
        for l, h in enumerate(hrfs):
            # Make sure event type is a string (not byte string)
            term_name = '%s%d' % (to_str(v), l)
            termdict[term_name] = utils.define(term_name,
                                               utils.events(times[k], f=h))
    f = formulae.Formula(termdict.values())
    # T-contrasts over the first-HRF terms (suffix 0): grand average plus the
    # standard FIAC speaker/sentence main effects and their interaction.
    Tcontrasts = {}
    Tcontrasts['average'] = (termdict['SSt_SSp0'] + termdict['SSt_DSp0'] +
                             termdict['DSt_SSp0'] + termdict['DSt_DSp0']) / 4.
    Tcontrasts['speaker'] = (termdict['SSt_DSp0'] - termdict['SSt_SSp0'] +
                             termdict['DSt_DSp0'] - termdict['DSt_SSp0']) * 0.5
    Tcontrasts['sentence'] = (termdict['DSt_DSp0'] + termdict['DSt_SSp0'] -
                              termdict['SSt_DSp0'] - termdict['SSt_SSp0']) * 0.5
    Tcontrasts['interaction'] = (termdict['SSt_SSp0'] - termdict['SSt_DSp0'] -
                                 termdict['DSt_SSp0'] + termdict['DSt_DSp0'])
    # Ftest
    Fcontrasts = {}
    Fcontrasts['overall1'] = formulae.Formula(Tcontrasts.values())
    return f, Tcontrasts, Fcontrasts
def iter_children(param, childlist=None):
    """
    | Iterator over all sub children of a given parameters.
    | Returns all childrens names.
    =============== ================================= ====================================
    **Parameters**    **Type**                         **Description**
    *param*           instance of pyqtgraph parameter  the root node to be traversed
    *childlist*       list                             the child list reception structure
    =============== ================================= ====================================
    Returns
    -------
    childlist : parameter list
        The list of the children from the given node.
    Examples
    --------
    >>> import custom_parameter_tree as cpt
    >>> from pyqtgraph.parametertree import Parameter
    >>> #Creating the example tree
    >>> settings=Parameter(name='settings')
    >>> child1=Parameter(name='child1', value=10)
    >>> child2=Parameter(name='child2',value=10,visible=True,type='group')
    >>> child2_1=Parameter(name='child2_1', value=10)
    >>> child2_2=Parameter(name='child2_2', value=10)
    >>> child2.addChildren([child2_1,child2_2])
    >>> settings.addChildren([child1,child2])
    >>> #Get the child list from the param argument
    >>> childlist=cpt.iter_children(settings)
    >>> #Verify the integrity of result
    >>> print(childlist)
    ['child1', 'child2', 'child2_1', 'child2_2']
    """
    # Fix: the original used a mutable default argument (childlist=[]),
    # which is shared between calls, so repeated top-level invocations
    # silently accumulated the results of earlier calls.
    if childlist is None:
        childlist = []
    for child in param.children():
        childlist.append(child.name())
        # Group nodes are recursed into; a fresh accumulator is passed so
        # the recursion appends only that subtree's names.
        if child.type() == 'group':
            childlist.extend(iter_children(child, []))
    return childlist
def shell__shell_hook(callback: Callable[[int, WPARAM, LPARAM], Optional[str]]) -> Union[HHOOK, WindowsErrorMessage]:
    """
    Adds a global shell hook, called when any key is pressed in any
    context. The callback is called directly in the thread that invoked
    this method. It is up to the callback to be as responsive as possible.
    If the callback explicitly returns the value "Cancel"
    (SHELL__CANCEL_CALLBACK_CHAIN), then the next hook in the chain of
    listeners will not be called. This is made very explicit because
    most circumstances dictate that the next chained handler should be
    called. Even then, there are circumstances in which the next hook
    will still need to be called.
    The callback takes the parameters (code, wparam, lparam). It is up
    to the callback to correctly parse the values. Note that this is
    really windows specific, so it's a good idea to read through the docs.
    :param callback: hook procedure; return SHELL__CANCEL_CALLBACK_CHAIN to
        stop propagation to the rest of the hook chain.
    :return: hook handle on success, or a WindowsErrorMessage on failure.
    """
    # Without some tricky logic, the shell hook will always fail.
    # Specifically, it must use a DLL to perform the hook. One for 64-bit
    # applications, and one for 32-bit applications.
    # See https://www.codeproject.com/Articles/18638/Using-Window-Messages-to-Implement-Global-System-H
    # Otherwise, this error is encountered.
    # ERROR_HOOK_NEEDS_HMOD (1428):
    # Cannot set nonlocal hook without a module handle.
    #
    hmod = GetModuleHandleW(None)
    # Fix: the original condition read ``hmod is None is None`` -- a chained
    # comparison whose trailing ``None is None`` clause is always true, so it
    # reduced to ``hmod is None`` anyway; spell the intent directly.
    if hmod is None:
        return WindowsErrorMessage('GetModuleHandleW')
    # See https://msdn.microsoft.com/en-us/library/windows/desktop/ms644991(v=vs.85).aspx
    hook_id = None
    def shell_handler(code: int, wparam: WPARAM, lparam: LPARAM) -> LRESULT:
        print("[Shell handler] {0} {1} {2}".format(code, wparam, lparam))
        call_next = True
        try:
            # From the docs: If nCode is less than zero, the hook
            # procedure must return the value returned by CallNextHookEx.
            if code >= 0:
                ret = callback(code, wparam, lparam)
                if ret == SHELL__CANCEL_CALLBACK_CHAIN:
                    call_next = False
        except:  # pylint: disable=bare-except
            print("Unexpected error: {0}".format(sys.exc_info()[0]))
            raise
        finally:
            # NOTE(review): returning from ``finally`` swallows the ``raise``
            # above, so an error in the callback still invokes the next hook
            # instead of unwinding through the OS callback boundary. That
            # looks deliberate (it keeps the hook chain alive) -- confirm.
            if call_next:
                return t_cast(LRESULT, CallNextHookEx(hook_id, code, wparam, lparam))
            else:
                # print("Canceling callback chain")
                # From the docs:
                # If the hook procedure processed the message, it may return
                # a nonzero value to prevent the system from passing the
                # message to the rest of the hook chain or the target window
                # procedure.
                return LRESULT(1)
    callback_pointer = HOOK_CALLBACK_TYPE(shell_handler)
    hook_id = t_cast(HHOOK, SetWindowsHookExW(WH_SHELL, callback_pointer, hmod, 0))
    if hook_id == 0:
        return WindowsErrorMessage('SetWindowsHookExW / shell')
    print("started shell hook " + repr(hook_id))
    # Keep the ctypes callback alive; without this reference the garbage
    # collector could free it while the OS still holds the raw pointer.
    _CALLBACK_POINTERS[hook_id] = callback_pointer
    # Ensure that the hook is *always* uninstalled at exit to prevent OS
    # resource leaks.
    atexit.register(shell__unhook, hook_id)
    return hook_id
def depth_first_graph_search(problem):
    """
    [Figure 3.7]
    Graph-search variant of depth-first search.

    Expands the deepest unexplored node first using an explicit stack.
    Repeated states are skipped: once a state has been explored, or a node
    is already waiting on the stack, it is not added again, so the search
    does not get trapped by loops. Returns the goal node, or None.
    """
    explored = set()
    stack = [Node(problem.initial)]
    while stack:
        current = stack.pop()
        if problem.goal_test(current.state):
            return current
        explored.add(current.state)
        for successor in current.expand(problem):
            # The membership test runs against the stack as it grows, so
            # duplicate siblings are filtered exactly like the original.
            if successor.state not in explored and successor not in stack:
                stack.append(successor)
    return None
def check_matrix_equality(A, B, tol=None):
    """
    Checks the equality of two matrices.

    :param A: The first matrix (sequence of rows)
    :param B: The second matrix (sequence of rows)
    :param tol: The decimal place tolerance of the check; entries are rounded
        to this many places before comparison when given.
    :return: The boolean result of the equality check
    """
    # Different shapes can never be equal.
    if len(A) != len(B) or len(A[0]) != len(B[0]):
        return False
    for row_a, row_b in zip(A, B):
        for a, b in zip(row_a, row_b):
            # ``is None`` is the idiomatic None test (was ``tol == None``).
            if tol is None:
                if a != b:
                    return False
            elif round(a, tol) != round(b, tol):
                return False
    return True
def about(topic):
    """Return a select function that returns whether
    a paragraph contains one of the words in TOPIC.
    Arguments:
    topic: a list of words related to a subject
    >>> about_dogs = about(['dog', 'dogs', 'pup', 'puppy'])
    >>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup!'], about_dogs, 0)
    'Cute Dog!'
    >>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup.'], about_dogs, 1)
    'Nice pup.'
    """
    assert all([lower(x) == x for x in topic]), 'topics should be lowercase.'
    # BEGIN PROBLEM 2
    def selector(paragraph):
        # Normalize each word of the paragraph, then look for any topic word.
        words = [remove_punctuation(lower(w)) for w in split(paragraph)]
        return any(t in words for t in topic)
    return selector
    # END PROBLEM 2
def calcBarycentricCoords(pt, verts):
    """Calculate the barycentric coordinates of ``pt`` with respect to ``verts``.

    Solves the least-squares linear system expressing the point as an affine
    combination of the vertices: the weights reproduce the point's coordinates
    and sum to one.

    :param pt: voxel/point of interest, length-D sequence.
    :param verts: (N+1, D) vertices formed by the N+1 nearest voxels.
    :return: array of N+1 barycentric weights.
    """
    verts = np.array(verts)  # vertices formed by N+1 nearest voxels
    pt = np.array(pt)  # voxel of interest
    # One column per vertex; the final all-ones row enforces sum(weights) == 1.
    A = np.transpose(np.column_stack((verts, np.ones(verts.shape[0]))))
    b = np.append(pt, 1)
    # rcond=None opts into the modern machine-precision cutoff and silences
    # the FutureWarning raised when rcond is left implicit.
    return np.linalg.lstsq(A, b, rcond=None)[0]
def get_masked_fastas(bed):
    """Create the masked fasta files per chromosome (needed to run bl2seq)
    and return a dictionary mapping seqid to the path of its genomic masked
    fasta.

    :param bed: object exposing ``fasta.fasta_name`` and ``mask_cds()``
        (an iterable of (seqid, masked-sequence) pairs).
    :return: dict of seqid -> fasta file path.
    """
    f = bed.fasta.fasta_name
    fname = op.splitext(op.basename(f))[0]
    d = op.dirname(f) + "/%s_split" % fname
    # Create the split directory if it does not exist yet.
    try: os.mkdir(d)
    except OSError: pass
    fastas = {}
    for seqid, seq in bed.mask_cds():
        f = d + "/%s.fasta" % seqid
        fastas[seqid] = f
        # Skip files already written on a previous run.
        if op.exists(f): continue
        # ``print(seq, file=fh)`` replaces the Python-2-only ``print >>fh``
        # statement (a syntax error on Python 3); text mode replaces the
        # mismatched "wb", and ``with`` guarantees the handle is closed.
        with open(f, "w") as fh:
            print(seq, file=fh)
    return fastas
def parse_regex(ctx, param, values):
    """Compile a regex if given.
    :param click.Context ctx: click command context.
    :param click.Parameter param: click command parameter (in this case,
        ``ignore_regex`` from ``-r|--ignore-regiex``).
    :param list(str) values: list of regular expressions to be compiled.
    :return: a list of compiled regular expressions, or None when no
        expressions were supplied.
    .. versionchanged:: 1.1.3 parameter value (``values``) must be a
        ``list`` of ``str``s.
    """
    if values:
        return [re.compile(pattern) for pattern in values]
    return None
def FileJustRoot(fileName):
    """Gets just the root of the file name (the name without its extension).

    :param fileName: path or file name; any value ``os.path.splitext``
        cannot handle yields the empty string.
    :return: the root part of the name, or "" on error.
    """
    try:
        return os.path.splitext(fileName)[0]
    except (TypeError, AttributeError, ValueError):
        # A bare ``except`` would also swallow KeyboardInterrupt/SystemExit;
        # only the argument-type failures splitext can raise are intended.
        return ""
def generate_command(pbs_script, pbs_config, pbs_vars=None, python_exe=None):
    """Prepare a correct PBS command string.

    :param pbs_script: script to submit; resolved against the payu path.
    :param pbs_config: dict of PBS settings (queue, project, resources,
        jobname, priority, join, storage, qsub_flags, ...).
    :param pbs_vars: environment variables exported to the job via ``-v``.
    :param python_exe: interpreter used to run the script; defaults to the
        current interpreter (overridable for testing).
    :return: the complete ``qsub`` command string.
    """
    pbs_env_init()
    # Initialisation
    if pbs_vars is None:
        pbs_vars = {}
    # Necessary for testing
    if python_exe is None:
        python_exe = sys.executable
    pbs_flags = []
    pbs_queue = pbs_config.get('queue', 'normal')
    pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
    # Falls back to the PROJECT environment variable (KeyError if unset).
    pbs_project = pbs_config.get('project', os.environ['PROJECT'])
    pbs_flags.append('-P {project}'.format(project=pbs_project))
    pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
    # Each configured resource becomes its own ``-l key=val`` flag.
    for res_key in pbs_resources:
        res_flags = []
        res_val = pbs_config.get(res_key)
        if res_val:
            res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
        if res_flags:
            pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
    # TODO: Need to pass lab.config_path somehow...
    pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
    if pbs_jobname:
        # PBSPro has a 15-character jobname limit
        pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
    pbs_priority = pbs_config.get('priority')
    if pbs_priority:
        pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
    pbs_flags.append('-l wd')
    pbs_join = pbs_config.get('join', 'n')
    if pbs_join not in ('oe', 'eo', 'n'):
        print('payu: error: unknown qsub IO stream join setting.')
        sys.exit(-1)
    else:
        pbs_flags.append('-j {join}'.format(join=pbs_join))
    # Append environment variables to qsub command
    # TODO: Support full export of environment variables: `qsub -V`
    pbs_vstring = ','.join('{0}={1}'.format(k, v)
                           for k, v in pbs_vars.items())
    pbs_flags.append('-v ' + pbs_vstring)
    # Collect the filesystem mounts/projects this job must request access to.
    storages = set()
    storage_config = pbs_config.get('storage', {})
    mounts = set(['/scratch', '/g/data'])
    for mount in storage_config:
        mounts.add(mount)
        for project in storage_config[mount]:
            storages.add(make_mount_string(encode_mount(mount), project))
    # Append any additional qsub flags here
    pbs_flags_extend = pbs_config.get('qsub_flags')
    if pbs_flags_extend:
        pbs_flags.append(pbs_flags_extend)
    payu_path = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
    pbs_script = check_exe_path(payu_path, pbs_script)
    # Check for storage paths that might need to be mounted in the
    # python and script paths
    extra_search_paths = [python_exe, payu_path, pbs_script]
    laboratory_path = pbs_config.get('laboratory', None)
    if laboratory_path is not None:
        extra_search_paths.append(laboratory_path)
    short_path = pbs_config.get('shortpath', None)
    if short_path is not None:
        extra_search_paths.append(short_path)
    storages.update(find_mounts(extra_search_paths, mounts))
    storages.update(find_mounts(get_manifest_paths(), mounts))
    # Add storage flags. Note that these are sorted to get predictable
    # behaviour for testing
    pbs_flags_extend = '+'.join(sorted(storages))
    if pbs_flags_extend:
        pbs_flags.append("-l storage={}".format(pbs_flags_extend))
    # Set up environment modules here for PBS.
    envmod.setup()
    envmod.module('load', 'pbs')
    # Construct job submission command
    cmd = 'qsub {flags} -- {python} {script}'.format(
        flags=' '.join(pbs_flags),
        python=python_exe,
        script=pbs_script
    )
    return cmd
def get_query_string(**kwargs):
    """
    Concatenates the non-None keyword arguments to create a query string for ElasticSearch.
    :return: concatenated query string or None if not arguments were given
    """
    clauses = [
        '{}:{}'.format(field, val)
        for field, val in kwargs.items()
        if val not in (None, '')
    ]
    return ' AND '.join(clauses) or None
def check_bin(img):
    """Checks whether image has been properly binarized. NB: works on the assumption that there should be more
    background pixels than element pixels.
    Parameters
    ----------
    img : np.ndarray
        Image to verify/normalize.
    Returns
    -------
    np.ndarray
        A binary (boolean) array of the image, inverted if needed so the
        minority value is True.
    """
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    # supported spelling for a boolean dtype.
    img_bool = np.asarray(img, dtype=bool)
    # Gets the unique values in the image matrix. Since it is binary, there should only be 2.
    unique, counts = np.unique(img_bool, return_counts=True)
    print(unique)
    print("Found this many counts:")
    print(len(counts))
    print(counts)
    # If the length of unique is not 2 then print that the image isn't a binary.
    if len(unique) != 2:
        print("Image is not binarized!")
        hair_pixels = len(counts)
        print("There is/are {} value(s) present, but there should be 2!\n".format(hair_pixels))
        # With a single value there is nothing to compare or invert; returning
        # here also avoids the IndexError the original hit on counts[1].
        return img_bool
    # If it is binarized, invert when the foreground outnumbers the background.
    if counts[0] < counts[1]:
        print("{} is not reversed".format(str(img)))
        # Logical NOT is equivalent to skimage.util.invert for boolean
        # arrays and drops the scikit-image dependency.
        img = np.logical_not(img_bool)
        print("Now {} is reversed =)".format(str(img)))
        return img
    else:
        print("{} is already reversed".format(str(img)))
        img = img_bool
        print(type(img))
        return img
def add_quotes(path):
    """Wrap ``path`` in double quotes when it contains spaces and no quotes
    already; otherwise return it unchanged."""
    if ' ' in path and '"' not in path:
        return '"{}"'.format(path)
    return path
def terminal(board):
    """
    Returns True if game is over, False otherwise.

    The game is over when no actions remain or a winner exists; the checks
    are short-circuited in the same order as the original implementation.
    """
    return not actions(board) or winner(board) is not None
def login():
    """LogIn Page.

    GET renders the login form; POST validates the submitted credentials
    against the User table and, on success, starts a session and redirects
    to the games page. On failure a message is flashed and the user is sent
    back to the login page.
    """
    if request.method == "GET":
        return render_template("login.html")
    email = request.form.get("email")
    password = request.form.get("password")
    # The checkbox is only present in the form data when ticked.
    remember = bool(request.form.get("remember"))
    user = User.query.filter_by(email=email).first()
    # Same response for unknown user and wrong password (avoids leaking
    # which accounts exist).
    if not user or not check_password_hash(user.password, password):
        flash("Please check your login details and try again.")
        return redirect(url_for("auth.login"))
    login_user(user, remember=remember)
    return redirect(url_for("main.games"))
def apply_modifiers(membership: npt.ArrayLike, modifiers: List[str]) -> npt.ArrayLike:
    """
    Apply a list of modifiers or hedges to a numpy array.
    :param membership: Membership values to be modified.
    :param modifiers: List of modifiers or hedges.
    >>> from fuzzy_expert.operators import apply_modifiers
    >>> x = [0.0, 0.25, 0.5, 0.75, 1]
    >>> apply_modifiers(x, ('not', 'very'))
    array([1.    , 0.9375, 0.75  , 0.4375, 0.    ])
    """
    if modifiers is None:
        return membership
    hedge_fns = {
        "EXTREMELY": extremely,
        "INTENSIFY": intensify,
        "MORE_OR_LESS": more_or_less,
        "NORM": norm,
        "NOT": not_,
        "PLUS": plus,
        "SLIGHTLY": slightly,
        "SOMEWHAT": somewhat,
        "VERY": very,
    }
    # Hedges are written outermost-first (e.g. NOT VERY x), so they are
    # applied to the values in reverse order.
    result = membership.copy()
    for modifier in reversed(list(modifiers)):
        result = hedge_fns[modifier.upper()](result)
    return result
def send_file_to_developers(filename):
    """
    Loads a file and sends its contents to the developers.

    @param filename: file to send
    @return: None
    """
    try:
        # ``with`` guarantees the handle is closed (the original leaked it).
        with open(filename, "r") as f:
            data = f.read()
    except OSError:
        # There is no readable file, so there is no data to send. (The
        # original referenced ``data`` before assignment here, raising a
        # NameError instead of returning quietly.)
        return
    send_data_to_developers(filename, data)
def test_labels_new_label_unauthorized(client):
    """Tests adding a new label with an unauthorized key"""
    payload = {'action': 'create', 'label': 'bar', 'icon': '🔮'}
    response = client.post('/ny/panel/labels',
                           headers={'X-AUTH': 'WRITE'},
                           json=payload)
    assert response.status_code == 401
def unitY(m=1.0):
    """Return a vector along the Y axis with magnitude ``m``."""
    vec = np.array([0, m, 0])
    return vec
def create_suction_model(radius):
    """Create a suction model: a (2*radius+1) square float array holding 1.0
    inside the circle of the given radius centred on the middle cell and 0.0
    outside.

    :param radius: circle radius in cells.
    :return: np.float64 array of shape (2*radius+1, 2*radius+1).
    """
    # Removed the unused ``hm = np.zeros(...)`` allocation from the original.
    offsets = np.arange(-radius, radius + 1)
    # Distance of every cell from the centre, built by broadcasting the
    # row/column offsets against each other.
    dist = np.sqrt(offsets[np.newaxis, :] ** 2 + offsets[:, np.newaxis] ** 2)
    return np.where(dist < radius, 1, 0).astype(np.float64)
def model_entrypoint(model_name):
    """Fetch a model entrypoint for specified model name.

    :param model_name: key into the module-level ``_model_entrypoints``
        registry.
    :return: the registered entrypoint (a model-constructor callable).
    :raises KeyError: if ``model_name`` was never registered.
    """
    return _model_entrypoints[model_name]
def test_oais_fail_transfer(db):
    """Test the oais_fail_transfer function."""
    # let's create a SIP
    sip = SIP.create()
    Archive.create(sip)
    db.session.commit()
    # we fail the transfer
    oais_fail_transfer(sip.id)
    # Failing a transfer must not create or delete archives...
    assert Archive.query.count() == 1
    # ...it only flips the archive's status to FAILED.
    ark = Archive.get_from_sip(sip.id)
    assert ark.status == ArchiveStatus.FAILED
def _scan_duplicates(rootpath: str, files_dict: dict):
    """Walk ``rootpath`` recursively and record every file into
    ``files_dict`` (mutated in place), keyed by its duplicate-detection key.
    """
    root = rootpath if os.path.isabs(rootpath) else os.path.abspath(rootpath)
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            files_dict[get_simple_key(full_path)] = full_path
def encrypt(data, password):
    """Encrypt data and return content in binary.

    :param data: plaintext string to encrypt.
    :param password: AES key material; after ``encode()`` it must be a valid
        AES key length (16/24/32 bytes) or AES.new raises ValueError.
    :return: bytes of the form ``iv + b':' + ciphertext`` (CBC mode).
    :raises ValueError: on invalid key/padding; re-raised with its original
        traceback.
    """
    try:
        cipher = AES.new(password.encode(), AES.MODE_CBC)
        cypher_text_bytes = cipher.encrypt(pad(data.encode(), AES.block_size))
        # ``b'' +`` in the original was a no-op; concatenate directly.
        return cipher.iv + b':' + cypher_text_bytes
    except ValueError:
        print("There was an error")
        # Bare ``raise`` preserves the original exception and traceback;
        # ``raise ValueError`` discarded the message and chained a new one.
        raise
def change_datetime_to_str(input_time=None, str_format="%Y-%m-%d"):
    """
    Format a datetime as a string.

    :param input_time: datetime to format; defaults to the current time.
    :param str_format: strftime format string, default "%Y-%m-%d".
    :return: the formatted time string.
    """
    moment = input_time or datetime.datetime.now()
    return moment.strftime(str_format)
def find_possible_words(word: str, dictionary: list) -> list:
    """Return all dictionary entries that start with ``word``'s first
    character, end with its last character, and use only characters
    present in ``word``."""
    first, last = word[0], word[-1]
    return [
        entry for entry in dictionary
        if entry.startswith(first)
        and entry.endswith(last)
        and all(ch in word for ch in entry)
    ]
def train(train_loader, model, criterion, optimizer, epoch):
    """
    One epoch's training.
    :param train_loader: DataLoader for training data
    :param model: model
    :param criterion: MultiBox loss
    :param optimizer: optimizer
    :param epoch: epoch number

    NOTE(review): relies on module-level globals -- ``batch_size``,
    ``device``, ``grad_clip`` and ``print_freq`` -- defined elsewhere in
    this file; confirm they are set before calling.
    """
    model.train()  # training mode enables dropout
    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss
    # ``start`` doubles as the per-batch timer; it is reset at the end of
    # each loop iteration (and briefly shadowed by a flag in the
    # commented-out experiment below).
    start = time.time()
    import numpy as np
    # Batches
    for i, (images, labels) in enumerate(train_loader):
        #CHECK / REMOVE THIS CODE!
        data_time.update(time.time() - start)
        #print(len(images))
        #print(labels)
        # Move to default device
        data = images
        a = np.asarray(data)
        #print(a.shape)
        #a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)
        #image = torch.from_numpy(a)
        #image = image.permute(0,3,1,2)
        #print(image.shape)
        #Pre-processing:
        # NOTE(review): importing and rebuilding the transform pipeline on
        # every batch is redundant work; left untouched in this
        # documentation-only pass.
        from torchvision import transforms as transf
        preprocess = transf.Compose([
            transf.ToPILImage(),
            transf.Resize(300),
            transf.CenterCrop(300),
            transf.ToTensor(),
            transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        # Build the (batch_size, 3, 300, 300) input batch one image at a time.
        for j in range(batch_size):
            if j == 0:
                input_tensor = preprocess(images[j])
                input_tensor = input_tensor.unsqueeze(0)
                input_batch = input_tensor
            else:
                input_tensor = preprocess(images[j])
                #print(input_tensor)
                input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
                #print(input_tensor.shape)
                input_batch = torch.cat((input_batch, input_tensor), 0)
            #print("shape images: ",input_batch.shape)
            # In the Active Vision Dataset we have this formatting:
            # [xmin ymin xmax ymax instance_id difficulty]
            """ From the Tutorial:
            Since the number of objects in any given image can vary, we can't use a fixed
            size tensor for storing the bounding boxes for the entire batch of N images.
            Therefore, ground truth bounding boxes fed to the model must be a list of
            length N, where each element of the list is a Float tensor of dimensions
            N_o, 4, where N_o is the number of objects present in that particular image.
            Therefore, ground truth labels fed to the model must be a list of length N,
            where each element of the list is a Long tensor of dimensions N_o, where N_o
            is the number of objects present in that particular image.
            """
            #Prints to test
            #print(j)
            box_id_diff = [b for b in labels[j][0]]
            box = [l[0:4] for l in box_id_diff]
            #print('before:',box) #To check
            #Boundary coordinates as requested
            # NOTE(review): 1920x1080 appears to be the assumed source
            # resolution for normalising boxes to [0, 1] -- confirm.
            for k in range(len(box)):
                box[k][0] = box[k][0]/1920.0
                box[k][2] = box[k][2]/1920.0
                box[k][1] = box[k][1]/1080.0
                box[k][3] = box[k][3]/1080.0
            #print('after:',box) #To check
            box_tensor = torch.FloatTensor(box).to(device)
            #Done with the parameter in AVD method
            """
            #Check if there are objects in the images
            if j == 0:
                start = True
            if len(box_tensor) > 0:
                if start == True:
                    box_list = box_tensor
                    start = False
                elif start == False:
                    box_list = [box_list, box_tensor]
                    #box_list = torch.cat((box_list,box_tensor),0)
            else:
                start = True
            """
            #print(box_tensor) #To check
            if j == 0:
                box_list = [box_tensor]
            else:
                box_list.append(box_tensor)
            label = [l[4] for l in box_id_diff]
            label_tensor = torch.LongTensor(label).to(device)
            if j == 0:
                label_list = [label_tensor]
            else:
                label_list.append(label_tensor)
            #print(box_id_diff[0][0:4])
            """
            if len(box_id_diff.size())-1 != 0:
                if j == 0:
                    box = box_id_diff[0][0:4]
                    print("asad:",box)
                    #box = box.unsqueeze(0)
                    boxes = box
                else:
                    box = [l[0:4] for l in box_id_diff]
                    #box = box.unsqueeze(0) # create a mini-batch as expected by the model
                    #print(input_tensor.shape)
                    boxes = torch.cat((boxes, box), 0)
            print("boxes:", boxes)
            """
            #box = torch.split(box_id_diff, 2)
            #print(box)
            """
            if not labels[j][0]:
                labels = []
                print("coasc")
            else:
                labels = [l.to(device) for l in torch.tensor(labels[j][0][4])]
            """
        #print("list of boxes:",box_list)
        #print("list of labels:", label_list)
        # NOTE(review): ``images`` and ``labels`` are rebound here, replacing
        # the raw loader output with the preprocessed batch / tensor lists.
        images = input_batch.to(device)  # (batch_size (N), 3, 300, 300)
        #print(images.shape)
        boxes = box_list
        labels = label_list
        # Forward prop.
        predicted_locs, predicted_scores = model(images)  # (N, 8732, 4), (N, 8732, n_classes)
        #Prints to check the dimensions
        #print(predicted_locs.shape) #correct
        #print(predicted_scores.shape) #correct
        # Loss
        loss = criterion(predicted_locs, predicted_scores, boxes, labels)  # scalar
        # Backward prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients, if necessary
        if grad_clip is not None:
            clip_gradient(optimizer, grad_clip)
        # Update model
        optimizer.step()
        losses.update(loss.item(), images.size(0))
        batch_time.update(time.time() - start)
        start = time.time()
        # Print status
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses))
            """
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
                                                                  batch_time=batch_time,
                                                                  data_time=data_time, loss=losses))
            """
        # Free the batch tensors before the next iteration to reduce memory
        # pressure.
        del predicted_locs, predicted_scores, images, boxes, labels
def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):
    """Log-likelhood under a multidimensional Gaussian distribution with diagonal covariance.

    Sums the per-dimension diagonal-Gaussian log-likelihoods along the
    leading axis to obtain the joint log-likelihood.

    Arguments (presumably arrays with matching leading dimension -- confirm
    with the caller):
      z_u: sample to evaluate.
      mean_u: per-dimension means.
      logvar_u: per-dimension log-variances.
      varmin: variance floor, passed through to the 1-D routine.

    Returns the log-likelihood for the multidim distribution.
    """
    return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)
def _shared_galaxy_properties(config_directory, kwds, for_tests):
    """Build the Galaxy properties shared by local and Docker instances.

    Paths and similar settings differ greatly between Galaxy modalities and
    are largely handled inside the container in Docker mode, but the API
    key, admin user, tool, and job settings assembled here are common to
    both.
    """
    master_api_key = _get_master_api_key(kwds)
    user_email = _user_email(kwds)
    properties = {
        'master_api_key': master_api_key,
        'admin_users': "%s,test@bx.psu.edu" % user_email,
        'expose_dataset_path': "True",
        'cleanup_job': 'never',
        'collect_outputs_from': "job_working_directory",
        'allow_path_paste': "True",
        'check_migrate_tools': "False",
        'use_cached_dependency_manager': str(kwds.get("conda_auto_install", False)),
        'brand': kwds.get("galaxy_brand", DEFAULT_GALAXY_BRAND),
        'strict_cwl_validation': str(not kwds.get("non_strict_cwl", False)),
    }
    if kwds.get("galaxy_single_user", True):
        properties['single_user'] = user_email
    if for_tests:
        # Point the plugin directories at an (ensured) empty directory so
        # test runs pick up no stray plugins.
        empty_dir = os.path.join(config_directory, "empty")
        _ensure_directory(empty_dir)
        for key in ("tour_config_dir",
                    "interactive_environment_plugins_directory",
                    "visualization_plugins_directory"):
            properties[key] = empty_dir
    properties["refgenie_config_file"] = kwds.get('refgenie_config_file', '')
    return properties
def copyPoses(nodeA, nodeB, emptyPoseValues=True):
    """Copy poses from nodeA to nodeB with the option to be blank or node
    for syncing nodes OF EQUAL LENGTH IN POSE INFO
    Args:
        nodeA (str): name of weightedNode
        nodeB (str): name of weightedNode
        emptyPoseValues (bool, optional): should the copy just be the same
        number of poses but blank output value
    Returns:
        n/a: n/a
    """
    posesIndices = pm.getAttr("{}.poses".format(nodeA), mi=True) or [None]
    if len(posesIndices) == 1 and posesIndices[0] is None:
        return
    nodeA_poseInfo = getPoseInfo(nodeA)
    drivenAttrs = getDrivenNodeAttributes(nodeB)
    numDriven = len(drivenAttrs)
    # Fix: ``.iteritems()`` is Python-2 only; ``.items()`` works on both.
    for attr, value in nodeA_poseInfo.items():
        if value == ():
            continue
        for poseIndex, poseValues in enumerate(value):
            for index, pIndexValue in enumerate(poseValues):
                # Fix: the original compared an int against a range object
                # (``index > nodeBdrivenIndex``), which never fired on
                # Python 2 and raises TypeError on Python 3; the intent is
                # to skip indices beyond nodeB's driven attributes.
                if index >= numDriven:
                    continue
                pathToAttr = "{}.poses[{}].{}[{}]".format(nodeB,
                                                          poseIndex,
                                                          attr,
                                                          index)
                if attr == "poseValue" and emptyPoseValues:
                    # Blank outputs: identity for scale, zero otherwise.
                    if drivenAttrs[index] in rbf_node.SCALE_ATTRS:
                        valueToSet = 1.0
                    else:
                        valueToSet = 0.0
                else:
                    # poseInput values -- and poseValue when copying real
                    # outputs -- are copied verbatim. (The original left
                    # ``valueToSet`` unbound on this last path.)
                    valueToSet = pIndexValue
                pm.setAttr(pathToAttr, valueToSet)
def get_method(java_object, method_name):
    """Retrieves a reference to the method of an object.
    This function is useful when `auto_field=true` and an instance field has
    the same name as a method. The full signature of the method is not
    required: it is determined when the method is called.
    :param java_object: the instance containing the method
    :param method_name: the name of the method to retrieve
    :return: a JavaMember bound to ``java_object``; the actual overload is
        resolved on the gateway side when the member is called.
    """
    return JavaMember(
        method_name, java_object, java_object._target_id,
        java_object._gateway_client)
def sentence_tokenize_cmd(lines: types.Lines, language: str) -> types.Sentences:
    """Tokenizes lines into sentences. Downloads nltk resources if they don't exist."""
    # Make sure the punkt sentence tokenizer data is available.
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        nltk.download('punkt')
    line = ''
    try:
        for line in lines:
            yield from nltk.sent_tokenize(line, language=language)
    except Exception as e:
        # Report the offending line (truncated) on stderr instead of raising.
        click.echo('Could not tokenize line into sentences "%s": %s' % (line[:min(10, len(line))], e), err=True)
def createRegionLabeledSet(setname, entity, label, mesh, format="Exodus II"):
    """Create a labeled set region.
    setname | string, name of the region
    entity | string, entity (see mesh_entity.py)
    label | string, label id in mesh file (note this is usually a string containing an integer)
    mesh | string, mesh filename
    format | string, format of mesh (currently only 'Exodus II' supported)
    returns the region xml
    """
    # Load the doxygen-documented spec for RegionLabeledSet and fill in the
    # user-supplied values by parameter name.
    e = extractDoxygenXML(os.path.join(AMANZI_SRC_DIR, 'src', 'geometry', 'RegionLabeledSet.hh'))
    search.replace_by_name(e, "label", label)
    # The entity string is validated/normalized before insertion.
    search.replace_by_name(e, "entity", mesh_entity.valid_mesh_entity(entity))
    search.replace_by_name(e, "format", format)
    search.replace_by_name(e, "mesh", mesh)
    # Wrap the filled spec in a ParameterList named after the region.
    pl = parameter_list.ParameterList(setname)
    pl.append(e)
    return pl
def set_up_cube(
    zero_point_indices=((0, 0, 7, 7),),
    num_time_points=1,
    num_grid_points=16,
    num_realization_points=1,
):
    """Set up a cube with equal intervals along the x and y axis.

    The data is all ones except at ``zero_point_indices``, each of which is
    a (realization, time, lat, lon) index tuple (the realization index may
    be omitted and defaults to 0).
    """
    zero_point_indices = list(zero_point_indices)
    # Normalize 3-element (time, lat, lon) tuples by prepending realization 0.
    for index, indices in enumerate(zero_point_indices):
        if len(indices) == 3:
            indices = (0,) + indices
        zero_point_indices[index] = indices
    zero_point_indices = tuple(zero_point_indices)
    data = np.ones(
        (num_realization_points, num_time_points, num_grid_points, num_grid_points),
        dtype=np.float32,
    )
    # Punch the requested zero points into the otherwise all-ones data.
    for indices in zero_point_indices:
        realization_index, time_index, lat_index, lon_index = indices
        data[realization_index][time_index][lat_index][lon_index] = 0
    cube = Cube(data, standard_name="precipitation_amount", units="kg m^-2")
    cube.add_dim_coord(
        DimCoord(range(num_realization_points), standard_name="realization"), 0
    )
    tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian")
    # Hourly time points starting from an arbitrary reference value.
    time_points = [402192.5 + _ for _ in range(num_time_points)]
    cube.add_dim_coord(DimCoord(time_points, standard_name="time", units=tunit), 1)
    # Equally spaced projection coordinates, 2 km apart.
    step_size = 2000
    y_points = np.arange(0.0, step_size * num_grid_points, step_size, dtype=np.float32)
    cube.add_dim_coord(
        DimCoord(
            y_points,
            "projection_y_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        2,
    )
    # The x axis is offset by -50 km relative to the y axis origin.
    x_points = np.arange(
        -50000.0, (step_size * num_grid_points) - 50000, step_size, dtype=np.float32
    )
    cube.add_dim_coord(
        DimCoord(
            x_points,
            "projection_x_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        3,
    )
    return cube
def messageBox(self, title, text, icon=QMessageBox.Information):
    """
    Show a modal generic message box with the given title, text and icon.
    Always returns QMessageBox.Ok once dismissed.
    """
    box = QMessageBox(self)
    box.setWindowTitle(title)
    box.setText(text)
    box.setIcon(icon)
    box.setDefaultButton(QMessageBox.Ok)
    box.setFont(self.font())
    box.exec_()
    return QMessageBox.Ok
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.