content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def ReadExactly(from_stream, num_bytes):
    """Read exactly num_bytes from a stream and return them joined together.

    Args:
        from_stream: any object with a file-like read(size) method.
        num_bytes: total number of bytes to read.

    Raises:
        EOFError: if the stream is exhausted before num_bytes were read.
    """
    pieces = []
    bytes_read = 0
    while bytes_read < num_bytes:
        # MAX_READ (module-level constant) caps each individual read call.
        data = from_stream.read(min(MAX_READ, num_bytes - bytes_read))
        if not data:
            # Without this check an exhausted stream would loop forever,
            # since read() keeps returning an empty result at EOF.
            raise EOFError(
                'Stream ended after %d of %d bytes' % (bytes_read, num_bytes))
        bytes_read += len(data)
        pieces.append(data)
    return ''.join(pieces)
import json
import phantom.rules as phantom
from hashlib import sha256
def indicator_collect(container=None, artifact_ids_include=None, indicator_types_include=None, indicator_types_exclude=None, indicator_tags_include=None, indicator_tags_exclude=None, **kwargs):
    """
    Collect all indicators in a container and separate them by data type. Additional output data paths are created for each data type. Artifact scope is ignored.

    Args:
        container (CEF type: phantom container id): The current container
        artifact_ids_include (CEF type: phantom artifact id): Optional parameter to only look for indicator values that occur in the artifacts with these IDs. Must be one of: json serializable list, comma separated integers, or a single integer.
        indicator_types_include: Optional parameter to only include indicators with at least one of the provided types in the output. If left empty, all indicator types will be included except those that are explicitly excluded. Accepts a comma-separated list.
        indicator_types_exclude: Optional parameter to exclude indicators with any of the provided types from the output. Accepts a comma-separated list.
        indicator_tags_include: Optional parameter to only include indicators with at least one of the provided tags in the output. If left empty, tags will be ignored except when they are excluded. Accepts a comma-separated list.
        indicator_tags_exclude: Optional parameter to exclude indicators with any of the provided tags from the output. Accepts a comma-separated list.

    Returns a JSON-serializable object that implements the configured data paths:
        all_indicators.*.cef_key
        all_indicators.*.cef_value
        all_indicators.*.data_types
        all_indicators.*.artifact_id
        domain.*.cef_key
        domain.*.cef_value (CEF type: domain)
        domain.*.artifact_id
        file_name.*.cef_key (CEF type: file name)
        file_name.*.cef_value (CEF type: file name)
        file_name.*.artifact_id
    """
    ############################ Custom Code Goes Below This Line #################################
    outputs = {'all_indicators': []}

    def grouper(seq, size):
        # Yield successive chunks of ``size`` items from ``seq``.
        return (seq[pos:pos + size] for pos in range(0, len(seq), size))

    def get_indicator_json(value_set):
        # Look up indicator records by value hash via the REST API,
        # 100 hashes per request to keep query URLs a reasonable length.
        value_list = list(value_set)
        indicator_url = phantom.build_phantom_rest_url('indicator') + '?page_size=0&timerange=all'
        hashed_list = [sha256(item.encode('utf-8')).hexdigest() for item in value_list]
        indicator_dictionary = {}
        for group in grouper(hashed_list, 100):
            query_url = indicator_url + f'&_filter_value_hash__in={group}'
            indicator_response = phantom.requests.get(query_url, verify=False)
            indicator_json = indicator_response.json() if indicator_response.status_code == 200 else {}
            for data in indicator_json.get('data', []):
                indicator_dictionary[data['value_hash']] = data
        return indicator_dictionary

    def check_numeric_list(input_list):
        # True when every element is an int, or every element is a numeric string.
        return (all(isinstance(x, int) for x in input_list) or all(x.isnumeric() for x in input_list))

    def is_valid_indicator(list_1=None, list_2=None, check_type="include"):
        # 'include': when a filter list is given, require at least one overlap.
        # 'exclude': when a filter list is given, reject any overlap.
        list_1 = [] if not list_1 else list_1
        list_2 = [] if not list_2 else list_2
        if check_type == 'exclude':
            if list_1 and any(item in list_1 for item in list_2):
                return False
        elif check_type == 'include':
            if list_1 and not any(item in list_1 for item in list_2):
                return False
        return True

    # validate container and get ID
    if isinstance(container, dict) and container['id']:
        container_dict = container
        container_id = container['id']
    elif isinstance(container, int):
        rest_container = phantom.requests.get(uri=phantom.build_phantom_rest_url('container', container), verify=False).json()
        if 'id' not in rest_container:
            # BUG FIX: this message previously lacked the f-prefix and printed
            # the literal text '{container}' instead of the actual id.
            raise RuntimeError(f'Failed to find container with id {container}')
        container_dict = rest_container
        container_id = container
    else:
        raise TypeError("The input 'container' is neither a container dictionary nor a valid container id, so it cannot be used")

    # Normalize the comma-separated filter parameters into lists.
    if indicator_types_include:
        indicator_types_include = [item.strip(' ') for item in indicator_types_include.split(',')]
    if indicator_types_exclude:
        indicator_types_exclude = [item.strip(' ') for item in indicator_types_exclude.split(',')]
    if indicator_tags_include:
        indicator_tags_include = [item.strip(' ').replace(' ', '_') for item in indicator_tags_include.split(',')]
    if indicator_tags_exclude:
        indicator_tags_exclude = [item.strip(' ').replace(' ', '_') for item in indicator_tags_exclude.split(',')]

    if artifact_ids_include:
        # Try to convert to a valid list
        if isinstance(artifact_ids_include, str) and artifact_ids_include.startswith('[') and artifact_ids_include.endswith(']'):
            artifact_ids_include = json.loads(artifact_ids_include)
        elif isinstance(artifact_ids_include, str):
            artifact_ids_include = artifact_ids_include.replace(' ','').split(',')
        elif isinstance(artifact_ids_include, int):
            artifact_ids_include = [artifact_ids_include]
        # Check validity of list
        if isinstance(artifact_ids_include, list) and not check_numeric_list(artifact_ids_include):
            raise ValueError(
                f"Invalid artifact_ids_include entered: '{artifact_ids_include}'. Must be a list of integers."
            )
        artifact_ids_include = [int(art_id) for art_id in artifact_ids_include]

    indicator_set = set()
    # fetch all artifacts in the container
    container_artifact_url = phantom.build_phantom_rest_url('artifact')
    container_artifact_url += f'?_filter_container={container_id}&page_size=0&include_all_cef_types'
    artifacts = phantom.requests.get(container_artifact_url, verify=False).json()['data']

    # First pass: collect the distinct scalar CEF values that pass the type
    # filters, so their indicator records can be fetched in bulk afterwards.
    for artifact in artifacts:
        artifact_id = artifact['id']
        if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
            for cef_key in artifact['cef']:
                cef_value = artifact['cef'][cef_key]
                data_types = artifact['cef_types'].get(cef_key, [])
                # get indicator details if valid type
                if (
                    (
                        is_valid_indicator(indicator_types_exclude, data_types, check_type='exclude')
                        and is_valid_indicator(indicator_types_include, data_types, check_type='include')
                    )
                    and
                    (
                        isinstance(cef_value, str) or isinstance(cef_value, bool) or isinstance(cef_value, int) or isinstance(cef_value, float)
                    )
                ):
                    indicator_set.add(str(cef_value))

    indicator_dictionary = get_indicator_json(indicator_set)

    # Second pass: emit output entries for every artifact value whose
    # indicator record exists and passes the tag filters.
    for artifact in artifacts:
        artifact_id = artifact['id']
        if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
            for cef_key in artifact['cef']:
                cef_value = artifact['cef'][cef_key]
                cef_value_hash = sha256(str(cef_value).encode('utf-8')).hexdigest()
                data_types = artifact['cef_types'].get(cef_key, [])
                if indicator_dictionary.get(cef_value_hash):
                    tags = indicator_dictionary[cef_value_hash]['tags']
                    if (
                        is_valid_indicator(indicator_tags_exclude, tags, check_type='exclude')
                        and is_valid_indicator(indicator_tags_include, tags, check_type='include')
                    ):
                        outputs['all_indicators'].append({
                            'cef_key': cef_key,
                            'cef_value': cef_value,
                            'artifact_id': artifact_id,
                            'data_types': data_types,
                            'tags': tags
                        })
                        for data_type in data_types:
                            # outputs will have underscores instead of spaces
                            data_type_escaped = data_type.replace(' ', '_')
                            if data_type_escaped not in outputs:
                                outputs[data_type_escaped] = []
                            outputs[data_type_escaped].append(
                                {'cef_key': cef_key, 'cef_value': cef_value, 'artifact_id': artifact_id, 'tags': tags}
                            )

    if outputs.get('all_indicators'):
        # sort the all_indicators outputs to make them more consistent
        outputs['all_indicators'].sort(key=lambda indicator: str(indicator['cef_value']))

    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
def remove_duplicates(iterable):
    """Return a list of *iterable*'s items with duplicates dropped,
    keeping the first occurrence of each item in its original position."""
    # dict preserves insertion order (Python 3.7+), so fromkeys acts as an
    # order-preserving de-duplicator in a single C-level pass.
    return list(dict.fromkeys(iterable))
def verify_my_token(user: User = Depends(auth_user)):
    """
    Verify a token, and get basic user information
    """
    # Build the response payload field by field from the resolved user.
    payload = {"token": get_token(user)}
    payload["email"] = user.email
    payload["is_admin"] = user.is_admin
    payload["restricted_job"] = user.restricted_job
    return payload
def inv_partition_spline_curve(x):
    """The inverse of partition_spline_curve().

    Maps values x >= 0 back to the spline parameter ``alpha`` using
    TensorFlow ops, so it accepts tensors of any broadcastable shape.
    """
    # Shorthand: cast a Python constant to x's dtype.
    c = lambda z: tf.cast(z, x.dtype)
    # Runtime guard: the inverse is only defined for x >= 0.
    assert_ops = [tf.Assert(tf.reduce_all(x >= 0.), [x])]
    with tf.control_dependencies(assert_ops):
        # Piecewise inverse: two quadratic-root branches for x <= 4 and
        # 4 < x < 8, and an exponential branch for x >= 8 — presumably
        # mirroring the pieces of the forward curve; confirm against
        # partition_spline_curve().
        alpha = tf.where(
            x < 8,
            c(0.5) * x + tf.where(
                x <= 4,
                c(1.25) - tf.sqrt(c(1.5625) - x + c(.25) * tf.square(x)),
                c(-1.25) + tf.sqrt(c(9.5625) - c(3) * x + c(.25) * tf.square(x))),
            c(3.75) + c(0.25) * util.exp_safe(x * c(3.6) - c(28.8)))
    return alpha
def _orbit_bbox(partitions):
""" Takes a granule's partitions 'partitions' and returns the bounding box
containing all of them. Bounding box is ll, ur format
[[lon, lat], [lon, lat]]. """
lon_min = partitions[0]['lon_min']
lat_min = partitions[0]['lat_min']
lon_max = partitions[0]['lon_max']
lat_max = partitions[0]['lat_max']
for p in partitions[1:]:
if p['lon_min'] < lon_min:
lon_min = p['lon_min']
if p['lat_min'] < lat_min:
lat_min = p['lat_min']
if p['lon_max'] > lon_max:
lon_max = p['lon_max']
if p['lat_max'] > lat_max:
lat_max = p['lat_max']
return [[lon_min, lat_min], [lon_max, lat_max]] | 8e040b549cbdf9587f08a285bd6f867ae580d584 | 23,705 |
def GetModel(name: str):
    """
    Return the model from the model pool that corresponds to the given
    name. Raises GraphicsException if the model cannot be found.

    param name: Name of a model.
    """
    # NOTE: the previous '-> None' annotation was wrong (a model is
    # returned); the annotation was removed rather than guessing the
    # pool's element type.
    if name not in _models:
        raise GraphicsException(f"No such model '{name}'.")
    return _models[name]
def db_to_dict(s_str, i=0, d=None):
    """Convert a dot-bracket string to a dictionary of indices and their pairs.

    Args:
        s_str -- str: secondary structure in dot-bracket notation
    KWargs:
        i -- int: start index (used internally by the recursion)
        d -- dict<index1, index2>: the dictionary accumulated so far
    Returns:
        dict mapping each position to its paired position, or None for
        unpaired ('.') positions
    """
    # BUG FIX: the previous mutable default (d={}) was shared between
    # top-level calls, leaking pairings from one structure into the next.
    if d is None:
        d = {}
    j = i
    while j < len(s_str):
        c = s_str[j]
        if c == "(":
            # Recurse to resolve the matching ')' for this '('.
            d = db_to_dict(s_str, j + 1, d)
            j = d[j]  # jump forward to this bracket's partner
        elif c == ")":
            # Pair this ')' with the '(' that opened the current level.
            d[i - 1] = j
            d[j] = i - 1
            if i != 0:
                return d  # Don't return from the first iteration yet
        else:
            d[j] = None  # unpaired position
        j = j + 1
    return d
def identify_event_type(event):
    """Look at event to determine type of device.

    Async friendly.
    """
    # Check the known event keys in priority order.
    for event_key in (EVENT_KEY_COMMAND, EVENT_KEY_SENSOR):
        if event_key in event:
            return event_key
    return "unknown"
def pcolormesh_nan(x: np.ndarray, y: np.ndarray, c: np.ndarray, cmap=None, axis=None):
    """handles NaN in x and y by smearing last valid value in column or row out,
    which doesn't affect plot because "c" will be masked too

    NOTE(review): mutates ``x`` and ``y`` in place — pass copies if the
    caller still needs the original coordinate arrays.
    """
    mask = np.isfinite(x) & np.isfinite(y)
    top = None     # first row index that contains any finite coordinate
    bottom = None  # last such row index seen so far
    for i, m in enumerate(mask):
        good = m.nonzero()[0]  # column indices of finite values in row i
        if good.size == 0:
            continue
        elif top is None:
            top = i
        else:
            bottom = i
        # Smear the last finite value rightwards and the first leftwards,
        # so the row has no NaN coordinates left.
        x[i, good[-1] :] = x[i, good[-1]]
        y[i, good[-1] :] = y[i, good[-1]]
        x[i, : good[0]] = x[i, good[0]]
        y[i, : good[0]] = y[i, good[0]]
    # Fill all-NaN edge rows with the mean of the nearest valid row.
    x[:top, :] = np.nanmean(x[top, :])
    y[:top, :] = np.nanmean(y[top, :])
    x[bottom:, :] = np.nanmean(x[bottom, :])
    y[bottom:, :] = np.nanmean(y[bottom, :])
    if axis is None:
        # figure() is expected from the module's matplotlib import — confirm.
        axis = figure().gca()
    # Invalid cells are masked out of c, so the smeared coordinates never show.
    return axis.pcolormesh(x, y, np.ma.masked_where(~mask, c), cmap=cmap)
def delete_post(post_id):
    """Delete a post
    :param post_id: id of the post object
    :return: redirect or 404
    """
    # Guard clause: unknown post id renders the 404 page.
    if not Post.delete_post(post_id):
        return render_template('page_not_found.html'), 404
    logger.warning('post %d has been deleted', post_id)
    return redirect(url_for('.posts'))
from typing import Callable
import functools
from typing import Any
def log_arguments(func: Callable) -> Callable:
    """
    decorate a function to log its arguments and result
    :param func: the function to be decorated
    :return: the decorator
    """
    @functools.wraps(func)
    def _wrapper(*args, **kwargs) -> Any:  # type: ignore
        outcome = func(*args, **kwargs)
        # Record the call together with its result before handing it back.
        log_args_kwargs_results(func, outcome, -1, None, *args, **kwargs)
        return outcome

    return _wrapper
from importlib import import_module
from typing import Union
from pathlib import Path
from typing import Dict
import sys
def huggingface_from_pretrained_custom(
    source: Union[Path, str], tok_config: Dict, trf_config: Dict
) -> HFObjects:
    """Create a Huggingface transformer model from pretrained weights. Will
    download the model if it is not already downloaded.

    source (Union[str, Path]): The name of the model or a path to it, such as
        'bert-base-cased'.
    tok_config (dict): Settings to pass to the tokenizer.
    trf_config (dict): Settings to pass to the transformer.
    """
    # Accept both Path objects and plain model-name strings.
    if hasattr(source, "absolute"):
        str_path = str(source.absolute())
    else:
        str_path = source
    try:
        tokenizer = AutoTokenizer.from_pretrained(str_path, **tok_config)
    except ValueError as e:
        # Fallback: instantiate an explicitly configured tokenizer class
        # when AutoTokenizer cannot infer the type from the files.
        if "tokenizer_class" not in tok_config:
            raise e
        tokenizer_class_name = tok_config["tokenizer_class"].split(".")
        tokenizer_module = import_module(".".join(tokenizer_class_name[:-1]))
        tokenizer_class = getattr(tokenizer_module, tokenizer_class_name[-1])
        tokenizer = tokenizer_class(vocab_file=str_path + "/vocab.txt", **tok_config)
    # Keep the raw vocab bytes (when the tokenizer exposes a vocab file) so
    # they can be serialized alongside the model.
    vocab_file_contents = None
    if hasattr(tokenizer, "vocab_file"):
        with open(tokenizer.vocab_file, "rb") as fileh:
            vocab_file_contents = fileh.read()
    try:
        # return_dict=True makes model outputs attribute-accessible.
        trf_config["return_dict"] = True
        config = AutoConfig.from_pretrained(str_path, **trf_config)
        transformer = AutoModel.from_pretrained(str_path, config=config)
    except OSError as e:
        try:
            # Second chance: the weights may already be cached locally.
            transformer = AutoModel.from_pretrained(str_path, local_files_only=True)
        except OSError as e2:
            # Last resort: treat the source as a hub model name and download.
            model_name = str(source)
            print("trying to download model from huggingface hub:", model_name, "...", file=sys.stderr)
            transformer = AutoModel.from_pretrained(model_name)
            print("succeded", file=sys.stderr)
    ops = get_current_ops()
    if isinstance(ops, CupyOps):
        # Move the model to GPU when thinc is configured for CUDA.
        transformer.cuda()
    return HFObjects(tokenizer, transformer, vocab_file_contents)
import sys
def recv_categorical_matrix(socket):
    """
    Receives a matrix of type string from the getml engine

    :param socket: connected socket carrying the engine's wire format
    :return: numpy array of strings with the transmitted shape
    """
    # -------------------------------------------------------------------------
    # Receive shape
    # By default, numeric data sent over the socket is big endian,
    # also referred to as network-byte-order!
    if sys.byteorder == 'little':
        # Host is little-endian: swap the incoming big-endian int32 pair.
        shape = np.frombuffer(
            socket.recv(np.nbytes[np.int32] * 2),
            dtype=np.int32
        ).byteswap().astype(np.uint64)
        size = shape[0] * shape[1]
    else:
        # Host already matches network byte order; no swap needed.
        shape = np.frombuffer(
            socket.recv(np.nbytes[np.int32] * 2),
            dtype=np.int32
        ).astype(np.uint64)
        size = shape[0] * shape[1]
    # -------------------------------------------------------------------------
    # Receive actual strings, one per matrix element (presumably row-major;
    # confirm against the engine's sender).
    mat = []
    for i in range(size):
        mat.append(recv_string(socket))
    # -------------------------------------------------------------------------
    # Cast as numpy.array and reshape
    mat = np.asarray(mat)
    return mat.reshape(shape[0], shape[1])
import time
def collect_gsso_dict(gsso):
    """ Export gsso as a dict: keys are cls, ind, all (ie cls+ind)"""
    print('Importing gsso as dict')
    start = time.time()
    # Class and individual dicts come from one helper, the combined
    # dict from another.
    cls_dict, ind_dict = _create_gsso_dict(gsso)
    all_dict = _create_gsso_dict_all(gsso)
    print("Executed in %s seconds." % str(time.time() - start))
    return cls_dict, ind_dict, all_dict
def H_squared(omega):
    """Square magnitude of the frequency filter function.

    Relies on module-level ``tau_a`` and ``tau_r`` (time constants) and on
    ``H_squared_heaviside`` — presumably a step/cutoff factor; confirm
    against their definitions elsewhere in the file.
    """
    # Product of two first-order low-pass magnitude responses, multiplied
    # by the Heaviside factor.
    return 1 / (
        (1 + (omega * tau_a) ** 2) * (1 + (omega * tau_r) ** 2)
    ) * H_squared_heaviside(omega)
def get_neighbor_distances(ntw, v0, l):
    """Get distances to the nearest vertex neighbors along
    connecting arcs.

    Parameters
    ----------
    ntw : spaghetti.Network
        spaghetti Network object.
    v0 : int
        vertex id
    l : dict
        key is tuple (start vertex, end vertex); value is ``float``.
        Cost per arc to travel, e.g. distance.

    Returns
    -------
    neighbors : dict
        key is int (vertex id); value is ``float`` (distance)

    Examples
    --------
    >>> import spaghetti as spgh
    >>> from libpysal import examples
    >>> ntw = spgh.Network(examples.get_path('streets.shp'))
    >>> neighs = spgh.util.get_neighbor_distances(ntw, 0, ntw.arc_lengths)
    >>> neighs[1]
    102.62353453439829
    """
    # Each arc incident to v0 contributes its far endpoint and its cost.
    links = ntw.enum_links_vertex(v0)
    return {
        (end if start == v0 else start): l[(start, end)]
        for (start, end) in links
    }
from typing import Any
from typing import List
def is_generic_list(annotation: Any):
    """Checks if ANNOTATION is List[...]."""
    # python<3.7 reports List in __origin__, while python>=3.7 reports list
    origin = getattr(annotation, '__origin__', None)
    return origin in (List, list)
from pathlib import Path
def create_folder(base_path: Path, directory: str, rtn_path=False):
    """Create ``directory`` (and any missing intermediates) under ``base_path``.

    Like ``mkdir -p``: all intermediate-level directories needed to contain
    the leaf directory are created, and existing directories are left alone.

    Parameters
    -----------
    base_path : pathlib.Path
        Root under which the new directory(s) are created.
    directory : str
        Location, relative to ``base_path``, of the directory to create.
    rtn_path : bool, optional
        If True, return a resolved Path() object of the created directory.

    Returns
    --------
    location_to_save : pathlib.Path or None
        Resolved path of the created directory when ``rtn_path`` is True.

    Example
    --------
    >>> create_folder(Path('/tmp'), 'data')
    """
    target = base_path / directory
    # Recursive directory creation; existing directories are not an error.
    target.mkdir(parents=True, exist_ok=True)
    return target.resolve() if rtn_path else None
import sys
import subprocess
import time
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
    """Runs a command and calls back a filter function if needed.

    Accepts all subprocess.Popen() parameters plus:
      print_stdout: If True, the command's stdout is forwarded to stdout.
      filter_fn: A function taking a single string argument called with each line
                 of the subprocess's output. Each line has the trailing newline
                 character trimmed.
      stdout: Can be any bufferable output.

    stderr is always redirected to stdout.

    NOTE(review): ``Popen`` and ``Error`` are expected from elsewhere in this
    file. The byte-at-a-time reads are compared against str literals, which
    implies a text-mode pipe (Python 2 heritage) — confirm before porting.
    """
    assert print_stdout or filter_fn
    stdout = stdout or sys.stdout
    filter_fn = filter_fn or (lambda x: None)
    # stderr is folded into stdout below, so the caller must not set it.
    assert not 'stderr' in kwargs
    kid = Popen(args, bufsize=0,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                **kwargs)
    # Do a flush of stdout before we begin reading from the subprocess's stdout
    last_flushed_at = time.time()
    stdout.flush()
    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    in_byte = kid.stdout.read(1)
    if in_byte:
        if call_filter_on_first_line:
            filter_fn(None)
        in_line = ''
        while in_byte:
            if in_byte != '\r':
                if print_stdout:
                    stdout.write(in_byte)
                if in_byte != '\n':
                    in_line += in_byte
                else:
                    # Completed line: hand it to the filter without its newline.
                    filter_fn(in_line)
                    in_line = ''
            # Flush at least 10 seconds between line writes. We wait at least 10
            # seconds to avoid overloading the reader that called us with output,
            # which can slow busy readers down.
            if (time.time() - last_flushed_at) > 10:
                last_flushed_at = time.time()
                stdout.flush()
            in_byte = kid.stdout.read(1)
        # Flush the rest of buffered output. This is only an issue with
        # stdout/stderr not ending with a \n.
        if len(in_line):
            filter_fn(in_line)
    rv = kid.wait()
    if rv:
        raise Error('failed to run command: %s' % ' '.join(args))
    return 0
import numpy
def extract_track_from_cube(nemo_cube, track_cube, time_pad, dataset_id,
                            nn_finder=None):
    """
    Extract surface track from NEMO 2d cube

    Samples the NEMO field at the track's (time, lat, lon) points using
    nearest-neighbor lookups and returns a new cube on the track's coordinates.

    NOTE(review): the ``nemo_cube.data[i_time, i_lat, i_lon]`` indexing
    implies dims (time, y, x) — confirm upstream.
    """
    # crop track time
    st = ga.get_cube_datetime(nemo_cube, 0)
    et = ga.get_cube_datetime(nemo_cube, -1)
    # NOTE do not include start instant to have non-overlapping windows
    target = ga.constrain_cube_time(
        track_cube, st - time_pad, et + time_pad, include_start=False
    )

    def find_nearest_index(src, dst, coord_name):
        # Nearest-neighbor index lookup along a 1-D coordinate via a KD-tree.
        src_arr = src.coord(coord_name).points
        dst_arr = dst.coord(coord_name).points
        time_tree = cKDTree(src_arr[:, numpy.newaxis])
        d, index = time_tree.query(dst_arr[:, numpy.newaxis], k=1)
        return index

    if nn_finder is None:
        nn_finder = NearestNeighborLatLon(nemo_cube[0, ...])
    target_lat = target.coord('latitude').points
    target_lon = target.coord('longitude').points
    i_lat, i_lon = nn_finder.search(target_lon, target_lat)
    ntime = len(nemo_cube.coord('time').points)
    if ntime == 1:
        # Static field: every track point samples the single time slice.
        i_time = numpy.zeros_like(i_lat)
    else:
        i_time = find_nearest_index(nemo_cube, target, 'time')
    values = nemo_cube.data[i_time, i_lat, i_lon]
    sname = ga.nemo_reader.map_nemo_sname_to_standard[nemo_cube.standard_name]
    cube = iris.cube.Cube(values, standard_name=sname, units=nemo_cube.units)
    # copy coordinates
    cube.add_dim_coord(target.coord('time'), 0)
    cube.add_aux_coord(target.coord('latitude'), 0)
    cube.add_aux_coord(target.coord('longitude'), 0)
    cube.add_aux_coord(target.coord('depth'))
    for coord_name in ['time', 'latitude', 'longitude', 'depth']:
        cube.coord(coord_name).attributes = {}  # discard coord attributes
    # add attributes
    cube.attributes['location_name'] = target.attributes['location_name']
    cube.attributes['dataset_id'] = dataset_id
    return cube
import re
def get_mean_series_temp(log_frame: pd.DataFrame):
    """Get temperature time series as the row-wise mean over CPU core columns."""
    core_pattern = re.compile(r"Temp:Core\d+,0")
    core_columns = [col for col in log_frame.columns if core_pattern.fullmatch(col)]
    return log_frame[core_columns].mean(axis=1)
def new_channel():
    """Instantiates a dict containing a template for an empty single-point
    channel.
    """
    # Sub-structures are built first, then assembled into the template.
    optimizer = {
        "Enabled": False,
        "Initial step size": 1.0,
        "Max value": 1.0,
        "Min value": 0.0,
        "Precision": 0.001,
        "Start value": 0.5,
    }
    relation = {
        "channel_name": "Step values",
        "lookup": None,
        "use_lookup": False,
        "variable": "x",
    }
    step_item = {
        "center": 0.0,
        "interp": "Linear",
        "n_pts": 1,
        "range_type": "Single",
        "single": 1.0,
        "span": 0.0,
        "start": 1.0,
        "step": 0.0,
        "step_type": "Fixed # of pts",
        "stop": 1.0,
        "sweep_rate": 0.0,
    }
    return {
        "channel_name": "myChannel",
        "after_last": "Goto first point",
        "alternate_direction": False,
        "equation": "x",
        "final_value": 0.0,
        "optimizer_config": optimizer,
        "relation_parameters": [relation],
        "show_advanced": False,
        "step_items": [step_item],
        "step_unit": "Instrument",
        "sweep_mode": "Off",
        "sweep_rate_outside": 0.0,
        "use_outside_sweep_rate": False,
        "use_relations": False,
        "wait_after": 0.0,
    }
from typing import Optional
def Log1p(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Construct a Log1p vertex — presumably log(1 + x) on the JVM side;
    confirm against the backing Log1pVertex class.

    :param input_vertex: the vertex
    :param label: optional label for the created vertex
    """
    # cast_to_vertex promotes plain values to vertices before delegating to
    # the JVM-backed Log1pVertex through the py4j context.
    return Vertex(context.jvm_view().Log1pVertex, label, cast_to_vertex(input_vertex))
def test(request):
    """
    Render the ueb_app test template.
    """
    # No context variables are needed for this page.
    return render(request, 'ueb_app/test.html', {})
import os
def pre_process_flights(flights_folder):
    """
    Imports and merges flight files inside input folder.

    Each file in ``flights_folder`` is read as CSV, tagged with a
    ``flight_id`` derived from its file name, augmented with the
    distance-to-touchdown feature, and concatenated into one DataFrame.
    """
    frames = []
    for flight_file in os.listdir(flights_folder):
        print('Processing flight: '+flight_file)
        df_flight = pd.read_csv(os.path.join(flights_folder, flight_file))
        df_flight['flight_id'] = flight_file.split('.')[0]
        df_flight = distance_from_touchdown(df_flight)
        print(df_flight.head())
        frames.append(df_flight)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; collecting frames and concatenating once is also O(n) rather
    # than O(n^2) in copies.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def get_node_backups(request, queryset):
    """
    Return dict with backups attribute.

    Builds the template context for a backup listing: user ordering, a
    paginator page (50 per page), and aggregate count/size.
    """
    # Resolve the requested ordering, falling back to the given defaults.
    user_order_by, order_by = get_order_by(request, api_view=VmBackupList,
                                           db_default=('-id',), user_default=('-created',))
    bkps = get_pager(request, queryset.order_by(*order_by), per_page=50)
    return {
        'order_by': user_order_by,
        'pager': bkps,
        'backups': bkps,  # same page object exposed under two keys
        'backups_count': bkps.paginator.count,
        # Sum sizes only over backups that have a recorded size.
        'backups_size': queryset.exclude(size__isnull=True).aggregate(Sum('size')).get('size__sum'),
    }
def xpath(elt, xp, ns, default=None):
    """Run an xpath expression on an element and return the first result,
    or ``default`` when the expression matches nothing."""
    matches = elt.xpath(xp, namespaces=ns)
    return matches[0] if matches else default
from app.controller import Hand
def discard(hand):
    """
    Given six cards, return the four to keep

    Scores every 4-card selection of ``hand`` against a fixed dummy cut card
    and returns the ids of the best-scoring selection.

    NOTE(review): relies on module-level ``deck`` and ``permutations``
    (itertools) — confirm the imports at the top of the file.
    """
    # Dummy cut card used only so Hand can be constructed; value 16 is
    # outside normal card values — presumably so it cannot affect scoring.
    cut_card = {
        "value": 16,
        "suit": "none",
        "rank": 0,
        "name": "none",
        "id": 'uhgfhc'
    }
    max_points = -1
    card_ids = []
    for set_of_four in permutations(hand, 4):
        cards = [deck.get(c) for c in set_of_four]
        # NOTE(review): reassigning ``hand`` shadows the parameter. The
        # running ``permutations`` iterator already captured the original
        # sequence, so iteration is unaffected — but it is fragile.
        hand = Hand(cards, cut_card)
        try:
            hand_points = hand.calculate_points()
        except Exception as e:
            # TODO: why does this happen??
            print('Exception calculating bot points: {}'.format(e))
            continue
        if hand_points > max_points:
            max_points = hand_points
            card_ids = set_of_four
    return card_ids
def check_response_stimFreeze_delays(data, **_):
    """ Checks that the time difference between the visual stimulus freezing
    and the response is positive and less than 100ms.

    Metric: M = (stimFreeze_times - response_times)
    Criterion: 0 < M < 0.100 s
    Units: seconds [s]

    :param data: dict of trial data with keys ('stimFreeze_times',
        'response_times', 'intervals', 'choice')
    """
    # NaNs in either timestamp become +inf, guaranteeing a failure of the
    # upper-bound criterion.
    metric = np.nan_to_num(data["stimFreeze_times"] - data["response_times"], nan=np.inf)
    # 1.0 where the delay falls strictly inside (0, 0.1) s, else 0.0.
    in_window = np.logical_and(metric > 0, metric < 0.1)
    passed = in_window.astype(float)
    # stimFreeze is triggered differently on no_go trials (choice == 0);
    # mark them NaN so they are ignored in the proportion-passed computation.
    passed[data["choice"] == 0] = np.nan
    assert data["intervals"].shape[0] == len(metric) == len(passed)
    return metric, passed
import vtool.keypoint as ktool
def get_invVR_aff2Ds(kpts, H=None):
    """
    Returns matplotlib keypoint transformations (circle -> ellipse)

    Example:
        >>> # Test CV2 ellipse vs mine using MSER
        >>> import vtool as vt
        >>> import cv2
        >>> import wbia.plottool as pt
        >>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
        >>> imgBGR = vt.imread(img_fpath)
        >>> imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
        >>> mser = cv2.MSER_create()
        >>> regions, bboxs = mser.detectRegions(imgGray)
        >>> region = regions[0]
        >>> bbox = bboxs[0]
        >>> vis = imgBGR.copy()
        >>> vis[region.T[1], region.T[0], :] = 0
        >>> hull = cv2.convexHull(region.reshape(-1, 1, 2))
        >>> cv2.polylines(vis, [hull], 1, (0, 255, 0))
        >>> ell = cv2.fitEllipse(region)
        >>> cv2.ellipse(vis, ell, (255))
        >>> ((cx, cy), (rx, ry), degrees) = ell
        >>> # Convert diameter to radians
        >>> rx /= 2
        >>> ry /= 2
        >>> # Make my version of ell
        >>> theta = np.radians(degrees) # opencv lives in radians
        >>> S = vt.scale_mat3x3(rx, ry)
        >>> T = vt.translation_mat3x3(cx, cy)
        >>> R = vt.rotation_mat3x3(theta)
        >>> #R = np.eye(3)
        >>> invVR = T.dot(R).dot(S)
        >>> kpts = vt.flatten_invV_mats_to_kpts(np.array([invVR]))
        >>> pt.imshow(vis)
        >>> # MINE IS MUCH LARGER (by factor of 2)) WHY?
        >>> # we start out with a unit circle not a half circle
        >>> pt.draw_keypoints(pt.gca(), kpts, pts=True, ori=True, eig=True, rect=True)
    """
    # invVR_mats = ktool.get_invV_mats(kpts, with_trans=True, with_ori=True)
    invVR_mats = ktool.get_invVR_mats3x3(kpts)
    if H is None:
        # One Affine2D per keypoint, straight from the 3x3 matrices.
        invVR_aff2Ds = [mpl.transforms.Affine2D(invVR) for invVR in invVR_mats]
    else:
        # Compose the homography with each keypoint transform.
        invVR_aff2Ds = [HomographyTransform(H.dot(invVR)) for invVR in invVR_mats]
    return invVR_aff2Ds
import sys
def is_reload(module_name: str) -> bool:
    """True if the module given by `module_name` should reload the
    modules it imports. This is the case if `enable_reload()` was called
    for the module before.
    """
    module = sys.modules[module_name]
    # enable_reload() marks a module by setting this attribute on it.
    reload_flag = module_name.replace('.', '_') + "_DO_RELOAD_MODULE"
    return hasattr(module, reload_flag)
def get_string(string_name):
    """
    Gets a string from the language file, falling back to English and
    finally to the key itself when no translation exists.
    """
    # Current language first, then the English fallback table.
    for language in (lang, "english"):
        if string_name in lang_file[language]:
            return lang_file[language][string_name]
    return string_name
import math
import logging
def build_streambed(x_max, set_diam):
    """ Build the bed particle list.

    Handles calls to add_bed_particle, checks for
    completness of bed and updates the x-extent
    of stream when the packing exceeds/under packs
    within 8mm range.

    Note: the updates to x-extent are only required
    when variable particle diameter is being used.

    Return values:
    bed_particles -- array of bed particles (one row of 7 attributes each)
    x_max -- stream extent, possibly adjusted to match the actual packing
    """
    # Upper bound on how many particles of this diameter can fit.
    max_particles = int(math.ceil( x_max / set_diam ))
    # Pre-allocate rows of 7 attributes; unused zero rows are stripped below.
    bed_particles = np.zeros([max_particles, 7],dtype=float)
    running_id = 0
    running_pack_idx = 0
    # This probably doesn't need to be a loop. NumPy!
    while True:
        running_id, running_pack_idx = add_bed_particle(set_diam,
                                                        bed_particles,
                                                        running_id,
                                                        running_pack_idx)
        if bed_complete(running_pack_idx, x_max):
            break
        else: continue
    # Bed packing does not always match x_max. Adjust if off
    bed_max = int(math.ceil(bed_particles[running_id-1][1]
                            + bed_particles[running_id-1][3]))
    if x_max != bed_max:
        msg = (
            f'Bed packing could not match x_max parameter... Updating '
            f'x_max to match packing extent: {bed_max}....'
        )
        logging.warning(msg)
        x_max = bed_max
    else: x_max = x_max
    # strip zero element particles tuples from the original array
    valid = ((bed_particles==0).all(axis=(1)))
    bed_particles = bed_particles[~valid]
    return bed_particles, x_max
def rotate_around_point_highperf_Numpy(xy, radians, origin):
    """
    Rotate a point around a given point.

    Caches the trig values and row vectors up front so nothing is
    recomputed, at some cost in readability.
    """
    offset = xy - origin
    cos_r = np.cos(radians)
    sin_r = np.sin(radians)
    row_x = np.array((cos_r, sin_r))
    row_y = np.array((-sin_r, cos_r))
    rotated = np.array((sum(offset * row_x), sum(offset * row_y)))
    return origin + rotated
def eval(cfg, env, agent):
    """
    Do the evaluation of the current agent.

    NOTE: this function shadows the builtin ``eval`` within its module.

    :param cfg: configuration object exposing ``env``, ``algo`` and ``eval_eps``
    :param env: gym-style environment with ``reset()`` and ``step(action)``
    :param agent: agent exposing ``predict(state)``
    :return: (rewards, ma_rewards) — per-episode rewards and their
        exponential moving average (factor 0.9)
    """
    print("========= Start to Evaluation ===========")
    print("Environment:{}, Algorithm:{}".format(cfg.env, cfg.algo))
    # BUG FIX: rewards/ma_rewards were previously referenced without ever
    # being defined (NameError unless leaked in as globals).
    rewards = []
    ma_rewards = []
    for i_episode in range(cfg.eval_eps):
        temp_ep_reward = 0
        state = env.reset()
        while True:
            action = agent.predict(state)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            temp_ep_reward += reward
            if done:
                break
        rewards.append(temp_ep_reward)
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1]*0.9+temp_ep_reward*0.1)
        else:
            ma_rewards.append(temp_ep_reward)
        print("Episode:{}/{} : reward:{:.1f}".format(i_episode, cfg.eval_eps, temp_ep_reward))
    print("============ Evaluation Complete =================")
    return rewards, ma_rewards
def phi(input):
    """Phi function.

    :param input:
        Float (scalar or array) value.
    :returns:
        The standard normal CDF evaluated at ``input``.
    """
    scaled = -input / np.sqrt(2)
    return erfc(scaled) / 2
def _is_correct_task(task: str, db: dict) -> bool:
"""
Check if the current data set is compatible with the specified task.
Parameters
----------
task
Regression or classification
db
OpenML data set dictionary
Returns
-------
bool
True if the task and the data set are compatible
"""
if task == "classification":
return db['NumberOfSymbolicFeatures'] == 1 and db['NumberOfClasses'] > 0
elif task == "regression":
return True
else:
return False | 49790d8e2b7a16ee9b3ca9c8bc6054fde28b3b6f | 23,737 |
import re
def is_valid_semver(version: str) -> bool:
    """Return True if *version* is a valid semantic version string.

    Accepts MAJOR.MINOR.PATCH with an optional lowercase-alphanumeric
    pre-release suffix (e.g. ``1.2.3-alpha.1``).
    """
    pattern = r'^[0-9]+\.[0-9]+\.[0-9]+(-([0-9a-z]+(\.[0-9a-z]+)*))?$'
    return re.match(pattern, version) is not None
from typing import Optional
from typing import List
async def role_assignments_for_team(
    name: str, project_name: Optional[str] = None
) -> List[RoleAssignment]:
    """Gets all role assignments for a team.

    Translates a missing team/project (KeyError from the store) into a
    not-found HTTP error.
    """
    try:
        assignments = zen_store.get_role_assignments_for_team(
            team_name=name, project_name=project_name
        )
    except KeyError as error:
        raise not_found(error) from error
    return assignments
def set_nested_dict_value(input_dict, key, val):
    """Uses '.' or '->'-splittable string as key and returns modified dict."""
    if not isinstance(input_dict, dict):
        # dangerous, just replace with dict
        input_dict = {}
    normalized = key.replace("->", ".")  # make sure no -> left
    head, sep, rest = normalized.partition('.')
    if not sep:
        # leaf level: assign directly
        input_dict[head] = val
    else:
        if head not in input_dict:
            input_dict[head] = {}
        input_dict[head] = set_nested_dict_value(input_dict[head], rest, val)
    return input_dict
from unittest.mock import Mock
def cube_1(cube_mesh):
    """ Viewable cube object shifted to 3 on x """
    cube = Mock()
    cube.name = 'cube_1'
    cube.mode = 'OBJECT'
    # Mesh wiring: to_mesh() hands back the shared cube-mesh fixture,
    # whose vertices are shifted to x == 3.
    cube.mesh_mock = cube_mesh
    cube.to_mesh.return_value = cube_mesh
    cube.mesh_mock.vertices = cube_vertices(3)
    # Transform / evaluation behaviour.
    cube.matrix_world = Matrix.Identity(4)
    cube.update_from_editmode = Mock()
    cube.evaluated_get = lambda self_obj: self_obj
    # Visibility flags.
    cube.visible_get.return_value = False
    cube.hide_viewport = False
    cube.hide_render = True
    cube.children = None
    return cube
def deserialize_model_fixture():
    """Return a stand-in for a model instance loaded from disk.

    Simulates the result of serializing a Model instance and
    deserializing it again; ``predict`` always answers ``[1]``.
    """
    class Model:
        def predict(self, values):
            return [1]

    model = Model()
    return model
def _get_bfp_op(op, name, bfp_args):
    """
    Create the bfp version of the operation op
    This function is called when a bfp layer is defined. See BFPConv2d and BFPLinear below

    The generated op is cached in the module-level ``_bfp_ops`` dict so it
    is only built once per (name, bfp_args) combination.
    """
    op_name = _get_op_name(name, **bfp_args)
    if op_name not in _bfp_ops:
        # Fix: the cache was previously written/read with the bare `name`
        # while the membership test used `op_name`, so the lookup never hit
        # and different bfp_args collided under one key. Key consistently
        # by op_name (which encodes bfp_args).
        _bfp_ops[op_name] = _gen_bfp_op(op, name, bfp_args)
    return _bfp_ops[op_name]
from typing import Sequence
def compute_dmdt(jd: Sequence, mag: Sequence, dmdt_ints_v: str = "v20200318"):
    """Compute dmdt matrix for time series (jd, mag)
    See arXiv:1709.06257

    :param jd: time stamps of the series
    :param mag: magnitudes of the series
    :param dmdt_ints_v: version key into the DMDT_INTERVALS binning table
    :return: L2-normalized 2-D dmdt histogram (dm rows, dt columns)
    """
    # pwd_for presumably produces pairwise differences — helper defined
    # elsewhere in this module; confirm against its definition.
    jd_diff = pwd_for(jd)
    mag_diff = pwd_for(mag)
    dmdt, ex, ey = np.histogram2d(
        jd_diff,
        mag_diff,
        bins=[
            DMDT_INTERVALS[dmdt_ints_v]["dt_intervals"],
            DMDT_INTERVALS[dmdt_ints_v]["dm_intervals"],
        ],
    )
    dmdt = np.transpose(dmdt)
    norm = np.linalg.norm(dmdt)
    if norm != 0.0:
        # Reuse the norm computed above instead of recomputing it.
        dmdt /= norm
    else:
        dmdt = np.zeros_like(dmdt)
    return dmdt
def all_bin_vecs(arr, v):
    """
    create an array which holds all 2^V binary vectors
    INPUT
    arr       integers to encode, shaped as a column, (2^V, 1) numpy array
    v         number of variables V
    OUTPUT
    edgeconfs all possible binary vectors, (2^V, V) numpy array
    """
    to_bits = np.vectorize(lambda value: np.binary_repr(value).zfill(v))
    # Only the first column of the encoded strings is consumed.
    bit_strings = to_bits(arr)[:, 0]
    edgeconfs = np.zeros((arr.shape[0], v), dtype=np.int8)
    for col in range(v):
        edgeconfs[:, col] = [code[col] == '1' for code in bit_strings]
    return edgeconfs
def unpack_domains(df):
    """Unpack domain codes to values.

    Maps each column listed in the module-level ``DOMAINS`` table from
    coded values to their human-readable equivalents.

    Parameters
    ----------
    df : DataFrame
    """
    df = df.copy()
    for field, domain in DOMAINS.items():
        if field not in df.columns:
            continue
        df[field] = df[field].map(domain)
    return df
from bs4 import BeautifulSoup
import codecs
import logging
import sys
def validate_saml_response(html):
    """Parse html to validate that saml a saml response was returned."""
    soup = BeautifulSoup(html, "html.parser")
    xml = None
    for field in soup.find_all("input", attrs={"name": "SAMLResponse"}):
        saml_base64 = field.get("value")
        decoded = codecs.decode(saml_base64.encode("ascii"), "base64")
        xml = decoded.decode("utf-8")
    if xml is not None:
        return xml
    logging.error(
        "Invalid data detected in SAML response."
        " View the response with the DEBUG loglevel."
    )
    logging.debug(html)
    sys.exit(1)
def generate_region_info(region_params):
    """Generate the `region_params` list in the tiling parameter dict
    Args:
        region_params (dict):
            A `dict` mapping each region-specific parameter to a list of values per FOV
    Returns:
        list:
            The complete set of `region_params` sorted by region
    """
    # All parameter lists share the same length; use one of them as the
    # FOV count.
    num_fovs = len(region_params['region_start_row'])
    return [
        {param: values[fov] for param, values in region_params.items()}
        for fov in range(num_fovs)
    ]
def is_decorator(tree, fname):
    """Test tree whether it is the decorator ``fname``.
    ``fname`` may be ``str`` or a predicate, see ``isx``.
    References of the forms ``f``, ``foo.f`` and ``hq[f]`` are supported.
    We detect:
    - ``Name``, ``Attribute`` or ``Captured`` matching the given ``fname``
      (non-parametric decorator), and
    - ``Call`` whose ``.func`` matches the above rule (parametric decorator).
    """
    if isx(tree, fname):
        return True
    return type(tree) is Call and isx(tree.func, fname)
def Nbspld1(t, x, k=3):
    """Same as :func:`Nbspl`, but returns the first derivative too.

    :param t: knot vector
    :param x: evaluation points
    :param k: spline degree (must satisfy k < len(t) - 2)
    :return: (N, dN) — basis values and their first derivatives, each of
        shape (len(x), len(t) - k - 1)
    """
    kmax = k
    if kmax > len(t)-2:
        raise Exception("Input error in Nbspl: require that k < len(t)-2")
    t = np.array(t)
    x = np.array(x)[:, np.newaxis]
    # Degree-0 basis: indicator of the half-open knot interval (t_i, t_{i+1}].
    N = 1.0*((x > t[:-1]) & (x <= t[1:]))
    dN = np.zeros_like(N)
    # Fix: `xrange` is Python 2 only; `range` is the Python 3 equivalent.
    for k in range(1, kmax+1):
        dt = t[k:] - t[:-k]
        _dt = dt.copy()
        # Guard against repeated knots (dt == 0) in the recursion weights.
        _dt[dt != 0] = 1./dt[dt != 0]
        dN = dN[:,:-1]*(x-t[:-k-1])*_dt[:-1] - dN[:,1:]*(x-t[k+1:])*_dt[1:]
        dN += N[:,:-1]*_dt[:-1] - N[:,1:]*_dt[1:]
        N = N[:,:-1]*(x-t[:-k-1])*_dt[:-1] - N[:,1:]*(x-t[k+1:])*_dt[1:]
    return N, dN
def getStyleSheet():
    """Returns a stylesheet object

    Builds a small reportlab StyleSheet1 with 'Normal', 'BodyText'
    (inherits Normal) and 'Bold' (inherits BodyText) paragraph styles.
    """
    sheet = StyleSheet1()
    sheet.add(ParagraphStyle(name='Normal',
                             fontName="Helvetica",
                             fontSize=10,
                             leading=12))
    sheet.add(ParagraphStyle(name='BodyText',
                             parent=sheet['Normal'],
                             spaceBefore=14))
    sheet.add(ParagraphStyle(name='Bold',
                             parent=sheet['BodyText'],
                             fontName="Helvetica-Bold"))
    return sheet
def parse_faq_entries(entries):
    """
    Expand the condensed FAQ entries into a flat keyword -> answer mapping.

    Exits the process if the same keyword appears in more than one entry.
    """
    parsed_entries = {}
    for entry in entries:
        answer = entry["answer"]
        for keyword in entry["keywords"]:
            if keyword in parsed_entries:
                print("Error: Found duplicate keyword '{}' in pre-configured FAQ entries.".format(keyword))
                exit(1)
            parsed_entries[keyword] = answer
    return parsed_entries
import datetime
def dh_to_dt(day_str, dh):
    """Convert a decimal hour on day *day_str* ('%Y%m%d') to a datetime.

    :param day_str: day as a '%Y%m%d' string, e.g. '20200101'
    :param dh: decimal hours past midnight (e.g. 1.5 == 01:30)
    :return: naive ``datetime.datetime`` for that instant
    """
    # Fix: the original imported `from datetime import datetime` but then
    # used `datetime.datetime.*`, which raises AttributeError; it also
    # computed an unused `t0` timedelta.
    base = datetime.datetime.strptime(day_str, '%Y%m%d')
    return base + datetime.timedelta(seconds=float(dh * 3600))
def get_statistic(key, key_type, fromtime, endtime, var_names):
    """
    Fetch the hourly report statistics for the given key and timestamp.

    Parameters:
        key: indicator value to look up; when None, every key of the given
            key_type is scanned (currently only the page dimension does this).
        key_type: ip, ipc, page, user, did
        fromtime: timestamp used to locate the per-hour statistics database
        endtime: end of the time range (not used in this function)
        var_names: statistic variable names to load
    Return:
        if key is None:
            { key (stat-leveldb index minus the leading type marker):
                {var_name1: ..., var_name2: ...} }
        else:
            {var_name1: ..., var_name2: ...}
    """
    var_names_set = set(var_names)
    logger.debug(DEBUG_PREFIX+ 'in get_statistic...')
    try:
        db_path = get_stat_db_path(fromtime)
    except Exception as e:
        return None
    if key:
        logger.debug(DEBUG_PREFIX+" 有指定特定的key")
        logger.debug(DEBUG_PREFIX+"传入获取统计数据库的key的参数key:%s, key_type:%s", str(key), str(key_type))
        key = get_click_stat_key(key, key_type)
        if key is None:
            return None
        logger.debug(DEBUG_PREFIX+"传入获取统计数据库的key是 %s", (key,))
        try:
            db = get_db(db_path)
            return get_key_stat(key, db, var_names_set)
        except KeyError:
            logger.error("db:%s don't have key: %s", db_path, key)
            return None
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        finally:
            # Fix: dict.has_key() was removed in Python 3 — use `in`.
            # Drop the db handle promptly to release the leveldb lock.
            if 'db' in locals():
                del db
    else:
        logger.debug(DEBUG_PREFIX+"会遍历所有的key")
        # url: {var_name1: , var_name2:}
        ret = dict()
        # When key is empty, iterate every key of this dimension and load
        # all requested var_names for each.
        prefix = get_stat_key_prefix(key_type)
        try:
            db = get_db(db_path)
            keys = scan_keys(prefix, db, include_value=False)
            for key in keys:
                key_stat = get_key_stat(key, db, var_names_set)
                # Strip the leading type marker byte from the stored key.
                ret[key[1:]] = key_stat
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        except Exception as e:
            logger.error(e)
            return None
        finally:
            if 'db' in locals():
                del db
        return ret
    return None
from typing import Sequence
from typing import Union
from pathlib import Path
def run(cmd: Sequence[Union[str, Path]], check=True) -> int:
    """Run arbitrary command as subprocess"""
    completed = run_subprocess(cmd, capture_stdout=False, capture_stderr=False)
    returncode = completed.returncode
    if check and returncode:
        cmd_str = " ".join(str(part) for part in cmd)
        raise PipxError(f"{cmd_str!r} failed")
    return returncode
def getDict(fname):
    """Returns the dict of values of the UserComment"""
    comment = getEXIF(fname, COMMENT_TAG)
    try:
        # Some EXIF backends wrap the tag in an object with a .value attr.
        comment = comment.value
    except Exception:
        pass
    return getDictFromString(comment)
from hetmatpy.degree_weight import default_dwwc_method
import inspect
import functools
import time
def path_count_cache(metric):
    """
    Decorator to apply caching to the DWWC and DWPC functions from
    hetmatpy.degree_weight.

    The wrapped function must accept keyword-bindable parameters named
    `graph`, `metapath`, `damping`, `dense_threshold`, `dtype` and
    `dwwc_method` (all referenced below), and return a
    (row_names, col_names, matrix) triple.
    """
    def decorator(user_function):
        signature = inspect.signature(user_function)
        @functools.wraps(user_function)
        def wrapper(*args, **kwargs):
            # Bind positional/keyword args to names so cache keys are
            # canonical regardless of how the caller passed them.
            bound_args = signature.bind(*args, **kwargs)
            bound_args.apply_defaults()
            arguments = bound_args.arguments
            graph = arguments["graph"]
            # Normalize the metapath to a metagraph object before keying.
            metapath = graph.metagraph.get_metapath(arguments["metapath"])
            arguments["metapath"] = metapath
            damping = arguments["damping"]
            cached_result = None
            start = time.perf_counter()
            # Caching only applies to HetMat graphs with an attached cache.
            supports_cache = (
                isinstance(graph, hetmatpy.hetmat.HetMat) and graph.path_counts_cache
            )
            if supports_cache:
                cache_key = {"metapath": metapath, "metric": metric, "damping": damping}
                cached_result = graph.path_counts_cache.get(**cache_key)
                if cached_result:
                    row_names, col_names, matrix = cached_result
                    # Re-apply the caller's requested density and dtype —
                    # the cached matrix may differ in both.
                    matrix = sparsify_or_densify(matrix, arguments["dense_threshold"])
                    matrix = matrix.astype(arguments["dtype"])
            if cached_result is None:
                if arguments["dwwc_method"] is None:
                    # import default_dwwc_method here to avoid circular dependencies
                    arguments["dwwc_method"] = default_dwwc_method
                row_names, col_names, matrix = user_function(**arguments)
                if supports_cache:
                    # Record compute time alongside the matrix so the cache
                    # can prioritize expensive entries.
                    runtime = time.perf_counter() - start
                    graph.path_counts_cache.set(**cache_key, matrix=matrix, runtime=runtime)
            return row_names, col_names, matrix
        return wrapper
    return decorator
from typing import Mapping
from typing import Set
from collections import defaultdict
# Fix: `import tqdm` binds the module, but the code below calls `tqdm(...)`;
# the callable progress-bar class must be imported from the package.
from tqdm import tqdm
def get_metabolite_mapping() -> Mapping[str, Set[Reference]]:
    """Make the metabolite mapping.

    Returns a dict from SMPDB pathway identifier to the set of metabolite
    References (id + name) appearing in that pathway.
    """
    metabolites_df = get_metabolite_df()
    smpdb_id_to_metabolites = defaultdict(set)
    for pathway_id, metabolite_id, metabolite_name in tqdm(metabolites_df.values, desc='mapping metabolites'):
        smpdb_id_to_metabolites[pathway_id].add(Reference(
            prefix=PREFIX, identifier=metabolite_id, name=metabolite_name,
        ))
    return smpdb_id_to_metabolites
def has_merge_conflict(commit: str, target_branch: str, remote: str = 'origin') -> bool:
    """Return True if *commit* cannot be merged cleanly into *target_branch*.

    Performs a trial `git merge --no-commit` inside a throwaway worktree so
    the real checkout is never touched.
    """
    worktree = '.git/temp-worktree'
    try:
        # A previous interrupted run may have left the worktree behind;
        # removing a non-existent worktree raises, which is safe to ignore.
        git('worktree', 'remove', '--force', worktree,
            stdout=get_dev_null(), stderr=get_dev_null())
    except GitError:
        pass
    git('worktree', 'add', worktree, f'{remote}/{target_branch}', '--detach',
        stdout=get_dev_null(), stderr=get_dev_null())
    try:
        git('merge', '--no-commit', commit,
            git_dir=worktree, stdout=get_dev_null(), stderr=get_dev_null())
    except GitError:
        return True
    else:
        return False
    finally:
        git('worktree', 'remove', '--force', worktree,
            stdout=get_dev_null(), stderr=get_dev_null())
import os
def create_folder():
    """Creates a temp_folder on the users desktop

    Returns the folder path even if creation failed (matching the
    original best-effort behaviour); Windows-only (uses USERPROFILE).
    """
    new_folder_path = os.path.join(
        os.environ['USERPROFILE'], 'Desktop', 'temp_folder')
    try:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + makedirs() sequence.
        os.makedirs(new_folder_path, exist_ok=True)
    except OSError:
        print("Error: Creating directory: " + new_folder_path)
    return new_folder_path
def load_csr(data):
    """
    Loads a PEM X.509 CSR.

    :param data: PEM-encoded certificate signing request bytes.
    :return: the parsed CSR object (presumably a ``cryptography``
        CertificateSigningRequest — ``x509``/``default_backend`` are
        imported elsewhere in this module).
    """
    return x509.load_pem_x509_csr(data, default_backend())
def parseIMACS(hdul):
    """
    Parses information from a given HDU, for data produced at IMACS

    Returns (wave, flux, error), each shaped (1, NAXIS1); the error is
    taken as 10% of the flux.
    """
    header = hdul[0].header
    start = header['CRVAL1']
    step = header['CDELT1']
    total = header['NAXIS1']
    # Reference-pixel correction (CRPIX1 is 1-based).
    corr = (header['CRPIX1'] - 1) * step
    wave = np.arange(start - corr, start + total * step - corr, step)
    wave = wave.reshape(1, wave.shape[0])
    flux = np.reshape(hdul[0].data, (1, hdul[0].data.shape[0]))
    error = flux * .1
    return (wave, flux, error)
def boll_cross_func_jit(data:np.ndarray,) -> np.ndarray:
    """
    Bollinger Band / candlestick golden-cross and death-cross state
    analysis, Numba JIT optimized.
    idx: 0 == open
         1 == high
         2 == low
         3 == close
    """
    # 20-period Bollinger Bands over the close column, 2 std-dev upper band.
    BBANDS = TA_BBANDS(data[:,3], timeperiod=20, nbdevup=2)
    # NOTE(review): `ret_boll_cross` is never defined in this function and
    # `BBANDS` is never used, so this raises NameError at runtime — the
    # cross-detection logic appears to be missing. TODO: confirm intent
    # against the original implementation before use.
    return ret_boll_cross
from cuml.linear_model import LogisticRegression
def rank_genes_groups(
    X,
    labels, # louvain results
    var_names,
    groups=None,
    reference='rest',
    n_genes=100,
    **kwds,
):
    """
    Rank genes for characterizing groups.
    Parameters
    ----------
    X : cupy.ndarray of shape (n_cells, n_genes)
        The cellxgene matrix to rank genes
    labels : cudf.Series of size (n_cells,)
        Observations groupings to consider
    var_names : cudf.Series of size (n_genes,)
        Names of genes in X
    groups : Iterable[str] (default: 'all')
        Subset of groups, e.g. ['g1', 'g2', 'g3'], to which comparison
        shall be restricted, or 'all' (default), for all groups.
    reference : str (default: 'rest')
        If 'rest', compare each group to the union of the rest of the group.
        If a group identifier, compare with respect to this group.
    n_genes : int (default: 100)
        The number of genes that appear in the returned tables.
    """
    #### Wherever we see "adata.obs[groupby], we should just replace w/ the groups"
    # for clarity, rename variable
    if groups == 'all':
        groups_order = 'all'
    elif isinstance(groups, (str, int)):
        raise ValueError('Specify a sequence of groups')
    else:
        groups_order = list(groups)
        # Group labels are compared as strings throughout; normalize ints.
        if isinstance(groups_order[0], int):
            groups_order = [str(n) for n in groups_order]
        if reference != 'rest' and reference not in set(groups_order):
            groups_order += [reference]
    if (
        reference != 'rest'
        and reference not in set(labels.cat.categories)
    ):
        cats = labels.cat.categories.tolist()
        raise ValueError(
            f'reference = {reference} needs to be one of groupby = {cats}.'
        )
    # Boolean masks per group over the cells (helper defined elsewhere).
    groups_order, groups_masks = select_groups(labels, groups_order)
    original_reference = reference
    n_vars = len(var_names)
    # for clarity, rename variable
    n_genes_user = n_genes
    # make sure indices are not OoB in case there are less genes than n_genes
    if n_genes_user > X.shape[1]:
        n_genes_user = X.shape[1]
    # in the following, n_genes is simply another name for the total number of genes
    n_genes = X.shape[1]
    n_groups = groups_masks.shape[0]
    # ns[i] = number of cells in group i (currently informational only).
    ns = cp.zeros(n_groups, dtype=int)
    for imask, mask in enumerate(groups_masks):
        ns[imask] = cp.where(mask)[0].size
    if reference != 'rest':
        ireference = cp.where(groups_order == reference)[0][0]
    reference_indices = cp.arange(n_vars, dtype=int)
    rankings_gene_scores = []
    rankings_gene_names = []
    # Perform LogReg
    # if reference is not set, then the groups listed will be compared to the rest
    # if reference is set, then the groups listed will be compared only to the other groups listed
    reference = groups_order[0]
    if len(groups) == 1:
        raise Exception('Cannot perform logistic regression on a single cluster.')
    # Restrict X / labels to the cells belonging to the requested groups.
    grouping_mask = labels.astype('int').isin(cudf.Series(groups_order).astype('int'))
    grouping = labels.loc[grouping_mask]
    X = X[grouping_mask.values, :] # Indexing with a series causes issues, possibly segfault
    # NOTE(review): `y` below is unused — clf.fit() uses `grouping` directly;
    # also `labels.loc[grouping]` indexes by label *values*, not the mask.
    # Confirm this line is intentional.
    y = labels.loc[grouping]
    clf = LogisticRegression(**kwds)
    # cuml expects host data here; coefficients give per-gene scores.
    clf.fit(X.get(), grouping.to_array().astype('float32'))
    scores_all = cp.array(clf.coef_).T
    for igroup, group in enumerate(groups_order):
        if len(groups_order) <= 2:  # binary logistic regression
            scores = scores_all[0]
        else:
            scores = scores_all[igroup]
        # Top-n selection: argpartition for the candidates, argsort to order.
        partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]
        partial_indices = cp.argsort(scores[partition])[::-1]
        global_indices = reference_indices[partition][partial_indices]
        rankings_gene_scores.append(scores[global_indices].get()) ## Shouldn't need to take this off device
        rankings_gene_names.append(var_names[global_indices].to_pandas())
        if len(groups_order) <= 2:
            break
    groups_order_save = [str(g) for g in groups_order]
    if (len(groups) == 2):
        groups_order_save = [g for g in groups_order if g != reference]
    # Pack per-group results as numpy record arrays keyed by group name.
    scores = np.rec.fromarrays(
        [n for n in rankings_gene_scores],
        dtype=[(rn, 'float32') for rn in groups_order_save],
    )
    names = np.rec.fromarrays(
        [n for n in rankings_gene_names],
        dtype=[(rn, 'U50') for rn in groups_order_save],
    )
    return scores, names, original_reference
def init_graph_handler():
    """Init GraphHandler.

    Loads the graph proto and registers it under its name in a fresh
    GraphHandler instance.
    """
    graph = get_graph_proto()
    handler = GraphHandler()
    handler.put({graph.name: graph})
    return handler
def add_average_column(df, *, copy: bool = False):
    """Add a column averaging the power on all channels.

    Parameters
    ----------
    df : DataFrame
        PSD dataframe; the metadata columns ('participant', 'session',
        'run', 'phase', 'idx') are excluded from the average.
    copy : bool
        If True, operate on (and return) a copy of ``df``.

    Returns
    -------
    DataFrame
        ``df`` with the across-channel mean stored in column 'avg'.
    """
    _check_type(copy, (bool,), item_name="copy")
    if copy:
        df = df.copy()
    metadata_columns = ("participant", "session", "run", "phase", "idx")
    ch_names = [col for col in df.columns if col not in metadata_columns]
    df["avg"] = df[ch_names].mean(axis=1)
    return df
import fnmatch
def _is_globbed(name, glob):
"""
Return true if given name matches the glob list.
"""
if not glob:
return True
return any((fnmatch.fnmatchcase(name, i) for i in glob)) | 305116367884c8acc9c6f52a73c2cb116abaadbe | 23,767 |
import struct
def read_vec_flt(file_or_fd):
    """[flt-vec] = read_vec_flt(file_or_fd)
    Read kaldi float vector, ascii or binary input,
    Parameters
    ----------
    file_or_fd : obj
        An ark, gzipped ark, pipe or opened file descriptor.
    Raises
    ------
    ValueError
        Unsupported data-type of the input file.
    """
    fd = open_or_fd(file_or_fd)
    try:
        binary = fd.read(2)
        if binary == b"\0B":  # binary flag
            # Data type,
            type = fd.read(3)
            if type == b"FV ":
                sample_size = 4  # floats
            elif type == b"DV ":
                sample_size = 8  # doubles
            else:
                # Fix: unknown markers previously hit an UnboundLocalError;
                # raise the documented ValueError instead.
                raise ValueError("UnsupportedDataType")
            # Dimension,
            assert fd.read(1) == b"\4"  # int-size
            vec_size = struct.unpack("<i", fd.read(4))[0]  # vector dim
            # Read whole vector,
            buf = fd.read(vec_size * sample_size)
            if sample_size == 4:
                ans = np.frombuffer(buf, dtype="float32")
            else:
                ans = np.frombuffer(buf, dtype="float64")
        else:  # ascii,
            arr = (binary + fd.readline()).strip().split()
            try:
                arr.remove("[")
                arr.remove("]")  # optionally
            except ValueError:
                pass
            ans = np.array(arr, dtype=float)
    finally:
        # Fix: close in every code path — the original leaked the file
        # descriptor when returning from the binary branch.
        if fd is not file_or_fd:
            fd.close()  # cleanup
    return ans
def arg_export(name):
    """Export an argument set.

    Registers the decorated function in the module-level ``_ARG_EXPORTS``
    registry under ``name`` and guarantees it carries an ``arg_defs`` list.
    The function object itself is returned unwrapped.
    """
    def _wrapper(func):
        _ARG_EXPORTS[name] = func
        # hasattr() is the idiomatic (and cheaper) attribute test;
        # `'arg_defs' not in dir(func)` built a full attribute list.
        if not hasattr(func, 'arg_defs'):
            func.arg_defs = []
        return func
    return _wrapper
from collections import OrderedDict
def get_od_base( mode = "H+S & B3LYP+TPSS0"): # od is OrderedDict()
    """
    initial parameters are prepared.
    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
           "H+S & B3LYP"       --> ["B3LYP"] with separation of H and S
           "H+S & TPSS0"       --> ["TPSS0"] with separation of H and S

    Returns (od, aod): the model-option dict and the in/out file dict.
    """
    # Fix: the comparison string had no spaces while the documented default
    # argument does, so calling with the default always raised ValueError.
    # Compare with whitespace stripped so both spellings are accepted.
    if mode.replace(" ", "") == "H+S&B3LYP+TPSS0":
        od = OrderedDict()
        od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
        od['H + S'] = [True]
        od['CV Mode'] = ['10*5KF/LOO']
        od['Em type'] = ['Chemical potential']
        od['Regularization'] = ['None']
        od['Bounds/Constraints'] = ['None']
        aod = OrderedDict()
        aod['in_file'] = "sheet/EmBT-xM4.csv"
        aod['out_file'] = "sheet/out_" + mode + ".csv"
    else:
        raise ValueError("Not supported: {}".format( mode))
    return od, aod
import itertools
def generate_all_specs(
    population_specs, treatment_specs, outcome_specs, model_specs, estimator_specs
):
    """
    Generate all combinations of population, treatment,
    outcome, causal model and estimator

    Model specs are first expanded to GML form via CausalGraph, and
    invalid combinations are filtered out.
    """
    graph = CausalGraph(treatment_specs, outcome_specs, model_specs)
    gml_model_specs = graph.create_gml_model_specs()
    all_specs = itertools.product(
        population_specs, treatment_specs, outcome_specs, gml_model_specs,
        estimator_specs
    )
    return list(filter(is_valid_spec, all_specs))
def wsFoc(r,psi,L1,z0,alpha):
    """Return optimum focal surface height at radius r
    as given by Chase & Van Speybroeck
    """
    tan_sq = tan(alpha)**2
    return 0.0625 * (psi + 1) * (r**2 * L1 / z0**2) / tan_sq
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, draw_function=draw_lines, **kwargs):
    """
    `img` should be the output of a Canny transform.
    draw_function: callable that accepts (image, lines) and renders the
        lanes onto the image. Default: draw_lines().
    Returns an image with hough lines drawn.
    """
    # rho must be at least 1 pixel for HoughLinesP.
    lines = cv2.HoughLinesP(
        img,
        max(rho, 1),
        theta,
        threshold,
        np.array([]),
        minLineLength=min_line_len,
        maxLineGap=max_line_gap,
    )
    height, width = img.shape[0], img.shape[1]
    line_img = np.zeros((height, width, 3), dtype=np.uint8)
    draw_function(line_img, lines, **kwargs)
    return line_img
from .models import Topography
def instances_to_topographies(topographies, surfaces, tags):
    """Returns a queryset of topographies, based on given instances

    Given topographies, surfaces and tags are resolved and
    all topographies are returned which are either
    - explicitly given
    - given indirectly by a surface
    - given indirectly by a tag, if the topography is tagged accordingly
    - given indirectly by a tag, if its surface is tagged accordingly

    Parameters
    ----------
    topographies: sequence of topographies
    surfaces: sequence of surfaces
    tags: sequence of tags

    Returns
    -------
    Queryset of topography, distinct
    """
    topo_ids = [topo.id for topo in topographies]
    surf_ids = [surface.id for surface in surfaces]
    tag_ids = [tag.id for tag in tags]
    queryset = Topography.objects.filter(id__in=topo_ids)
    queryset |= Topography.objects.filter(surface__in=surf_ids)
    queryset |= Topography.objects.filter(surface__tags__in=tag_ids)
    queryset |= Topography.objects.filter(tags__in=tag_ids)
    return queryset.distinct().order_by('id')
from typing import Optional
from typing import Union
from typing import Tuple
import math
def plot_histogram(
    s: pd.Series,
    *,
    number_bins: Optional[int] = None,
    bin_range: Optional[Tuple[float, float]] = None,
    figsize: Optional[Tuple[int, int]] = (8, 6),
    bin_width: Optional[int] = None,
    edgecolor: Optional[str] = '#ffffff',
    linewidth: Optional[int] = 1,
    bin_label_bool: Optional[bool] = False,
    color: Optional[str] = '#0077bb'
) -> Tuple[plt.Figure, axes.Axes]:
    """
    Parameters
    ----------
    s : pd.Series
        The input series.
    number_bins : Optional[int] = None
        The number of equal-width bins in the range s.max() - s.min().
    bin_range : Optional[Tuple[float, float]] = None
        The lower and upper range of the bins. If not provided, range is
        (s.min(), s.max()).
    figsize : Optional[Tuple[int, int]] = (8, 6),
        The figure size width, height (inch).
    bin_width : Optional[int] = None,
        The width of the bin in same units as the series s.
    edgecolor : Optional[str] = '#ffffff',
        The hexadecimal color value for the bar edges.
    linewidth : Optional[int] = 1,
        The bar edges line width (point).
    bin_label_bool : Optional[bool] = False
        If True, label the bars with count and percentage of total.
    color : Optional[str] = '#0077bb'
        The color of the bar faces.
    Returns
    -------
    fig, ax : Tuple[plt.Figure, axes.Axes]
    Examples
    --------
    Example 1
    # Create a series of random floats, normal distribution,
    # with the default parameters.
    >>> import datasense as ds
    >>> s = ds.random_data()
    >>> fig, ax = ds.plot_histogram(s=s)
    Example 2
    # Create a series of random integers, integer distribution, size = 113,
    # min = 0, max = 13.
    >>> import datasense as ds
    >>> s = ds.random_data(
    >>>     distribution='randint',
    >>>     size=113,
    >>>     low=0,
    >>>     high=14
    >>> )
    >>> fig, ax = ds.plot_histogram(s=s)
    Example 3
    # Create a series of random integers, integer distribution, size = 113,
    # min = 0, max = 13.
    # Set histogram parameters to control bin width.
    >>> s = ds.random_data(
    >>>     distribution='randint',
    >>>     size=113,
    >>>     low=0,
    >>>     high=14
    >>> )
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=1
    >>> )
    Example 4
    # Create a series of random integers, integer distribution, size = 113,
    # low = 0, high = 13.
    # Set histogram parameters to control bin width and plotting range.
    >>> s = ds.random_data(
    >>>     distribution='randint',
    >>>     size=113,
    >>>     low=0,
    >>>     high=13
    >>> )
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=1,
    >>>     bin_range=(0, 10)
    >>> )
    Example 5
    # Create a series of random floats, size = 113,
    # average = 69, standard deviation = 13.
    # Set histogram parameters to control bin width and plotting range.
    >>> s = ds.random_data(
    >>>     distribution='norm',
    >>>     size=113,
    >>>     loc=69,
    >>>     scale=13
    >>> )
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=5,
    >>>     bin_range=(30, 110)
    >>> )
    Example 6
    # Create a series of random floats, size = 113,
    # average = 69, standard deviation = 13.
    # Set histogram parameters to control bin width, plotting range, labels.
    # Set colour of the bars.
    >>> s = ds.random_data(
    >>>     distribution='norm',
    >>>     size=113,
    >>>     loc=69,
    >>>     scale=13
    >>> )
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=5,
    >>>     bin_range=(30, 110),
    >>>     figsize=(10,8),
    >>>     bin_label_bool=True,
    >>>     color='#33bbee'
    >>> )
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # Derive the bin count from the requested bin width: over the data
    # extent when no range is given, otherwise over the explicit range.
    # (A redundant `bin_range = bin_range` no-op was removed.)
    if bin_width and not bin_range:
        number_bins = math.ceil((s.max() - s.min()) / bin_width)
    elif bin_width and bin_range:
        number_bins = int((bin_range[1] - bin_range[0]) / bin_width)
    counts, bins, patches = ax.hist(
        x=s,
        bins=number_bins,
        range=bin_range,
        edgecolor=edgecolor,
        linewidth=linewidth,
        color=color
    )
    if bin_label_bool:
        ax.set_xticks(bins)
        ax.xaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
        bin_centers = 0.5 * np.diff(bins) + bins[:-1]
        for count, x in zip(counts, bin_centers):
            # Absolute count below each bar...
            ax.annotate(
                text=f'{int(count)}',
                xy=(x, 0),
                xytext=(0, -18),
                xycoords=(
                    'data',
                    'axes fraction'
                ),
                textcoords='offset points',
                va='top',
                ha='center'
            )
            # ...and percentage of total on a second line.
            percent = f'{(100 * float(count) / counts.sum()):0.0f} %'
            ax.annotate(
                text=percent,
                xy=(x, 0),
                xytext=(0, -32),
                xycoords=(
                    'data',
                    'axes fraction'
                ),
                textcoords='offset points',
                va='top',
                ha='center'
            )
    return (fig, ax)
def get_tracks():
    """
    Returns all tracks on the minerva DB
    """
    # connect and materialize the cursor into a list
    db = connect_minerva_db()
    return list(db.tracks.find())
def mult_pair(pair):
    """Return the product of two, potentially large, numbers."""
    first, second = pair[0], pair[1]
    return first * second
def get_only_filename(file_list):
    """
    Get filename from file's path and return list that has only filename.
    Input:
        file_list: List. file's paths list.
    Attribute:
        file_name: String. "01.jpg"
        file_name_without_ext: String. "01"
    Return:
        filename_list: Only filename list.
    """
    # basename = text after the last '/', stem = text before the first '.'
    return [path.split("/")[-1].split(".")[0] for path in file_list]
import _ctypes
def simple_calculate_hmac(sym_key, message,
                          digest_algo=DIGEST_ALGORITHM.SHA256):
    """Calculates a HMAC of given message using symmetric key.

    Thin ctypes wrapper over the native yaca library: the C side
    allocates the MAC buffer, which is copied to Python bytes and then
    freed with yaca_free. ``sym_key`` is passed through to the native
    call unchanged — presumably a yaca key handle; confirm against the
    yaca bindings.
    """
    # NULL is passed for an empty message (helper defined elsewhere).
    message_param = _get_char_param_nullify_if_zero(message)
    mac = _ctypes.POINTER(_ctypes.c_char)()
    mac_length = _ctypes.c_size_t()
    _lib.yaca_simple_calculate_hmac(digest_algo.value, sym_key,
                                    message_param, len(message),
                                    _ctypes.byref(mac),
                                    _ctypes.byref(mac_length))
    # Copy out of the C buffer before releasing it.
    mac_bytes = mac[:mac_length.value]
    _lib.yaca_free(mac)
    return mac_bytes
def convert_from_sliced_object(data):
    """Fix the memory of multi-dimensional sliced object."""
    is_ndarray_view = (
        isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray)
    )
    if not is_ndarray_view:
        return data
    if data.flags.c_contiguous:
        return data
    _log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
                 "due to it will double the peak memory cost in LightGBM.")
    return np.copy(data)
import pwd
import sys
import tempfile
import os
import filecmp
import subprocess
def write_file(conf, data):
    """Write the data to the file specified in the conf.

    If there is an existing file in the destination, compare the new
    contents with the existing contents. Return True if there is a
    difference (i.e. the destination was (re)installed), False otherwise.
    Exits the process if the configured owner user/group does not exist.
    """
    owner = conf.get('owner')
    # Check for user and group id in the environment.
    try:
        uid = pwd.getpwnam(owner).pw_uid
    except KeyError:
        LOG.error('The specified user does not exist: {}'.format(owner))
        sys.exit(1)
    try:
        gid = pwd.getpwnam(owner).pw_gid
    except KeyError:
        LOG.error('The specified group does not exist: {}'.format(owner))
        sys.exit(1)
    dest = conf.get('dest')
    perm = int(conf.get('perm', 0))
    with tempfile.NamedTemporaryFile(prefix='kolla-mesos',
                                     delete=False) as tf:
        tf.write(data.encode('utf-8'))
        tf.flush()
        tf_name = tf.name
    try:
        if os.path.exists(dest) and filecmp.cmp(tf_name, dest, shallow=False):
            LOG.debug('write_file: %s not changed', dest)
            return False
        try:
            # NOTE(review): shell=True with interpolated values — dest/perm
            # come from config; ensure they are trusted or switch to the
            # list form of check_call.
            inst_cmd = ' '.join(['sudo', 'install', '-v',
                                 '--no-target-directory',
                                 '--group=%s' % gid, '--mode=%s' % perm,
                                 '--owner=%s' % uid, tf_name, dest])
            subprocess.check_call(inst_cmd, shell=True)
        except subprocess.CalledProcessError as exc:
            LOG.error(exc)
            LOG.exception(inst_cmd)
        return True
    finally:
        # Fix: the temp file was created with delete=False and previously
        # never removed, leaking one file per call.
        try:
            os.unlink(tf_name)
        except OSError:
            pass
def stat_helper(path):
    """Return ``path.stat()``, or None when the file does not exist.

    Unlike ``os.path.exists`` -- which reports False for PermissionError
    (or any other exception) -- this only maps FileNotFoundError to None
    and lets every other exception propagate, so a present-but-unreadable
    file is not misreported as absent.
    """
    try:
        result = path.stat()
    except FileNotFoundError:
        return None
    return result
import datetime
from typing import Tuple


def create_beacon_and_now_datetime(
    game_name: str = "st",
    waiting_time: float = 12.0,
    platform_name: str = "pc"
) -> "Tuple[beacons.BeaconBase, datetime.datetime]":
    """Return a BeaconBase instance whose start time is the current UTC time.

    Fixes the previous ``from datetime import datetime`` import, which bound
    the *class* to the name ``datetime`` and made ``datetime.datetime.now``
    and ``datetime.timezone`` raise AttributeError at call time.

    Args:
        game_name: Short game identifier forwarded to ``create_beacon``.
        waiting_time: Waiting time forwarded to ``create_beacon``.
        platform_name: Platform identifier, e.g. "pc".

    Returns:
        Tuple of the created beacon and the timezone-aware,
        microsecond-truncated timestamp used as its start time.
    """
    # Truncate microseconds so the timestamp round-trips cleanly.
    now = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0)
    beacon = create_beacon(game_name=game_name,
                           waiting_time=waiting_time,
                           platform_name=platform_name,
                           start=now)
    return beacon, now
def validate(number, check_country=True):
    """Checks to see if the number provided is a valid IBAN. The country-
    specific check can be disabled with the check_country argument.

    Returns the compact representation of the number on success; raises a
    validation error (e.g. InvalidFormat, or a checksum error from
    mod_97_10) otherwise.
    """
    number = compact(number)
    # ensure that checksum is valid: ISO 7064 mod 97-10 over the number with
    # the first four characters (country code + check digits) moved to the end
    mod_97_10.validate(number[4:] + number[:4])
    # look up the number
    info = _ibandb.info(number)
    # check if the bban part of number has the correct structure
    bban = number[4:]
    if not _struct_to_re(info[0][1].get('bban', '')).match(bban):
        raise InvalidFormat()
    # check the country-specific module if it exists
    if check_country:
        module = _get_cc_module(number[:2])
        if module:
            module.validate(number)
    # return the compact representation
    return number
from numpy import array


def read_group(fname):
    """Read the symmetry group from a 'rot_perms'-style enum.x output file.

    :arg fname: path to the file to read the group from.
    """
    perms = []
    with open(fname) as handle:
        for line_no, line in enumerate(handle):
            if line_no <= 5:
                # Skip the fixed six-line header.
                continue
            tokens = line.split()
            if 'Perm #:' in line:
                # A new permutation begins; the leading four tokens are labels.
                perms.append([int(tok) for tok in tokens[4:]])
            else:
                # Continuation line of the current permutation.
                perms[-1].extend(int(tok) for tok in tokens)
    # Convert from 1-based to 0-based indices.
    return list(map(list, array(perms) - 1))
import importlib
import os
import sys


def inject_path(path):
    """Import (or re-import) the Python module at *path*.

    The module's directory is temporarily appended to ``sys.path`` when it
    is not already present. The module is reloaded on every subsequent call
    so debug code can be edited while the application is running.

    Args:
        path: Filesystem path to a ``.py`` file.

    Returns:
        None on success, or the raised exception object on failure.
    """
    try:
        dirname = os.path.dirname(path)
        if dirname not in sys.path:
            exists_in_sys = False
            sys.path.append(dirname)
        else:
            exists_in_sys = True
        module_name = os.path.splitext(os.path.split(path)[1])[0]
        if module_name in sys.modules:
            # `reload` was a Python 2 builtin (NameError on Python 3);
            # use importlib.reload instead.
            importlib.reload(sys.modules[module_name])
        else:
            __import__(module_name)
        if not exists_in_sys:
            sys.path.remove(dirname)
    except Exception as e:
        return e
def find_last_layer(model):
    """Return the last layer of *model*, or None when it has no layers.

    Args:
        model: Object exposing a ``layers`` sequence.

    Returns:
        The final element of ``model.layers``, or None if empty.
    """
    layers = model.layers
    return layers[-1] if layers else None
def seconds_to_time(sec):
    """Convert a duration in seconds to an ``MM:SS`` string.

    The previous docstring claimed H:M:S, but the implementation emits
    minutes and seconds only; minutes are not wrapped into hours, so
    3725 seconds formats as "62:05".
    """
    return "%02d:%02d" % divmod(sec, 60)
def _compute_nfp_real(l, u, counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
raise ValueError("l must be less or equal to u")
return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1]) | 40abf796ce116a92cd89c813a6343c917e413707 | 23,789 |
from typing import Dict
from typing import List
from typing import Set
from typing import Union


def is_equal_subset(
    subset: Union[Dict, List, Set], superset: Union[Dict, List, Set]
) -> bool:
    """Recursively check that *subset* is contained in *superset*.

    Dicts require every key of ``subset`` to exist in ``superset`` with a
    (recursively) matching value; lists/sets require every item to match
    some item of ``superset``; plain values compare by equality.
    """
    if isinstance(subset, dict):
        for key, val in subset.items():
            if key not in superset or not is_equal_subset(val, superset[key]):
                return False
        return True
    if isinstance(subset, (list, set)):
        for subitem in subset:
            if not any(is_equal_subset(subitem, item) for item in superset):
                return False
        return True
    # Plain values compare directly.
    return subset == superset
import copy
import itertools


def concatenate_over(argname):
    """Decorator factory: "vectorize" a function over the argument *argname*.

    When the decorated function receives a list for *argname*, it is called
    once per element and the resulting lists are concatenated; any other
    value is passed through with a single call.

    Fixes the previous bare ``copy(arg_map)`` call, which invoked the
    ``copy`` *module* (TypeError) instead of ``copy.copy``.
    """
    def _prepare_args(arg_map, value):
        # Shallow-copy so each per-element call gets its own mapping.
        params = copy.copy(arg_map)
        params[argname] = value
        return params

    @decorator
    def _concatenate_over(func, *args, **kwargs):
        """Call *func*, fanning out over a list-valued *argname*."""
        arg_map = map_parameters_in_fn_call(args, kwargs, func)
        value = arg_map.get(argname)
        if isinstance(value, list):
            return list(
                itertools.chain.from_iterable(
                    func(**_prepare_args(arg_map, v)) for v in value))
        else:
            return func(**arg_map)

    return _concatenate_over
import os
import sys


def checkFile(path: str):
    """Return *path* after verifying that it exists.

    Exits the program when the path does not exist. Only used when a file
    is required to exist.
    """
    if not os.path.exists(path):
        print('File: "' + path + '", is not readable.')
        # Exit with a non-zero status: the previous exit(0) signalled
        # success to callers even though a required file was missing.
        sys.exit(1)
    return path
import ctypes


def logicalToPhysicalPoint(window, x, y):
    """Converts the logical coordinates of a point in a window to physical coordinates.
    This should be used when points are received directly from a window that is not DPI aware.
    @param window: The window handle.
    @param x: The logical x coordinate.
    @type x: int
    @param y: The logical y coordinate.
    @type y: int
    @return: The physical x and y coordinates.
    @rtype: tuple of (int, int)
    """
    # NOTE(review): _logicalToPhysicalPoint is presumably a bound
    # user32 LogicalToPhysicalPoint function pointer that is falsy when
    # unavailable (older Windows) -- confirm where it is initialized.
    if not _logicalToPhysicalPoint:
        # API unavailable: return the coordinates unchanged.
        return x, y
    # The Win32 call mutates the POINT structure in place.
    point = ctypes.wintypes.POINT(x, y)
    _logicalToPhysicalPoint(window, ctypes.byref(point))
    return point.x, point.y
import numba


def _node2vec_walks(Tdata, Tindptr, Tindices,
                    sampling_nodes,
                    walklen,
                    return_weight,
                    neighbor_weight):
    """
    Create biased random walks from the transition matrix of a graph
    in CSR sparse format. Bias method comes from Node2Vec paper.
    Parameters
    ----------
    Tdata : 1d np.array
        CSR data vector from a sparse matrix. Can be accessed by M.data
    Tindptr : 1d np.array
        CSR index pointer vector from a sparse matrix.
        Can be accessed by M.indptr
    Tindices : 1d np.array
        CSR column vector from a sparse matrix.
        Can be accessed by M.indices
    sampling_nodes : 1d np.array of int
        List of node IDs to start random walks from.
        Is generally equal to np.arange(n_nodes) repeated for each epoch
    walklen : int
        length of the random walks
    return_weight : float in (0, inf]
        Weight on the probability of returning to node coming from
        Having this higher tends the walks to be
        more like a Breadth-First Search.
        Having this very high (> 2) makes search very local.
        Equal to the inverse of p in the Node2Vec paper.
    neighbor_weight : float in (0, inf]
        Weight on the probability of visitng a neighbor node
        to the one we're coming from in the random walk
        Having this higher tends the walks to be
        more like a Depth-First Search.
        Having this very high makes search more outward.
        Having this very low makes search very local.
        Equal to the inverse of q in the Node2Vec paper.
    Returns
    -------
    out : 2d np.array (n_walks, walklen)
        A matrix where each row is a biased random walk,
        and each entry is the ID of the node
    """
    n_walks = len(sampling_nodes)
    res = np.empty((n_walks, walklen), dtype=Tindices.dtype)
    # Each row i is an independent walk; prange parallelizes across walks.
    for i in numba.prange(n_walks):
        # Current node (each element is one walk's state)
        state = sampling_nodes[i]
        res[i, 0] = state
        # Do one normal step first
        # (the node2vec bias needs a "previous node", which the start lacks).
        state = _node2vec_first_step(state, Tdata, Tindices, Tindptr)
        for k in range(1, walklen-1):
            # Write state
            res[i, k] = state
            # _node2vec_inner reads res[i, k-1] as the previous node to
            # apply the return/neighbor biases -- confirm in its definition.
            state = _node2vec_inner(
                res, i, k, state,
                Tdata, Tindices, Tindptr,
                return_weight, neighbor_weight
            )
        # Write final states
        res[i, -1] = state
    return res
def schedule_prettify(schedule):
    """Format a single schedule entry as a bot message.

    Expects a list of the form
    [weekday, time, lesson type, lesson name, teacher name, location],
    e.g. ['Чт', '13:00 – 14:30', 'ПЗ', 'Физическая культура', '', 'Кафедра'].
    An empty/falsy input yields a "no classes today" message.
    """
    if not schedule:
        return 'Сегодня занятий нету'
    time = '⌚ ' + schedule[1] + '\n'
    schedule_type = schedule[2] if schedule[2] else ''
    if schedule[3]:
        subject = '📝 ' + schedule[-3] + '\n'
    else:
        # Fall back to the lesson type when no lesson name is given.
        subject = '📝 ' + schedule_type + '\n'
    teacher = '👤 ' + schedule[4] + '\n' if schedule[4] else ''
    location = '📍 ' + schedule[5] + '\n' if schedule[5] else ''
    return teacher + subject + time + location + '\n'
def autodelegate(prefix=''):
    """Build a dispatcher method that forwards to the method ``prefix + arg``.

    The returned method takes a single path argument. For example, with
    ``urls = ('/prefs/(.*)', 'prefs')`` and a class declaring
    ``GET = autodelegate('GET_')``, visiting ``/prefs/password`` calls
    ``GET_password(self)``, while ``/prefs/password/change`` calls
    ``GET_password(self, '/change')``. A missing handler -- or one that
    rejects the extra argument with TypeError -- yields ``notfound()``.
    """
    def internal(self, arg):
        first, sep, rest = arg.partition('/')
        method_name = prefix + first
        call_args = ['/' + rest] if sep else []
        if not hasattr(self, method_name):
            return notfound()
        try:
            return getattr(self, method_name)(*call_args)
        except TypeError:
            return notfound()
    return internal
def _compute_new_static_size(image, min_dimension, max_dimension):
    """Compute the statically-known target shape for the resize_to_range method.

    A single scale factor is chosen so that the smaller image side reaches
    at least ``min_dimension`` when possible, while the larger side never
    exceeds ``max_dimension`` (the max constraint wins on conflict; the
    image is never shrunk below scale 1.0 by the min constraint alone).

    Args:
        image: Tensor with a fully-defined static shape [height, width, channels].
        min_dimension: Desired minimum for the smaller image side.
        max_dimension: Hard cap for the larger image side.

    Returns:
        A constant tensor holding [new_height, new_width, num_channels].
    """
    orig_height, orig_width, num_channels = image.get_shape().as_list()
    # Factor that would bring the larger side exactly to max_dimension.
    shrink_factor = max_dimension / float(max(orig_height, orig_width))
    # Factor that would bring the smaller side to min_dimension (never < 1).
    grow_factor = max(min_dimension / float(min(orig_height, orig_width)), 1.0)
    # The max constraint takes precedence over the min constraint.
    scale_factor = min(shrink_factor, grow_factor)
    new_size = [int(round(orig_height * scale_factor)),
                int(round(orig_width * scale_factor))]
    return tf.constant(new_size + [num_channels])
def deepupdate(original, update):
    """Recursively merge *original* into *update* and return *update*.

    Keys present only in ``original`` are copied over. Keys present in both
    keep ``update``'s value, except that when both values are dicts they are
    merged recursively instead of being overwritten.

    Args:
        original: Mapping providing fallback values.
        update: Mapping that is modified in place and returned.

    Returns:
        dict: The updated ``update`` mapping.
    """
    for key, value in original.items():
        if key not in update:
            update[key] = value
        elif isinstance(value, dict) and isinstance(update[key], dict):
            # Only recurse when both sides are dicts; previously a dict in
            # `original` paired with a non-dict in `update` raised TypeError.
            deepupdate(value, update[key])
    return update
import itertools


def _omega_spectrum_odd_c(n, field):
    """Spectra of groups \Omega_{2n+1}(q) for odd q.
    [1, Corollary 6]

    Args:
        n: group parameter; it is reduced below via n -> (n - 1) // 2,
           which suggests the caller passes the odd dimension 2m+1 --
           confirm against the callers.
        field: finite field object exposing `order` (q) and `char` (p).

    Returns:
        An iterator over element orders, chaining the five families of
        cases from [1, Corollary 6].
    """
    n = (n - 1) // 2
    q = field.order
    p = field.char
    # (1) the orders (q**n - 1)/2 and (q**n - 1)/2 + 1, per [1, Corollary 6].
    t = (q ** n - 1) // 2
    a1 = [t, t + 1]
    # (2) semisimple element orders (external generator; length >= 2 parts).
    a2 = SemisimpleElements(q, n, min_length=2)
    # (3) orders p**k * t and p**k * (t + 1) for shrinking sub-ranks n1,
    #     stopping once n1 drops below 1.
    k = 1
    a3 = []
    while True:
        n1 = n - (p ** (k - 1) + 1) // 2
        if n1 < 1:
            break
        t = (q ** n1 - 1) // 2
        a3.extend([t * p ** k, (t + 1) * p ** k])
        k += 1
    # (4) mixed element orders (external generator parameterized by the same
    #     rank-reduction and p-power functions used in case (3)).
    a4 = MixedElements(q, n,
                       lambda k: (p ** (k - 1) + 1) // 2,
                       lambda k: p ** k, min_length=2)
    # (5) the order p*(2n - 1), included only when 2n - 1 is a power of p
    #     (numeric.get_exponent returns None otherwise -- confirm).
    k = numeric.get_exponent(2 * n - 1, p)
    a5 = [] if k is None else [p * (2 * n - 1)]
    return itertools.chain(a1, a2, a3, a4, a5)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.