content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_mat_2d(sequence, rnn=False):
    """Uses aa_to_map to turn a sequence into an array representation of the
    protein.

    With ``rnn=True`` every residue becomes one flattened 36-value row of a
    (len, 36) matrix; otherwise each residue fills the even-indexed plane of a
    (2*len, 8, 6) array (odd planes are left as zeros).
    """
    # assumes aa_to_map(aa) returns an array sliceable as [:, :6, :] — TODO confirm
    if rnn:
        encoded = np.zeros((len(sequence), 36))
        for idx, residue in enumerate(sequence):
            encoded[idx] = aa_to_map(residue)[:, :6, :].flatten()
    else:
        encoded = np.zeros((2 * len(sequence), 8, 6))
        for idx, residue in enumerate(sequence):
            encoded[2 * idx] = aa_to_map(residue)
    return encoded
from datetime import datetime
def TimeSec():
    """Return the number of seconds elapsed since local midnight.

    Returns:
        int: seconds since 00:00:00 today (microseconds are discarded).
    """
    current = datetime.now()
    return current.hour * 60 * 60 + current.minute * 60 + current.second
def partition_variable(variable, partition_dict):
    """
    As partition_shape() but takes a mapping of dimension-name to number
    of partitions as its second argument.  <variable> is a VariableWrapper
    instance.  Dimensions absent from the mapping get a single partition.
    """
    counts = [partition_dict.get(dim.name, 1) for dim in variable.dimensions]
    return partition_shape(variable.shape, counts)
def _get_object_properties(agent,
                           properties,
                           obj_type,
                           obj_property_name,
                           obj_property_value,
                           include_mors=False):
    """
    Helper method to simplify retrieving of properties
    This method is used by the '*.get' vPoller Worker methods and is
    meant for collecting properties for a single managed object.
    We first search for the object with property name and value,
    then create a list view for this object and
    finally collect its properties.
    Args:
        agent (VConnector): A VConnector instance
        properties (list): List of properties to be collected
        obj_type (pyVmomi.vim.*): Type of vSphere managed object
        obj_property_name (str): Property name used for searching for the object
        obj_property_value (str): Property value identifying the object in question
        include_mors (bool): Include managed object references in the result
    Returns:
        The collected properties for this managed object in JSON format
    """
    logger.info(
        '[%s] Retrieving properties for %s managed object of type %s',
        agent.host,
        obj_property_value,
        obj_type.__name__
    )
    # Find the Managed Object reference for the requested object
    try:
        obj = agent.get_object_by_property(
            property_name=obj_property_name,
            property_value=obj_property_value,
            obj_type=obj_type
        )
    except Exception as e:
        # Python 3 exceptions have no `.message`; format the exception itself
        return {'success': 1, 'msg': 'Cannot collect properties: {}'.format(e)}
    if not obj:
        return {
            'success': 1,
            'msg': 'Cannot find object {}'.format(obj_property_value)
        }
    # Create a list view for this object and collect properties
    view_ref = agent.get_list_view(obj=[obj])
    try:
        data = agent.collect_properties(
            view_ref=view_ref,
            obj_type=obj_type,
            path_set=properties,
            include_mors=include_mors
        )
    except Exception as e:
        return {'success': 1, 'msg': 'Cannot collect properties: {}'.format(e)}
    finally:
        # Destroy the view even when collection fails (it used to leak on error)
        view_ref.DestroyView()
    result = {
        'success': 0,
        'msg': 'Successfully retrieved object properties',
        'result': data,
    }
    return result
def get_crypto_currency_pairs(info=None):
    """Gets a list of all the crypto currencies that you can trade.

    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \
    Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info.
    """
    pairs = helper.request_get(urls.crypto_currency_pairs(), 'results')
    return helper.filter(pairs, info)
def sparse_graph_convolution_layers(name, inputs, units, reuse=True):
    """
    This one is used by the Joint_SMRGCN model;
    A crude prototypical operation: one linear message per bond type is
    propagated through its sparse adjacency, averaged, then combined with a
    self-connection term.
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE if reuse else False):
        # adj_tensor: list (size nb_bonds) of [length, length] sparse matrices
        adj_tensor, hidden_tensor, node_tensor = inputs
        annotations = node_tensor if hidden_tensor is None else hidden_tensor
        input_dim = annotations.get_shape().as_list()[-1]
        messages = []
        for bond_idx, adjacency in enumerate(adj_tensor):
            msg_bond = linear('lt_bond_%d' % (bond_idx + 1), input_dim, units,
                              annotations, biases=False, variables_on_cpu=False)
            messages.append(tf.sparse_tensor_dense_matmul(adjacency, msg_bond))
        output = tf.add_n(messages) / len(adj_tensor)
        # self-connection \approx residual connection
        return output + linear('self-connect', input_dim, units, annotations,
                               variables_on_cpu=False)
import click
import sys
import logging
import os
def main(argv):
    """
    The main function to invoke the powerfulseal cli.

    Dispatches on ``args.mode`` (validate / interactive / autonomous / label)
    after wiring up logging, the Kubernetes client, a cloud driver, the node
    inventory, an executor and a metrics collector.
    """
    args = parse_args(args=argv)
    if args.mode is None:
        return parse_args(['--help'])
    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        return sys.exit(1)
    ##########################################################################
    # LOGGING
    ##########################################################################
    # this is to calm down the flask stdout
    # calm down the workzeug
    logging.getLogger("werkzeug").setLevel(logging.ERROR)
    try:
        def echo(*args, **kwargs):
            pass
        click.echo = echo
        click.secho = echo
    except Exception:
        # silencing click output is best-effort only; never fatal
        pass
    # parse the verbosity flags
    if args.silent == 1:
        log_level = logging.WARNING
    elif args.silent == 2:
        log_level = logging.ERROR
    elif not args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    server_log_handler = ServerStateLogHandler()
    server_log_handler.setLevel(log_level)
    # do a basic config with the server log handler
    logging.basicConfig(level=log_level, handlers=[server_log_handler])
    # this installs a stdout handler by default to the root
    coloredlogs.install(
        level=log_level,
        fmt='%(asctime)s %(levelname)s %(name)s %(message)s'
    )
    # the main cli handler
    logger = makeLogger(__name__)
    logger.setLevel(log_level)
    logger.info("verbosity: %s; log level: %s; handler level: %s", args.verbose, logging.getLevelName(logger.getEffectiveLevel()), logging.getLevelName(log_level))
    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = parse_kubeconfig(args)
    k8s_client = K8sClient(kube_config=kube_config)
    operation_mode = args.execution_mode
    # backwards compatibility
    if args.use_pod_delete_instead_of_ssh_kill:
        operation_mode = "kubernetes"
    k8s_inventory = K8sInventory(
        k8s_client=k8s_client,
    )
    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(
            cloud=args.openstack_cloud_name,
        )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    elif args.azure:
        logger.info("Building Azure driver")
        driver = AzureDriver(
            cluster_rg_name=args.azure_resource_group_name,
            cluster_node_rg_name=args.azure_node_resource_group_name,
        )
    elif args.gcp:
        logger.info("Building GCP driver")
        driver = GCPDriver(config=args.gcp_config_file)
    else:
        logger.info("No cloud driver - some functionality disabled")
        driver = NoCloudDriver()
    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file
        )
    else:
        logger.debug("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()
    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)
    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()
    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    if operation_mode == "kubernetes":
        executor = KubernetesExecutor(
            k8s_client=k8s_client,
        )
    else:
        if args.use_private_ip:
            logger.info("Using each node's private IP address")
        if args.override_ssh_host:
            logger.info("Using each overridden host: %s", args.override_ssh_host)
        executor = SSHExecutor(
            user=args.remote_user,
            ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
            ssh_path_to_private_key=args.ssh_path_to_private_key,
            override_host=args.override_ssh_host,
            ssh_password=args.ssh_password,
            use_private_ip=args.use_private_ip,
            ssh_kill_command=args.ssh_kill_command,
        )
    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except GeneratorExit:
                print("Exiting")
                sys.exit(0)
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
                try:
                    input()
                except KeyboardInterrupt:
                    sys.exit(0)
        return
    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        flask_debug = os.environ.get("FLASK_DEBUG")
        # NOTE(review): "FLASK_ENVIROMENT" looks misspelled (Flask uses
        # FLASK_ENV) — kept as-is to preserve behaviour; confirm upstream.
        flask_env = os.environ.get("FLASK_ENVIROMENT")
        if flask_debug is not None or (flask_env is not None and flask_env != "production"):
            logger.error("PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER")
        else:
            logger.info("Starting prometheus metrics server (%s:%s)", args.prometheus_host, args.prometheus_port)
            start_http_server(args.prometheus_port, args.prometheus_host)
            metric_collector = PrometheusCollector()
    elif args.datadog_collector:
        logger.info("Starting datadog collector")
        metric_collector = DatadogCollector()
    else:
        logger.info("Using stdout metrics collector")
    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':
        runner = PolicyRunner(args.policy_file, k8s_client, logger)
        # run the metrics server if requested
        if not args.headless:
            # start the server
            logger.info("Starting the UI server (%s:%s)", args.host, args.port)
            start_server(
                host=args.host,
                port=args.port,
                read_policy_fn=runner.read_policy,
                accept_proxy_headers=args.accept_proxy_headers,
                logger=server_log_handler,
            )
        else:
            logger.info("NOT starting the UI server")
        logger.info("STARTING AUTONOMOUS MODE")
        success = runner.run(
            inventory,
            k8s_inventory,
            driver,
            executor,
            metric_collector=metric_collector
        )
        if not success:
            logger.error("Policy runner finishes with an error")
            return sys.exit(1)
        return sys.exit(0)
    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()
def update_or_create_tags(observer, repo, tag=None, type_to_update=None):
    """Create or update tags, reporting progress through *observer*."""
    observer.update_state(
        state='PROGRESS',
        meta='Retrieving data and media from Github'
    )
    github = GithubAPI(repo)
    if tag:
        data, media = github.get_data(tag)
        if type_to_update == "ssot":
            populate_media(observer, media, tag)
        populate_data(observer, data, tag)
    populate_index(observer, tag, type_to_update)
    observer.update_state(
        state='SUCCESS',
        meta='All tasks complete'
    )
    return True
def confirm_api_access_changes(request):
    """Renders the confirmation page to confirm the successful changes made to
    the API access settings for the superuser's group.

    Parameters:
        request - The request object sent with the call to the confirm page if
                  the requested changes were successfully made to the API
                  access settings.
    """
    profile = request.user.get_profile()
    context = {'product_name': profile.api_access_data.product_name}
    return render_to_response('confirm_api_access_changes.html',
                              context,
                              context_instance=RequestContext(request))
def mpl_hill_shade(data, terrain=None,
                   cmap=DEF_CMAP, vmin=None, vmax=None, norm=None, blend_function=rgb_blending,
                   azimuth=DEF_AZIMUTH, elevation=DEF_ELEVATION):
    """ Hill shading that uses the matplotlib intensities. Is only for making comparison between
        blending methods where we need to include the matplotlib hill shading. For all other
        plots we can use the combined_intensities function that is used in the regular hill_shade()
    """
    terrain = data if terrain is None else terrain
    assert data.ndim == 2, "data must be 2 dimensional"
    assert terrain.shape == data.shape, "{} != {}".format(terrain.shape, data.shape)
    intensities = mpl_surface_intensity(terrain, azimuth=azimuth, elevation=elevation)
    colored = color_data(data, cmap=cmap, vmin=vmin, vmax=vmax, norm=norm)
    return blend_function(colored, intensities)
def img_docs(filename='paths.ini', section='PATHS'):
    """
    Serve the PATH to the img docs directory, read from the given ini file.
    Raises when the requested section is missing.
    """
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    docs = {key: value for key, value in parser.items(section)}
    return docs['path_to_img_db']
def KGPhenio(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "current", **kwargs
) -> Graph:
    """Return kg-phenio graph, downloaded/cached via AutomaticallyRetrievedGraph.
    Parameters
    ----------
    directed = False
        Load as a directed graph.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    load_node_types = True
        Load node types.
    load_edge_weights = True
        Load edge weights.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    sort_tmp_dir = None
        Temporary directory used while sorting.
    verbose = 2
        Logging verbosity.
    cache = True
        Whether to cache the downloaded graph.
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "current"
        Version to retrieve
        The available versions are:
            - 20220304
            - 20220414
            - 20220428
            - 20220429
            - 20220504
            - 20220506
            - 20220511
            - 20220513
            - 20220516
            - 20220525
            - 20220601
            - 20220606
            - current
    """
    # NOTE(review): `kwargs` is passed as a positional dict (not `**kwargs`) —
    # presumably AutomaticallyRetrievedGraph expects a dict argument; confirm.
    return AutomaticallyRetrievedGraph(
        "KGPhenio", version, "kghub", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
        cache_path, cache_sys_var, kwargs
    )()
def _x_mul(a, b, digit=0):
    """
    Grade school multiplication, ignoring the signs.
    Returns the absolute value of the product, or None if error.

    a, b are rbigint digit arrays; `digit`, when non-zero and a is not b,
    multiplies b by that single digit instead (powers of two become shifts).
    """
    size_a = a.numdigits()
    size_b = b.numdigits()
    if a is b:
        # Efficient squaring per HAC, Algorithm 14.16:
        # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
        # Gives slightly less than a 2x speedup when a == b,
        # via exploiting that each entry in the multiplication
        # pyramid appears twice (except for the size_a squares).
        z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
        i = UDIGIT_TYPE(0)
        while i < size_a:
            f = a.widedigit(i)
            pz = i << 1
            pa = i + 1
            # diagonal square term a[i]^2 lands at column 2*i
            carry = z.widedigit(pz) + f * f
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            assert carry <= MASK
            # Now f is added in twice in each column of the
            # pyramid it appears. Same as adding f<<1 once.
            f <<= 1
            while pa < size_a:
                carry += z.widedigit(pz) + a.widedigit(pa) * f
                pa += 1
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                # propagate leftover carry one more column
                carry += z.widedigit(pz)
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                z.setdigit(pz, z.widedigit(pz) + carry)
            assert (carry >> SHIFT) == 0
            i += 1
        z._normalize()
        return z
    elif digit:
        if digit & (digit - 1) == 0:
            # digit is a power of two: multiply via left shift
            return b.lqshift(ptwotable[digit])
        # Even if it's not power of two it can still be useful.
        return _muladd1(b, digit)
    # a is not b
    # use the following identity to reduce the number of operations
    # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n
    z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
    i = UDIGIT_TYPE(0)
    size_a1 = UDIGIT_TYPE(size_a - 1)
    size_b1 = UDIGIT_TYPE(size_b - 1)
    # main loop consumes two digits of `a` per iteration
    while i < size_a1:
        f0 = a.widedigit(i)
        f1 = a.widedigit(i + 1)
        pz = i
        carry = z.widedigit(pz) + b.widedigit(0) * f0
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        j = UDIGIT_TYPE(0)
        while j < size_b1:
            # this operation does not overflow using
            # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it
            # carry and z.widedigit(pz) are less than 2**(B - 1);
            # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so
            # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 +
            # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG)BIT - 1
            carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \
                     b.widedigit(j) * f1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            j += 1
        # carry < 2**(B + 1) - 2
        carry += z.widedigit(pz) + b.widedigit(size_b1) * f1
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        # carry < 4
        if carry:
            z.setdigit(pz, carry)
        assert (carry >> SHIFT) == 0
        i += 2
    # odd number of digits in `a`: fold in the last digit with a plain
    # single-digit multiply-accumulate pass
    if size_a & 1:
        pz = size_a1
        f = a.widedigit(pz)
        pb = 0
        carry = _widen_digit(0)
        while pb < size_b:
            carry += z.widedigit(pz) + b.widedigit(pb) * f
            pb += 1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
        if carry:
            z.setdigit(pz, z.widedigit(pz) + carry)
    z._normalize()
    return z
import re
def server_version(headers):
    """Extract the firmware version from HTTP headers.

    Returns the version string, or None when the Server header is absent,
    empty, or does not match the ServerTech-AWS pattern.
    """
    server_header = headers.get("Server")
    if not server_header:
        return None
    match = re.match(r"ServerTech-AWS/v(?P<version>\d+\.\d+\w+)", server_header)
    return match.group("version") if match else None
import os
def init_test_env(setting_path, output_path, file_list,fname_list):
    """
    create test environment, the file list would be saved into output_path/seg/test/file_path_list.txt,
    a corresponding auto-parsed filename list would also be saved in output_path/seg/test/file_name_list.txt
    :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_path: the output path of the task
    :param file_list: the image list; each item is either an image path or an (image, label) pair of paths
    :param fname_list: optional list of file names; when None, names are derived from the image paths
    :return: tuple of ParameterDict, datapro (optional) and tsk_set
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path), "task setting not exists"
    # the data setting is optional; the task setting is mandatory
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    file_num = len(file_list)
    os.makedirs(os.path.join(output_path, 'seg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'seg/res'), exist_ok=True)
    file_txt_path = os.path.join(output_path, 'seg/test/file_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'seg/test/file_name_list.txt')
    # items of length 2 are (image, label) pairs
    has_label = len(file_list[0])==2
    if fname_list is None:
        if has_label:
            fname_list = [get_file_name(file_list[i][0]) for i in range(file_num)]
        else:
            fname_list = [get_file_name(file_list[i]) for i in range(file_num)]
    write_list_into_txt(file_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
    data_task_name = 'seg'
    cur_task_name = 'res'
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = cur_task_name
    tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)
    return dm, tsm
def slice_constant(data, batch_size=32, name='constant_data', global_step=None):
    """Provide a slice based on the global_step.
    This is useful when the entire data array can be stored in memory because it
    allows you to feed the data very efficiently.
    Args:
        data: A numpy array or tensor.
        batch_size: The batch size for the produced data.
        name: An optional name for this data.
        global_step: A global step variable that is used to read the data. If None
            then the default prettytensor global_step is used.
    Returns:
        A tensor that produces the given data.
    """
    with tf.name_scope(name):
        all_data = tf.convert_to_tensor(data)
        global_step = global_step or bookkeeper.global_step()
        # Integer division: '/' would yield a float in Python 3, which breaks
        # both the tf.mod below and the slice sizes.
        count = len(data) // batch_size
        extra = len(data) - count * batch_size
        if extra:
            # Data does not divide evenly into batches: cycle through count+1
            # offsets and shrink the final slice to the leftover size.
            offset = tf.mod(global_step, count + 1)
            return tf.slice(all_data, offset * batch_size,
                            tf.where(tf.equal(offset, count), extra, batch_size))
        else:
            # Evenly divisible: every slice is a full batch.
            offset = tf.mod(global_step, count)
            return tf.slice(all_data, offset * batch_size, batch_size)
def es_subcadena(adn1, adn2):
    """
    (str, str) -> bool

    Return True when ``adn2`` occurs as a substring of ``adn1``.

    >>> es_subcadena('gatc', 'tta')
    False
    >>> es_subcadena('gtattt', 'atcgta')
    False

    :param adn1: first strand to compare (the haystack)
    :param adn2: second strand to compare (the needle)
    :return: whether ``adn2`` is a substring of ``adn1``
    """
    # `in` already produces the boolean; no if/else needed.
    return adn2 in adn1
def get_labels_and_features(nested_embeddings):
    """ Split a 2-D array whose last column holds the labels.

    Returns (features, labels): all columns but the last, and the last column.
    """
    features = nested_embeddings[:, :-1]
    labels = nested_embeddings[:, -1]
    return features, labels
def all_(f : a >> bool, t : r(a)) -> bool:
    """
    all :: Foldable r => (a -> bool) -> r a -> bool
    Determines whether all elements of the structure satisfy the predicate.
    """
    # NOTE(review): the predicate `f` is not forwarded to DL.all_ here —
    # presumably DL.all_ is curried or applies it elsewhere; confirm its arity.
    return DL.all_(toList(t))
def log_binom_sum(lower, upper, obs_vote, n0_curr, n1_curr, b_1_curr, b_2_curr, prev):
    """
    Helper function for computing log prob of convolution of binomial

    Sums (in log space, via logsumexp) over the unobserved within-group vote
    count in [lower, upper), combining Binomial(n0_curr, b_1_curr) for the
    group's own votes with Binomial(n1_curr, b_2_curr) for the remainder,
    and accumulates onto `prev` (the running log-prob over precincts).
    """
    # votes_within_group_count is y_0i in Wakefield's notation, the count of votes from
    # given group for given candidate within precinct i (unobserved)
    votes_within_group_count = tt.arange(lower, upper)
    # logsumexp marginalises the unobserved split between the two binomials
    component_for_current_precinct = pm.math.logsumexp(
        pm.Binomial.dist(n0_curr, b_1_curr).logp(votes_within_group_count)
        + pm.Binomial.dist(n1_curr, b_2_curr).logp(obs_vote - votes_within_group_count)
    )[0]
    return prev + component_for_current_precinct
def parse_locator(src):
    """ (src:str) -> [pathfile:str, label:either(str, None)]

    Split "path#label" into its two parts; the label defaults to None.
    Raises ValueError when more than one '#' is present.
    """
    parts = src.split('#')
    if len(parts) == 1:
        return [parts[0], None]
    if len(parts) == 2:
        return parts
    raise ValueError('Malformed src: %s' % (src))
def detect_seperator(path, encoding):
    """
    :param path: pathlib.Path objects
    :param encoding: file encoding.
    :return: 1 character.
    :raises ValueError: when no separator can be detected.
    """
    # After reviewing the logic in the CSV sniffer, I concluded that all it
    # really does is to look for a non-text character. As the separator is
    # determined by the first line, which almost always is a line of headers,
    # the text characters will be utf-8,16 or ascii letters plus white space.
    # This leaves the characters ,;:| and \t as potential separators, with one
    # exception: files that use whitespace as separator. My logic is therefore
    # to (1) find the set of characters that intersect with ',;:|\t' which in
    # practice is a single character, unless (2) it is empty whereby it must
    # be whitespace.
    text = ""
    # use a context manager so the file handle is closed (it leaked before)
    with path.open('r', encoding=encoding) as fh:
        for line in fh:  # pick the first line only.
            text = line
            break
    seps = {',', '\t', ';', ':', '|'}.intersection(text)
    if not seps:
        if " " in text:
            return " "
        else:
            raise ValueError("separator not detected")
    if len(seps) == 1:
        return seps.pop()
    else:
        frq = [(text.count(i), i) for i in seps]
        frq.sort(reverse=True)  # most frequent first.
        return frq[0][-1]
def pr_at_k(df, k):
    """
    Returns p/r for a specific result at a specific k
    df: pandas df with columns 'space', 'time', 'y_true', and 'y_pred'
    k: the number of obs you'd like to label 1 at each time
    """
    # static traits of df
    n_times = df['time'].nunique()
    total_positives = df['y_true'].sum()
    # df needs to be sorted by (time, y_pred); count the true positives
    # among the top k of every time slice
    true_pos = df.groupby('time').pipe(tp_group, k)
    false_pos = (n_times * k) - true_pos
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / total_positives
    return precision, recall
def draw_segm(im, np_segms, np_label, np_score, labels, threshold=0.5, alpha=0.7):
    """
    Draw segmentation on image.

    im: input image (anything np.array() accepts); np_segms: per-instance
    binary masks; np_label / np_score: per-instance class ids and scores;
    labels: class-name list; threshold: minimum score to draw; alpha: mask
    opacity.  Returns a PIL Image with masks, boxes and score text drawn.
    """
    mask_color_id = 0  # NOTE(review): unused — looks like leftover code
    w_ratio = .4
    color_list = get_color_map_list(len(labels))
    im = np.array(im).astype('float32')
    clsid2color = {}
    np_segms = np_segms.astype(np.uint8)
    for i in range(np_segms.shape[0]):
        mask, score, clsid = np_segms[i], np_score[i], np_label[i] + 1
        if score < threshold:
            continue
        if clsid not in clsid2color:
            clsid2color[clsid] = color_list[clsid]
        color_mask = clsid2color[clsid]
        # NOTE(review): this mutates the cached list in-place, so the colour is
        # lightened again on every instance of the same class — confirm intent.
        for c in range(3):
            color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
        idx = np.nonzero(mask)
        color_mask = np.array(color_mask)
        # alpha-blend the class colour over the masked pixels
        im[idx[0], idx[1], :] *= 1.0 - alpha
        im[idx[0], idx[1], :] += alpha * color_mask
        # derive the tight bounding box from the mask's row/column occupancy
        sum_x = np.sum(mask, axis=0)
        x = np.where(sum_x > 0.5)[0]
        sum_y = np.sum(mask, axis=1)
        y = np.where(sum_y > 0.5)[0]
        x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
        cv2.rectangle(im, (x0, y0), (x1, y1), tuple(color_mask.astype('int32').tolist()), 1)
        bbox_text = '%s %.2f' % (labels[clsid], score)
        t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
        # filled background rectangle behind the label text
        cv2.rectangle(im, (x0, y0), (x0 + t_size[0], y0 - t_size[1] - 3), tuple(color_mask.astype('int32').tolist()),
                      -1)
        cv2.putText(im, bbox_text, (x0, y0 - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), 1, lineType=cv2.LINE_AA)
    return Image.fromarray(im.astype('uint8'))
def solve(board):
    """
    solve a sudoku board in place using backtracking
    param board: 2d list of integers (0 marks an empty cell)
    return: True when a solution was filled in, False otherwise
    """
    empty = find_empty_space(board)
    if not empty:
        # no empty cell left: the board is solved
        return True
    row, col = empty
    for candidate in range(1, 10):
        if not valid_number(board, candidate, (row, col)):
            continue
        board[row][col] = candidate
        if solve(board):
            return True
        # undo and try the next candidate
        board[row][col] = 0
    return False
from typing import List
from typing import Optional
def check(s: str) -> None:
    """
    Checks if the given input string of brackets are balanced or not,
    printing "Balanced." or "Not balanced." accordingly.
    Args:
        s (str): The input string
    """
    # closing bracket -> matching opening bracket
    pairs = {")": "(", "]": "[", "}": "{"}
    stack: List[str] = []
    for char in s:
        if char in ("(", "{", "["):
            stack.append(char)
            continue
        # a closer (or any other character) must match the stack top;
        # an empty stack or a mismatch means the string is unbalanced
        if not stack or pairs.get(char) != stack[-1]:
            print("Not balanced.")
            return
        stack.pop()
    # every bracket consumed: balanced only if nothing is left open
    print("Balanced." if not stack else "Not balanced.")
def format_address(msisdn):
    """
    Format a normalized MSISDN as a URI that ParlayX will accept.

    Raises ValueError unless the number is in international ('+...') form.
    """
    if msisdn.startswith('+'):
        return 'tel:' + msisdn[1:]
    raise ValueError('Only international format addresses are supported')
def get_nag_statistics(nag):
    """Return a report containing all NAG statistics"""
    lines = [
        "Constants: {0.constant_number}",
        "Inputs: {0.input_number}",
        "NANDs: {0.nand_number}",
        "Outputs: {0.output_number}",
        "Min. I/O distance: {0.input_to_output_min_distance}",
        "Max. I/O distance: {0.input_to_output_max_distance}",
    ]
    return "\n".join(lines).format(nag)
def svm_loss_naive(w, x, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).
    Inputs have dimension D, there are C classes, and we operate on mini-batches of N examples.
    :param w: A numpy array of shape (D, C) containing weights.
    :param x: A numpy array of shape (N, D) containing a mini-batch of data.
    :param y: A numpy array of shape (N,) containing training labels;
              y[i] = c means that X[i] has label c, where 0 <= c < C.
    :param reg: (float) regularization strength
    :return: loss (single float), gradient (an array of same shape as W)
    """
    dw = np.zeros(w.shape)  # initialize the gradient as zero
    n_classes = w.shape[1]
    n_train = x.shape[0]
    loss = 0.
    # compute the hinge loss and its gradient in the same pass
    for i in range(n_train):
        scores = x[i].dot(w)
        correct_scores = scores[y[i]]
        for j in range(n_classes):
            if j == y[i]:
                continue
            # margin with delta = 1
            margin = scores[j] - correct_scores + 1.
            if margin > 0.:
                loss += margin
                # each violating class pushes its column up and the
                # correct class's column down by x[i]
                dw[:, j] += x[i]
                dw[:, y[i]] -= x[i]
    # average over the mini-batch
    loss /= n_train
    dw /= n_train
    # L2 regularization: 0.5 * reg * ||w||^2 and its derivative reg * w
    w_reg = .5 * reg * float(np.tensordot(w, w, axes=((0, 1), (0, 1))))
    dw_reg = reg * w  # differential of w_reg by w
    # add reg to the loss and gradient
    loss += w_reg
    dw += dw_reg
    return loss, dw
def create_backup(storage, remote, parent=None):
    """ Create a new backup of provided remote and return (ref, backup).
    .. warning:: Do not forget to add a label on returned backup to avoid its
        removal by the garbage collector.
    """
    if parent:
        parent_ref = storage.resolve(parent)
        parent_backup = storage.get_backup(parent_ref)
        if parent_backup:
            parent_root = storage.get_tree(parent_backup.root)
        # NOTE(review): if `parent` is given but no backup is found,
        # `parent_root` stays unbound and the walk below raises NameError —
        # confirm whether that case can occur.
    else:
        parent_ref = None
        parent_root = None
    backup = Backup(parent=parent_ref)
    # both the backup object and the remote are context managers
    with backup, remote:
        backup.errors, backup.stats, backup.root = walk_and_ingest_remote(remote, storage, parent=parent_root)
    # size / stored_size from ingest() are intentionally discarded here
    ref, size, stored_size = storage.ingest(backup)
    return ref, backup
def get_pairwise_correlation(population_df, method="pearson"):
    """Given a population dataframe, calculate all pairwise correlations.
    Parameters
    ----------
    population_df : pandas.core.frame.DataFrame
        Includes metadata and observation features.
    method : str, default "pearson"
        Which correlation matrix to use to test cutoff.
    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        The full symmetric correlation matrix, and a long-format frame with
        columns ["pair_a", "pair_b", "correlation"] listing each unordered
        feature pair once.
    """
    # Check that the input method is supported
    method = check_correlation_method(method)
    # Get a symmetrical correlation matrix
    data_cor_df = population_df.corr(method=method)
    # Create a copy of the dataframe to generate upper triangle of zeros
    data_cor_natri_df = data_cor_df.copy()
    # Replace upper triangle in correlation matrix with NaN.
    # Use builtin `bool`: the `np.bool` alias was removed in NumPy >= 1.24.
    data_cor_natri_df = data_cor_natri_df.where(
        np.tril(np.ones(data_cor_natri_df.shape), k=-1).astype(bool)
    )
    # Acquire pairwise correlations in a long format
    # Note that we are using the NaN upper triangle DataFrame
    pairwise_df = data_cor_natri_df.stack().reset_index()
    pairwise_df.columns = ["pair_a", "pair_b", "correlation"]
    return data_cor_df, pairwise_df
import os
def get_game_raw_pbp_filename(season, game):
    """
    Returns the filename of the raw pbp folder
    :param season: int, current season
    :param game: int, game
    :return: str, /scrape/data/raw/pbp/[season]/[game].zlib
    """
    season_folder = organization.get_season_raw_pbp_folder(season)
    basename = str(game) + '.zlib'
    return os.path.join(season_folder, basename)
def plot_route(cities, route, name='diagram.png', ax=None):
    """Plot a graphical representation of the route obtained"""
    mpl.rcParams['agg.path.chunksize'] = 10000
    # Reorder the city coordinates to follow the route, then append the first
    # stop again so the drawn path closes into a loop.
    ordered = cities.reindex(route)
    ordered.loc[ordered.shape[0]] = ordered.iloc[0]
    if ax:
        # Draw onto the caller-supplied axes and hand them back.
        ax.scatter(cities['x'], cities['y'], color='red', s=4)
        ax.plot(ordered['x'], ordered['y'], color='purple', linewidth=1)
        return ax
    # No axes given: build a frameless square figure and save it to disk.
    figure = plt.figure(figsize=(5, 5), frameon=False)
    axis = figure.add_axes([0, 0, 1, 1])
    axis.set_aspect('equal', adjustable='datalim')
    plt.axis('off')
    axis.scatter(cities['x'], cities['y'], color='red', s=4)
    axis.plot(ordered['x'], ordered['y'], color='purple', linewidth=1)
    plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
    plt.close()
import math
def bertScore(string):
    """
    Function to generate the output list consisting top K replacements for each word in the sentence using BERT.

    For every word in `string`, spelling candidates are generated; each
    candidate is substituted into the sentence and scored with a BERT LM
    (TF1 session API), and up to the top candidates by score are collected.

    :param string: input sentence, split on single spaces.
    :return: list with one list of candidate replacements per input word.
    """
    corrector = SpellCorrector()
    temp1 = []
    temp2 = []
    temp3 = []
    con = list(string.split(" "))
    # TF1-style graph/session setup; Model() is assumed to build the BERT
    # graph and expose .X (input ids) and .logits.
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    model = Model()
    sess.run(tf.global_variables_initializer())
    var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="bert")
    for word in con:
        possible_states = corrector.edit_candidates(word, fast=False)
        if len(possible_states) == 1:
            word = possible_states[0]
        if word in possible_states:
            # Word already valid (or uniquely corrected): keep it as-is.
            temp1.append([word])
            continue
        text = string
        text_mask = text.replace(word, "**mask**")
        print(text_mask)
        cls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="cls")
        replaced_masks = [
            text_mask.replace("**mask**", state) for state in possible_states
        ]
        # print(replaced_masks)
        # Score candidates in mini-batches of 5 sentences.
        val = math.ceil(len(replaced_masks) / 5)
        m = 0
        n = 5
        for i in range(0, val):
            rep_new = replaced_masks[m:n]
            tokens = tokenizer.tokenize(rep_new[0])
            input_ids = [maskedId(tokens, i) for i in range(len(tokens))]
            tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
            ids = [generateId(mask) for mask in rep_new]
            tokens, input_ids, tokens_ids = list(zip(*ids))
            indices, ids = [], []
            for i in range(len(input_ids)):
                indices.extend([i] * len(input_ids[i]))
                ids.extend(input_ids[i])
            masked_padded = tf.keras.preprocessing.sequence.pad_sequences(
                ids, padding="post"
            )
            preds = sess.run(
                tf.nn.log_softmax(model.logits), feed_dict={model.X: masked_padded}
            )
            # 119547 is the multilingual BERT vocabulary size.
            preds = np.reshape(
                preds, [masked_padded.shape[0], masked_padded.shape[1], 119547]
            )
            indices = np.array(indices)
            scores = []
            for i in range(len(tokens) - 1):
                filter_preds = preds[indices == i]
                # Sum of log-probabilities of the true tokens = sentence score.
                total = np.sum(
                    [filter_preds[k, k + 1, x] for k, x in enumerate(tokens_ids[i])]
                )
                scores.append(total)
            prob_scores = np.array(scores) / np.sum(scores)
            probs = list(zip(possible_states, prob_scores))
            for i in probs:
                temp3.append(i)
            m += 5
            n += 5
        # Keep the first few candidates after sorting by score.
        temp3.sort(key=lambda x: x[1])
        list(temp3)
        j = 0
        for i in temp3:
            if j != 3:
                temp2.append(i[0])
            if j == 3:
                break
            j = j + 1
        if len(temp2) != 0:
            temp1.append(temp2)
        else:
            temp1.append([word])
        temp2 = []
        temp3 = []
    sess.close()
    return temp1
import dataclasses
def configuration_stub(configuration_test: Configuration) -> Configuration:
    """
    Configuration for tests: a copy of the test configuration with the
    stub distance between wheels.
    """
    stubbed = dataclasses.replace(
        configuration_test, distance_between_wheels=DISTANCE_BETWEEN_WHEELS
    )
    return stubbed
def join_smiles(df, df_smiles=None, how="left"):
    """Join Smiles from Compound_Id."""
    if df_smiles is None:
        # Fall back to the globally loaded SMILES resource.
        load_resource("SMILES")
        df_smiles = SMILES
    joined = df.merge(df_smiles, on="Compound_Id", how=how)
    # Convert columns to numeric where possible, leave the rest untouched.
    joined = joined.apply(pd.to_numeric, errors='ignore')
    return joined.fillna("*")
def signature(*types, **kwtypes):
    """Type annotations and conversions for methods.

    Each positional/keyword spec is either a type (used both for the
    isinstance check and as the converter) or an explicit
    ``(type, converter)`` pair. Ignores first parameter.
    """
    def as_pair(spec):
        return spec if isinstance(spec, tuple) else (spec, spec)

    positional = [as_pair(spec) for spec in types]
    keyword = {name: as_pair(spec) for name, spec in kwtypes.items()}

    def decorator(fn):
        @wraps(fn)
        def wrapped(self, *args, **kwargs):
            converted_args = []
            for (expected, convert), value in zip(positional, args):
                converted_args.append(value if isinstance(value, expected) else convert(value))
            converted_kwargs = {}
            for name, value in kwargs.items():
                expected, convert = keyword[name]
                converted_kwargs[name] = value if isinstance(value, expected) else convert(value)
            return fn(self, *converted_args, **converted_kwargs)
        return wrapped
    return decorator
def mean(image):
    """The mean pixel value"""
    average = image.mean()
    return average
import yaml
def read_event_file(file_name):
    """Read a file and return the corresponding objects.

    :param file_name: Name of file to read.
    :type file_name: str
    :returns: ServiceEvent from file.
    :rtype: ServiceEvent
    """
    with open(file_name, 'r') as source:
        contents = yaml.safe_load(source)
    return ServiceEvent(
        contents['timestamp'],
        contents['service'],
        contents['reason'],
        contents['action'],
        policy_requestor_name=contents.get('policy_requestor_name'),
        policy_requestor_type=contents.get('policy_requestor_type'))
def create_basic_cnn_model(num_classes: int):
    """
    Function to create a basic CNN.
    :param num_classes: The number of classes (labels). With exactly 2 classes
        a single sigmoid output is used; otherwise a softmax over num_classes.
    :return: A basic (uncompiled) Keras Sequential CNN model.
    """
    model = Sequential()
    # Convolutional + pooling layers
    model.add(Conv2D(64, (5, 5), input_shape=(config.ROI_IMG_SIZE['HEIGHT'], config.ROI_IMG_SIZE['WIDTH'], 1)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(32, (5, 5), padding='same'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    # Dropout (seeded for reproducibility)
    model.add(Dropout(0.5, seed=config.RANDOM_SEED, name="Dropout_1"))
    # Fully-connected layer
    model.add(Dense(1024, activation='relu', name='Dense_2'))
    # Output layer: sigmoid for binary, softmax for multi-class.
    if num_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_initializer="random_uniform", name='Output'))
    else:
        model.add(Dense(num_classes, activation='softmax', kernel_initializer="random_uniform", name='Output'))
    # Print model details if running in debug mode.
    if config.verbose_mode:
        print(model.summary())
    return model
def get_layer_information(cloudsat_filenames, get_quality=True, verbose=0):
    """Read the CloudLayerType dataset from each CloudSat HDF file.

    CloudLayerType codes: -9: error, 0: non determined, 1-8 cloud types.

    :param cloudsat_filenames: iterable of paths to CloudSat HDF files.
    :param get_quality: currently unused; kept for interface compatibility.
    :param verbose: if truthy, print the available SDS datasets per file.
    :return: list with one int8 numpy array of cloud layer types per file.
    """
    all_info = []
    for cloudsat_path in cloudsat_filenames:
        sd = SD(cloudsat_path, SDC.READ)
        if verbose:
            # List available SDS datasets.
            print("hdf datasets:", sd.datasets())
        # Bug fix: the original iterated `for value in all_info` while
        # all_info was still empty, so nothing was ever collected and the
        # function always returned []. Collect per-file layer types instead.
        layer_types = np.vstack([sd.select('CloudLayerType').get()]).astype(np.int8)
        all_info.append(layer_types)
    return all_info
import torch
def from_magphase(mag_spec, phase, dim: int = -2):
    """Return a complex-like torch tensor from magnitude and phase components.

    Args:
        mag_spec (torch.tensor): magnitude of the tensor.
        phase (torch.tensor): angle of the tensor
        dim(int, optional): the frequency (or equivalent) dimension along which
            real and imaginary values are concatenated.

    Returns:
        :class:`torch.Tensor`:
            The corresponding complex-like torch tensor.
    """
    real_part = mag_spec * torch.cos(phase)
    imag_part = mag_spec * torch.sin(phase)
    return torch.cat([real_part, imag_part], dim=dim)
def remove_suboptimal_parses(parses: Parses, just_one: bool) -> Parses:
    """ Return all parses that have same optimal cost. """
    # Compute each parse's cost once, then keep those matching the minimum.
    costs = [parse_cost(parse) for parse in parses]
    best = min(costs)
    optimal = [parse for parse, cost in zip(parses, costs) if cost == best]
    if just_one:
        return Parses(optimal[:1])
    return Parses(optimal)
def local_ranking(results):
    """
    Parameters
    ----------
    results : list
        Dataset with initial hand ranking and the global hand ranking.
    Returns
    -------
    results : list
        Dataset with the initial hand ranking and the game-local hand ranking
        (from 0 - nplayers).
    """
    # Replace each entry's global ranking (index 1) with its argsort in place.
    for entry in results:
        entry[1] = np.argsort(entry[1])
    return results
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
    """Check if int64 values are monotonically increasing."""
    # An empty or single-element array is trivially monotonic: the zip below
    # is then empty and all() returns True, matching the original n < 2 case.
    return all(earlier <= later for earlier, later in zip(bounds, bounds[1:]))
def parse(data):
    """
    Takes binary data, detects the TLS message type, parses the info into a nice
    Python object, which is what is returned.

    Only handshake records containing a CLIENT_HELLO are supported; anything
    else raises NotImplementedError.
    """
    if data[0] == TLS_TYPE_HANDSHAKE:
        obj = TlsHandshake()
        obj.version = data[1:3]
        obj.length = unpack(">H", data[3:5])[0]
        if data[5] == TLS_TYPE_CLIENT_HELLO:
            obj.data = ClientHello()
            obj.data.length = unpack(">I", (b"\x00" + data[6:9]))[0]  # 3-byte length
            obj.data.version = data[9:11]
            obj.data.random = data[11:43]  # 32 bytes of random
            if data[43] == 0x00:
                obj.data.session_id = None
            else:
                obj.data.session_id = data[44:44+data[43]]
            offset = 44 + data[43]
            cipher_suite_length = unpack(">H", data[offset:offset+2])[0]
            offset += 2
            obj.data.cipher_suites = data[offset:offset+cipher_suite_length]
            offset += cipher_suite_length
            obj.data.compression_methods = data[offset+1:offset+data[offset]+1]
            offset += 1 + data[offset]
            extensions_length = unpack(">H", data[offset:offset+2])[0]
            offset += 2
            extension_data = data[offset:]
            obj.data.extension_data = []
            while len(extension_data):
                extension, extension_data = parse_tls_extension(extension_data)
                obj.data.extension_data.append(extension)
            return obj
        # Bug fix: `NotImplemented` is a constant, not an exception class —
        # raising it is a TypeError. Use NotImplementedError instead.
        raise NotImplementedError("Only CLIENT_HELLO handshake message is currently implemented")
    raise NotImplementedError("Only handshake messages are currently implemented")
def recoverSecretRanks_GPT(mod_rec, tok_rec, startingText, outInd, finishSentence=True):
    """
    Function to calculate the secret ranks of GPT2 LM of a cover text given the cover text

    :param mod_rec: GPT-2 model used for recovery.
    :param tok_rec: tokenizer matching mod_rec.
    :param startingText: known prefix; its tokens are skipped in outInd.
    :param outInd: token indices of the full cover text.
    :param finishSentence: if True, stop when the sentinel rank is seen.
    :return: list of recovered secret token ranks.
    """
    startingInd=tok_rec.encode(startingText)
    endingInd=outInd[len(startingInd):]
    secretTokensRec=[]
    for i in range(len(endingInd)):
        # Rank of the next cover token under the model given the prefix so far.
        token=getTokens_GPT(mod_rec, tok_rec, startingInd, endingInd[i])
        if (finishSentence):
            # NOTE(review): rank 3 appears to act as an end-of-message marker,
            # and ranks above 2 are shifted down by one — confirm this matches
            # the encoding side before changing.
            if (token==3):
                break
        if(token>2):
            token-=1
        startingInd.append(endingInd[i])
        secretTokensRec.append(token[0].tolist())
    return secretTokensRec
def threshold_strategies(random_state=None):
    """Plan (threshold):
    - [x] aggregated features: (abs(mean - median) < 3dBm) || (2*stdev(x) < 8dBm)
    - [x] histogram: x < 85dBm
    - [ ] timeseries batch: p < 10**-3

    Evaluates three simple threshold-based anomaly detectors on several
    injected-anomaly datasets and prints LaTeX table rows with
    precision/recall/F1 per (dataset, detector) pair.
    """
    # Dataset factories: each call builds a fresh anomaly-injected dataframe.
    dummy = lambda: dummy_anomaly_injector(scaler=None, random_state=random_state)
    spikes = lambda: spike_anomaly_injector(scaler=None, random_state=random_state)
    norecovery = lambda: norecovery_anomaly_injector(scaler=None, random_state=random_state)
    recovery = lambda: recovery_anomaly_injector(scaler=None, random_state=random_state)
    slow = lambda: slow_anomaly_injector(scaler=None, random_state=random_state)
    datasets = {
        #'dummy': dummy,
        'norecovery': norecovery,
        'recovery': recovery,
        'spikes': spikes,
        'slow': slow,
    }
    lin2log = lambda x: 10 * np.log10(x)
    def aggr_approach(df, offset_threshold=3.0, stdev_threshold=2.5):
        # Flag a link as anomalous when mean/median offset or spread exceeds
        # the thresholds, per (src, dst, noise) group.
        y_true, y_pred = [], []
        unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
        for src, dst, noise in unique_pairs:
            query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
            view = df[query]
            x = view.rss.ravel()
            # Difference between mean and median has to be lower than threshold
            criteria1 = np.abs(np.mean(x) - np.median(x)) > offset_threshold
            # Deviation has to be lower than threshold
            criteria2 = 2 * np.std(x) > stdev_threshold
            #criteria2 = (np.mean(x) + 2*np.std()
            result = np.any(criteria1 | criteria2)
            #print(criteria1 + criteria2)
            y_pred.append(result)
            y_true.append(np.any(view['anomaly']))
            #print(np.any(view['anomaly']), 2*np.std(x))
            #print()
        return y_true, y_pred
    def hist_approach(df, threshold=-85.0):
        # Flag a link when any RSS sample drops below the fixed threshold.
        y_true, y_pred = [], []
        unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
        for src, dst, noise in unique_pairs:
            query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
            view = df[query]
            x = view.rss.ravel()
            result = np.any(x < threshold)
            y_pred.append(result)
            y_true.append(np.any(view['anomaly']))
        return y_true, y_pred
    def ts_as_feature_vector(df, alpha=1e-3):
        # Flag a link when its RSS samples fail a normality test.
        y_true, y_pred = [], []
        unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
        for src, dst, noise in unique_pairs:
            query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
            view = df[query]
            x = view.rss.ravel()
            k2, p = sp.stats.normaltest(x)
            result = (p < alpha) # if p < alpha, it is not normal distribution, therefore anomaly
            y_pred.append(result)
            y_true.append(np.any(view['anomaly']))
        return y_true, y_pred
    #def fft_approach(x):
    #    freq = np.abs(np.fft.fft(x))
    #    freq_db = lin2log(freq)
    #    # [N/2] - 1; frequency for each sample is i * samplingFrequency / N; 10Hz / (300 / 2 - 1)
    #    ratio = 300 // 5
    #    return (np.sum(freq_db[:ratio] > -20.0) > ratio // 2)
    #df = norecovery_anomaly_injector(scaler=None, random_state=SEED)
    # Evaluate every detector on every dataset and emit one LaTeX row each.
    for dataset_name, dataset in datasets.items():
        string = f'% computer generated: anomaly={dataset_name}\nBaseline & Threshold (Tab.~\\ref{{tab:threshold-config}})'
        for name, func in (('time-value', ts_as_feature_vector), ('aggr', aggr_approach), ('hist', hist_approach)):
            df = dataset()
            y_true, y_pred = func(df)
            #print(metrics.classification_report(y_true=y_true, y_pred=y_pred))
            prec = metrics.precision_score(y_true, y_pred, labels=[False, True])
            rec = metrics.recall_score(y_true, y_pred, labels=[False, True])
            f1 = metrics.f1_score(y_true, y_pred, labels=[False, True])
            string += f'\t& {prec:.2f} & {rec:.2f} & {f1:.2f}\\tnote{{1}} &'
        string = string + '& - & - ' + '\\\\'
        print(string)
import math
def ellipse_properties(x, y, w):
    """
    Given a the (x,y) locations of the foci of the ellipse and the width return
    the center of the ellipse, width, height, and angle relative to the x-axis.

    :param double x: x-coordinates of the foci
    :param double y: y-coordinates of the foci
    :param double w: width of the ellipse
    :rtype: tuple of doubles
    :returns: (center_coordinates, width, height, angle_in_rads)
    """
    x1, y1 = x[0], y[0]
    x2, y2 = x[1], y[1]
    # Midpoint of the two foci is the ellipse center.
    center = [(x1 + x2)/2, (y1 + y2)/2]
    # Distance between the foci.
    d = ((x1 - x2)**2 + (y1 - y2)**2)**(0.5)
    # Angle of the major axis relative to the positive x-axis.
    angle = math.atan((y1 - y2)/(x1 - x2))
    # The axis the foci lie on becomes the full width of the ellipse.
    width = 2*((0.5*d)**2 + (0.5*w)**2)**(0.5)
    height = w
    return (center, width, height, angle*180/math.pi)
def build_func(f, build_type):
    """
    Custom decorator that is similar to the @conf decorator except that it is intended to mark
    build functions specifically. All build functions must be decorated with this decorator

    :param f: build method to bind
    :type f: function
    :parm build_type: The WAF build type (see BuildTargetType)
    :type build_type: string
    """
    def bound(*args, **kwargs):
        # Inject the build-function name and type before delegating.
        kwargs[BUILD_FUNC_KW] = f.__name__
        kwargs[BUILD_TYPE_KW] = build_type
        return f(*args, **kwargs)
    # Expose the wrapper on every WAF context class under the original name.
    for context_cls in (OptionsContext, ConfigurationContext, BuildContext):
        setattr(context_cls, f.__name__, bound)
    return f
def getnumoflinesinblob(ext_blob):
    """
    Get number of lines in blob
    """
    ext, blob_id = ext_blob
    # Pipe the blob contents through `wc -l` and take the leading count.
    output = getpipeoutput(['git cat-file blob %s' % blob_id, 'wc -l'])
    line_count = int(output.split()[0])
    return (ext, blob_id, line_count)
import os
def dir_exists(dir):
    """Test if dir exists and is a directory."""
    # os.path.isdir already returns False for non-existent paths, so the
    # previous additional os.path.exists check was redundant.
    return os.path.isdir(dir)
import requests
def fetch_data(full_query):
    """
    Fetches data from the given url
    """
    response = requests.get(full_query)
    try:
        # Parse the json data so it can be used as a normal dict
        return response.json()
    finally:
        # It's a good practice to always close opened responses!
        response.close()
import http
def post(*args, **kwargs): # pragma: no cover
    """Make a post request. This method is needed for mocking."""
    response = http.post(*args, **kwargs)
    return response
def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
    """Equivalent of PIL Shearing in X dimension."""
    # Shear parallel to x axis is a projective transform with matrix:
    #   [1 level 0]
    #   [0 1     0]
    shear_matrix = [1., level, 0., 0., 1., 0., 0., 0.]
    sheared = transform(image=wrap(image), transforms=shear_matrix)
    return unwrap(sheared, replace)
import os
import logging
import sys
def sequence_data(project_dir, config):
    """
    opens word sequence HDF5 file
    :param project_dir: project root directory
    :param config: configuration mapping with 'files' and 'word_sequence' keys
    :return: pointer to word sequence array
    """
    seq_path = os.path.join(project_dir, config['files'][config['word_sequence']['file']])
    try:
        h5_seq = h5py.File(seq_path)
    except IOError:
        logging.error('cannot open HDF5 file %s', config['files'][config['word_sequence']['file']])
        sys.exit(-1)
    return h5_seq[config['word_sequence']['path']]
def extract_const_string(data):
    """Extract const string information from a string
    Warning: strings array seems to be practically indistinguishable from strings with ", ".
    e.g.
    The following is an array of two elements
        const/4 v0, 0x1
        new-array v0, v0, [Ljava/lang/String;
        const/4 v1, 0x0
        const-string v2, "NIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4, OIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4"
        aput-object v2, v0, v1
    It seems equal to this other case:
        const/4 v0, 0x2
        new-array v0, v0, [Ljava/lang/String;
        const/4 v1, 0x0
        const-string v2, "LIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4"
        aput-object v2, v0, v1
    But who says that in the second case the const-string last argument is just a string while in the
    first case the last arg are two elements of the array?
    :data: Data would be sth like: v0, "this is a string"
    :return: Returns a const string object, otherwise None
    :rtype: dict or list
    """
    match = regex_var.search(data)
    if not match:
        return None
    # A const string is usually saved in this form
    #  <variable name>,<value>
    name = match.group('var')
    value = match.group('value')
    if ", " not in value:
        # Single value: return one dict with the variable and its string.
        return {'name': name, 'value': value}
    # Comma-separated values: return one dict per element.
    return [{'name': name, 'value': part} for part in value.split(", ")]
def choose_action(q, sx, so, epsilon):
    """
    Choose action index for given state.
    """
    # Valid actions are the cells occupied by neither player.
    valid = np.where((sx + so) == False)
    valid_indices = np.transpose(valid)
    q_max_index = tuple(valid_indices[np.argmax(q[valid])])
    # Epsilon-greedy: explore with probability epsilon, else exploit.
    if np.random.random() <= epsilon:
        # Choose random action from list of valid actions.
        chosen = tuple(valid_indices[np.random.randint(len(valid_indices))])
    else:
        # Choose valid action with maximal Q.
        chosen = q_max_index
    return q_max_index, chosen
from typing import Union
import sys
def _meta_commands(sql: str, context: Context, client: Client) -> Union[bool, Client]:
    """
    parses metacommands and prints their result
    returns True if meta commands detected

    :param sql: raw input line, possibly a meta command like "\\dt schema".
    :param context: dask-sql Context holding schemas/tables/models.
    :param client: active Dask distributed Client.
    :returns: False if the input is not a meta command; a new Client when
        the cluster is switched (\\dsc); True otherwise.
    """
    cmd, schema_name = _parse_meta_command(sql)
    available_commands = [
        ["\\l", "List schemas"],
        ["\\d?, help, ?", "Show available commands"],
        ["\\conninfo", "Show Dask cluster info"],
        ["\\dt [schema]", "List tables"],
        ["\\df [schema]", "List functions"],
        ["\\dm [schema]", "List models"],
        ["\\de [schema]", "List experiments"],
        ["\\dss [schema]", "Switch schema"],
        ["\\dsc [dask scheduler address]", "Switch Dask cluster"],
        ["quit", "Quits dask-sql-cli"],
    ]
    if cmd == "\\dsc":
        # Switch Dask cluster: the second token is the scheduler address.
        _, scheduler_address = _parse_meta_command(sql)
        client = Client(scheduler_address)
        return client  # pragma: no cover
    # Fall back to the context's current schema when none was given.
    schema_name = schema_name or context.schema_name
    if cmd == "\\d?" or cmd == "help" or cmd == "?":
        _display_markdown(available_commands, columns=["Commands", "Description"])
    elif cmd == "\\l":
        _display_markdown(context.schema.keys(), columns=["Schemas"])
    elif cmd == "\\dt":
        _display_markdown(context.schema[schema_name].tables.keys(), columns=["Tables"])
    elif cmd == "\\df":
        _display_markdown(
            context.schema[schema_name].functions.keys(), columns=["Functions"]
        )
    elif cmd == "\\de":
        _display_markdown(
            context.schema[schema_name].experiments.keys(), columns=["Experiments"]
        )
    elif cmd == "\\dm":
        _display_markdown(context.schema[schema_name].models.keys(), columns=["Models"])
    elif cmd == "\\conninfo":
        cluster_info = [
            ["Dask scheduler", client.scheduler.__dict__["addr"]],
            ["Dask dashboard", client.dashboard_link],
            ["Cluster status", client.status],
            ["Dask workers", len(client.cluster.workers)],
        ]
        _display_markdown(
            cluster_info, columns=["components", "value"]
        )  # pragma: no cover
    elif cmd == "\\dss":
        # Switch the active schema if it exists.
        if schema_name in context.schema:
            context.schema_name = schema_name
        else:
            print(f"Schema {schema_name} not available")
    elif cmd == "quit":
        print("Quitting dask-sql ...")
        client.close()  # for safer side
        sys.exit()
    elif cmd.startswith("\\"):
        # Unknown backslash command: show the help table.
        print(
            f"The meta command {cmd} not available, please use commands from below list"
        )
        _display_markdown(available_commands, columns=["Commands", "Description"])
    else:
        # nothing detected probably not a meta command
        return False
    return True
def soliswets(function, sol, fitness, lower, upper, maxevals, delta):
    """
    Implements the solis wets algorithm (adaptive random local search).

    :param function: objective function to minimize.
    :param sol: starting solution vector.
    :param fitness: fitness of the starting solution.
    :param lower: lower bound(s) for clipping candidate solutions.
    :param upper: upper bound(s) for clipping candidate solutions.
    :param maxevals: maximum number of function evaluations.
    :param delta: step-size vector; adapted during the search and returned.
    :return: tuple (EAresult(solution, fitness, evaluations), delta).
    """
    bias = zeros(delta.shape)
    evals = 0
    num_success = 0
    num_failed = 0
    dim = len(sol)
    while evals < maxevals:
        # Random perturbation drawn per dimension, biased by recent successes.
        dif = uniform(0, delta, dim)
        newsol = clip(sol+bias+dif, lower, upper)
        new_fitness = function(newsol)
        evals += 1
        if new_fitness < fitness:
            # Forward step improved: accept and reinforce the bias.
            sol = newsol
            fitness = new_fitness
            bias = _increm_bias(bias, dif)
            num_success += 1
            num_failed = 0
        elif evals < maxevals:
            # Try the opposite direction before counting a failure.
            new_sol = clip(sol - bias - dif, lower, upper)
            new_fitness = function(new_sol)
            evals += 1
            if new_fitness < fitness:
                sol = new_sol
                fitness = new_fitness
                bias = _dec_bias(bias, dif)
                num_success += 1
                num_failed = 0
            else:
                # Both directions failed: shrink the bias.
                bias = 0.5 * bias
                num_success = 0
                num_failed += 1
        # Step-size adaptation: expand after 5 successes, contract after 3 failures.
        if num_success >= 5:
            num_success = 0
            delta *= 2
        elif num_failed >= 3:
            num_failed = 0
            delta /= 2
    return EAresult(solution=sol, fitness=fitness, evaluations=maxevals), delta
def _get_precision_type(network_el):
"""Given a network element from a VRP-REP instance, returns its precision type:
floor, ceil, or decimals. If no such precision type is present, returns None.
"""
if 'decimals' in network_el:
return 'decimals'
if 'floor' in network_el:
return 'floor'
if 'ceil' in network_el:
return 'ceil'
return None | b3b451a26ec50ce5f2424ea7a3652123ae96321d | 27,060 |
import textwrap
import argparse
def vtr_command_argparser(prog=None):
    """ Argument parse for run_vtr_task

    :param prog: program name shown in help output; defaults to sys.argv[0].
    :return: a configured argparse.ArgumentParser for run_vtr_task.
    """
    description = textwrap.dedent(
        """
                    Runs one or more VTR tasks.
                    """
    )
    epilog = textwrap.dedent(
        """
                Examples
                --------
                    Run the task named 'timing_chain':
                        %(prog)s timing_chain
                    Run all the tasks listed in the file 'task_list.txt':
                        %(prog)s -l task_list.txt
                    Run 'timing_chain' with 4 jobs running in parallel:
                        %(prog)s timing_chain -j4
                Exit Code
                ---------
                    The exit code equals the number failures
                    (i.e. exit code 0 indicates no failures).
                """
    )
    parser = argparse.ArgumentParser(
        prog=prog,
        description=description,
        epilog=epilog,
        formatter_class=RawDefaultHelpFormatter,
    )
    #
    # Major arguments
    #
    parser.add_argument("task", nargs="*", help="Tasks to be run")
    parser.add_argument(
        "-l",
        nargs="*",
        default=[],
        metavar="TASK_LIST_FILE",
        dest="list_file",
        help="A file listing tasks to be run",
    )
    parser.add_argument(
        "-parse",
        default=False,
        action="store_true",
        help="Perform only parsing on the latest task run",
    )
    parser.add_argument(
        "-create_golden",
        default=False,
        action="store_true",
        help="Update or create golden results for the specified task",
    )
    parser.add_argument(
        "-check_golden",
        default=False,
        action="store_true",
        help="Check the latest task run against golden results",
    )
    parser.add_argument(
        "-system",
        choices=["local", "scripts"],
        default="local",
        help="What system to run the tasks on.",
    )
    parser.add_argument(
        "-script",
        default="run_vtr_flow.py",
        help="Determines what flow script is used for the tasks",
    )
    parser.add_argument(
        "-short_task_names",
        default=False,
        action="store_true",
        help="Output shorter task names.",
    )
    parser.add_argument(
        "-show_failures",
        default=False,
        action="store_true",
        help="Produce additional debug output",
    )
    # -j and -p are aliases for the worker-process count.
    parser.add_argument(
        "-j",
        "-p",
        default=1,
        type=int,
        metavar="NUM_PROC",
        help="How many processors to use for execution.",
    )
    parser.add_argument(
        "-timeout",
        default=30 * 24 * 60 * 60,  # 30 days
        metavar="TIMEOUT_SECONDS",
        help="Time limit for this script.",
    )
    parser.add_argument(
        "-verbosity",
        default=0,
        type=int,
        help="Sets the verbosity of the script. Higher values produce more output.",
    )
    parser.add_argument(
        "-minw_hint_factor",
        default=1,
        type=float,
        help="Minimum width hint factor to multiplied by the minimum width hint",
    )
    parser.add_argument("-revision", default="", help="Revision number")
    parser.add_argument(
        "-calc_geomean",
        default=False,
        action="store_true",
        help="QoR geomeans are not computed by default",
    )
    parser.add_argument(
        "-print_metadata",
        default=True,
        type=argparse_str2bool,
        help="Print meta-data like command-line arguments and run-time",
    )
    # -s consumes everything after it as shared per-task script parameters.
    parser.add_argument(
        "-s",
        nargs=argparse.REMAINDER,
        default=[],
        dest="shared_script_params",
        help="Treat the remainder of the command line options as script parameters "
        "shared by all tasks",
    )
    return parser
import json
def user_list():
"""Retrieves a list of the users currently in the db.
Returns:
A json object with 'items' set to the list of users in the db.
"""
users_json = json.dumps(({'items': models.User.get_items_as_list_of_dict()}))
return flask.Response(ufo.XSSI_PREFIX + users_json, headers=ufo.JSON_HEADERS) | b216b41b35b4b25c23ea2cc987ff4fe2b6464775 | 27,062 |
import hashlib
def md5_str(content):
    """Compute the MD5 hex digest of a string.

    :param content: input string (encoded as UTF-8 before hashing)
    :return: 32-character hexadecimal MD5 digest
    """
    digest = hashlib.md5(content.encode('utf-8'))
    return digest.hexdigest()
import requests
def hurun_rank(indicator: str = "百富榜", year: str = "2020") -> pd.DataFrame:
    """
    Hurun rankings, scraped from http://www.hurun.net/CN/HuList/Index?num=3YwKs889SRIm

    :param indicator: which list to fetch; the code handles
        {"百富榜", "全球榜", "至尚优品"}.
        NOTE(review): the original docstring advertised "富豪榜", but no branch
        below handles it — passing it (or any other value) leaves symbol_map
        unset and raises NameError. Confirm the intended list names.
    :type indicator: str
    :param year: year of the list; {"百富榜": 2015+, "全球榜": 2015+, "至尚优品": 2017+}
    :type year: str
    :return: the requested ranking table
    :rtype: pandas.DataFrame
    """
    if indicator == "百富榜":
        symbol_map = {
            "2015": "5",
            "2016": "1",
            "2017": "11",
            "2018": "15",
            "2019": "19",
            "2020": "22",
        }
    elif indicator == "全球榜":
        symbol_map = {
            "2015": "6",
            "2016": "4",
            "2017": "2",
            "2018": "14",
            "2019": "18",
            "2020": "20",
        }
    elif indicator == "至尚优品":
        symbol_map = {
            "2017": "10",
            "2018": "13",
            "2019": "17",
            "2020": "21",
        }
        # The "best products" list uses a different endpoint and columns,
        # and returns early from inside this branch.
        url = f"http://www.hurun.net/CN/HuList/BobListJson/{symbol_map[year]}"
        payload = {"order": "asc", "search": ""}
        r = requests.post(url, json=payload)
        data_json = r.json()
        temp_df = pd.DataFrame(data_json)
        temp_df.columns = [
            "_",
            "_",
            "_",
            "类别",
            "_",
            "_",
            "奖项",
            "_",
            "排名",
            "品牌",
            "_",
            "_",
        ]
        temp_df = temp_df[
            [
                "类别",
                "奖项",
                "排名",
                "品牌",
            ]
        ]
        # Forward-fill the category column, which is blank on repeated rows.
        temp_df["类别"].replace("", np.nan, inplace=True)
        temp_df.fillna(method="ffill", inplace=True)
        return temp_df
    # Rich-list endpoints share a common response layout.
    url = f"http://www.hurun.net/CN/HuList/ListJson/{symbol_map[year]}"
    payload = {"order": "asc", "search": ""}
    r = requests.post(url, json=payload)
    data_json = r.json()
    temp_df = pd.DataFrame(data_json)
    temp_df.columns = [
        "_",
        "排名",
        "姓名",
        "财富",
        "出生日期",
        "关联企业",
        "主营行业",
    ]
    temp_df = temp_df[
        [
            "排名",
            "姓名",
            "财富",
            "出生日期",
            "关联企业",
            "主营行业",
        ]
    ]
    return temp_df
def yamartino_method(a, axis=None):
    """This function calclates the standard devation along the
    chosen axis of the array. This function has been writen to
    calculate the mean of complex numbers correctly by taking
    the standard devation of the argument & the
    angle (exp(1j*theta) ). This uses the Yamartino method
    which is a one pass method of estimating the standard
    devation of an angle.
    Input :
    a : N-D numpy array
    axis : The axis to perform the operation over
            The Default is over all axies
    Output:
    This returns a an array or a one value array
    Example:
    """
    if a.dtype.kind == 'c':
        # Magnitude spread via a plain standard deviation of |a|.
        r = np.sqrt(a.real ** 2 + a.imag ** 2).std(axis=axis)#mean
        th = np.arctan2(a.imag,a.real)
        # NOTE(review): len(th) is the length of the first axis only; for
        # multi-dimensional input this differs from th.size, so the averages
        # below may not be true means — confirm intended behavior.
        if axis is None :
            sa = (np.sin(th) / len(th)).sum()
            ca = (np.cos(th) / len(th)).sum()
        else:
            sa = (np.sin(th) / len(th)).sum(axis=axis)
            ca = (np.cos(th) / len(th)).sum(axis=axis)
        # Yamartino estimator for the angular standard deviation.
        e = np.sqrt(1. - (sa ** 2 + ca ** 2))
        thsd = np.arcsin(e)*(1. + (2. / np.sqrt(3) - 1.) * e ** 3)
        return r * np.exp(1j * thsd)
    else:
        # Real input: plain standard deviation.
        return np.std(a, axis=axis)
def calc_half_fs_axis(total_points, fs):
    """Generate a frequency axis up to half the sampling frequency
    with the given number of points.
    """
    sample_indices = arange(total_points)
    freq_axis = sample_indices*fs/2/total_points # Hz up to half of fs
    return freq_axis
def fetch_weather():
    """ select latest weather records (one per station, last day) for display

    :return: list of dicts keyed by column name, or None when the query fails
        (the error is printed; NOTE(review): callers must handle None).
    """
    sql = "select station, latitude,longitude,visibility,coalesce(nullif(windspeed,''),cast(0.0 as varchar)) as windspeed, coalesce(nullif(precipitation,''),cast(0.00 as varchar)) as precipitation from (select station_id AS station, info ->> 'Latitude' as latitude, info ->> 'Longitude' as longitude, info ->> 'Mean_Visibility' as visibility, info ->> 'Mean_Windspeed' as windspeed, info ->> 'Precipitation' as precipitation from weather w where date_trunc('day',w.create_date) >= date_trunc('day',current_timestamp - interval '1' day) and create_date = (select max(create_date) from weather wi where wi.station_id = w.station_id) limit 300) b;"
    conn = None
    # NOTE(review): state_id is assigned but never used.
    state_id = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # execute the SELECT statement
        cur.execute(sql)
        # build one dict per row, keyed by the cursor's column names
        r = [dict((cur.description[i][0], value) \
            for i, value in enumerate(row)) for row in cur.fetchall()]
        cur.connection.close()
        return r
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
def update_params(old_param, new_param, errors="raise"):
    """Update 'old_param' with 'new_param'.

    Keys of new_param not present in old_param either raise (errors="raise")
    or are silently skipped.
    """
    updated_param = old_param.copy()
    for key, value in new_param.items():
        if key not in old_param:
            if errors == "raise":
                raise Exception(f"Parameters {key} not recognized as a default parameter for this estimator")
            continue
        updated_param[key] = value
    return updated_param
from enum import Enum
def UppercaseEnum(*args):
"""
Provides an :class:`~stem.util.enum.Enum` instance where the values are
identical to the keys. Since the keys are uppercase by convention this means
the values are too. For instance...
::
>>> from stem.util import enum
>>> runlevels = enum.UppercaseEnum('DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR')
>>> runlevels.DEBUG
'DEBUG'
:param list args: enum keys to initialize with
:returns: :class:`~stem.util.enum.Enum` instance with the given keys
"""
return Enum(*[(v, v) for v in args]) | a765ff333cb274e3d15bc9d3e29f83f89d384842 | 27,069 |
from datetime import datetime
def rng_name():
    """Generate a quasi-random string for a username.

    NOTE(review): the value is derived from the current UTC time (second and
    microsecond), not a RNG, so two calls in the same microsecond collide.
    """
    # Bug fix: the adjacent import is `from datetime import datetime`, under
    # which `datetime.datetime.utcnow()` raises AttributeError; call
    # datetime.utcnow() directly.
    return "b{dt.second}{dt.microsecond}".format(dt=datetime.utcnow())
def collections(id=None):
    """
    Build a SPARQL query returning collections.

    Parameters
    ----------
    id : STR, optional
        The default is None, which returns all known collections.
        You can provide an ICOS URI or DOI to filter for a specific collection.

    Returns
    -------
    query : STR
        A query, which can be run against the SPARQL endpoint.
    """
    # With no id we insert an empty filter clause, otherwise match either the
    # collection URI or its DOI.
    if id:
        coll = 'FILTER(str(?collection) = "' + id + '" || ?doi = "' + id + '") .'
    else:
        coll = ''
    query = """
        prefix cpmeta: <http://meta.icos-cp.eu/ontologies/cpmeta/>
        prefix dcterms: <http://purl.org/dc/terms/>
        select * where{
        ?collection a cpmeta:Collection .
        %s
        OPTIONAL{?collection cpmeta:hasDoi ?doi} .
        ?collection dcterms:title ?title .
        OPTIONAL{?collection dcterms:description ?description}
        FILTER NOT EXISTS {[] cpmeta:isNextVersionOf ?collection}
        }
        order by ?title
        """ % coll
    return query
import os
def ajax_upload():
    """Handle an ajax (dropzone) file upload for the current page.

    Dropzone sends one ajax request per file, so this invocation handles a
    single file even if multiple files have been dropped on the web page.

    Returns:
        An ajax_response() payload indicating success or failure.
    """
    page = request.page
    print_debug(' ajax_upload ')
    print_debug(' request.form.keys() : {}'.format(list(request.form.keys())))
    print_debug(' destination : "{}"'.format(page.abspath))
    print_debug(' page.url : {}'.format(page.url))
    # Make sure that this user is authorized to put files here.
    if not page.can['write']:
        return ajax_response(False, 'Oops - invalid permissions for file upload.')
    if page.is_dir:
        abspath = page.abspath
    else:
        # Attachments for a regular page live in its attachments folder;
        # create it if it doesn't exist yet.
        abspath = page.attachments_folder()
        if not os.path.exists(abspath):
            try:
                os.mkdir(abspath)
            except OSError:
                # Narrowed from a bare `except:` — only filesystem errors are
                # expected from os.mkdir; anything else should propagate.
                print_debug(' submit_createfolder: os.makedir failed')
                return ajax_response(False, 'error creating attachments folder')
    for upload in request.files.getlist("file"):
        # TODO: send something other than success if there's a problem here,
        # probably with a try: except: block.
        filename = secure_filename(upload.filename)
        destination = os.path.join(abspath, filename)
        print_debug(f" ajax_response destination='{destination}'")
        upload.save(destination)
        gitlocal.add_commit(page, abspath=destination)
    print_debug(" sending ajax response ")
    return ajax_response(True, 'upload success')
def zmat_to_coords(zmat, keep_dummy=False, skip_undefined=False):
    """
    Generate the cartesian coordinates from a zmat dict.
    Considers the zmat atomic map so the returned coordinates is ordered correctly.
    Most common isotopes assumed, if this is not the case, then isotopes should be reassigned to the xyz.
    This function assumes that all zmat variables relate to already defined atoms with a lower index in the zmat.
    This function implements the SN-NeRF algorithm as described in:
    J. Parsons, J.B. Holmes, J.M Rojas, J. Tsai, C.E.M. Strauss, "Practical Conversion from Torsion Space to Cartesian
    Space for In Silico Protein Synthesis", Journal of Computational Chemistry 2005, 26 (10), 1063-1068,
    https://doi.org/10.1002/jcc.20237
    Tested in converterTest.py rather than zmatTest
    Args:
        zmat (dict): The zmat.
        keep_dummy (bool): Whether to keep dummy atoms ('X'), ``True`` to keep, default is ``False``.
        skip_undefined (bool): Whether to skip atoms with undefined variables, instead of raising an error.
                               ``True`` to skip, default is ``False``.
    Returns:
        list: The cartesian coordinates.
    Returns:
        list: The atomic symbols corresponding to the coordinates.
    Raises:
        ZMatError: If zmat if of wrong type or does not contain all keys.
    """
    # Validate the structure of the zmat before doing any geometry work.
    if not isinstance(zmat, dict):
        raise ZMatError(f'zmat has to be a dictionary, got {type(zmat)}')
    if 'symbols' not in zmat or 'coords' not in zmat or 'vars' not in zmat or 'map' not in zmat:
        raise ZMatError(f'Expected to find symbols, coords, vars, and map in zmat, got instead: {list(zmat.keys())}.')
    if not len(zmat['symbols']) == len(zmat['coords']) == len(zmat['map']):
        raise ZMatError(f'zmat sections symbols, coords, and map have different lengths: {len(zmat["symbols"])}, '
                        f'{len(zmat["coords"])}, and {len(zmat["map"])}, respectively.')
    # Collect indices of atoms whose z-matrix parameters have no entry in
    # 'vars'; depending on skip_undefined these are either skipped or fatal.
    var_list = list(zmat['vars'].keys())
    coords_to_skip = list()
    for i, coords in enumerate(zmat['coords']):
        for coord in coords:
            if coord is not None and coord not in var_list:
                if skip_undefined:
                    coords_to_skip.append(i)
                else:
                    raise ZMatError(f'The parameter {coord} was not found in the "vars" section of '
                                    f'the zmat:\n{zmat["vars"]}')
    # Build the cartesian coordinates atom by atom (SN-NeRF); each atom only
    # depends on previously placed atoms with lower zmat indices.
    coords = list()
    for i in range(len(zmat['symbols'])):
        coords = _add_nth_atom_to_coords(zmat=zmat, coords=coords, i=i, coords_to_skip=coords_to_skip)
    # reorder the xyz according to the zmat map and remove dummy atoms
    ordered_coords, ordered_symbols = list(), list()
    for i in range(len([symbol for symbol in zmat['symbols'] if symbol != 'X'])):
        zmat_index = key_by_val(zmat['map'], i)
        if zmat_index < len(coords) and i not in coords_to_skip:
            ordered_coords.append(coords[zmat_index])
            ordered_symbols.append(zmat['symbols'][zmat_index])
    if keep_dummy:
        # Dummy atoms are flagged with an 'X' in their map value; append them
        # after the real atoms.
        for key, val in zmat['map'].items():
            if 'X' in str(val):
                ordered_coords.append(coords[key])
                ordered_symbols.append(zmat['symbols'][key])
    return ordered_coords, ordered_symbols
def get_module_version(module_name: str) -> str:
    """Return the installed version of *module_name*.

    Spawns ``python -c "import <module>; print(module.__version__)"`` in a
    subprocess and returns the last line it printed.

    Raises:
        ClientErrorException: when no version could be read.
    """
    # The "onnxrt" framework name maps onto the "onnx" python package.
    if module_name == "onnxrt":
        module_name = "onnx"
    proc = Proc()
    proc.run(
        args=[
            "python",
            "-c",
            f"import {module_name} as module; print(module.__version__)",
        ],
    )
    version = None
    if proc.is_ok:
        # Keep only the last output line (the printed version string).
        for line in proc.output:
            version = line
        proc.remove_logs()
    if version is None:
        raise ClientErrorException(f"Could not found version of {module_name} module.")
    return version
def pd_images(foc_offsets=(0, 0), xt_offsets=(0, 0), yt_offsets=(0, 0),
    phase_zernikes=(0, 0, 0, 0), amp_zernikes=(0,), outer_diam=200, inner_diam=0, \
    stage_pos=(0, -10, 10), radians_per_um=None, NA=0.58, wavelength=0.633, sz=512, \
    fresnel_focal_length=None, um_per_pix=6.0):
    """
    Create a set of simulated phase diversity images.
    Note that dimensions here are in microns.

    Defaults are immutable tuples (rather than lists) to avoid the
    shared-mutable-default pitfall; all are only read, never mutated.

    Parameters
    ----------
    foc_offsets: (n_images-1) array-like
        Focus offset in radians for the second and subsequent images
    xt_offsets: (n_images-1) array-like
        X tilt offset
    yt_offsets: (n_images-1) array-like
        Y tilt offset
    phase_zernikes: array-like
        Zernike terms for phase, excluding piston.
    amp_zernikes: array-like
        Zernike terms for amplitude, including overall normalisation.
    outer_diam, inner_diam: float
        Outer and inner diameter of the annular pupil in pixels. Note that a
        better model would have a (slightly) variable pupil size as the focus
        changes.
    radians_per_um: float
        Radians in focus term per micron of stage movement. This is
        approximately np.pi * NA^2 / wavelength.
    stage_pos: (n_images) array-like
        Nominal stage position in microns.
    fresnel_focal_length: float
        Focal length in microns if we are in the Fresnel regime. If this is None,
        a Fraunhofer calculation will be made.
    um_per_pix: float
        If we are in the Fresnel regime, we need to define the pixel scale of the
        input pupil.

    Returns
    -------
    ims: (n_images, sz, sz) numpy array
        The simulated point-spread-function images, one per stage position.
    """
    #Firstly, sort out focus, and tilt offsets. This focus offset is a little of a
    #guess...
    # NOTE(review): an earlier docstring stated 2*pi*NA^2/wavelength but the
    # code uses pi*NA^2/wavelength -- confirm which factor is intended.
    if radians_per_um is None:
        radians_per_um = np.pi*NA**2/wavelength
    total_focus = np.array(stage_pos) * radians_per_um
    total_focus[1:] += np.array(foc_offsets)
    #Add a zero (for ref image) to the tilt offsets
    xt = np.concatenate([[0],xt_offsets])
    yt = np.concatenate([[0],yt_offsets])
    #Create the amplitude zernike array. Normalise so that the
    #image sum is zero for a evenly illuminated pupil (amplitude zernikes
    #all 0).
    pup_even = circle(sz, outer_diam, interp_edge=True) - \
        circle(sz, inner_diam, interp_edge=True)
    pup_even /= np.sqrt(np.sum(pup_even**2))*sz
    pup = pup_even*zernike_amp(sz, amp_zernikes, diam=outer_diam)
    #Needed for the Fresnel calculation
    flux_norm = np.sum(pup**2)/np.sum(pup_even**2)
    #Prepare for fresnel propagation if needed.
    if fresnel_focal_length is not None:
        lens = FocusingLens(sz, um_per_pix, um_per_pix, fresnel_focal_length, wavelength)
        print("Using Fresnel propagation...")
    #Now iterate through the images at different foci.
    n_ims = len(total_focus)
    ims = np.zeros( (n_ims, sz, sz) )
    for i in range(n_ims):
        #Phase zernikes for this image: add per-image tilt and focus on top of
        #the common phase aberrations (index 0 is the unused piston term).
        im_phase_zernikes = np.concatenate([[0.], phase_zernikes])
        im_phase_zernikes[1] += xt[i]
        im_phase_zernikes[2] += yt[i]
        im_phase_zernikes[4] += total_focus[i]
        wf = pup*zernike_wf(sz, im_phase_zernikes, diam=outer_diam)
        if fresnel_focal_length is None:
            ims[i] = np.fft.fftshift(np.abs(np.fft.fft2(wf))**2)
        else:
            #For a Fresnel propagation, we need to normalise separately,
            #because the lens class was written with inbuilt normalisation.
            ims[i] = lens.focus(wf) * flux_norm
    return ims
def add_tickets(create_user, add_flights):
    """Fixture creating two economy tickets for a freshly created user."""
    owner = create_user(USER)
    # Fields shared by every ticket in this fixture.
    common = {
        "paid": False,
        "type": "ECO",
        "seat_number": "E001",
        "made_by": owner,
    }
    specs = [
        ("LOS29203SLC", add_flights[0]),
        ("LOS24933SLC", add_flights[1]),
    ]
    return [
        Ticket.objects.create(ticket_ref=ref, flight=flight, **common)
        for ref, flight in specs
    ]
def compute_depth_errors(gt, pred, dataset=None):
    """Computation of error metrics between predicted and ground truth depths.

    Args:
        gt (N): ground truth depth (strictly positive values).
        pred (N): predicted depth (strictly positive values).
        dataset (str, optional): which metric set to return ('nyu' or 'kitti').
            Defaults to the script-level ``args.dataset`` for backward
            compatibility with existing callers.

    Returns:
        'nyu':   (abs_rel, log10, rmse, a1, a2, a3)
        'kitti': (abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3)

    Raises:
        ValueError: if ``dataset`` is neither 'nyu' nor 'kitti' (previously the
            function silently returned None in that case).
    """
    if dataset is None:
        # Preserve the original reliance on the module-level `args`.
        dataset = args.dataset
    # a1/a2/a3: fraction of pixels whose ratio error is below 1.25^k.
    thresh = np.maximum((gt / pred), (pred / gt))
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean()
    a3 = (thresh < 1.25 ** 3).mean()
    rmse = (gt - pred) ** 2
    rmse = np.sqrt(rmse.mean())
    rmse_log = (np.log(gt) - np.log(pred)) ** 2
    rmse_log = np.sqrt(rmse_log.mean())
    log10 = np.mean(np.abs((np.log10(gt) - np.log10(pred))))
    abs_rel = np.mean(np.abs(gt - pred) / gt)
    sq_rel = np.mean(((gt - pred) ** 2) / gt)
    if dataset == 'nyu':
        return abs_rel, log10, rmse, a1, a2, a3
    elif dataset == 'kitti':
        return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
    raise ValueError(f"Unknown dataset '{dataset}'; expected 'nyu' or 'kitti'.")
def sanitize_bvals(bvals, target_bvals=(0, 1000, 2000, 3000)):
    """Snap each b-value to the closest target b-value, in place.

    Removes small scanner variation in ``bvals`` by replacing every entry with
    the nearest value from ``target_bvals``.

    Args:
        bvals: mutable sequence of b-values; modified in place.
        target_bvals: candidate b-values to snap to. The default is an
            immutable tuple to avoid the shared-mutable-default pitfall.

    Returns:
        The same ``bvals`` sequence, mutated in place.
    """
    for idx, bval in enumerate(bvals):
        bvals[idx] = min(target_bvals, key=lambda x: abs(x - bval))
    return bvals
def fetch_engines():
    """
    fetch_engines() : Fetches documents from Firestore collection as JSON
    Each engine document is augmented with its Firestore document id.
    """
    all_engines = [
        {**doc.to_dict(), "id": doc.id} for doc in engine_ref.stream()
    ]
    return make_response(jsonify(all_engines), 200)
def isone(a: float) -> bool:
    """Return True when ``a`` equals 1.0 within a tiny absolute tolerance.

    Works around floating-point precision issues via np.isclose.
    """
    tolerance = 1.0e-8
    return np.isclose(a, 1.0, rtol=0.0, atol=tolerance)
import argparse
def get_arguments():
    """Build and return the CLI argument parser for the DSStoreParser tool."""
    usage = """DSStoreParser CLI tool. v{}""".format(__VERSION__)
    usage += """\n\nSearch for .DS_Store files in the path provided and parse them."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=(usage),
    )
    # Both options are mandatory; `commandline_arg` normalizes the raw value.
    parser.add_argument(
        '-s', '--source',
        dest='source',
        action="store",
        type=commandline_arg,
        required=True,
        help='The source path to search recursively for .DS_Store files to parse. ',
    )
    parser.add_argument(
        '-o', '--out',
        dest='outdir',
        action="store",
        type=commandline_arg,
        required=True,
        help='The destination folder for generated reports.',
    )
    return parser
def kerr(E=0, U=0, gs=None):
    """Set up the Kerr nonlinear element.

    A single-site model with on-site energy ``E`` and interaction ``U``,
    coupled to two channels with strengths ``gs`` (default (0.1, 0.1)).
    """
    if gs is None:
        gs = (0.1, 0.1)
    model = scattering.Model(omegas=[E] * 1, links=[], U=[U])
    channels = [
        scattering.Channel(site=0, strength=gs[0]),
        scattering.Channel(site=0, strength=gs[1]),
    ]
    se = scattering.Setup(model, channels)
    se.label = 'U={0}'.format(U)
    return se
from typing import Dict
import requests
import logging
def get_estate_urls(last_estate_id: str) -> Dict:
    """Fetch urls of newly added estates
    Args:
        last_estate_id (str): estate_id of the most recent estate added (from last scrape)
    Returns:
        Dict: result dict in format {estate_id_1: {estate_url_1}, ... estate_id_N: {estate_url_N}}
    """
    # Calculate number of API pages based on result size and estates per page
    base_url = 'https://www.sreality.cz/api/'
    res = requests.get(base_url + 'cs/v2/estates?per_page=1&page=1')
    # BUG FIX: ceiling division — the previous floor division (// 500) combined
    # with range(1, num_pages) silently dropped the last page(s) of results.
    num_pages = -(-res.json()['result_size'] // 500)
    # Obtain url suffix for each estate up until the newest from last scrape
    estate_urls = {}
    for page in range(1, num_pages + 1):
        url = base_url + f'cs/v2/estates?per_page=500&page={page}'
        # EAFP
        try:
            res = requests.get(url)
            res.raise_for_status()
        except requests.exceptions.HTTPError as error:
            logging.error(error)
            # BUG FIX: skip the failed page instead of falling through and
            # parsing the errored response below.
            continue
        # Some API responses are missing the content
        # which causes the entire scraper to fail
        res = res.json().get("_embedded")
        if res is None:
            continue
        estates = res["estates"]
        for estate in estates:
            estate_url = estate["_links"]["self"]["href"]
            estate_id = estate_url.split("/")[-1]
            # Break once we hit an estate from last scraping
            already_scraped = estate_id == last_estate_id
            if already_scraped:
                return estate_urls
            estate_urls[estate_id] = estate_url
    return estate_urls
from datetime import datetime
def revert_transaction():
    """Revert a transaction.

    Only admins and bartenders may revert. A transaction can be reverted at
    most once; the revert itself is recorded as a new 'Revert #<id>'
    transaction.
    """
    if not (current_user.is_admin or current_user.is_bartender):
        flash("You don't have the rights to access this page.", 'danger')
        return redirect(url_for('main.dashboard'))
    transaction_id = request.args.get('transaction_id', -1, type=int)
    # Transactions that are already reverted can't be reverted again
    transaction = Transaction.query.filter_by(id=transaction_id).first_or_404()
    if transaction.is_reverted or 'Revert' in transaction.type:
        flash("You can't revert this transaction.", 'warning')
        return redirect(request.referrer)
    # Revert client balance
    if transaction.client:
        # Check if user balance stays positive before reverting
        if transaction.client.balance - transaction.balance_change < 0:
            flash(transaction.client.first_name + ' '
                  + transaction.client.last_name + '\'s balance would be '
                  + 'negative if this transaction were reverted.', 'warning')
            return redirect(request.referrer)
        transaction.client.balance -= transaction.balance_change
        if transaction.item and transaction.item.is_alcohol:
            transaction.client.last_drink = None
    # Revert item quantity
    if transaction.item and transaction.item.is_quantifiable:
        transaction.item.quantity += 1
    # Transaction is now reverted: it won't ever be 'unreverted'
    transaction.is_reverted = True
    transaction = Transaction(client_id=None,
                              barman=current_user.username,
                              # BUG FIX: `datetime` is imported via
                              # `from datetime import datetime`, so
                              # `datetime.datetime.utcnow()` raised
                              # AttributeError at runtime.
                              date=datetime.utcnow(),
                              type='Revert #'+str(transaction_id),
                              balance_change=None)
    db.session.add(transaction)
    db.session.commit()
    flash('The transaction #'+str(transaction_id)+' has been reverted.',
          'primary')
    return redirect(request.referrer)
def is_number(s):
    """returns true if input can be converted to a float"""
    try:
        float(s)
    except ValueError:
        return False
    return True
def bib_to_string(bibliography):
    """ dict of dict -> str
    Take a biblatex bibliography represented as a dictionary
    and return a string representing it as a biblatex file.
    """
    out = ''
    for key, entry in bibliography.items():
        # Entry header: @<type>{<id>,
        out += '\n@{}{{{},\n'.format(entry['type'], entry['id'])
        for field, value in entry.items():
            if field not in ('id', 'type'):
                out += '\t{} = {{{}}},\n'.format(field, value)
        # Drop the trailing ",\n" of the last field and close the entry.
        out = out[:-2] + '}\n'
    return out
import io
def make_plot(z, figsize=(20, 20), scale=255 * 257,
              wavelength=800, terrain=None,
              nir_min=0.2, offset=3.5):
    """
    Make a 3-D plot of image intensity as z-axis and RGB image as an underlay on the z=0 plane.
    :param z: NIR intensities
    :param figsize: size of the figure to default (20,20)
    :param scale: Scale to resize intensities for aesthetics (make intensities <= 1)
    :param wavelength: The wavelength to include in the legend
    :param terrain: The rgb image to include as the x-y plane (default is no underlay)
    :param nir_min: Cutoff for the minimum level of NIR intensity (0 - 1) so the plot is cleaner
    :param offset: Shift the RGB underlay by this amount for visual appeal so there is a space
    :return: a PIL Image
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.gca(projection='3d')
    # BUG FIX: previously `z = np.float32(bw)` read an undefined global `bw`,
    # clobbering the `z` argument; convert the provided NIR intensities.
    z = np.float32(z)
    Z = z / scale
    X, Y = np.arange(0, z.shape[1], 1), np.arange(0, z.shape[0], 1)
    X, Y = np.meshgrid(X, Y)
    surf = ax.plot_surface(X, Y, Z,
                           rstride=3, cstride=3,
                           cmap=cm.coolwarm,
                           alpha=0.3,
                           linewidth=0,
                           antialiased=False,
                           vmin=0, vmax=1)
    if terrain is not None:
        # RGB underlay, shifted down by `offset` so it doesn't touch the surface.
        ax.plot_surface(X, Y, -offset * np.ones_like(Z, dtype=np.float32),
                        rstride=5, cstride=5, facecolors=terrain / 255)
        ''' Now overlay the fluorescence '''
        z_fluorescence = Z.copy()
        z_fluorescence[z_fluorescence < nir_min] = 0
        # Use fluorescence intensity as the alpha channel of a white overlay.
        z_rgba = np.ones((Z.shape[0], Z.shape[1], 4))
        z_rgba[:, :, 3] = z_fluorescence[:, :]
        ax.plot_surface(X, Y, -offset * np.ones_like(Z, dtype=np.float32),
                        rstride=5, cstride=5, facecolors=z_rgba)
        ax.set_zlim(-offset, 1)
    else:
        ax.plot_surface(X, Y, (Z / 257) - offset,
                        rstride=3, cstride=3,
                        cmap=cm.coolwarm,
                        alpha=0.4,
                        linewidth=0,
                        antialiased=False,
                        vmin=-offset, vmax=(1.0 / 257.0) - offset)
        ax.set_zlim(-offset, 1)
    ax.zaxis.set_major_locator(FixedLocator([0.0, 0.5, 1.0]))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    ax.set_xlabel('\nHeight (Pixels)')
    ax.set_ylabel('\nWidth (Pixels)')
    ax.set_zlabel('\nRelative NIR\nIntensity')
    ax.view_init(azim=30)
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.4, aspect=5, orientation="horizontal",
                 label='Relative Intensity of fluorescence \nat wavelength ' + r"$\lambda_{f} =$" + "{}nm".format(
                     wavelength))
    # Render the figure into an in-memory PNG and hand it back as a PIL Image.
    buf = io.BytesIO()
    plt.tight_layout(h_pad=1)
    plt.savefig(buf, format='png')
    buf.seek(0)
    img = Image.open(buf)
    plt.close('all')
    return img
def check_prio_and_sorted(node):
    """Check that a treap object fulfills the priority requirement and that its sorted correctly.

    A min-heap priority is assumed: every node's prio must be <= both
    children's prios, and data must satisfy the BST ordering.
    Raises AssertionError on the first violation; returns None otherwise.
    """
    if node is None:
        return None  # The root is empty
    if (node.left_node is None) and (node.right_node is None):  # No children to compare with
        pass  # Do nothing
    elif node.left_node is None:  # No left child
        assert node.prio <= node.right_node.prio  # Check priority of right child and node
        assert node.data < node.right_node.data  # Check sorting
    elif node.right_node is None:  # No right child
        assert node.prio <= node.left_node.prio  # Check priority of left child and node
        assert node.data > node.left_node.data  # Check sorting
    else:  # Two children
        # BUG FIX: `node.prio <= (left.prio and right.prio)` evaluated the
        # boolean `and` first, comparing against only one child's priority.
        # Compare against both children explicitly.
        assert node.prio <= node.left_node.prio and node.prio <= node.right_node.prio
        assert (node.data > node.left_node.data) and (node.data < node.right_node.data)  # Check sorting
    check_prio_and_sorted(node.left_node)  # Recursion. Goes down the left tree first
    check_prio_and_sorted(node.right_node)  # Recursion. Goes down the right tree next
import os
def download100():
    """Download the CIFAR-100 dataset into IMG_DIR and return the dataset."""
    # makedirs with exist_ok avoids the check-then-create race of the previous
    # `if not exists: mkdir` and also creates missing parent directories.
    os.makedirs(IMG_DIR, exist_ok=True)
    cifar = CIFAR100(root=IMG_DIR, download=True)
    return cifar
import pickle
async def wait_for_msg(channel):
    """Wait for a message on the specified Redis channel.

    Blocks until the channel yields a message, then returns it unpickled.
    Implicitly returns None if the channel closes before a message arrives.

    NOTE(review): pickle.loads on channel payloads is only safe if every
    publisher on this Redis instance is trusted — confirm that assumption.
    """
    while await channel.wait_message():
        pickled_msg = await channel.get()
        return pickle.loads(pickled_msg)
def abandoned_baby_bull(high, low, open_, close, periods = 10):
    """
    Abandoned Baby Bull pattern detector.

    Parameters
    ----------
    high : `ndarray`
        An array containing high prices.
    low : `ndarray`
        An array containing low prices.
    open_ : `ndarray`
        An array containing open prices.
    close : `ndarray`
        An array containing close prices.
    periods : `int`, optional
        Number of periods used for trend identification.

    Returns
    -------
    pattern : `ndarray`
        Boolean array, True where the bullish pattern was found.
    """
    # Thin wrapper: delegate to the shared calculator with the bull flavour.
    return abandoned_baby_calc(high, low, open_, close, periods, "bull")
def split_line_num(line):
    """Split each line into line number and remaining line text

    Args:
        line (str): Text of each line to split

    Returns:
        tuple consisting of:
            line number (int): Line number split from the beginning of line
            remaining text (str): Text for remainder of line with whitespace
            stripped

    Raises:
        ValueError: if the stripped line does not start with a digit
            (int('') fails), matching the original behavior.
    """
    stripped = line.lstrip()
    # Scan the leading run of digit characters by index instead of repeatedly
    # slicing the string one character at a time (which was quadratic).
    end = 0
    while end < len(stripped) and stripped[end].isdigit():
        end += 1
    return (int(stripped[:end]), stripped[end:].lstrip())
def rad_to_arcmin(angle: float) -> float:
    """Convert an angle in radians to arcminutes (1 degree = 60 arcmin)."""
    degrees = np.rad2deg(angle)
    return degrees * 60
def get_tile_prefix(rasterFileName):
    """
    Returns 'rump' of raster file name, to be used as prefix for tile files.
    rasterFileName is <date>_<time>_<sat. ID>_<product type>_<asset type>.tif(f)
    where asset type can be any of ["AnalyticMS","AnalyticMS_SR","Visual","newVisual"]
    The rump is defined as <date>_<time>_<sat. ID>_<product type>
    """
    # Drop the final underscore component (asset suffix + extension), then
    # strip any remaining "_AnalyticMS" tail.
    without_last = rasterFileName.rsplit("_", 1)[0]
    return without_last.partition("_AnalyticMS")[0]
def get_indices_by_groups(dataset):
    """
    Only use this to see F1-scores for how well we can recover the subgroups
    """
    group_ids = dataset.targets_all['group_idx']
    return [
        np.where(group_ids == g)[0]
        for g in range(len(dataset.group_labels))
    ]
def ranksumtest(x, y):
    """Calculates the rank sum statistics for the two input data sets
    ``x`` and ``y`` and returns z and p.
    This method returns a slight difference compared to scipy.stats.ranksumtest
    in the two-tailed p-value. Should be test drived...
    Returns: z-value for first data set ``x`` and two-tailed p-value
    """
    x, y = map(np.asarray, (x, y))
    n1 = len(x)
    n2 = len(y)
    # Rank the pooled samples, then split the ranks back into the two groups.
    alldata = np.concatenate((x, y))
    ranked = rankdata(alldata)
    x = ranked[:n1]
    y = ranked[n1:]
    # Rank sum of the first group.
    s = np.sum(x, axis=0)
    # Sanity check: both groups' rank sums must add up to the total rank sum.
    # NOTE(review): this is an exact float equality on sums of (possibly
    # fractional, tie-averaged) ranks — confirm it cannot trip on rounding.
    assert s + np.sum(y, axis=0) == np.sum(range(n1 + n2 + 1))
    # Normal approximation: mean and standard deviation of the rank sum.
    expected = n1 * (n1 + n2 + 1) / 2.0
    z = (s - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    # Two-tailed p-value; `zprob` is presumably the standard normal CDF —
    # TODO confirm against its definition elsewhere in the project.
    prob = 2 * (1.0 - zprob(abs(z)))
    return z, prob
from urls import routes
import jinja2
async def create_app():
    """Prepare and return the aiohttp web application.

    Wires up Redis-backed sessions, jinja2 templating, the (optional) debug
    toolbar, the peewee-async database manager, and the route table.
    """
    # Session storage lives in Redis; the pool is also kept on the app object.
    redis_pool = await aioredis.create_pool(settings.REDIS_CON)
    middlewares = [session_middleware(RedisStorage(redis_pool)), request_user_middleware]
    if settings.DEBUG:
        middlewares.append(aiohttp_debugtoolbar.middleware)
    # init application
    app = web.Application(middlewares=middlewares)
    app.redis_pool = redis_pool
    # Registry of open websockets -- TODO confirm what keys/values it holds.
    app.ws_list = {}
    jinja_env = aiohttp_jinja2.setup(
        app, loader=jinja2.FileSystemLoader(settings.TEMPLATE_DIR),
        context_processors=[aiohttp_jinja2.request_processor], )
    # Expose the project's custom template tags as jinja globals.
    jinja_env.globals.update(tags)
    if settings.DEBUG:
        aiohttp_debugtoolbar.setup(app, intercept_redirects=False)
    # db conn
    database.init(**settings.DATABASE)
    app.database = database
    # Disallow synchronous queries: all DB access must go through the
    # peewee_async manager created below.
    app.database.set_allow_sync(False)
    app.objects = peewee_async.Manager(app.database)
    # make routes
    for route in routes:
        app.router.add_route(**route)
    app.router.add_static('/static', settings.STATIC_DIR, name='static')
    app.logger = logger
    return app
def inchi_to_can(inchi, engine="openbabel"):
    """Convert InChI to canonical SMILES.

    Parameters
    ----------
    inchi : str
        InChI string.
    engine : str (default: "openbabel")
        Molecular conversion engine ("openbabel" or "rdkit").

    Returns
    -------
    str
        Canonical SMILES.
    """
    if engine == "rdkit":
        mol = Chem.MolFromInchi(inchi)
        return Chem.MolToSmiles(mol)
    if engine == "openbabel":
        conversion = openbabel.OBConversion()
        conversion.SetInAndOutFormats("inchi", "can")
        obmol = openbabel.OBMol()
        conversion.ReadString(obmol, inchi)
        # WriteString appends a trailing newline; strip it off.
        return conversion.WriteString(obmol).rstrip()
    raise AttributeError(
        "Engine must be either 'openbabel' or 'rdkit'."
    )
from typing import List, TypeVar

# BUG FIX: `from re import T` bound the re.TEMPLATE flag, which is not a valid
# typing parameter — `List[T]` then raised TypeError at import time. Use a
# proper TypeVar instead.
T = TypeVar("T")


def swap(arr: List[T],
         i: int,
         j: int) -> List[T]:
    """Swap two array elements in place.

    :param arr: the list to mutate.
    :param i: index of the first element.
    :param j: index of the second element.
    :return: the same list, with elements i and j exchanged.
    """
    arr[i], arr[j] = arr[j], arr[i]
    return arr
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.