Dataset columns (name, dtype, observed range; ⌀ = column contains nulls):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3-972 |
| max_stars_repo_name | string | lengths 6-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 3-972 |
| max_issues_repo_name | string | lengths 6-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 3-972 |
| max_forks_repo_name | string | lengths 6-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

hexsha: ba77447cc795ee759e86bd6f229a74ae3a445102 · size: 4,739 · ext: py · lang: Python
repo: zgparsons/python-koans @ 1551ca2f476a1680ce02f2e32825362a21d6ca3e · path: python3/koans/about_proxy_object_project.py · licenses: ["MIT"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy:
def __init__(self, target_object):
# WRITE CODE HERE
self._messages = []
#initialize '_obj' attribute last. Trust me on this!
self._obj = target_object
# WRITE CODE HERE
def __getattr__(self, attr):
self._messages.append(attr)
return self._obj.__getattribute__(attr)
def __setattr__(self, attr, value):
names = ['_obj', '_messages', 'was_called']
if attr in names:
return object.__setattr__(self, attr, value)
else:
self._messages.append(attr)
self._obj.__setattr__(attr, value)
def messages(self):
return self._messages
def was_called(self, attr):
return attr in self._messages
def number_of_times_called(self, attr):
return len([x for x in self._messages if x == attr])
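# Illustrative usage sketch (not part of the original koan file): the proxy
# forwards attribute access to the wrapped object while recording the names of
# the attributes that were touched. Assuming the Television class defined
# further below:
#
#   tv = Proxy(Television())
#   tv.power()                            # forwarded; 'power' recorded
#   tv.channel = 48                       # forwarded via __setattr__; 'channel' recorded
#   tv.messages()                         # -> ['power', 'channel']
#   tv.was_called('power')                # -> True
#   tv.number_of_times_called('channel')  # -> 1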
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
def test_proxy_method_returns_wrapped_object(self):
# NOTE: The Television class is defined below
tv = Proxy(Television())
self.assertTrue(isinstance(tv, Proxy))
def test_tv_methods_still_perform_their_function(self):
tv = Proxy(Television())
tv.channel = 10
tv.power()
self.assertEqual(10, tv.channel)
self.assertTrue(tv.is_on())
def test_proxy_records_messages_sent_to_tv(self):
tv = Proxy(Television())
tv.power()
tv.channel = 10
self.assertEqual(['power', 'channel'], tv.messages())
def test_proxy_handles_invalid_messages(self):
tv = Proxy(Television())
with self.assertRaises(AttributeError):
tv.no_such_method()
def test_proxy_reports_methods_have_been_called(self):
tv = Proxy(Television())
tv.power()
tv.power()
self.assertTrue(tv.was_called('power'))
self.assertFalse(tv.was_called('channel'))
def test_proxy_counts_method_calls(self):
tv = Proxy(Television())
tv.power()
tv.channel = 48
tv.power()
self.assertEqual(2, tv.number_of_times_called('power'))
self.assertEqual(1, tv.number_of_times_called('channel'))
self.assertEqual(0, tv.number_of_times_called('is_on'))
def test_proxy_can_record_more_than_just_tv_objects(self):
proxy = Proxy("Py Ohio 2010")
result = proxy.upper()
self.assertEqual("PY OHIO 2010", result)
result = proxy.split()
self.assertEqual(["Py", "Ohio", "2010"], result)
self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class used in the proxy testing above.
class Television:
def __init__(self):
self._channel = None
self._power = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
def power(self):
if self._power == 'on':
self._power = 'off'
else:
self._power = 'on'
def is_on(self):
return self._power == 'on'
# Tests for the Television class. All of these tests should pass.
class TelevisionTest(Koan):
def test_it_turns_on(self):
tv = Television()
tv.power()
self.assertTrue(tv.is_on())
def test_it_also_turns_off(self):
tv = Television()
tv.power()
tv.power()
self.assertFalse(tv.is_on())
def test_edge_case_on_off(self):
tv = Television()
tv.power()
tv.power()
tv.power()
self.assertTrue(tv.is_on())
tv.power()
self.assertFalse(tv.is_on())
def test_can_set_the_channel(self):
tv = Television()
tv.channel = 11
self.assertEqual(11, tv.channel)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 26.926136 | 73 | 0.629036 |

hexsha: 7409aaa0be66b102739a19f372e582b79df5c2f5 · size: 5,519 · ext: py · lang: Python
repo: Ezra-H/ray @ e428134137f05cf08317582e9820e7c43657e972 · path: python/ray/tune/registry.py · licenses: ["Apache-2.0"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

```python
import logging
from types import FunctionType
import ray
import ray.cloudpickle as pickle
from ray.experimental.internal_kv import _internal_kv_initialized, \
_internal_kv_get, _internal_kv_put
from ray.tune.error import TuneError
TRAINABLE_CLASS = "trainable_class"
ENV_CREATOR = "env_creator"
RLLIB_MODEL = "rllib_model"
RLLIB_PREPROCESSOR = "rllib_preprocessor"
RLLIB_ACTION_DIST = "rllib_action_dist"
TEST = "__test__"
KNOWN_CATEGORIES = [
TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR,
RLLIB_ACTION_DIST, TEST
]
logger = logging.getLogger(__name__)
def has_trainable(trainable_name):
return _global_registry.contains(TRAINABLE_CLASS, trainable_name)
def get_trainable_cls(trainable_name):
validate_trainable(trainable_name)
return _global_registry.get(TRAINABLE_CLASS, trainable_name)
def validate_trainable(trainable_name):
if not has_trainable(trainable_name):
# Make sure everything rllib-related is registered.
from ray.rllib import _register_all
_register_all()
if not has_trainable(trainable_name):
raise TuneError("Unknown trainable: " + trainable_name)
def register_trainable(name, trainable, warn=True):
"""Register a trainable function or class.
This enables a class or function to be accessed on every Ray process
in the cluster.
Args:
name (str): Name to register.
trainable (obj): Function or tune.Trainable class. Functions must
take (config, status_reporter) as arguments and will be
automatically converted into a class during registration.
"""
from ray.tune.trainable import Trainable
from ray.tune.function_runner import wrap_function
if isinstance(trainable, type):
logger.debug("Detected class for trainable.")
elif isinstance(trainable, FunctionType):
logger.debug("Detected function for trainable.")
trainable = wrap_function(trainable, warn=warn)
elif callable(trainable):
logger.info(
"Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable, warn=warn)
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertable to Trainable",
trainable)
_global_registry.register(TRAINABLE_CLASS, name, trainable)
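# Illustrative sketch (not part of the original module): registering a function
# trainable under a name so it can later be looked up via get_trainable_cls().
#
#   def my_trainable(config, reporter):   # signature per the docstring above
#       ...                               # report metrics via `reporter`
#   register_trainable("my_trainable", my_trainable)
#   get_trainable_cls("my_trainable")     # -> the wrapped Trainable subclass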
def register_env(name, env_creator):
"""Register a custom environment for use with RLlib.
This enables the environment to be accessed on every Ray process
in the cluster.
Args:
name (str): Name to register.
env_creator (obj): Function that creates an env.
"""
if not isinstance(env_creator, FunctionType):
raise TypeError("Second argument must be a function.", env_creator)
_global_registry.register(ENV_CREATOR, name, env_creator)
def check_serializability(key, value):
_global_registry.register(TEST, key, value)
def _make_key(category, key):
"""Generate a binary key for the given category and key.
Args:
category (str): The category of the item
key (str): The unique identifier for the item
Returns:
        The key to use for storing the value.
"""
return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
key.encode("ascii"))
class _Registry:
def __init__(self):
self._to_flush = {}
def register(self, category, key, value):
"""Registers the value with the global registry.
Raises:
PicklingError if unable to pickle to provided file.
"""
if category not in KNOWN_CATEGORIES:
from ray.tune import TuneError
raise TuneError("Unknown category {} not among {}".format(
category, KNOWN_CATEGORIES))
self._to_flush[(category, key)] = pickle.dumps(value)
if _internal_kv_initialized():
self.flush_values()
def contains(self, category, key):
if _internal_kv_initialized():
value = _internal_kv_get(_make_key(category, key))
return value is not None
else:
return (category, key) in self._to_flush
def get(self, category, key):
if _internal_kv_initialized():
value = _internal_kv_get(_make_key(category, key))
if value is None:
raise ValueError(
"Registry value for {}/{} doesn't exist.".format(
category, key))
return pickle.loads(value)
else:
return pickle.loads(self._to_flush[(category, key)])
def flush_values(self):
for (category, key), value in self._to_flush.items():
_internal_kv_put(_make_key(category, key), value, overwrite=True)
self._to_flush.clear()
_global_registry = _Registry()
ray.worker._post_init_hooks.append(_global_registry.flush_values)
class _ParameterRegistry:
def __init__(self):
self.to_flush = {}
self.references = {}
def put(self, k, v):
self.to_flush[k] = v
if ray.is_initialized():
self.flush()
def get(self, k):
if not ray.is_initialized():
return self.to_flush[k]
return ray.get(self.references[k])
def flush(self):
for k, v in self.to_flush.items():
self.references[k] = ray.put(v)
self.to_flush.clear()
parameter_registry = _ParameterRegistry()
ray.worker._post_init_hooks.append(parameter_registry.flush)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 31.180791 | 77 | 0.672586 |

hexsha: 9e4cb6e02568780ff5165121f3eb4a184c2e43a4 · size: 399 · ext: py · lang: Python
repo: axellaurelut/dispatch @ 338482d59846dda9aff14e761045b374725ab1bd · path: src/dispatch/plugins/bases/email.py · licenses: ["Apache-2.0"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 3,417 | 2020-02-23T22:54:47.000Z | 2022-03-31T13:01:01.000Z |
| max_issues | 607 | 2020-02-24T14:27:02.000Z | 2022-03-30T19:15:39.000Z |
| max_forks | 359 | 2020-02-24T19:04:43.000Z | 2022-03-29T06:48:12.000Z |

content:

```python
"""
.. module: dispatch.plugins.bases.email
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from dispatch.plugins.base import Plugin
class EmailPlugin(Plugin):
type = "email"
def send(self, items, **kwargs):
raise NotImplementedError
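# Illustrative subclass sketch (not part of the original file): concrete email
# plugins are expected to override send(), e.g.
#
#   class SMTPEmailPlugin(EmailPlugin):      # hypothetical plugin
#       def send(self, items, **kwargs):
#           ...                              # deliver `items` via SMTP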
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 24.9375 | 62 | 0.694236 |

hexsha: 6a5c0394847775fe2d207483b873b8defe3a4644 · size: 13,027 · ext: py · lang: Python
repo: vixadd/tensorflow @ 8c624204eb686a91779149dc500e6c8c60096074 · path: tensorflow/python/ops/ragged/ragged_concat_ops.py · licenses: ["Apache-2.0"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 3 | 2019-11-19T14:07:27.000Z | 2020-10-04T12:57:40.000Z |
| max_issues | 4 | 2020-04-09T16:22:20.000Z | 2021-12-15T13:57:36.000Z |
| max_forks | 4 | 2022-01-13T11:23:44.000Z | 2022-03-02T11:11:42.000Z |

content:

```python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Concat and stack operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
@dispatch.dispatch_for_api(array_ops.concat)
def concat(values: typing.List[ragged_tensor.RaggedOrDense], axis, name=None):
"""Concatenates potentially ragged tensors along one dimension.
Given a list of tensors with the same rank `K` (`K >= axis`), returns a
rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the
concatenation of `[rt[i0...iaxis] for rt in values]`.
Args:
values: A list of potentially ragged tensors. May not be empty. All
`values` must have the same rank and the same dtype; but unlike
`tf.concat`, they can have arbitrary shapes.
axis: A python integer, indicating the dimension along which to concatenate.
(Note: Unlike `tf.concat`, the `axis` parameter must be statically known.)
Negative values are supported only if the rank of at least one
`values` value is statically known.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` with rank `K`.
    `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values))`.
Raises:
ValueError: If `values` is empty, if `axis` is out of bounds or if
the input tensors have different ranks.
#### Example:
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> tf.concat([t1, t2], axis=0)
<tf.RaggedTensor [[1, 2], [3, 4, 5], [6], [7, 8, 9]]>
>>> tf.concat([t1, t2], axis=1)
<tf.RaggedTensor [[1, 2, 6], [3, 4, 5, 7, 8, 9]]>
"""
if not isinstance(values, (list, tuple)):
values = [values]
with ops.name_scope(name, 'RaggedConcat', values):
return _ragged_stack_concat_helper(values, axis, stack_values=False)
@tf_export('ragged.stack')
@dispatch.add_dispatch_support
@dispatch.dispatch_for_api(array_ops.stack)
def stack(values: typing.List[ragged_tensor.RaggedOrDense],
axis=0,
name=None):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` `RaggedTensor`.
Given a list of tensors or ragged tensors with the same rank `R`
(`R >= axis`), returns a rank-`R+1` `RaggedTensor` `result` such that
`result[i0...iaxis]` is `[value[i0...iaxis] for value in values]`.
#### Examples:
>>> # Stacking two ragged tensors.
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> tf.ragged.stack([t1, t2], axis=0)
<tf.RaggedTensor [[[1, 2], [3, 4, 5]], [[6], [7, 8, 9]]]>
>>> tf.ragged.stack([t1, t2], axis=1)
<tf.RaggedTensor [[[1, 2], [6]], [[3, 4, 5], [7, 8, 9]]]>
>>> # Stacking two dense tensors with different sizes.
>>> t3 = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> t4 = tf.constant([[5], [6], [7]])
>>> tf.ragged.stack([t3, t4], axis=0)
<tf.RaggedTensor [[[1, 2, 3], [4, 5, 6]], [[5], [6], [7]]]>
Args:
values: A list of `tf.Tensor` or `tf.RaggedTensor`. May not be empty. All
`values` must have the same rank and the same dtype; but unlike
`tf.stack`, they can have arbitrary dimension sizes.
axis: A python integer, indicating the dimension along which to stack.
(Note: Unlike `tf.stack`, the `axis` parameter must be statically known.)
Negative values are supported only if the rank of at least one
`values` value is statically known.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` with rank `R+1` (if `R>0`).
If `R==0`, then the result will be returned as a 1D `Tensor`, since
`RaggedTensor` can only be used when `rank>1`.
    `result.ragged_rank=1+max(axis, max(rt.ragged_rank for rt in values))`.
Raises:
ValueError: If `values` is empty, if `axis` is out of bounds or if
the input tensors have different ranks.
"""
if not isinstance(values, (list, tuple)):
values = [values]
with ops.name_scope(name, 'RaggedConcat', values):
return _ragged_stack_concat_helper(values, axis, stack_values=True)
def _ragged_stack_concat_helper(rt_inputs, axis, stack_values):
"""Helper function to concatenate or stack ragged tensors.
Args:
rt_inputs: A list of RaggedTensors or Tensors to combine.
axis: The axis along which to concatenate or stack.
stack_values: A boolean -- if true, then stack values; otherwise,
concatenate them.
Returns:
A RaggedTensor.
Raises:
ValueError: If rt_inputs is empty, or if axis is out of range.
"""
# Validate parameters.
if not rt_inputs:
raise ValueError('rt_inputs may not be empty.')
# Convert input tensors.
rt_inputs = [
ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt_input, name='rt_input') for rt_input in rt_inputs
]
row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes(
*rt_inputs, return_dtype=True)
rt_inputs = list(rt_inputs)
# Special case: if there's only one input, then return it as-is.
if len(rt_inputs) == 1 and not stack_values:
return rt_inputs[0]
# Check the rank (number of dimensions) of the input tensors.
ndims = None
for rt in rt_inputs:
if ndims is None:
ndims = rt.shape.ndims
else:
rt.shape.assert_has_rank(ndims)
out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
axis = array_ops.get_positive_axis(axis, out_ndims)
if stack_values and ndims == 1 and axis == 0:
return ragged_tensor.RaggedTensor.from_row_lengths(
values=array_ops.concat(rt_inputs, axis=0),
row_lengths=array_ops.concat([array_ops.shape(r) for r in rt_inputs],
axis=0))
# If all the inputs are Tensors, and we're combining the final dimension,
# then we can delegate to the tf.stack/tf.concat operation, and return a
# Tensor.
if all(not ragged_tensor.is_ragged(rt) for rt in rt_inputs):
if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):
if stack_values:
return array_ops.stack(rt_inputs, axis)
else:
return array_ops.concat(rt_inputs, axis)
# Convert any Tensor inputs to RaggedTensors. This makes it
# possible to concatenate Tensors and RaggedTensors together.
for i in range(len(rt_inputs)):
if not ragged_tensor.is_ragged(rt_inputs[i]):
rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor(
rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype)
# Convert the input tensors to all have the same ragged_rank.
ragged_rank = max(max(rt.ragged_rank for rt in rt_inputs), 1)
rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype)
for rt in rt_inputs]
if axis == 0:
return _ragged_stack_concat_axis_0(rt_inputs, stack_values)
elif axis == 1:
return _ragged_stack_concat_axis_1(rt_inputs, stack_values)
else: # axis > 1: recurse.
values = [rt.values for rt in rt_inputs]
splits = [[rt_input.row_splits] for rt_input in rt_inputs]
with ops.control_dependencies(ragged_util.assert_splits_match(splits)):
return ragged_tensor.RaggedTensor.from_row_splits(
_ragged_stack_concat_helper(values, axis - 1, stack_values),
splits[0][0], validate=False)
def _ragged_stack_concat_axis_0(rt_inputs, stack_values):
"""Helper function to concatenate or stack ragged tensors along axis 0.
Args:
rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.
stack_values: Boolean. If true, then stack values; otherwise, concatenate
them.
Returns:
A RaggedTensor.
"""
# Concatenate the inner values together.
flat_values = [rt.flat_values for rt in rt_inputs]
concatenated_flat_values = array_ops.concat(flat_values, axis=0)
# Concatenate the splits together for each ragged dimension (adjusting
# split offsets as necessary).
nested_splits = [rt.nested_row_splits for rt in rt_inputs]
ragged_rank = rt_inputs[0].ragged_rank
concatenated_nested_splits = [
_concat_ragged_splits([ns[dim]
for ns in nested_splits])
for dim in range(ragged_rank)
]
# If we are performing a stack operation, then add another splits.
if stack_values:
stack_lengths = array_ops.stack([rt.nrows() for rt in rt_inputs])
stack_splits = ragged_util.lengths_to_splits(stack_lengths)
concatenated_nested_splits.insert(0, stack_splits)
return ragged_tensor.RaggedTensor.from_nested_row_splits(
concatenated_flat_values, concatenated_nested_splits, validate=False)
def _ragged_stack_concat_axis_1(rt_inputs, stack_values):
"""Helper function to concatenate or stack ragged tensors along axis 1.
Args:
rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.
stack_values: Boolean. If true, then stack values; otherwise, concatenate
them.
Returns:
A RaggedTensor.
"""
num_inputs = len(rt_inputs)
rt_nrows = rt_inputs[0].nrows()
nrows_msg = 'Input tensors have incompatible shapes.'
nrows_checks = [
check_ops.assert_equal(rt.nrows(), rt_nrows, message=nrows_msg)
for rt in rt_inputs[1:]
]
with ops.control_dependencies(nrows_checks):
# Concatenate the inputs together to put them in a single ragged tensor.
concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False)
# Use ragged.gather to permute the rows of concatenated_rt. In particular,
# permuted_rt = [rt_inputs[0][0], ..., rt_inputs[N][0],
# rt_inputs[0][1], ..., rt_inputs[N][1],
# ...,
# rt_inputs[0][M], ..., rt_input[N][M]]
# where `N=num_inputs-1` and `M=rt_nrows-1`.
row_indices = math_ops.range(rt_nrows * num_inputs)
row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1])
transposed_row_index_matrix = array_ops.transpose(row_index_matrix)
row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1])
permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation)
if stack_values:
# Add a new splits tensor to group together the values.
stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs)
_copy_row_shape(rt_inputs, stack_splits)
return ragged_tensor.RaggedTensor.from_row_splits(
permuted_rt, stack_splits, validate=False)
else:
# Merge together adjacent rows by dropping the row-split indices that
# separate them.
concat_splits = permuted_rt.row_splits[::num_inputs]
_copy_row_shape(rt_inputs, concat_splits)
return ragged_tensor.RaggedTensor.from_row_splits(
permuted_rt.values, concat_splits, validate=False)
def _copy_row_shape(rt_inputs, splits):
"""Sets splits.shape to [rt[shape[0]+1] for each rt in rt_inputs."""
for rt in rt_inputs:
if rt.shape[0] is not None:
splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1))
def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype):
"""Adds ragged dimensions to `rt_input` so it has the desired ragged rank."""
if ragged_rank > 0:
if not ragged_tensor.is_ragged(rt_input):
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, row_splits_dtype=row_splits_dtype)
if rt_input.ragged_rank < ragged_rank:
rt_input = rt_input.with_values(
_increase_ragged_rank_to(rt_input.values, ragged_rank - 1,
row_splits_dtype))
return rt_input
def _concat_ragged_splits(splits_list):
"""Concatenates a list of RaggedTensor splits to form a single splits."""
pieces = [splits_list[0]]
splits_offset = splits_list[0][-1]
for splits in splits_list[1:]:
pieces.append(splits[1:] + splits_offset)
splits_offset += splits[-1]
return array_ops.concat(pieces, axis=0)
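# Worked example (not part of the original file), shown with plain Python lists
# rather than tensors for clarity: concatenating the row splits of two ragged
# tensors whose row lengths are [2, 3] and [1, 3]:
#   splits_list = [[0, 2, 5], [0, 1, 4]]
#   pieces      = [[0, 2, 5]]           # first splits kept as-is
#   offset      = 5                     # last value of the first splits
#   append [1 + 5, 4 + 5] = [6, 9]      # drop the leading 0, shift by offset
#   result      = [0, 2, 5, 6, 9]       # splits of the concatenated tensor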
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 39.83792 | 80 | 0.697474 |

hexsha: dbc965b3133794d3384214f7a871127079654445 · size: 3,979 · ext: py · lang: Python
repo: hidronautics/ROS @ 0dc4019025666c14e908b9f3ce37c7b36f5e1436 · path: src/auv_pilot/scripts/common/common_states.py · licenses: ["MIT"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 11 | 2017-12-27T09:16:01.000Z | 2021-02-14T08:02:03.000Z |
| max_issues | 1 | 2018-01-29T08:20:51.000Z | 2018-01-29T08:20:51.000Z |
| max_forks | 3 | 2018-05-20T20:16:02.000Z | 2019-03-15T11:48:14.000Z |

content:

```python
#! /usr/bin/env python
import rospy
import smach
import smach_ros
from auv_common.srv import EnablingCmd
from auv_common.msg import DiveGoal, DiveAction, MoveAction, MoveGoal
# TODO: Create more common states
# Creates delay (in seconds)
class WaitState(smach.State):
def __init__(self, delay):
smach.State.__init__(self, outcomes=['OK'])
self.delay = delay
def execute(self, userdata):
if self.delay == 0:
return 'OK'
rate = rospy.Rate(1.0 / self.delay)
rate.sleep()
return 'OK'
class IMUInitState(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['OK'])
def execute(self, userdata):
imu_init_service = rospy.ServiceProxy("imu_init_service", EnablingCmd)
imu_init_service(True)
rospy.sleep(0.8)
imu_init_service(False)
return 'OK'
class StabilizationInitState(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['OK'])
def execute(self, userdata):
stabilization_service = rospy.ServiceProxy("stabilization_service", EnablingCmd)
stabilization_service(False)
rospy.sleep(1.0)
stabilization_service(True)
rospy.sleep(5.0)
return 'OK'
def create_timer_state(time):
class mat_timer_fsm(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['ERROR'])
self.timer_flag = True
#self.timer = rospy.ServiceProxy("timer_service", TimerFsm)
#self.timerControl = self.timer(False)
#print (self.timerControl.duration)
print ("Timer FSM created with delay parameter: ", time)
def execute(self, userdata):
rospy.sleep(time) # Test
return 'ERROR'
return mat_timer_fsm()
def create_time_calculation_state(time, sleep_time):
class time_calculation(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['CALCULATING_TIME', 'TERMINATING'])
self.state_time = rospy.get_rostime() # get time as rospy.Time instance
self.timerFlag = True
self.start_time = -1
self.current_time = -1
rospy.loginfo("Calculation state added with time parameter: %i", time)
def execute(self, userdata):
self.current_time = rospy.get_rostime() # get time as rospy.Time instance
if self.timerFlag:
self.start_time = rospy.get_rostime() # get time as rospy.Time instance
rospy.loginfo("Start time %i %i", self.start_time.secs, self.start_time.nsecs)
self.timerFlag = False
rospy.sleep(sleep_time) # Test
return 'CALCULATING_TIME'
#elif reset:
#self.timerFlag = True
#rospy.loginfo("RESET")
#return 'RESET'
elif abs(self.current_time.secs - self.start_time.secs) > time:
return 'TERMINATING'
else:
rospy.sleep(sleep_time) # Test
return 'CALCULATING_TIME'
return time_calculation()
# Creates diving state (depth in centimeters)
def create_diving_state(depth):
dive = DiveGoal()
dive.depth = depth
return smach_ros.SimpleActionState('dive', DiveAction, goal=dive)
def create_signal_state():
goal = MoveGoal()
goal.direction = MoveGoal.DIRECTION_FORWARD
goal.velocityLevel = MoveGoal.VELOCITY_LEVEL_1
goal.value = 1000
return smach_ros.SimpleActionState('move_by_time', MoveAction, goal=goal)
def create_move_state(direction, value, velocity_level, hold_if_infinity=False):
action_goal = MoveGoal()
action_goal.direction = direction
action_goal.velocityLevel = velocity_level
action_goal.value = value
action_goal.holdIfInfinityValue = hold_if_infinity
return smach_ros.SimpleActionState('move_by_time', MoveAction, goal=action_goal)
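# Illustrative composition sketch (not part of the original file): these states
# and factory functions are meant to be plugged into a smach.StateMachine, e.g.
#
#   sm = smach.StateMachine(outcomes=['DONE', 'FAILED'])
#   with sm:
#       smach.StateMachine.add('WAIT', WaitState(3.0), transitions={'OK': 'DIVE'})
#       smach.StateMachine.add('DIVE', create_diving_state(50),
#                              transitions={'succeeded': 'DONE',
#                                           'preempted': 'FAILED',
#                                           'aborted': 'FAILED'})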
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 34.6 | 94 | 0.647901 |

hexsha: 9ec4bfd78e5930e66670f3caa4cd956f9ee9fe07 · size: 2,066 · ext: py · lang: Python
repo: SwaXTech/Detector-de-Plagio @ 11445e81097b52cd75561a049d4c85165d73f461 · path: src/util/doc2string.py · licenses: ["MIT"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 3 | 2021-11-16T13:19:16.000Z | 2021-11-16T17:53:42.000Z |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

```python
import docx
from util.file_manager import file_extension
from util.file_manager import get_filename
from util.generic_document import GenericDocument
from util.data_cleaning import separate_glued_words
from util.data_cleaning import merge_string
import util.log as log
import re
class WordDocument():
type_manager = None
document = None
string = None
paragraphs = []
def __init__(self, path):
self.document = self.__get_document(path)
self.string = self.document.string
self.paragraphs = self.document.paragraphs
def __get_document(self, path):
if file_extension(path) == '.doc':
return Doc(path)
return Docx(path)
class Docx():
string = None
document = None
raw_text = None
paragraphs = []
def __init__(self, path):
log.debug('Trying to read: {}'.format(get_filename(path)))
self.document = self.read_document(path)
self.raw_text = self.raw_full_text()
self.string = self.build_string(self.raw_text)
self.paragraphs = self.get_paragraphs()
def read_document(self, path):
return docx.Document(path)
def raw_full_text(self):
return [paragraph.text for paragraph in self.document.paragraphs]
def fix_void_paragraph(self, raw_text):
return [paragraph for paragraph in raw_text if paragraph != '']
def build_string(self, raw_text):
fixed_void_paragraph = self.fix_void_paragraph(raw_text)
merged_string = merge_string(fixed_void_paragraph)
separated_glued_words = separate_glued_words(merged_string)
return separated_glued_words
def get_paragraphs(self):
return self.raw_text.copy()
class Doc(GenericDocument):
def __init__(self, path):
log.debug('Trying to read: {}'.format(get_filename(path)))
super().__init__(path, '.doc')
def get_paragraphs(self):
return re.split('\.\n\n', self.string)
class Rtf(GenericDocument):
def __init__(self, path):
log.debug('Trying to read: {}'.format(get_filename(path)))
super().__init__(path, '.rtf')
def get_paragraphs(self):
return re.split('\.\n', self.string)
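# Usage sketch (illustrative, not part of the original file; the path below is
# hypothetical):
#   doc = WordDocument('/path/to/some_document.docx')
#   doc.string        # cleaned full text of the document
#   doc.paragraphs    # list of raw paragraph strings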
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 24.891566 | 69 | 0.723621 |

hexsha: 4b25cb13ac67154cb33b12b5a183dbea4df98cbc · size: 8,093 · ext: py · lang: Python
repo: Manny27nyc/oci-python-sdk @ de60b04e07a99826254f7255e992f41772902df7 · path: src/oci/jms/models/fleet_agent_configuration.py · licenses: ["Apache-2.0", "BSD-3-Clause"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z |
| max_issues | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z |
| max_forks | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z |

content:

```python
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class FleetAgentConfiguration(object):
"""
Management Agent Configuration for a Fleet. Includes JRE scanning frequency and list of include/exclude file system paths.
"""
def __init__(self, **kwargs):
"""
Initializes a new FleetAgentConfiguration object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param jre_scan_frequency_in_minutes:
The value to assign to the jre_scan_frequency_in_minutes property of this FleetAgentConfiguration.
:type jre_scan_frequency_in_minutes: int
:param java_usage_tracker_processing_frequency_in_minutes:
The value to assign to the java_usage_tracker_processing_frequency_in_minutes property of this FleetAgentConfiguration.
:type java_usage_tracker_processing_frequency_in_minutes: int
:param linux_configuration:
The value to assign to the linux_configuration property of this FleetAgentConfiguration.
:type linux_configuration: oci.jms.models.FleetAgentOsConfiguration
:param windows_configuration:
The value to assign to the windows_configuration property of this FleetAgentConfiguration.
:type windows_configuration: oci.jms.models.FleetAgentOsConfiguration
:param time_last_modified:
The value to assign to the time_last_modified property of this FleetAgentConfiguration.
:type time_last_modified: datetime
"""
self.swagger_types = {
'jre_scan_frequency_in_minutes': 'int',
'java_usage_tracker_processing_frequency_in_minutes': 'int',
'linux_configuration': 'FleetAgentOsConfiguration',
'windows_configuration': 'FleetAgentOsConfiguration',
'time_last_modified': 'datetime'
}
self.attribute_map = {
'jre_scan_frequency_in_minutes': 'jreScanFrequencyInMinutes',
'java_usage_tracker_processing_frequency_in_minutes': 'javaUsageTrackerProcessingFrequencyInMinutes',
'linux_configuration': 'linuxConfiguration',
'windows_configuration': 'windowsConfiguration',
'time_last_modified': 'timeLastModified'
}
self._jre_scan_frequency_in_minutes = None
self._java_usage_tracker_processing_frequency_in_minutes = None
self._linux_configuration = None
self._windows_configuration = None
self._time_last_modified = None
@property
def jre_scan_frequency_in_minutes(self):
"""
**[Required]** Gets the jre_scan_frequency_in_minutes of this FleetAgentConfiguration.
The frequency (in minutes) of JRE scanning. (That is, how often should JMS scan for JRE installations.)
:return: The jre_scan_frequency_in_minutes of this FleetAgentConfiguration.
:rtype: int
"""
return self._jre_scan_frequency_in_minutes
@jre_scan_frequency_in_minutes.setter
def jre_scan_frequency_in_minutes(self, jre_scan_frequency_in_minutes):
"""
Sets the jre_scan_frequency_in_minutes of this FleetAgentConfiguration.
The frequency (in minutes) of JRE scanning. (That is, how often should JMS scan for JRE installations.)
:param jre_scan_frequency_in_minutes: The jre_scan_frequency_in_minutes of this FleetAgentConfiguration.
:type: int
"""
self._jre_scan_frequency_in_minutes = jre_scan_frequency_in_minutes
@property
def java_usage_tracker_processing_frequency_in_minutes(self):
"""
**[Required]** Gets the java_usage_tracker_processing_frequency_in_minutes of this FleetAgentConfiguration.
The frequency (in minutes) of Java Usage Tracker processing. (That is, how often should JMS process data from the Java Usage Tracker.)
:return: The java_usage_tracker_processing_frequency_in_minutes of this FleetAgentConfiguration.
:rtype: int
"""
return self._java_usage_tracker_processing_frequency_in_minutes
@java_usage_tracker_processing_frequency_in_minutes.setter
def java_usage_tracker_processing_frequency_in_minutes(self, java_usage_tracker_processing_frequency_in_minutes):
"""
Sets the java_usage_tracker_processing_frequency_in_minutes of this FleetAgentConfiguration.
The frequency (in minutes) of Java Usage Tracker processing. (That is, how often should JMS process data from the Java Usage Tracker.)
:param java_usage_tracker_processing_frequency_in_minutes: The java_usage_tracker_processing_frequency_in_minutes of this FleetAgentConfiguration.
:type: int
"""
self._java_usage_tracker_processing_frequency_in_minutes = java_usage_tracker_processing_frequency_in_minutes
@property
def linux_configuration(self):
"""
**[Required]** Gets the linux_configuration of this FleetAgentConfiguration.
:return: The linux_configuration of this FleetAgentConfiguration.
:rtype: oci.jms.models.FleetAgentOsConfiguration
"""
return self._linux_configuration
@linux_configuration.setter
def linux_configuration(self, linux_configuration):
"""
Sets the linux_configuration of this FleetAgentConfiguration.
:param linux_configuration: The linux_configuration of this FleetAgentConfiguration.
:type: oci.jms.models.FleetAgentOsConfiguration
"""
self._linux_configuration = linux_configuration
@property
def windows_configuration(self):
"""
**[Required]** Gets the windows_configuration of this FleetAgentConfiguration.
:return: The windows_configuration of this FleetAgentConfiguration.
:rtype: oci.jms.models.FleetAgentOsConfiguration
"""
return self._windows_configuration
@windows_configuration.setter
def windows_configuration(self, windows_configuration):
"""
Sets the windows_configuration of this FleetAgentConfiguration.
:param windows_configuration: The windows_configuration of this FleetAgentConfiguration.
:type: oci.jms.models.FleetAgentOsConfiguration
"""
self._windows_configuration = windows_configuration
@property
def time_last_modified(self):
"""
**[Required]** Gets the time_last_modified of this FleetAgentConfiguration.
The date and time of the last modification to the Fleet Agent Configuration (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:return: The time_last_modified of this FleetAgentConfiguration.
:rtype: datetime
"""
return self._time_last_modified
@time_last_modified.setter
def time_last_modified(self, time_last_modified):
"""
Sets the time_last_modified of this FleetAgentConfiguration.
The date and time of the last modification to the Fleet Agent Configuration (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param time_last_modified: The time_last_modified of this FleetAgentConfiguration.
:type: datetime
"""
self._time_last_modified = time_last_modified
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
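# Usage sketch (illustrative, not part of the generated model file; the
# frequency values below are made up for the example):
#   config = FleetAgentConfiguration(
#       jre_scan_frequency_in_minutes=360,
#       java_usage_tracker_processing_frequency_in_minutes=720,
#   )
#   config.jre_scan_frequency_in_minutes   # -> 360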
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 42.371728 | 245 | 0.729519 |

hexsha: a8eaa5ffef824af2a5d459aeabe18c45bfb7d7b9 · size: 13,948 · ext: py · lang: Python
repo: gehad-shaat/pyboto3 @ 4a0c2851a8bc04fb1c71c36086f7bb257e48181d · path: pyboto3/augmentedairuntime.py · licenses: ["MIT"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 91 | 2016-12-31T11:38:37.000Z | 2021-09-16T19:33:23.000Z |
| max_issues | 7 | 2017-01-02T18:54:23.000Z | 2020-08-11T13:54:02.000Z |
| max_forks | 26 | 2016-12-31T13:11:00.000Z | 2022-03-03T21:01:12.000Z |

content:

```python
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def delete_human_loop(HumanLoopName=None):
"""
Deletes the specified human loop for a flow definition.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_human_loop(
HumanLoopName='string'
)
:type HumanLoopName: string
:param HumanLoopName: [REQUIRED]\nThe name of the human loop that you want to delete.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
:return: {}
:returns:
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
"""
pass
def describe_human_loop(HumanLoopName=None):
"""
Returns information about the specified human loop.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_human_loop(
HumanLoopName='string'
)
:type HumanLoopName: string
:param HumanLoopName: [REQUIRED]\nThe name of the human loop that you want information about.\n
:rtype: dict
ReturnsResponse Syntax{
'CreationTime': datetime(2015, 1, 1),
'FailureReason': 'string',
'FailureCode': 'string',
'HumanLoopStatus': 'InProgress'|'Failed'|'Completed'|'Stopped'|'Stopping',
'HumanLoopName': 'string',
'HumanLoopArn': 'string',
'FlowDefinitionArn': 'string',
'HumanLoopOutput': {
'OutputS3Uri': 'string'
}
}
Response Structure
(dict) --
CreationTime (datetime) --The creation time when Amazon Augmented AI created the human loop.
FailureReason (string) --The reason why a human loop failed. The failure reason is returned when the status of the human loop is Failed .
FailureCode (string) --A failure code that identifies the type of failure.
HumanLoopStatus (string) --The status of the human loop.
HumanLoopName (string) --The name of the human loop. The name must be lowercase, unique within the Region in your account, and can have up to 63 characters. Valid characters: a-z, 0-9, and - (hyphen).
HumanLoopArn (string) --The Amazon Resource Name (ARN) of the human loop.
FlowDefinitionArn (string) --The Amazon Resource Name (ARN) of the flow definition.
HumanLoopOutput (dict) --An object that contains information about the output of the human loop.
OutputS3Uri (string) --The location of the Amazon S3 object where Amazon Augmented AI stores your human loop output.
Exceptions
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
:return: {
'CreationTime': datetime(2015, 1, 1),
'FailureReason': 'string',
'FailureCode': 'string',
'HumanLoopStatus': 'InProgress'|'Failed'|'Completed'|'Stopped'|'Stopping',
'HumanLoopName': 'string',
'HumanLoopArn': 'string',
'FlowDefinitionArn': 'string',
'HumanLoopOutput': {
'OutputS3Uri': 'string'
}
}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def list_human_loops(CreationTimeAfter=None, CreationTimeBefore=None, FlowDefinitionArn=None, SortOrder=None, NextToken=None, MaxResults=None):
"""
Returns information about human loops, given the specified parameters. If a human loop was deleted, it will not be included.
See also: AWS API Documentation
Exceptions
:example: response = client.list_human_loops(
CreationTimeAfter=datetime(2015, 1, 1),
CreationTimeBefore=datetime(2015, 1, 1),
FlowDefinitionArn='string',
SortOrder='Ascending'|'Descending',
NextToken='string',
MaxResults=123
)
:type CreationTimeAfter: datetime
:param CreationTimeAfter: (Optional) The timestamp of the date when you want the human loops to begin in ISO 8601 format. For example, 2020-02-24 .
:type CreationTimeBefore: datetime
:param CreationTimeBefore: (Optional) The timestamp of the date before which you want the human loops to begin in ISO 8601 format. For example, 2020-02-24 .
:type FlowDefinitionArn: string
:param FlowDefinitionArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of a flow definition.\n
:type SortOrder: string
:param SortOrder: Optional. The order for displaying results. Valid values: Ascending and Descending .
:type NextToken: string
:param NextToken: A token to display the next page of results.
:type MaxResults: integer
:param MaxResults: The total number of items to return. If the total number of available items is more than the value specified in MaxResults , then a NextToken is returned in the output. You can use this token to display the next page of results.
:rtype: dict
ReturnsResponse Syntax
{
'HumanLoopSummaries': [
{
'HumanLoopName': 'string',
'HumanLoopStatus': 'InProgress'|'Failed'|'Completed'|'Stopped'|'Stopping',
'CreationTime': datetime(2015, 1, 1),
'FailureReason': 'string',
'FlowDefinitionArn': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
HumanLoopSummaries (list) --
An array of objects that contain information about the human loops.
(dict) --
Summary information about the human loop.
HumanLoopName (string) --
The name of the human loop.
HumanLoopStatus (string) --
The status of the human loop.
CreationTime (datetime) --
When Amazon Augmented AI created the human loop.
FailureReason (string) --
The reason why the human loop failed. A failure reason is returned when the status of the human loop is Failed .
FlowDefinitionArn (string) --
The Amazon Resource Name (ARN) of the flow definition used to configure the human loop.
NextToken (string) --
A token to display the next page of results.
Exceptions
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
:return: {
'HumanLoopSummaries': [
{
'HumanLoopName': 'string',
'HumanLoopStatus': 'InProgress'|'Failed'|'Completed'|'Stopped'|'Stopping',
'CreationTime': datetime(2015, 1, 1),
'FailureReason': 'string',
'FlowDefinitionArn': 'string'
},
],
'NextToken': 'string'
}
:returns:
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
"""
pass
def start_human_loop(HumanLoopName=None, FlowDefinitionArn=None, HumanLoopInput=None, DataAttributes=None):
"""
Starts a human loop, provided that at least one activation condition is met.
See also: AWS API Documentation
Exceptions
:example: response = client.start_human_loop(
HumanLoopName='string',
FlowDefinitionArn='string',
HumanLoopInput={
'InputContent': 'string'
},
DataAttributes={
'ContentClassifiers': [
'FreeOfPersonallyIdentifiableInformation'|'FreeOfAdultContent',
]
}
)
:type HumanLoopName: string
:param HumanLoopName: [REQUIRED]\nThe name of the human loop.\n
:type FlowDefinitionArn: string
:param FlowDefinitionArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the flow definition associated with this human loop.\n
:type HumanLoopInput: dict
:param HumanLoopInput: [REQUIRED]\nAn object that contains information about the human loop.\n\nInputContent (string) -- [REQUIRED]Serialized input from the human loop. The input must be a string representation of a file in JSON format.\n\n\n
:type DataAttributes: dict
:param DataAttributes: Attributes of the specified data. Use DataAttributes to specify if your data is free of personally identifiable information and/or free of adult content.\n\nContentClassifiers (list) -- [REQUIRED]Declares that your content is free of personally identifiable information or adult content.\nAmazon SageMaker can restrict the Amazon Mechanical Turk workers who can view your task based on this information.\n\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'HumanLoopArn': 'string'
}
Response Structure
(dict) --
HumanLoopArn (string) --
The Amazon Resource Name (ARN) of the human loop.
Exceptions
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.ServiceQuotaExceededException
AugmentedAIRuntime.Client.exceptions.InternalServerException
AugmentedAIRuntime.Client.exceptions.ConflictException
:return: {
'HumanLoopArn': 'string'
}
:returns:
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.ServiceQuotaExceededException
AugmentedAIRuntime.Client.exceptions.InternalServerException
AugmentedAIRuntime.Client.exceptions.ConflictException
"""
pass
def stop_human_loop(HumanLoopName=None):
"""
Stops the specified human loop.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_human_loop(
HumanLoopName='string'
)
:type HumanLoopName: string
:param HumanLoopName: [REQUIRED]\nThe name of the human loop that you want to stop.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
:return: {}
:returns:
AugmentedAIRuntime.Client.exceptions.ValidationException
AugmentedAIRuntime.Client.exceptions.ResourceNotFoundException
AugmentedAIRuntime.Client.exceptions.ThrottlingException
AugmentedAIRuntime.Client.exceptions.InternalServerException
"""
pass
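# Note (not part of the original stub file): pyboto3 modules such as this one
# are docstring-only stubs used for editor autocompletion; the real calls are
# made through a boto3 client, e.g.
#   import boto3
#   client = boto3.client('sagemaker-a2i-runtime')
#   client.list_human_loops(FlowDefinitionArn='arn:...')   # ARN elided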
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 30.654945 | 453 | 0.727344 |

hexsha: 986db5b99aecf1c78aecd810ab822dcf12494c2b · size: 18,689 · ext: py · lang: Python
repo: RenskeW/cwltool @ 8ef515037de411abd2f84b569ad4d4a4f7a2c7a0 · path: cwltool/workflow.py · licenses: ["Apache-2.0"] (same values in the max_stars, max_issues, and max_forks columns)

| | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | 11 | 2022-02-17T03:20:41.000Z | 2022-03-30T10:54:02.000Z |
| max_forks | null | null | null |

content:

```python
import copy
import datetime
import functools
import logging
import random
from typing import (
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
cast,
)
from uuid import UUID
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine, indent
from . import command_line_tool, context, procgenerator
from .checker import circular_dependency_checker, static_checker
from .context import LoadingContext, RuntimeContext, getdefault
from .errors import WorkflowException
from .load_tool import load_tool
from .loghandler import _logger
from .process import Process, get_overrides, shortname
from .provenance_profile import ProvenanceProfile
from .utils import (
CWLObjectType,
JobsGeneratorType,
OutputCallbackType,
StepType,
aslist,
)
from .workflow_job import WorkflowJob
def default_make_tool(
toolpath_object: CommentedMap, loadingContext: LoadingContext
) -> Process:
if not isinstance(toolpath_object, MutableMapping):
raise WorkflowException("Not a dict: '%s'" % toolpath_object)
if "class" in toolpath_object:
if toolpath_object["class"] == "CommandLineTool":
return command_line_tool.CommandLineTool(toolpath_object, loadingContext)
if toolpath_object["class"] == "ExpressionTool":
return command_line_tool.ExpressionTool(toolpath_object, loadingContext)
if toolpath_object["class"] == "Workflow":
return Workflow(toolpath_object, loadingContext)
if toolpath_object["class"] == "ProcessGenerator":
return procgenerator.ProcessGenerator(toolpath_object, loadingContext)
if toolpath_object["class"] == "Operation":
return command_line_tool.AbstractOperation(toolpath_object, loadingContext)
raise WorkflowException(
"Missing or invalid 'class' field in "
"%s, expecting one of: CommandLineTool, ExpressionTool, Workflow"
% toolpath_object["id"]
)
context.default_make_tool = default_make_tool
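# Illustration (not part of the original file): default_make_tool dispatches on
# the CWL "class" field of the parsed document, e.g.
#   tool = default_make_tool(CommentedMap({"class": "CommandLineTool", ...}),
#                            loading_context)   # hypothetical LoadingContext
#   # -> command_line_tool.CommandLineTool instance; an unknown or missing
#   #    "class" raises WorkflowException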
class Workflow(Process):
def __init__(
self,
toolpath_object: CommentedMap,
loadingContext: LoadingContext,
) -> None:
"""Initialize this Workflow."""
super().__init__(toolpath_object, loadingContext)
self.provenance_object = None # type: Optional[ProvenanceProfile]
if loadingContext.research_obj is not None:
run_uuid = None # type: Optional[UUID]
is_main = not loadingContext.prov_obj # Not yet set
if is_main:
run_uuid = loadingContext.research_obj.ro_uuid
self.provenance_object = ProvenanceProfile(
loadingContext.research_obj,
full_name=loadingContext.cwl_full_name,
host_provenance=loadingContext.host_provenance,
user_provenance=loadingContext.user_provenance,
orcid=loadingContext.orcid,
run_uuid=run_uuid,
fsaccess=loadingContext.research_obj.fsaccess,
) # inherit RO UUID for main wf run
# TODO: Is Workflow(..) only called when we are the main workflow?
self.parent_wf = self.provenance_object
# FIXME: Won't this overwrite prov_obj for nested workflows?
loadingContext.prov_obj = self.provenance_object
loadingContext = loadingContext.copy()
loadingContext.requirements = self.requirements
loadingContext.hints = self.hints
self.steps = [] # type: List[WorkflowStep]
validation_errors = []
for index, step in enumerate(self.tool.get("steps", [])):
try:
self.steps.append(
self.make_workflow_step(
step, index, loadingContext, loadingContext.prov_obj
)
)
except ValidationException as vexc:
if _logger.isEnabledFor(logging.DEBUG):
_logger.exception("Validation failed at")
validation_errors.append(vexc)
if validation_errors:
raise ValidationException("\n".join(str(v) for v in validation_errors))
random.shuffle(self.steps)
# statically validate data links instead of doing it at runtime.
workflow_inputs = self.tool["inputs"]
workflow_outputs = self.tool["outputs"]
step_inputs = [] # type: List[CWLObjectType]
step_outputs = [] # type: List[CWLObjectType]
param_to_step = {} # type: Dict[str, CWLObjectType]
for step in self.steps:
step_inputs.extend(step.tool["inputs"])
step_outputs.extend(step.tool["outputs"])
for s in step.tool["inputs"]:
param_to_step[s["id"]] = step.tool
for s in step.tool["outputs"]:
param_to_step[s["id"]] = step.tool
if getdefault(loadingContext.do_validate, True):
static_checker(
workflow_inputs,
workflow_outputs,
step_inputs,
step_outputs,
param_to_step,
)
circular_dependency_checker(step_inputs)
def make_workflow_step(
self,
toolpath_object: CommentedMap,
pos: int,
loadingContext: LoadingContext,
parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> "WorkflowStep":
return WorkflowStep(toolpath_object, pos, loadingContext, parentworkflowProv)
def job(
self,
job_order: CWLObjectType,
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
builder = self._init_job(job_order, runtimeContext)
if runtimeContext.research_obj is not None:
if runtimeContext.toplevel:
# Record primary-job.json
runtimeContext.research_obj.fsaccess = runtimeContext.make_fs_access("")
runtimeContext.research_obj.create_job(builder.job)
job = WorkflowJob(self, runtimeContext)
yield job
runtimeContext = runtimeContext.copy()
runtimeContext.part_of = "workflow %s" % job.name
runtimeContext.toplevel = False
yield from job.job(builder.job, output_callbacks, runtimeContext)
def visit(self, op: Callable[[CommentedMap], None]) -> None:
op(self.tool)
for step in self.steps:
step.visit(op)
def used_by_step(step: StepType, shortinputid: str) -> bool:
for st in cast(MutableSequence[CWLObjectType], step["in"]):
if st.get("valueFrom"):
if ("inputs.%s" % shortinputid) in cast(str, st.get("valueFrom")):
return True
if step.get("when"):
if ("inputs.%s" % shortinputid) in cast(str, step.get("when")):
return True
return False
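# --- Illustrative note (added for context; the step dict below is hypothetical) ---
# used_by_step() reports an input as "used" when the step's valueFrom or when
# expressions mention it as inputs.<name>, e.g.:
#   step = {"in": [{"id": "threshold", "valueFrom": "$(inputs.threshold * 2)"}]}
#   used_by_step(step, "threshold")  # -> True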
class WorkflowStep(Process):
def __init__(
self,
toolpath_object: CommentedMap,
pos: int,
loadingContext: LoadingContext,
parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> None:
"""Initialize this WorkflowStep."""
debug = loadingContext.debug
if "id" in toolpath_object:
self.id = toolpath_object["id"]
else:
self.id = "#step" + str(pos)
loadingContext = loadingContext.copy()
parent_requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
loadingContext.requirements = copy.deepcopy(
toolpath_object.get("requirements", [])
)
assert loadingContext.requirements is not None # nosec
for parent_req in parent_requirements:
found_in_step = False
for step_req in loadingContext.requirements:
if parent_req["class"] == step_req["class"]:
found_in_step = True
break
if not found_in_step:
loadingContext.requirements.append(parent_req)
loadingContext.requirements.extend(
cast(
List[CWLObjectType],
get_overrides(
getdefault(loadingContext.overrides_list, []), self.id
).get("requirements", []),
)
)
hints = copy.deepcopy(getdefault(loadingContext.hints, []))
hints.extend(toolpath_object.get("hints", []))
loadingContext.hints = hints
try:
if isinstance(toolpath_object["run"], CommentedMap):
self.embedded_tool = loadingContext.construct_tool_object(
toolpath_object["run"], loadingContext
) # type: Process
else:
loadingContext.metadata = {}
self.embedded_tool = load_tool(toolpath_object["run"], loadingContext)
except ValidationException as vexc:
if loadingContext.debug:
_logger.exception("Validation exception")
raise WorkflowException(
"Tool definition %s failed validation:\n%s"
% (toolpath_object["run"], indent(str(vexc)))
) from vexc
validation_errors = []
self.tool = toolpath_object = copy.deepcopy(toolpath_object)
bound = set()
if self.embedded_tool.get_requirement("SchemaDefRequirement")[0]:
if "requirements" not in toolpath_object:
toolpath_object["requirements"] = []
toolpath_object["requirements"].append(
self.embedded_tool.get_requirement("SchemaDefRequirement")[0]
)
for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
toolpath_object[toolfield] = []
for index, step_entry in enumerate(toolpath_object[stepfield]):
if isinstance(step_entry, str):
param = CommentedMap() # type: CommentedMap
inputid = step_entry
else:
param = CommentedMap(step_entry.items())
inputid = step_entry["id"]
shortinputid = shortname(inputid)
found = False
for tool_entry in self.embedded_tool.tool[toolfield]:
frag = shortname(tool_entry["id"])
if frag == shortinputid:
                        # If the step provides a default for a parameter, we do not
                        # want the tool's own default to override it
step_default = None
if "default" in param and "default" in tool_entry:
step_default = param["default"]
param.update(tool_entry)
param["_tool_entry"] = tool_entry
if step_default is not None:
param["default"] = step_default
found = True
bound.add(frag)
break
if not found:
if stepfield == "in":
param["type"] = "Any"
param["used_by_step"] = used_by_step(self.tool, shortinputid)
param["not_connected"] = True
else:
if isinstance(step_entry, Mapping):
step_entry_name = step_entry["id"]
else:
step_entry_name = step_entry
validation_errors.append(
SourceLine(
self.tool["out"], index, include_traceback=debug
).makeError(
"Workflow step output '%s' does not correspond to"
% shortname(step_entry_name)
)
+ "\n"
+ SourceLine(
self.embedded_tool.tool,
"outputs",
include_traceback=debug,
).makeError(
" tool output (expected '%s')"
% (
"', '".join(
[
shortname(tool_entry["id"])
for tool_entry in self.embedded_tool.tool[
"outputs"
]
]
)
)
)
)
param["id"] = inputid
param.lc.line = toolpath_object[stepfield].lc.data[index][0]
param.lc.col = toolpath_object[stepfield].lc.data[index][1]
param.lc.filename = toolpath_object[stepfield].lc.filename
toolpath_object[toolfield].append(param)
missing_values = []
for _, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
if shortname(tool_entry["id"]) not in bound:
if "null" not in tool_entry["type"] and "default" not in tool_entry:
missing_values.append(shortname(tool_entry["id"]))
if missing_values:
validation_errors.append(
SourceLine(self.tool, "in", include_traceback=debug).makeError(
"Step is missing required parameter%s '%s'"
% (
"s" if len(missing_values) > 1 else "",
"', '".join(missing_values),
)
)
)
if validation_errors:
raise ValidationException("\n".join(validation_errors))
super().__init__(toolpath_object, loadingContext)
if self.embedded_tool.tool["class"] == "Workflow":
(feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
if not feature:
raise WorkflowException(
"Workflow contains embedded workflow but "
"SubworkflowFeatureRequirement not in requirements"
)
if "scatter" in self.tool:
(feature, _) = self.get_requirement("ScatterFeatureRequirement")
if not feature:
raise WorkflowException(
"Workflow contains scatter but ScatterFeatureRequirement "
"not in requirements"
)
inputparms = copy.deepcopy(self.tool["inputs"])
outputparms = copy.deepcopy(self.tool["outputs"])
scatter = aslist(self.tool["scatter"])
method = self.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise ValidationException(
"Must specify scatterMethod when scattering over multiple inputs"
)
inp_map = {i["id"]: i for i in inputparms}
for inp in scatter:
if inp not in inp_map:
                    raise SourceLine(
self.tool, "scatter", ValidationException, debug
).makeError(
"Scatter parameter '%s' does not correspond to "
"an input parameter of this step, expecting '%s'"
% (
shortname(inp),
"', '".join(shortname(k) for k in inp_map.keys()),
)
)
inp_map[inp]["type"] = {"type": "array", "items": inp_map[inp]["type"]}
if self.tool.get("scatterMethod") == "nested_crossproduct":
nesting = len(scatter)
else:
nesting = 1
for _ in range(0, nesting):
for oparam in outputparms:
oparam["type"] = {"type": "array", "items": oparam["type"]}
self.tool["inputs"] = inputparms
self.tool["outputs"] = outputparms
self.prov_obj = None # type: Optional[ProvenanceProfile]
if loadingContext.research_obj is not None:
self.prov_obj = parentworkflowProv
if self.embedded_tool.tool["class"] == "Workflow":
self.parent_wf = self.embedded_tool.parent_wf
else:
self.parent_wf = self.prov_obj
def receive_output(
self,
output_callback: OutputCallbackType,
jobout: CWLObjectType,
processStatus: str,
) -> None:
output = {}
for i in self.tool["outputs"]:
field = shortname(i["id"])
if field in jobout:
output[i["id"]] = jobout[field]
else:
processStatus = "permanentFail"
output_callback(output, processStatus)
def job(
self,
job_order: CWLObjectType,
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
"""Initialize sub-workflow as a step in the parent profile."""
if (
self.embedded_tool.tool["class"] == "Workflow"
and runtimeContext.research_obj
and self.prov_obj
and self.embedded_tool.provenance_object
):
self.embedded_tool.parent_wf = self.prov_obj
process_name = self.tool["id"].split("#")[1]
self.prov_obj.start_process(
process_name,
datetime.datetime.now(),
self.embedded_tool.provenance_object.workflow_run_uri,
)
step_input = {}
for inp in self.tool["inputs"]:
field = shortname(inp["id"])
if not inp.get("not_connected"):
step_input[field] = job_order[inp["id"]]
try:
yield from self.embedded_tool.job(
step_input,
functools.partial(self.receive_output, output_callbacks),
runtimeContext,
)
except WorkflowException:
_logger.error("Exception on step '%s'", runtimeContext.name)
raise
except Exception as exc:
_logger.exception("Unexpected exception")
raise WorkflowException(str(exc)) from exc
def visit(self, op: Callable[[CommentedMap], None]) -> None:
self.embedded_tool.visit(op)
| 39.679406
| 88
| 0.554872
|
21a33869ce4ffd54c88f6ed40031637b8abd2f28
| 1,223
|
py
|
Python
|
examples/run_cell_hppc_ecm.py
|
sratgh/equiv-circ-model
|
0c8c30814b819e893f49a810eae090a6dabe39e9
|
[
"MIT"
] | 2
|
2020-03-06T21:22:14.000Z
|
2020-03-26T20:13:52.000Z
|
examples/run_cell_hppc_ecm.py
|
sratgh/equiv-circ-model
|
0c8c30814b819e893f49a810eae090a6dabe39e9
|
[
"MIT"
] | null | null | null |
examples/run_cell_hppc_ecm.py
|
sratgh/equiv-circ-model
|
0c8c30814b819e893f49a810eae090a6dabe39e9
|
[
"MIT"
] | null | null | null |
"""
Run equivalent circuit model (ECM) for battery cell and compare to HPPC data.
Plot HPPC voltage data and ECM voltage. Plot absolute voltage difference
between HPPC data and ECM.
"""
import matplotlib.pyplot as plt
import params
from ecm import CellHppcData
from ecm import EquivCircModel
from utils import config_ax
# Battery cell HPPC data and equivalent circuit model
# ----------------------------------------------------------------------------
file_hppc = 'data/cell-low-current-hppc-25c-2.csv'
data = CellHppcData.process(file_hppc)
ecm = EquivCircModel(data, params)
soc = ecm.soc()
ocv = ecm.ocv(soc)
coeffs = ecm.curve_fit_coeff(ecm.func_ttc, 5)
rctau = ecm.rctau_ttc(coeffs)
vt = ecm.vt(soc, ocv, rctau)
# Plot HPPC data and equivalent circuit model
# ----------------------------------------------------------------------------
fig, ax = plt.subplots(tight_layout=True)
ax.plot(data.time, data.voltage, 'C3', label='data')
ax.plot(data.time, vt, 'k--', label='ecm')
config_ax(ax, xylabels=('Time [s]', 'Voltage [V]'), loc='best')
fig, ax = plt.subplots(tight_layout=True)
ax.plot(data.time, abs(data.voltage - vt))
config_ax(ax, xylabels=('Time [s]', 'Absolute voltage difference [V]'))
plt.show()
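# --- Self-contained sketch (added for illustration; independent of the ecm package) ---
# The comparison plotted above reduces to an element-wise absolute difference
# between a measured voltage trace and a modelled one; the toy arrays are made up.
import numpy as np
v_measured = np.array([4.10, 4.08, 4.05, 4.01])
v_model = np.array([4.11, 4.07, 4.06, 4.00])
abs_diff = np.abs(v_measured - v_model)
print('worst-case model error for the toy trace:', abs_diff.max())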
| 29.829268
| 78
| 0.642682
|
6ec53dff1ae260c52fabd5f6d1f2f3377d6fdaab
| 1,795
|
py
|
Python
|
daily_exercise/148. Sort List_medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
daily_exercise/148. Sort List_medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
daily_exercise/148. Sort List_medium.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
from typing import Optional
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:
if not head:
return None
dummy_head = ListNode()
dummy_head.next = head
length_list = 0
while head:
length_list += 1
head = head.next
head = dummy_head.next
def merge_sort(head, length_list):
if length_list == 1:
return head
head_a = head
pivot = length_list // 2
prev, curr = None, head
while pivot > 0:
prev, curr = curr, curr.next
pivot -= 1
prev.next = None
head_b = curr
length_a, length_b = length_list // 2, length_list - length_list // 2
head_a_sorted, head_b_sorted = merge_sort(head_a, length_a), merge_sort(head_b, length_b)
return self.merge(head_a_sorted, head_b_sorted)
return merge_sort(head, length_list)
def merge(self, head_a, head_b):
dummy = ListNode()
if head_a.val <= head_b.val:
dummy.next = head_a
curr_a, curr_b = head_a.next, head_b
else:
dummy.next = head_b
curr_a, curr_b = head_a, head_b.next
curr = dummy.next
while curr_a and curr_b:
if curr_a.val >= curr_b.val:
curr.next, curr_b = curr_b, curr_b.next
else:
curr.next, curr_a = curr_a, curr_a.next
curr = curr.next
if curr_a:
curr.next = curr_a
else:
curr.next = curr_b
return dummy.next
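# --- Usage sketch (added for illustration; not part of the original solution) ---
# Builds a small unsorted linked list, sorts it with Solution.sortList, and
# prints the values; the helper names below are made up for this demo.
def _build_list(values):
    dummy = ListNode()
    curr = dummy
    for v in values:
        curr.next = ListNode(v)
        curr = curr.next
    return dummy.next
def _collect(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out
if __name__ == "__main__":
    sorted_head = Solution().sortList(_build_list([4, 2, 1, 3]))
    print(_collect(sorted_head))  # expected: [1, 2, 3, 4]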
| 28.951613
| 101
| 0.532033
|
c67c85c4ea4deca21aad45b0fe300d6d465c029d
| 1,978
|
py
|
Python
|
python-project/experiments/continuous/ContinuousExperiment.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
python-project/experiments/continuous/ContinuousExperiment.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
python-project/experiments/continuous/ContinuousExperiment.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
from scipy.io import arff
from spn.structure.Base import Context
import pandas as pd
from methods import MSPN, KDE
from abc import ABCMeta, abstractmethod
class ContinuousExperiment(metaclass=ABCMeta):
def __init__(self, data_name):
self.data_name = data_name
@property
@abstractmethod
def meta_types(self):
pass
@property
@abstractmethod
def var_types_string(self):
pass
def run(self, run: int, n_folds: int, fold_log: bool):
base_path = "../../../data/continuous/" + self.data_name + "/10_folds/"
train_datasets = []
test_datasets = []
ds_contexts = []
# Prepare folds' data
for i in range(1, 11):
train_data_path = base_path + self.data_name + "_" + str(i) + "_train.arff"
test_data_path = base_path + self.data_name + "_" + str(i) + "_test.arff"
# Load data
train_data = arff.loadarff(train_data_path)
train_data = pd.DataFrame(train_data[0])
train_data = train_data.values
train_datasets.append(train_data)
test_data = arff.loadarff(test_data_path)
test_data = pd.DataFrame(test_data[0])
test_data = test_data.values
test_datasets.append(test_data)
# Create context for MSPN algorithm
ds_context = Context(self.meta_types)
ds_contexts.append(ds_context)
# Apply KDE
results_path = "../../../results/run_" + str(run) + "/continuous/" + self.data_name + "/" + str(n_folds) + "_folds/KDE/"
KDE.apply(train_datasets, self.var_types_string, test_datasets, n_folds, results_path, self.data_name, fold_log)
# Apply MSPN
#results_path = "../../../results/run_" + str(run) + "/continuous/" + self.data_name + "/" + str(n_folds) + "_folds/MSPN/"
#MSPN.apply(train_datasets, ds_contexts, test_datasets, n_folds, results_path, self.data_name, fold_log)
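# --- Illustrative subclass sketch (added; not part of the original module) ---
# ContinuousExperiment is abstract, so a concrete experiment only has to supply
# the two abstract properties. The dataset name, the variable count, and the
# var_types_string format expected by KDE.apply are assumptions for this demo.
from spn.structure.StatisticalTypes import MetaType
class ExampleContinuousExperiment(ContinuousExperiment):
    def __init__(self):
        super().__init__("example_dataset")
    @property
    def meta_types(self):
        return [MetaType.REAL] * 5
    @property
    def var_types_string(self):
        return ["continuous"] * 5
# ExampleContinuousExperiment().run(run=1, n_folds=10, fold_log=True)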
| 35.321429
| 130
| 0.625885
|
3c125bc305ad7d39f655cb9aece1ad34c4c89a16
| 18,068
|
py
|
Python
|
TWLight/applications/forms.py
|
nicole331/TWLight
|
fab9002e76868f8a2ef36f9279c777de34243b2c
|
[
"MIT"
] | null | null | null |
TWLight/applications/forms.py
|
nicole331/TWLight
|
fab9002e76868f8a2ef36f9279c777de34243b2c
|
[
"MIT"
] | 60
|
2018-11-05T16:40:16.000Z
|
2022-03-29T02:19:35.000Z
|
TWLight/applications/forms.py
|
nicole331/TWLight
|
fab9002e76868f8a2ef36f9279c777de34243b2c
|
[
"MIT"
] | null | null | null |
"""
This forms.py contains base forms that applications/views.py will use to
generate the actual forms filled in by users in making requests for partner
content.
For usability reasons, we only want users to have to fill in one form at a time
(even if they are requesting access to multiple partners' resources), and we
only want to ask them once for any piece of data even if multiple partners
require it, and we *don't* want to ask them for data that *isn't* required by
any of the partners in their set.
This means that the actual form we present to users must be generated
dynamically; we cannot hardcode it here. What we have here instead is a base
form that takes a dict of required fields, and constructs the form accordingly.
(See the docstring of BaseApplicationForm for the expected dict format.)
"""
from dal import autocomplete
from crispy_forms.bootstrap import InlineField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit, BaseInput, Div, HTML
import logging
import re
from django import forms
from django.urls import reverse
from django.utils.translation import gettext as _
from TWLight.resources.models import Partner, Stream
from TWLight.users.groups import get_coordinators
from TWLight.users.models import Editor, Authorization
from .helpers import (
USER_FORM_FIELDS,
PARTNER_FORM_OPTIONAL_FIELDS,
PARTNER_FORM_BASE_FIELDS,
FIELD_TYPES,
FIELD_LABELS,
SPECIFIC_STREAM,
AGREEMENT_WITH_TERMS_OF_USE,
ACCOUNT_EMAIL,
)
from .models import Application
logger = logging.getLogger(__name__)
coordinators = get_coordinators()
class StylableSubmit(BaseInput):
"""
The built-in Submit adds classes that don't look right in our context;
we actually have to create our own input to get around this.
"""
input_type = "submit"
def __init__(self, *args, **kwargs):
self.field_classes = ""
super(StylableSubmit, self).__init__(*args, **kwargs)
class BaseApplicationForm(forms.Form):
"""
Given a dict of parameters describing the required fields for this
application, constructs a suitable application form.
Expected dict format:
{
'user': [list, of, required, user, data, fields],
'partner_1': [list, of, required, fields, for, partner, 1],
'partner_2': [list, of, required, fields, for, partner, 2],
(additional partners as needed)
}
'user' is mandatory. 'partner_1' is mandatory. Additional partners are
optional.
See https://django-crispy-forms.readthedocs.org/ for information on form
layout.
"""
def __init__(self, *args, **kwargs):
self._validate_parameters(**kwargs)
self.field_params = kwargs.pop("field_params")
try:
self.user = kwargs.pop("requested_user")
except KeyError:
pass
super(BaseApplicationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self._initialize_form_helper()
self.helper.layout = Layout()
user_data = self.field_params.pop("user")
self._add_user_data_subform(user_data)
# For each partner, build a partner data section of the form.
for partner in self.field_params:
self._add_partner_data_subform(partner)
# Make sure to align any checkbox inputs with other field types
self.helper.filter_by_widget(forms.CheckboxInput).wrap(
Div, css_class="col-sm-8 col-sm-offset-4 col-md-9 col-md-offset-3 apply-tos"
)
self.helper.add_input(
Submit(
"submit",
# Translators: Labels the button users click to apply for a partner's resources.
_("Apply"),
css_class="center-block",
)
)
def _get_partner_object(self, partner):
# Extract the number component of (e.g.) 'partner_1'.
try:
partner_id = partner[8:]
# Verify that it is the ID number of a real partner.
partner = Partner.objects.get(id=partner_id)
return partner
except Partner.DoesNotExist:
logger.exception(
"BaseApplicationForm received a partner ID that "
"did not match any partner in the database"
)
raise
def _validate_parameters(self, **kwargs):
"""
Ensure that parameters have been passed in and match the format
specified in the docstring.
"""
try:
field_params = kwargs["field_params"]
except KeyError:
logger.exception(
"Tried to instantiate a BaseApplicationForm but "
"did not have field_params"
)
raise
try:
assert "user" in field_params
except AssertionError:
logger.exception(
"Tried to instantiate a BaseApplicationForm but "
"there was no user parameter in field_params"
)
raise
try:
# We should have 'user' plus at least one partner in the keys.
assert len(list(field_params.keys())) >= 2
except AssertionError:
logger.exception(
"Tried to instantiate a BaseApplicationForm but "
"there was not enough information in field_params"
)
raise
expected = re.compile(r"partner_\d+")
for key in list(field_params.keys()):
# All keys which are not the user data should be partner data.
if key != "user":
try:
assert expected.match(key)
except AssertionError:
logger.exception(
"Tried to instantiate a BaseApplicationForm but "
"there was a key that did not match any expected values"
)
def _validate_user_data(self, user_data):
try:
assert set(user_data) <= set(USER_FORM_FIELDS)
except AssertionError:
logger.exception("BaseApplicationForm received invalid user data")
raise
def _validate_partner_data(self, partner_data):
try:
assert set(partner_data) <= set(PARTNER_FORM_OPTIONAL_FIELDS)
except AssertionError:
logger.exception("BaseApplicationForm received invalid partner data")
raise
def _initialize_form_helper(self):
# Add basic styling to the form.
self.helper.form_class = "form-horizontal"
self.helper.label_class = "col-xs-12 col-sm-4 col-md-3"
self.helper.field_class = "col-xs-12 col-sm-8 col-md-9"
def _add_user_data_subform(self, user_data):
self._validate_user_data(user_data)
if user_data:
# Translators: This labels a section of a form where we ask users to enter personal information (such as their country of residence) when making an application.
user_data_layout = Fieldset(_("About you"))
for datum in user_data:
self.fields[datum] = FIELD_TYPES[datum]
self.fields[datum].label = FIELD_LABELS[datum]
# Show which partner wants which personal data if applying
# for more than one.
if len(self.field_params) > 1:
# fmt: off
# Translators: This text is shown in the application form under each piece of personal information requested. {partner_list} will be a list of 2 or more organisations that require this personal data, and should not be translated.
self.fields[datum].help_text = _("Requested by: {partner_list}").format(
partner_list=", ".join(user_data[datum])
                    )
# fmt: on
user_data_layout.append(datum)
self.helper.layout.append(user_data_layout)
# fmt: off
# Translators: This note appears in a section of a form where we ask users to enter info (like country of residence) when applying for resource access.
disclaimer_html = _("<p><small><i>Your personal data will be processed according to our <a href='{terms_url}'> privacy policy</a>.</i></small></p>").format(
terms_url=reverse("terms")
)
# fmt: on
self.helper.layout.append(HTML(disclaimer_html))
def _add_partner_data_subform(self, partner):
partner_data = self.field_params[partner]
partner_object = self._get_partner_object(partner)
partner_layout = Fieldset(
            # Translators: This is the title of the application form page, where users enter information required for the application. It lets the user know which partner application they are entering data for. Do not translate {partner}.
_("Your application to {partner}").format(partner=partner_object)
)
self._validate_partner_data(partner_data)
# partner_data lists the optional fields required by that partner;
# base fields should be in the form for all partners.
all_partner_data = partner_data + PARTNER_FORM_BASE_FIELDS
if all_partner_data:
for datum in all_partner_data:
# This will yield fields with names like 'partner_1_occupation'.
# This will let us tell during form processing which fields
# belong to which partners.
field_name = "{partner}_{datum}".format(partner=partner, datum=datum)
self.fields[field_name] = FIELD_TYPES[datum]
self.fields[field_name].label = FIELD_LABELS[datum]
if datum == AGREEMENT_WITH_TERMS_OF_USE:
# Make sure that, if the partner requires agreement with
# terms of use, that link is provided inline.
help_text = '<a href="{url}">{url}</a>'.format(
url=partner_object.terms_of_use
)
self.fields[field_name].help_text = help_text
if datum == SPECIFIC_STREAM:
# Only show streams for this partner
partner_id = int(partner[8:])
# We use the logic below to filter out the streams for which
# the user already has authorizations. Streams with authorizations
# can only be renewed (as opposed to applying) from the My Library
# page.
queryset = Stream.objects.filter(partner_id=partner_id)
# We need a user if we are to determine which streams have authorizations.
# We set the user in the view code if a partner has streams.
if self.user:
all_authorizations = Authorization.objects.filter(
user=self.user, partners=partner_id, stream__isnull=False
)
existing_streams = []
for each_authorization in all_authorizations:
existing_streams.append(each_authorization.stream.id)
if len(existing_streams) > len(set(existing_streams)):
logger.info(
"Multiple authorizations returned for the same partner {}, same stream for user {}. "
"Unable to pop options.".format(partner_id, self.user)
)
break
else:
# We exclude the streams that already have authorizations.
queryset = Stream.objects.exclude(
id__in=existing_streams
).filter(partner_id=partner_id)
specific_stream = forms.ModelChoiceField(
queryset=queryset, empty_label=None
)
self.fields[field_name] = specific_stream
self.fields[field_name].label = FIELD_LABELS[datum]
if datum == ACCOUNT_EMAIL:
# If partner requires pre-registration, make sure users
# get a link where they can sign up.
url = '<a href="{url}">{url}</a>'.format(
url=partner_object.registration_url
)
# Translators: For some applications, users must register at another website before finishing the application form, and must then enter their email address used when registering. Don't translate {url}.
help_text = _("You must register at {url} before applying.").format(
url=url
)
self.fields[field_name].help_text = help_text
partner_layout.append(field_name)
self.helper.layout.append(partner_layout)
class ApplicationAutocomplete(forms.ModelForm):
class Meta:
model = Application
fields = ["editor", "partner"]
widgets = {
"editor": autocomplete.ModelSelect2(
url="applications:editor_autocomplete",
attrs={"data-placeholder": "Username"},
),
"partner": autocomplete.ModelSelect2(
url="applications:partner_autocomplete",
attrs={"data-placeholder": "Partner"},
),
}
def __init__(self, user=None, *args, **kwargs):
super(ApplicationAutocomplete, self).__init__(*args, **kwargs)
# Make sure that we aren't leaking info via our form choices.
if user.is_superuser:
self.fields["editor"].queryset = Editor.objects.all().order_by(
"wp_username"
)
self.fields["partner"].queryset = Partner.objects.all().order_by(
"company_name"
)
elif coordinators in user.groups.all():
self.fields["editor"].queryset = Editor.objects.filter(
applications__partner__coordinator__pk=user.pk
).order_by("wp_username")
self.fields["partner"].queryset = Partner.objects.filter(
coordinator__pk=user.pk
).order_by("company_name")
# Prettify.
self.helper = FormHelper()
self.helper.form_class = "form-inline"
self.helper.field_template = "bootstrap3/layout/inline_field.html"
self.helper.layout = Layout(
InlineField("editor"),
InlineField("partner"),
StylableSubmit("submit", "Filter", css_class="btn btn-default"),
)
# Required on the model, but optional for autocomplete, so override
# the default.
self.fields["editor"].required = False
self.fields["partner"].required = False
# Internationalize user-visible labels. These will appear inline as
# placeholders.
# Translators: Label of the field where coordinators can enter the username of a user
self.fields["editor"].label = _("Username")
# Translators: Label of the field where coordinators can enter the name of a partner
self.fields["partner"].label = _("Partner name")
class RenewalForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
self.field_params = kwargs.pop("field_params")
except KeyError:
logger.exception(
"Tried to instantiate a RenewalForm but did not have field_params"
)
raise
super(RenewalForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
# Translators: This will be the title of the page where users will have to confirm their renewal request of an application.
fieldset = Fieldset(_("Renewal confirmation"))
account_email = False
if (
"account_email" in self.field_params
and self.field_params["account_email"] is not None
):
self.fields["account_email"] = forms.EmailField(
initial=self.field_params["account_email"]
)
account_email = True
elif "account_email" in self.field_params:
self.fields["account_email"] = forms.EmailField()
account_email = True
if account_email:
# fmt: off
# Translators: This labels an email field where users will be asked to enter their emails as part of the application renewal confirmation.
self.fields["account_email"].label = _("The email for your account on the partner's website")
# fmt: on
fieldset.append("account_email")
if "requested_access_duration" in self.field_params:
self.fields["requested_access_duration"] = forms.ChoiceField(
choices=Application.REQUESTED_ACCESS_DURATION_CHOICES
)
# fmt: off
# Translators: This labels a choice field where users will have to select the number of months they wish to have their access for as part of the application renewal confirmation.
self.fields["requested_access_duration"].label = _("The number of months you wish to have this access for before renewal is required")
# fmt: on
fieldset.append("requested_access_duration")
self.fields["return_url"] = forms.CharField(
widget=forms.HiddenInput, max_length=70
)
self.fields["return_url"].initial = self.field_params["return_url"]
fieldset.append("return_url")
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.form_class = "form-horizontal"
self.helper.label_class = "col-lg-3"
self.helper.field_class = "col-lg-4"
self.helper.layout = Layout()
self.helper.layout.append(fieldset)
| 41.921114
| 249
| 0.60704
|
f9ab9e9c953697269cb2a183fc82d6b8bb98e225
| 623
|
py
|
Python
|
World 1/First attempts/ex006 - Double, Triple and Square root.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 2
|
2021-04-23T19:18:06.000Z
|
2021-05-15T17:45:21.000Z
|
World 1/First attempts/ex006 - Double, Triple and Square root.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:29:23.000Z
|
2021-05-14T00:29:23.000Z
|
World 1/First attempts/ex006 - Double, Triple and Square root.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:19:33.000Z
|
2021-05-14T00:19:33.000Z
|
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
n1 = int(input("Enter a number: "))
print("Your number is {}{}{}"
"\nThe double of your number is {}{}{}"
"\nThe triple is {}{}{}"
"\nThe square root is {}{:.2f}{}"
.format(colors["purple"], n1, colors["clean"],
colors["purple"], n1*2, colors["clean"],
colors["purple"], n1*3, colors["clean"],
colors["purple"], pow(n1, 1/2), colors["clean"]))
| 36.647059
| 63
| 0.457464
|
fe2d3776bd09ce40884af8392ff7b8515e4bdef3
| 1,554
|
py
|
Python
|
wagtail/wagtailredirects/middleware.py
|
yohanlebret/wagtail
|
03c623b467ef8ed3849872273ebad13d48f755ac
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailredirects/middleware.py
|
yohanlebret/wagtail
|
03c623b467ef8ed3849872273ebad13d48f755ac
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailredirects/middleware.py
|
yohanlebret/wagtail
|
03c623b467ef8ed3849872273ebad13d48f755ac
|
[
"BSD-3-Clause"
] | 1
|
2019-03-05T15:37:22.000Z
|
2019-03-05T15:37:22.000Z
|
from django import http
from django.utils.six.moves.urllib.parse import urlparse
from wagtail.wagtailredirects import models
# Originally pinched from: https://github.com/django/django/blob/master/django/contrib/redirects/middleware.py
class RedirectMiddleware(object):
def process_response(self, request, response):
# No need to check for a redirect for non-404 responses.
if response.status_code != 404:
return response
# If a middleware before `SiteMiddleware` returned a response the
# `site` attribute was never set, ref #2120
if not hasattr(request, 'site'):
return response
# Get the path
path = models.Redirect.normalise_path(request.get_full_path())
# Get the path without the query string or params
path_without_query = urlparse(path).path
# Find redirect
try:
redirect = models.Redirect.get_for_site(request.site).get(old_path=path)
except models.Redirect.DoesNotExist:
if path == path_without_query:
# don't try again if we know we will get the same response
return response
try:
redirect = models.Redirect.get_for_site(request.site).get(old_path=path_without_query)
except models.Redirect.DoesNotExist:
return response
if redirect.is_permanent:
return http.HttpResponsePermanentRedirect(redirect.link)
else:
return http.HttpResponseRedirect(redirect.link)
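# --- Illustrative Django settings sketch (added; not part of this module) ---
# The middleware above only sees 404 responses once it is listed in the project
# settings, after the site middleware that sets request.site; the exact setting
# name and ordering depend on the Django/Wagtail version in use.
# MIDDLEWARE_CLASSES = [
#     "django.middleware.common.CommonMiddleware",
#     "wagtail.wagtailcore.middleware.SiteMiddleware",
#     "wagtail.wagtailredirects.middleware.RedirectMiddleware",
# ]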
| 37
| 110
| 0.664736
|
efac9518ec49ff143ed42e8acbaddd5d185bcc1f
| 1,358
|
py
|
Python
|
es_search.py
|
ops-over-elasticsearch/es-python-tools
|
6d542b70755d9ae247aa59ec1bbc4895db4c92c2
|
[
"Apache-2.0"
] | null | null | null |
es_search.py
|
ops-over-elasticsearch/es-python-tools
|
6d542b70755d9ae247aa59ec1bbc4895db4c92c2
|
[
"Apache-2.0"
] | null | null | null |
es_search.py
|
ops-over-elasticsearch/es-python-tools
|
6d542b70755d9ae247aa59ec1bbc4895db4c92c2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# coding:utf-8
"""
Elasticsearch request wrapper tool for Python.
"""
from es_wrapper_client import ElasticSearchWrapperClient
import json
if __name__ == '__main__':
es_client = ElasticSearchWrapperClient("http://xxx.com:9222")
strsss = ',\"from\":0, \"size\":10000'
search_dsl = '{\"filter\":{\"bool\":{\"must\":[{\"terms\":{\"businessChanceId\":[%s]}},{\"term\":{\"nature\":4}},{\"term\":{\"channel\":\"2dj\"}},{\"terms\":{\"taskStatus\":[0,1]}}]}},\"size\":300,"_source":[\"businessChanceId\"]}'
id_file = open('/home/zshell/Desktop/bid.txt', 'r')
id_list = id_file.readlines()
tmp_id_list = []
result_files = open('/home/zshell/Desktop/result', 'w')
condition = ''
count = 0
try:
for bid in id_list:
if bid.endswith('\r\n'):
bid = bid.strip('\r\n')
if bid.endswith('\n'):
bid = bid.strip('\n')
bid = '\"' + bid + '\"'
tmp_id_list.append(bid)
            if len(tmp_id_list) < 300:
                continue
            query = ','.join(tmp_id_list)
            dsl = search_dsl % query
            result = es_client.search('task_info_idx', 'task_info', dsl)
            for item in result:
                result_files.write(str(item) + '\n')
            tmp_id_list = []
        # Flush the ids left over from the last, partially filled batch.
        if tmp_id_list:
            dsl = search_dsl % ','.join(tmp_id_list)
            result = es_client.search('task_info_idx', 'task_info', dsl)
            for item in result:
                result_files.write(str(item) + '\n')
finally:
result_files.close()
| 36.702703
| 235
| 0.538292
|
398e1cef54c43d9188fa225c255745707737d56c
| 2,693
|
py
|
Python
|
gr_client.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | null | null | null |
gr_client.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | null | null | null |
gr_client.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | 1
|
2019-06-20T02:37:34.000Z
|
2019-06-20T02:37:34.000Z
|
"""
This is a client for gr_server.py
It attaches to the TCP sockets of both the GNU Radio server and the control
server of gr_server.
"""
import time
import numpy as np
import src.tcp_sync as ts
import src.dab_tuning_lib as dt
import pandas as pd
import src.SendDictTcp as sdt
use_fft=True
if use_fft: t1 = ts.UhdSyncMsg(port=47009, packet_size=4*16384, packet_type="f"*16384)
else: t1 = ts.UhdSyncMsg(port=47009, packet_size=4*1, packet_type="f")
sender = sdt.SendDictTcp('127.0.0.1', 1112)
sender.send({"txgain":83})
sender.send({"rxgain":15})
sender.send({"a1":0.8})
sender.send({"a2":0.0})
sender.send({"a3":0.0})
sender.send({"a4":0.0})
sender.send({"a5":0.0})
sender.send({"a6":0.0})
sender.send({"a7":0.0})
sender.send({"a8":0.0})
t1.has_msg()
np.mean(t1.get_msgs(10))
def measure_sholders(verbose = False, raw_data=False, std_max=0.025):
"""
    Measure the shoulders of the received FFT spectrum. Repeat the measurement
    if the relative standard deviation is larger than std_max.
"""
for i in range(20):
try:
if verbose: print("%d measurement" % i)
t1.has_msg()
msgs = t1.get_msgs_fft(200)
def sig_mean(s): return dt.calc_mean_in_freq_range(np.array(s), 8192000, -700000, 700000)
def sholder_mean(s): return dt.calc_mean_in_freq_range(np.array(s), 8192000, 900000, 1500000)
sig = [sig_mean(msg) for msg in msgs]
sholders = [sholder_mean(msg) for msg in msgs]
std = np.std(sholders)
mean = np.mean(sholders)
std_perc = std/np.abs(mean)
if verbose == 2:
print( {"mean":mean, "std":std, "std_perc":std_perc})
if std_perc > std_max:
if verbose: print("%.4f std/mean" % (std_perc))
continue
else:
if raw_data: return sholders, sig
else: return np.mean(sholders), np.mean(sig)
except Exception as e:
print (e)
raise Exception("Variance of measurement to high")
res = []
for i in range(5):
for txgain in range(80, 89):
sender.send({"txgain":txgain})
sender.send({"input_path":"/home/andreas/dab/out_cut.iq"})
sh, sig = measure_sholders(verbose=0, std_max=100)
res.append({"txgain":txgain, "shoulder":sh, "sig":sig, "dpd":False})
sender.send({"input_path":"/home/andreas/dab/out_dpd_cut.iq"})
sh, sig = measure_sholders(verbose=0, std_max=100)
res.append({"txgain":txgain, "shoulder":sh, "sig":sig, "dpd":True})
df = pd.DataFrame(res)
df.to_csv("~/dab/doc/dab_mod_sholder.csv")
sender.send({"txgain":20})
sender.send({"rxgain":15})
sender.send({"a1":0.1})
| 31.313953
| 105
| 0.623097
|
69070022bf34959e64cc6e868439c27739889c68
| 353
|
py
|
Python
|
tests/src/test_models.py
|
DinithHerath/drf-registration
|
7cd0e48d125061c126765f7946401aa5363cef7f
|
[
"MIT"
] | 35
|
2020-09-23T02:22:48.000Z
|
2022-03-25T10:09:48.000Z
|
tests/src/test_models.py
|
DinithHerath/drf-registration
|
7cd0e48d125061c126765f7946401aa5363cef7f
|
[
"MIT"
] | 8
|
2020-11-17T06:56:04.000Z
|
2022-03-29T23:40:23.000Z
|
tests/src/test_models.py
|
DinithHerath/drf-registration
|
7cd0e48d125061c126765f7946401aa5363cef7f
|
[
"MIT"
] | 8
|
2020-10-05T14:56:25.000Z
|
2022-03-28T14:13:26.000Z
|
from drf_registration.utils.users import get_user_model
from tests.utils import BaseModelTestCase
class UserModelTestCases(BaseModelTestCase):
def setUp(self):
self.user_model = get_user_model()
def test_get_user_model(self):
user_model = get_user_model()
self.assertHasModelFields(user_model, ['username', 'email'])
| 27.153846
| 68
| 0.747875
|
a396a36e636d0795626422d97ac170a49abff925
| 7,517
|
py
|
Python
|
exchanges/bitfinex.py
|
assassinen/grid_bot_client
|
e42dfe0fb9ce3716832cf842de4e63848ec8d06f
|
[
"Apache-2.0"
] | 1
|
2021-12-03T12:32:00.000Z
|
2021-12-03T12:32:00.000Z
|
exchanges/bitfinex.py
|
assassinen/grid_bot_client
|
e42dfe0fb9ce3716832cf842de4e63848ec8d06f
|
[
"Apache-2.0"
] | 4
|
2021-09-07T05:55:29.000Z
|
2022-01-04T22:54:34.000Z
|
exchanges/bitfinex.py
|
assassinen/grid_bot_client
|
e42dfe0fb9ce3716832cf842de4e63848ec8d06f
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
import hmac
import time
import json
import requests
from models.log import setup_custom_logger
class BitfinexExchangeInterface:
def __init__(self, key, secret, base_url, api_url, instrument):
self.key = key
self.secret = secret
self.base_url = base_url
self.api_url = api_url
self.url = base_url + api_url
self.access_token = None
self.refresh_token = None
self.expires_in = 0
self.instrument = instrument
self.logger = setup_custom_logger(f'bitfinex_exchange.{self.key}')
def generate_auth_headers(self, path, body):
"""
Generate headers for a signed payload
"""
nonce = str(int(round(time.time() * 1000000)))
signature = "/api/v2/{}{}{}".format(path, nonce, body)
h = hmac.new(self.secret.encode('utf8'), signature.encode('utf8'), hashlib.sha384)
signature = h.hexdigest()
return {
"bfx-nonce": nonce,
"bfx-apikey": self.key,
"bfx-signature": signature
}
def request(self, metod, endpoint, data={}, params=""):
"""
        Send a GET request, or a signed POST request, to the Bitfinex API
@return response
"""
url = '{}/{}'.format(self.url, endpoint)
headers = {"content-type": "application/json"}
try:
if metod == 'GET':
response = requests.get(url=url, headers=headers, json=data)
if metod == 'POST':
sData = json.dumps(data)
headers.update(self.generate_auth_headers(endpoint, sData))
response = requests.post(url=url, headers=headers, json=data)
except Exception as r:
self.logger.info(r)
if response.status_code != 200:
raise Exception(f"Wrong response code: {response.status_code}",
f"{response.request.url}",
f"{response.request.body}",
f"{response.text}")
if endpoint == f'auth/r/orders/{self.instrument}/hist':
self.logger.debug(response.request.url)
self.logger.debug(response.request.body)
self.logger.debug(response.json())
return response.json()
def _post(self, endpoint, data={}, params=""):
return self.request('POST', endpoint, data=data, params=params)
def _get(self, endpoint, data={}, params=""):
return self.request('GET', endpoint, data=data, params=params)
def get_positions(self):
endpoint = 'auth/r/wallets'
wallet = [wallet for wallet in self._post(endpoint) if wallet[0] == 'exchange' and wallet[1] == 'BTC']
wallet = wallet[0] if len(wallet) > 0 else wallet
size = round(wallet[2], 10) if len(wallet) > 0 else 0
return {'average_price': self.get_last_trade_price(), 'size': size}
def get_last_trade_price(self):
endpoint = f'trades/{self.instrument}/hist'
params = {'limit': 1}
return self._get(endpoint, params)[0][3]
def get_last_order_price(self, side):
endpoint = f'auth/r/trades/{self.instrument}/hist'
        # Executed amount (i[4]) is positive for buys and negative for sells.
        if side == 'buy':
            last_order_price = [i[5] for i in self._post(endpoint) if i[4] > 0]
        else:
            last_order_price = [i[5] for i in self._post(endpoint) if i[4] < 0]
return last_order_price[0] if len(last_order_price) > 0 else self.get_last_trade_price()
def get_open_orders(self):
method = f'auth/r/orders/{self.instrument}'
open_orders = self._post(method)
return [self.get_order_params_from_responce(order) for order in open_orders]
def get_order_state(self, order_id):
method = f'auth/r/orders/{self.instrument}/hist'
params = {'id': [order_id]}
order = self._post(method, params)
return self.get_order_params_from_responce(order[0]) if len(order) > 0 \
else {'order_id': order_id, 'order_state': 'cancelled'}
def get_orders_state(self, order_state_ids):
retry = 3
open_orders = self.get_open_orders()
open_orders_ids = [open_order.get('order_id') for open_order in open_orders]
method = f'auth/r/orders/{self.instrument}/hist'
params = {'id': order_state_ids + open_orders_ids}
existing_orders = [self.get_order_params_from_responce(order)
for order in self._post(method, params)] if len(params.get('id')) > 0 else []
existing_orders_ids = [order.get('order_id') for order in existing_orders]
not_found_orders = [{'price': None,
'size': None,
'side': None,
'order_id': order_id,
'status': 'cancelled',
'timestamp': None} for order_id in order_state_ids
if order_id not in existing_orders_ids + open_orders_ids]
while len(not_found_orders) > 0 and retry > 0:
self.logger.info(f"not_found_orders: {[order.get('order_id') for order in not_found_orders]}")
time.sleep(1)
existing_orders = [self.get_order_params_from_responce(order)
for order in self._post(method, params)] if len(params.get('id')) > 0 else []
existing_orders_ids = [order.get('order_id') for order in existing_orders]
not_found_orders = [{'price': None,
'size': None,
'side': None,
'order_id': order_id,
'status': 'cancelled',
'timestamp': None} for order_id in order_state_ids
if order_id not in existing_orders_ids + open_orders_ids]
retry -= 1
return open_orders + existing_orders + not_found_orders
def replace_order_status(self, raw_status):
status_mapp = {'ACTIVE': 'open', 'CANCELED': 'cancelled'}
if 'EXECUTED' in raw_status:
status = 'filled'
else:
status = status_mapp.get(raw_status, None)
if status is None:
self.logger.info(f'invalid status: {raw_status}')
return status
def get_order_params_from_responce(self, responce):
side = 'buy' if responce[7] > 0 else 'sell'
ratio = 1 if responce[7] > 0 else -1
return {'price': responce[16],
'size': responce[7] * ratio,
'side': side,
'order_id': str(responce[0]),
'status': self.replace_order_status(responce[13]),
'timestamp': responce[5]}
def create_order(self, order):
ratio = 1 if order['side'] == 'buy' else -1
method = f'auth/w/order/submit'
params = {'type': 'EXCHANGE LIMIT',
'symbol': self.instrument,
'price': str(order['price']),
'amount': str(order['size'] * ratio)
}
result = self._post(method, params)
if 'SUCCESS' in result:
return [self.get_order_params_from_responce(orders) for orders in result[4]][0]
def cancel_order(self, order_id):
method = f'auth/w/order/cancel'
params = {'id': int(order_id)}
result = self._post(method, params)
if 'SUCCESS' in result:
order_id = result[4][0]
return self.get_order_state(order_id)
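# --- Usage sketch (added for illustration; every value below is a placeholder) ---
# exchange = BitfinexExchangeInterface(
#     key="API_KEY", secret="API_SECRET",
#     base_url="https://api.bitfinex.com/", api_url="v2",
#     instrument="tBTCUSD",
# )
# print(exchange.get_last_trade_price())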
| 43.450867
| 110
| 0.570972
|
f276a7f1cd80ff53a2de922502c0a1392c9a10dc
| 3,754
|
py
|
Python
|
NAIP/ndwi.py
|
dmendelo/earthengine-py-notebooks
|
515567fa2702b436daf449fff02f5c690003cf94
|
[
"MIT"
] | 2
|
2020-02-05T02:36:18.000Z
|
2021-03-23T11:02:39.000Z
|
NAIP/ndwi.py
|
dmendelo/earthengine-py-notebooks
|
515567fa2702b436daf449fff02f5c690003cf94
|
[
"MIT"
] | null | null | null |
NAIP/ndwi.py
|
dmendelo/earthengine-py-notebooks
|
515567fa2702b436daf449fff02f5c690003cf94
|
[
"MIT"
] | 3
|
2021-01-06T17:33:08.000Z
|
2022-02-18T02:14:18.000Z
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/ndwi.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=NAIP/ndwi.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
# print("lng = {}, lat = {}".format(lng, lat))
# lng_lat = ee.Geometry.Point(lng, lat)
naip = collection.filterBounds(polys)
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
ppr = naip_2015.mosaic().clip(polys)
# print(naip_2015.size().getInfo()) # count = 120
vis = {'bands': ['N', 'R', 'G']}
Map.setCenter(lng, lat, 10)
# Map.addLayer(naip_2015,vis)
Map.addLayer(ppr,vis)
# Map.addLayer(fromFT)
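# NDWI (McFeeters): (Green - NIR) / (Green + NIR); pixels above the 0.05
# threshold applied below are kept as water.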
ndwi = ppr.normalizedDifference(['G', 'N'])
ndwiViz = {'min': 0, 'max': 1, 'palette': ['00FFFF', '0000FF']}
ndwiMasked = ndwi.updateMask(ndwi.gte(0.05))
ndwi_bin = ndwiMasked.gt(0)
Map.addLayer(ndwiMasked, ndwiViz)
patch_size = ndwi_bin.connectedPixelCount(256, True)
# Map.addLayer(patch_size)
patch_id = ndwi_bin.connectedComponents(ee.Kernel.plus(1), 256)
Map.addLayer(patch_id)
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
| 36.446602
| 422
| 0.723229
|
e83aa4518d2303822d9d4eaca231341d01a782f6
| 7,500
|
py
|
Python
|
swagger_server/models/start.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
swagger_server/models/start.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
swagger_server/models/start.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Start(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, repetition: int=None, control_frequency: int=None, horizon_in_steps: int=None, model_name: str=None, solver: str=None, d_t_in_seconds: int=None, optimization_type: str=None, single_ev: bool=False): # noqa: E501
"""Start - a model defined in Swagger
:param repetition: The repetition of this Start. # noqa: E501
:type repetition: int
:param control_frequency: The control_frequency of this Start. # noqa: E501
:type control_frequency: int
:param horizon_in_steps: The horizon_in_steps of this Start. # noqa: E501
:type horizon_in_steps: int
:param model_name: The model_name of this Start. # noqa: E501
:type model_name: str
:param solver: The solver of this Start. # noqa: E501
:type solver: str
:param d_t_in_seconds: The d_t_in_seconds of this Start. # noqa: E501
:type d_t_in_seconds: int
:param optimization_type: The optimization_type of this Start. # noqa: E501
:type optimization_type: str
:param single_ev: The single_ev of this Start. # noqa: E501
:type single_ev: bool
"""
self.swagger_types = {
'repetition': int,
'control_frequency': int,
'horizon_in_steps': int,
'model_name': str,
'solver': str,
'd_t_in_seconds': int,
'optimization_type': str,
'single_ev': bool
}
self.attribute_map = {
'repetition': 'repetition',
'control_frequency': 'control_frequency',
'horizon_in_steps': 'horizon_in_steps',
'model_name': 'model_name',
'solver': 'solver',
'd_t_in_seconds': 'dT_in_seconds',
'optimization_type': 'optimization_type',
'single_ev': 'single_ev'
}
self._repetition = repetition
self._control_frequency = control_frequency
self._horizon_in_steps = horizon_in_steps
self._model_name = model_name
self._solver = solver
self._d_t_in_seconds = d_t_in_seconds
self._optimization_type = optimization_type
self._single_ev = single_ev
@classmethod
def from_dict(cls, dikt) -> 'Start':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Start of this Start. # noqa: E501
:rtype: Start
"""
return util.deserialize_model(dikt, cls)
@property
def repetition(self) -> int:
"""Gets the repetition of this Start.
:return: The repetition of this Start.
:rtype: int
"""
return self._repetition
@repetition.setter
def repetition(self, repetition: int):
"""Sets the repetition of this Start.
:param repetition: The repetition of this Start.
:type repetition: int
"""
if repetition is None:
raise ValueError("Invalid value for `repetition`, must not be `None`") # noqa: E501
self._repetition = repetition
@property
def control_frequency(self) -> int:
"""Gets the control_frequency of this Start.
:return: The control_frequency of this Start.
:rtype: int
"""
return self._control_frequency
@control_frequency.setter
def control_frequency(self, control_frequency: int):
"""Sets the control_frequency of this Start.
:param control_frequency: The control_frequency of this Start.
:type control_frequency: int
"""
if control_frequency is None:
raise ValueError("Invalid value for `control_frequency`, must not be `None`") # noqa: E501
self._control_frequency = control_frequency
@property
def horizon_in_steps(self) -> int:
"""Gets the horizon_in_steps of this Start.
:return: The horizon_in_steps of this Start.
:rtype: int
"""
return self._horizon_in_steps
@horizon_in_steps.setter
def horizon_in_steps(self, horizon_in_steps: int):
"""Sets the horizon_in_steps of this Start.
:param horizon_in_steps: The horizon_in_steps of this Start.
:type horizon_in_steps: int
"""
if horizon_in_steps is None:
raise ValueError("Invalid value for `horizon_in_steps`, must not be `None`") # noqa: E501
self._horizon_in_steps = horizon_in_steps
@property
def model_name(self) -> str:
"""Gets the model_name of this Start.
:return: The model_name of this Start.
:rtype: str
"""
return self._model_name
@model_name.setter
def model_name(self, model_name: str):
"""Sets the model_name of this Start.
:param model_name: The model_name of this Start.
:type model_name: str
"""
self._model_name = model_name
@property
def solver(self) -> str:
"""Gets the solver of this Start.
:return: The solver of this Start.
:rtype: str
"""
return self._solver
@solver.setter
def solver(self, solver: str):
"""Sets the solver of this Start.
:param solver: The solver of this Start.
:type solver: str
"""
self._solver = solver
@property
def d_t_in_seconds(self) -> int:
"""Gets the d_t_in_seconds of this Start.
:return: The d_t_in_seconds of this Start.
:rtype: int
"""
return self._d_t_in_seconds
@d_t_in_seconds.setter
def d_t_in_seconds(self, d_t_in_seconds: int):
"""Sets the d_t_in_seconds of this Start.
:param d_t_in_seconds: The d_t_in_seconds of this Start.
:type d_t_in_seconds: int
"""
if d_t_in_seconds is None:
raise ValueError("Invalid value for `d_t_in_seconds`, must not be `None`") # noqa: E501
self._d_t_in_seconds = d_t_in_seconds
@property
def optimization_type(self) -> str:
"""Gets the optimization_type of this Start.
:return: The optimization_type of this Start.
:rtype: str
"""
return self._optimization_type
@optimization_type.setter
def optimization_type(self, optimization_type: str):
"""Sets the optimization_type of this Start.
:param optimization_type: The optimization_type of this Start.
:type optimization_type: str
"""
if optimization_type is None:
raise ValueError("Invalid value for `optimization_type`, must not be `None`") # noqa: E501
self._optimization_type = optimization_type
@property
def single_ev(self) -> bool:
"""Gets the single_ev of this Start.
:return: The single_ev of this Start.
:rtype: bool
"""
return self._single_ev
@single_ev.setter
def single_ev(self, single_ev: bool):
"""Sets the single_ev of this Start.
:param single_ev: The single_ev of this Start.
:type single_ev: bool
"""
self._single_ev = single_ev
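# --- Usage sketch (added for illustration; the payload values are invented) ---
# Note the wire-format key 'dT_in_seconds' from attribute_map above, rather than
# the Python attribute name 'd_t_in_seconds'.
# payload = {
#     "repetition": 1, "control_frequency": 60, "horizon_in_steps": 24,
#     "model_name": "ReferenceModel", "solver": "ipopt",
#     "dT_in_seconds": 3600, "optimization_type": "MPC", "single_ev": False,
# }
# start = Start.from_dict(payload)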
| 29.182879
| 234
| 0.622533
|
56401f393f7944f54169287c9feb2c54550cc76a
| 4,192
|
py
|
Python
|
Snake_test1.py
|
nh8157/LANIF-SCI
|
06172a1f3e5b7d665088b2771d998a1f7fbd8c33
|
[
"CC-BY-3.0"
] | null | null | null |
Snake_test1.py
|
nh8157/LANIF-SCI
|
06172a1f3e5b7d665088b2771d998a1f7fbd8c33
|
[
"CC-BY-3.0"
] | null | null | null |
Snake_test1.py
|
nh8157/LANIF-SCI
|
06172a1f3e5b7d665088b2771d998a1f7fbd8c33
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 22:26:51 2019
@author: wangding
"""
import pygame
from network import Network
class Player():
width = height = 50
def __init__(self, startx, starty, body, speed, direction, p_update, score):
self.x = startx
self.y = starty
self.velocity = 2
self.color = (255,0,0)
self.body = body
self.speed = speed
self.direction = direction
self.p_update = p_update
self.score = score
self.canvas = Canvas(self.width, self.height, "Testing...")
def body_draw(self, color):
for ps in self.body:
pygame.draw.rect(self.canvas.screen, self.color, pygame.Rect(self.x, self.y, self.speed * 2, self.speed * 2))
#Set the bounds
def bounds_set(self):
if self.x < 0 or self.x >= width:
gameover()
if self.y < 0 or self.y >= height:
gameover()
#Hit self
def self_hit(self):
for rect in self.body[1:]:
if [self.x, self.y] == rect:
gameover()
def move(self, dirn):
"""
:param dirn: 0 - 3 (right, left, up, down)
:return: None
"""
if dirn == 0:
self.x += self.velocity
elif dirn == 1:
self.x -= self.velocity
elif dirn == 2:
self.y -= self.velocity
else:
self.y += self.velocity
class Game:
def __init__(self, w, h):
self.net = Network()
self.width = w
self.height = h
self.player = Player(400, 200, [[400, 200], [500, 200],[600, 200]], 5, 'LEFT', '', 0)
self.player2 = Player(400, 300, [[400, 300], [500, 300],[600, 300]], 5, 'RIGHT', '', 0)
self.canvas = Canvas(self.width, self.height, "Testing...")
def run(self):
clock = pygame.time.Clock()
run = True
while run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
                if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT]:
if self.player.x <= self.width - self.player.velocity:
self.player.move(0)
if keys[pygame.K_LEFT]:
if self.player.x >= self.player.velocity:
self.player.move(1)
if keys[pygame.K_UP]:
if self.player.y >= self.player.velocity:
self.player.move(2)
if keys[pygame.K_DOWN]:
if self.player.y <= self.height - self.player.velocity:
self.player.move(3)
# Send Network Stuff
self.player2.x, self.player2.y = self.parse_data(self.send_data())
# Update Canvas
#self.canvas.draw_background()
self.player.body_draw((255, 0, 0))
self.player2.body_draw((255, 0, 0))
self.canvas.update()
pygame.quit()
def send_data(self):
"""
Send position to server
:return: None
"""
data = str(self.net.id) + ":" + str(self.player.x) + "," + str(self.player.y)
reply = self.net.send(data)
return reply
@staticmethod
def parse_data(data):
try:
d = data.split(":")[1].split(",")
return int(d[0]), int(d[1])
except:
return 0,0
class Canvas:
def __init__(self, w, h, name="None"):
self.width = w
self.height = h
self.screen = pygame.display.set_mode((w,h))
pygame.display.set_caption(name)
@staticmethod
def update():
pygame.display.update()
def draw_text(self, text, size, x, y):
pygame.font.init()
font = pygame.font.SysFont("comicsans", size)
render = font.render(text, 1, (0,0,0))
self.screen.draw(render, (x,y))
def get_canvas(self):
return self.screen
def draw_background(self):
self.screen.fill((255,255,255))
g = Game(720, 450)
g.run()
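# Illustrative sketch (not part of the original file): the wire format used by
# send_data/parse_data is "<id>:<x>,<y>"; parse_data extracts the peer position
# and falls back to (0, 0) on malformed input.
# Game.parse_data("1:350,210")   # -> (350, 210)
# Game.parse_data("garbage")     # -> (0, 0)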
| 26.36478
| 121
| 0.514552
|
4d4d2cc69b0eec34b626a84ee237c1c4c4c540a2
| 2,986
|
py
|
Python
|
paddlespeech/cli/base_commands.py
|
Honei/PaddleSpeech
|
83b941fc439696b1dd8fc0d044a5b29309574c3b
|
[
"Apache-2.0"
] | 1
|
2021-12-23T01:04:00.000Z
|
2021-12-23T01:04:00.000Z
|
paddlespeech/cli/base_commands.py
|
catcat0921/PaddleSpeech
|
775c4befbd4253eab9440c996f267683e7a2a4f0
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/cli/base_commands.py
|
catcat0921/PaddleSpeech
|
775c4befbd4253eab9440c996f267683e7a2a4f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from .entry import commands
from .utils import cli_register
from .utils import explicit_command_register
from .utils import get_command
__all__ = [
'BaseCommand',
'HelpCommand',
]
@cli_register(name='paddlespeech')
class BaseCommand:
def execute(self, argv: List[str]) -> bool:
help = get_command('paddlespeech.help')
return help().execute(argv)
@cli_register(name='paddlespeech.help', description='Show help for commands.')
class HelpCommand:
def execute(self, argv: List[str]) -> bool:
msg = 'Usage:\n'
msg += ' paddlespeech <command> <options>\n\n'
msg += 'Commands:\n'
for command, detail in commands['paddlespeech'].items():
if command.startswith('_'):
continue
if '_description' not in detail:
continue
msg += ' {:<15} {}\n'.format(command,
detail['_description'])
print(msg)
return True
@cli_register(
name='paddlespeech.version',
description='Show version and commit id of current package.')
class VersionCommand:
def execute(self, argv: List[str]) -> bool:
try:
from .. import __version__
version = __version__
except ImportError:
version = 'Not an official release'
try:
from .. import __commit__
commit_id = __commit__
except ImportError:
commit_id = 'Not found'
msg = 'Package Version:\n'
msg += ' {}\n\n'.format(version)
msg += 'Commit ID:\n'
msg += ' {}\n\n'.format(commit_id)
print(msg)
return True
# Dynamic import when running specific command
_commands = {
'asr': ['Speech to text infer command.', 'ASRExecutor'],
'cls': ['Audio classification infer command.', 'CLSExecutor'],
'st': ['Speech translation infer command.', 'STExecutor'],
'text': ['Text command.', 'TextExecutor'],
'tts': ['Text to Speech infer command.', 'TTSExecutor'],
'vector': ['Speech to vector embedding infer command.', 'VectorExecutor'],
}
for com, info in _commands.items():
explicit_command_register(
name='paddlespeech.{}'.format(com),
description=info[0],
cls='paddlespeech.cli.{}.{}'.format(com, info[1]))
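# Illustrative sketch (not part of the original file): commands registered above,
# directly via @cli_register (e.g. 'paddlespeech.version') or lazily via
# explicit_command_register, are looked up the same way BaseCommand does it:
# cmd_cls = get_command('paddlespeech.version')
# cmd_cls().execute([])   # prints the package version and commit id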
| 31.765957
| 78
| 0.626256
|
8f2eeeb6e47633c4269214f5544171c1f6425eab
| 1,762
|
py
|
Python
|
chapter6/webxample-package/setup.py
|
lixin940207/expert_python_programming
|
26c60e29a21651d9b1f91abc0602b7cd7412869b
|
[
"BSD-3-Clause"
] | 189
|
2016-05-23T22:33:11.000Z
|
2022-03-31T06:39:35.000Z
|
chapter6/webxample-package/setup.py
|
nthssss/Expert-Python-Programming_Second-Edition
|
2ccdbd302dea96aecc3aef04aaf08b0cb937f30a
|
[
"BSD-3-Clause"
] | 2
|
2018-02-26T04:26:43.000Z
|
2020-12-01T07:14:53.000Z
|
chapter6/webxample-package/setup.py
|
nthssss/Expert-Python-Programming_Second-Edition
|
2ccdbd302dea96aecc3aef04aaf08b0cb937f30a
|
[
"BSD-3-Clause"
] | 93
|
2016-12-19T17:37:27.000Z
|
2021-12-09T07:35:48.000Z
|
import os
from setuptools import setup
from setuptools import find_packages
from distutils.cmd import Command
from distutils.command.build import build as _build
try:
from django.core.management.commands.compilemessages \
import Command as CompileCommand
except ImportError:
# note: during installation django may not be available
CompileCommand = None
# this environment variable is required
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "webxample.conf.settings"
)
class build_messages(Command):
""" Custom command for building gettext messages in Django
"""
description = """compile gettext messages"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if CompileCommand:
CompileCommand().handle(verbosity=2, locales=[], exclude=[])
else:
raise RuntimeError("could not build translations")
class build(_build):
""" Overriden build command that adds additional build steps
"""
sub_commands = [
('build_messages', None),
('build_sass', None),
] + _build.sub_commands
setup(
name='webxample',
setup_requires=[
'libsass >= 0.6.0',
'django >= 1.9.2',
],
install_requires=[
'django >= 1.9.2',
'gunicorn == 19.4.5',
'djangorestframework == 3.3.2',
'django-allauth == 0.24.1',
],
packages=find_packages('.'),
sass_manifests={
'webxample.myapp': ('static/sass', 'static/css')
},
cmdclass={
'build_messages': build_messages,
'build': build,
},
entry_points={
'console_scripts': {
'webxample = webxample.manage:main',
}
}
)
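# Illustrative note (not part of the original file): because the overridden
# `build` command prepends ('build_messages', None) to sub_commands with a
# None predicate, a plain
#   $ python setup.py build
# also runs the gettext compilation (and, assuming libsass's setuptools
# integration registers it, the build_sass step) before the standard build steps.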
| 23.184211
| 72
| 0.623156
|
9adf4bac37c3bdfa613e0ef8d875a5ed1bc8c5c5
| 34,801
|
py
|
Python
|
pycognito/__init__.py
|
nook-io/pycognito
|
195be9d23798ea26dbf4318eebc8c1d0fc7f6b28
|
[
"Apache-2.0"
] | null | null | null |
pycognito/__init__.py
|
nook-io/pycognito
|
195be9d23798ea26dbf4318eebc8c1d0fc7f6b28
|
[
"Apache-2.0"
] | null | null | null |
pycognito/__init__.py
|
nook-io/pycognito
|
195be9d23798ea26dbf4318eebc8c1d0fc7f6b28
|
[
"Apache-2.0"
] | null | null | null |
import ast
import datetime
import re
import boto3
from envs import env
from jose import JWTError, jwt
import requests
from .aws_srp import AWSSRP
from .exceptions import TokenVerificationException
def cognito_to_dict(attr_list, attr_map=None):
if attr_map is None:
attr_map = {}
attr_dict = dict()
for attr in attr_list:
name = attr.get("Name")
value = attr.get("Value")
if value in ["true", "false"]:
value = ast.literal_eval(value.capitalize())
name = attr_map.get(name, name)
attr_dict[name] = value
return attr_dict
def dict_to_cognito(attributes, attr_map=None):
"""
:param attributes: Dictionary of User Pool attribute names/values
:return: list of User Pool attribute formatted dicts: {'Name': <attr_name>, 'Value': <attr_value>}
"""
if attr_map is None:
attr_map = {}
for key, value in attr_map.items():
if value in attributes.keys():
attributes[key] = attributes.pop(value)
return [{"Name": key, "Value": value} for key, value in attributes.items()]
def camel_to_snake(camel_str):
"""
:param camel_str: string
:return: string converted from a CamelCase to a snake_case
"""
return re.sub(
"([a-z0-9])([A-Z])", r"\1_\2", re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_str)
).lower()
def snake_to_camel(snake_str):
"""
:param snake_str: string
:return: string converted from a snake_case to a CamelCase
"""
components = snake_str.split("_")
return "".join(x.title() for x in components)
class UserObj:
def __init__(
self, username, attribute_list, cognito_obj, metadata=None, attr_map=None
):
"""
:param username:
:param attribute_list:
:param metadata: Dictionary of User metadata
"""
self.username = username
self._cognito = cognito_obj
self._attr_map = {} if attr_map is None else attr_map
self._data = cognito_to_dict(attribute_list, self._attr_map)
self.sub = self._data.pop("sub", None)
self.email_verified = self._data.pop("email_verified", None)
self.phone_number_verified = self._data.pop("phone_number_verified", None)
self._metadata = {} if metadata is None else metadata
def __repr__(self):
return "<{class_name}: {uni}>".format(
class_name=self.__class__.__name__, uni=self.__unicode__()
)
def __unicode__(self):
return self.username
def __getattr__(self, name):
if name in list(self.__dict__.get("_data", {}).keys()):
return self._data.get(name)
if name in list(self.__dict__.get("_metadata", {}).keys()):
return self._metadata.get(name)
raise AttributeError(name)
def __setattr__(self, name, value):
if name in list(self.__dict__.get("_data", {}).keys()):
self._data[name] = value
else:
super().__setattr__(name, value)
def save(self, admin=False):
if admin:
self._cognito.admin_update_profile(self._data, self._attr_map)
return
self._cognito.update_profile(self._data, self._attr_map)
def delete(self, admin=False):
if admin:
self._cognito.admin_delete_user()
return
self._cognito.delete_user()
class GroupObj:
def __init__(self, group_data, cognito_obj):
"""
:param group_data: a dictionary with information about a group
:param cognito_obj: an instance of the Cognito class
"""
self._data = group_data
self._cognito = cognito_obj
self.group_name = self._data.pop("GroupName", None)
self.description = self._data.pop("Description", None)
self.creation_date = self._data.pop("CreationDate", None)
self.last_modified_date = self._data.pop("LastModifiedDate", None)
self.role_arn = self._data.pop("RoleArn", None)
self.precedence = self._data.pop("Precedence", None)
def __unicode__(self):
return self.group_name
def __repr__(self):
return "<{class_name}: {uni}>".format(
class_name=self.__class__.__name__, uni=self.__unicode__()
)
class Cognito:
user_class = UserObj
group_class = GroupObj
def __init__(
self,
user_pool_id,
client_id,
user_pool_region=None,
username=None,
id_token=None,
refresh_token=None,
access_token=None,
client_secret=None,
device=None,
access_key=None,
secret_key=None,
session=None,
botocore_config=None,
client_ip=None,
use_admin=True
):
"""
:param user_pool_id: Cognito User Pool ID
:param client_id: Cognito User Pool Application client ID
:param username: User Pool username
:param id_token: ID Token returned by authentication
:param refresh_token: Refresh Token returned by authentication
:param access_token: Access Token returned by authentication
:param access_key: AWS IAM access key
:param secret_key: AWS IAM secret key
:param session: Boto3 client session
:param botocore_config: Botocore Config object for the client
"""
self.user_pool_id = user_pool_id
self.client_id = client_id
self.user_pool_region = (
user_pool_region if user_pool_region else self.user_pool_id.split("_")[0]
)
self.username = username
self.id_token = id_token
self.access_token = access_token
self.refresh_token = refresh_token
self.client_secret = client_secret
self.device = device
self.token_type = None
self.id_claims = None
self.access_claims = None
self.custom_attributes = None
self.base_attributes = None
self.pool_jwk = None
self.client_ip = client_ip
self.session = session
self.use_admin = use_admin
boto3_client_kwargs = {}
if access_key and secret_key:
boto3_client_kwargs["aws_access_key_id"] = access_key
boto3_client_kwargs["aws_secret_access_key"] = secret_key
if self.user_pool_region:
boto3_client_kwargs["region_name"] = self.user_pool_region
if botocore_config:
boto3_client_kwargs["config"] = botocore_config
if session:
self.client = session.client("cognito-idp", **boto3_client_kwargs)
else:
self.client = boto3.client("cognito-idp", **boto3_client_kwargs)
@property
def user_pool_url(self):
return f"https://cognito-idp.{self.user_pool_region}.amazonaws.com/{self.user_pool_id}"
def get_keys(self):
if self.pool_jwk:
return self.pool_jwk
# Check for the dictionary in environment variables.
pool_jwk_env = env("COGNITO_JWKS", {}, var_type="dict")
if pool_jwk_env:
self.pool_jwk = pool_jwk_env
# If it is not there use the requests library to get it
else:
self.pool_jwk = requests.get(
f"{self.user_pool_url}/.well-known/jwks.json"
).json()
return self.pool_jwk
def get_key(self, kid):
keys = self.get_keys().get("keys")
key = list(filter(lambda x: x.get("kid") == kid, keys))
return key[0]
def verify_tokens(self):
"""
Verify the current id_token and access_token. An exception will be
thrown if they do not pass verification. It can be useful to call this
method after creating a Cognito instance where you've provided
externally-remembered token values.
"""
self.verify_token(self.id_token, "id_token", "id")
self.verify_token(self.access_token, "access_token", "access")
def verify_token(self, token, id_name, token_use):
# https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
kid = jwt.get_unverified_header(token).get("kid")
hmac_key = self.get_key(kid)
try:
verified = jwt.decode(
token,
hmac_key,
algorithms=["RS256"],
audience=self.client_id,
issuer=self.user_pool_url,
options={
"require_aud": token_use != "access",
"require_iss": True,
"require_exp": True,
},
)
except JWTError as err:
raise TokenVerificationException(
f"Your {id_name!r} token could not be verified ({err})."
) from None
token_use_verified = verified.get("token_use") == token_use
if not token_use_verified:
raise TokenVerificationException(
f"Your {id_name!r} token use ({token_use!r}) could not be verified."
)
setattr(self, id_name, token)
setattr(self, f"{token_use}_claims", verified)
return verified
def get_user_obj(
self, username=None, attribute_list=None, metadata=None, attr_map=None
):
"""
        Returns an instance of self.user_class for the specified user.
:param username: Username of the user
:param attribute_list: List of tuples that represent the user's
attributes as returned by the admin_get_user or get_user boto3 methods
:param metadata: Metadata about the user
:param attr_map: Dictionary that maps the Cognito attribute names to
what we'd like to display to the users
:return:
"""
return self.user_class(
username=username,
attribute_list=attribute_list,
cognito_obj=self,
metadata=metadata,
attr_map=attr_map,
)
def get_group_obj(self, group_data):
"""
Instantiates the self.group_class
:param group_data: a dictionary with information about a group
:return: an instance of the self.group_class
"""
return self.group_class(group_data=group_data, cognito_obj=self)
def switch_session(self, session):
"""
Primarily used for unit testing so we can take advantage of the
placebo library (https://githhub.com/garnaat/placebo)
:param session: boto3 session
:return:
"""
self.client = session.client("cognito-idp")
def check_token(self, renew=True):
"""
Checks the exp attribute of the access_token and either refreshes
the tokens by calling the renew_access_tokens method or does nothing
:param renew: bool indicating whether to refresh on expiration
:return: bool indicating whether access_token has expired
"""
if not self.access_token:
raise AttributeError("Access Token Required to Check Token")
now = datetime.datetime.now()
dec_access_token = jwt.get_unverified_claims(self.access_token)
if now > datetime.datetime.fromtimestamp(dec_access_token["exp"]):
expired = True
if renew:
self.renew_access_token()
else:
expired = False
return expired
def set_base_attributes(self, **kwargs):
self.base_attributes = kwargs
def add_custom_attributes(self, **kwargs):
custom_key = "custom"
custom_attributes = {}
for old_key, value in kwargs.items():
new_key = custom_key + ":" + old_key
custom_attributes[new_key] = value
self.custom_attributes = custom_attributes
def register(self, password, username=None, attr_map=None, client_metadata=None, user_context_data=None):
"""
Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
        updated_at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:param client_metadata: Metadata about the user that will be used for ClientMetadata
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
}
"""
if not username:
username = self.username
if self.base_attributes is None:
attributes = {}
else:
attributes = self.base_attributes.copy()
if self.custom_attributes:
attributes.update(self.custom_attributes)
user_attributes = dict_to_cognito(attributes, attr_map)
aws = AWSSRP(
username=username,
password=password,
user_pool_id=self.user_pool_id,
client_id=self.client_id,
client=self.client,
client_secret=self.client_secret,
)
response = aws.register_user(self.client, user_attributes, client_metadata, user_context_data)
attributes.update(username=username, password=password)
self._set_attributes(response, attributes)
response.pop("ResponseMetadata")
return response
def admin_confirm_sign_up(self, username=None):
"""
Confirms user registration as an admin without using a confirmation
code. Works on any user.
:param username: User's username
:return:
"""
if not username:
username = self.username
self.client.admin_confirm_sign_up(
UserPoolId=self.user_pool_id,
Username=username,
)
def confirm_sign_up(self, confirmation_code, username=None, user_context_data=None):
"""
        Confirm user registration using the confirmation code that is sent
        via email or text message.
:param confirmation_code: Confirmation code sent via text or email
:param username: User's username
:return:
"""
if not username:
username = self.username
params = {
"ClientId": self.client_id,
"Username": username,
"ConfirmationCode": confirmation_code,
"UserContextData": user_context_data,
}
self._add_secret_hash(params, "SecretHash")
self.client.confirm_sign_up(**params)
def resend_confirmation_code(self, username=None, user_context_data=None):
"""
Trigger resending the confirmation code message.
:param username: User's username
:return:
"""
if not username:
username = self.username
params = {
"ClientId": self.client_id,
"Username": username,
"UserContextData": user_context_data
}
self._add_secret_hash(params, "SecretHash")
self.client.resend_confirmation_code(**params)
def admin_authenticate(self, password):
"""
Authenticate the user using admin super privileges
:param password: User's password
:return:
"""
auth_params = {"USERNAME": self.username, "PASSWORD": password}
self._add_secret_hash(auth_params, "SECRET_HASH")
tokens = self.client.admin_initiate_auth(
UserPoolId=self.user_pool_id,
ClientId=self.client_id,
# AuthFlow='USER_SRP_AUTH'|'REFRESH_TOKEN_AUTH'|'REFRESH_TOKEN'|'CUSTOM_AUTH'|'ADMIN_NO_SRP_AUTH',
AuthFlow="ADMIN_NO_SRP_AUTH",
AuthParameters=auth_params,
)
self._set_tokens(tokens)
def authenticate(
self,
password,
username=None,
new_password=None,
otp_code=None,
save_device=False,
device_group_key=None,
device_key=None,
user_context_data=None,
attr_map=None,
):
"""
Authenticate the user using the SRP protocol
:param password:
:param new_password:
:param otp_code:
:param save_device:
:param device_group_key:
:param device_key:
:param user_context_data:
:return:
"""
        # Prefix attribute keys before passing attr_map on; iterate over a copy of
        # the keys so the dict is not mutated during iteration, and tolerate
        # attr_map=None.
        if attr_map:
            for k in list(attr_map.keys()):
                attr_map[f"userAttributes.{k}"] = attr_map.pop(k)
aws = AWSSRP(
username=username,
password=password,
new_password=new_password,
otp_code=otp_code,
user_pool_id=self.user_pool_id,
client_id=self.client_id,
client=self.client,
client_secret=self.client_secret,
save_device=save_device,
device_group_key=device_group_key,
device_key=device_key,
)
if self.use_admin:
tokens = aws.admin_authenticate_user(user_context_data=user_context_data, attr_map=attr_map)
else:
tokens = aws.authenticate_user(user_context_data=user_context_data, attr_map=attr_map)
self._set_tokens(tokens)
def new_password_challenge(self, password, new_password):
"""
Respond to the new password challenge using the SRP protocol
        :param password: The user's current password
        :param new_password: The user's new password
"""
aws = AWSSRP(
username=self.username,
password=password,
pool_id=self.user_pool_id,
client_id=self.client_id,
client=self.client,
client_secret=self.client_secret,
)
tokens = aws.set_new_password_challenge(new_password)
self._set_tokens(tokens)
def logout(self):
"""
Logs the user out of all clients and removes the expires_in,
expires_datetime, id_token, refresh_token, access_token, and token_type
attributes
:return:
"""
self.client.global_sign_out(AccessToken=self.access_token)
self.id_token = None
self.refresh_token = None
self.access_token = None
self.token_type = None
def admin_update_profile(self, attrs, attr_map=None):
user_attrs = dict_to_cognito(attrs, attr_map)
self.client.admin_update_user_attributes(
UserPoolId=self.user_pool_id,
Username=self.username,
UserAttributes=user_attrs,
)
def update_profile(self, attrs, attr_map=None):
"""
Updates User attributes
:param attrs: Dictionary of attribute name, values
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
"""
user_attrs = dict_to_cognito(attrs, attr_map)
self.client.update_user_attributes(
UserAttributes=user_attrs, AccessToken=self.access_token
)
def get_user(self, attr_map=None):
"""
Returns a UserObj (or whatever the self.user_class is) by using the
user's access token.
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return:
"""
user = self.client.get_user(AccessToken=self.access_token)
user_metadata = {
"username": user.get("Username"),
"id_token": self.id_token,
"access_token": self.access_token,
"refresh_token": self.refresh_token,
}
return self.get_user_obj(
username=self.username,
attribute_list=user.get("UserAttributes"),
metadata=user_metadata,
attr_map=attr_map,
)
def get_users(self, attr_map=None):
"""
Returns all users for a user pool. Returns instances of the
self.user_class.
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return: list of self.user_class
"""
response = self.client.list_users(UserPoolId=self.user_pool_id)
user_list = response.get("Users")
page_token = response.get("PaginationToken")
while page_token:
response = self.client.list_users(
UserPoolId=self.user_pool_id, PaginationToken=page_token
)
user_list.extend(response.get("Users"))
page_token = response.get("PaginationToken")
return [
self.get_user_obj(
user.get("Username"),
attribute_list=user.get("Attributes"),
metadata={"username": user.get("Username")},
attr_map=attr_map,
)
for user in user_list
]
def admin_get_user(self, username=None, attr_map=None):
"""
Get the user's details using admin super privileges.
:param username: Username to look up
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return: UserObj object
"""
if not username:
username = self.username
user = self.client.admin_get_user(
UserPoolId=self.user_pool_id, Username=username
)
user_metadata = {
"enabled": user.get("Enabled"),
"user_status": user.get("UserStatus"),
"username": user.get("Username"),
"id_token": self.id_token,
"access_token": self.access_token,
"refresh_token": self.refresh_token,
}
return self.get_user_obj(
username=self.username,
attribute_list=user.get("UserAttributes"),
metadata=user_metadata,
attr_map=attr_map,
)
def admin_create_user(
self,
username,
temporary_password="",
additional_kwargs=None,
attr_map=None,
**kwargs,
):
"""
Create a user using admin super privileges.
:param username: User Pool username
:param temporary_password: The temporary password to give the user.
Leave blank to make Cognito generate a temporary password for the user.
:param additional_kwargs: Dictionary with request params, such as MessageAction.
:param attr_map: Attribute map to Cognito's attributes
:param kwargs: Additional User Pool attributes
:return response: Response from Cognito
"""
if additional_kwargs is None:
additional_kwargs = {}
response = self.client.admin_create_user(
UserPoolId=self.user_pool_id,
Username=username,
UserAttributes=dict_to_cognito(kwargs, attr_map),
TemporaryPassword=temporary_password,
**additional_kwargs,
)
kwargs.update(username=username)
self._set_attributes(response, kwargs)
response.pop("ResponseMetadata")
return response
def send_verification(self, attribute="email"):
"""
Sends the user an attribute verification code for the specified attribute name.
:param attribute: Attribute to confirm
"""
self.check_token()
self.client.get_user_attribute_verification_code(
AccessToken=self.access_token, AttributeName=attribute
)
def validate_verification(self, confirmation_code, attribute="email"):
"""
Verifies the specified user attributes in the user pool.
        :param confirmation_code: Code sent to the user upon initiating verification
:param attribute: Attribute to confirm
"""
self.check_token()
return self.client.verify_user_attribute(
AccessToken=self.access_token,
AttributeName=attribute,
Code=confirmation_code,
)
def renew_access_token(self, user_context_data=None):
"""
Sets a new access token on the User using the refresh token.
"""
auth_params = {"REFRESH_TOKEN": self.refresh_token}
self._add_secret_hash(auth_params, "SECRET_HASH")
refresh_response = self.client.initiate_auth(
ClientId=self.client_id,
AuthFlow="REFRESH_TOKEN_AUTH",
AuthParameters=auth_params,
UserContextData=dict(EncodedData=user_context_data['EncodedData']),
)
self._set_tokens(refresh_response)
def admin_renew_access_token(self, user_context_data=None):
"""
Sets a new access token on the User using the refresh token.
"""
auth_params = {"REFRESH_TOKEN": self.refresh_token}
self._add_secret_hash(auth_params, "SECRET_HASH")
refresh_response = self.client.admin_initiate_auth(
ClientId=self.client_id,
AuthFlow="REFRESH_TOKEN_AUTH",
AuthParameters=auth_params,
ContextData=user_context_data['EncodedData'],
)
self._set_tokens(refresh_response)
def initiate_forgot_password(self, username=None, user_context_data=None):
"""
Sends a verification code to the user to use to change their password.
"""
if not username:
username = self.username
params = {
"ClientId": self.client_id,
"Username": username,
"UserContextData": dict(EncodedData=user_context_data['EncodedData'])
}
self._add_secret_hash(params, "SecretHash")
self.client.forgot_password(**params)
def delete_user(self):
self.client.delete_user(AccessToken=self.access_token)
def admin_delete_user(self):
self.client.admin_delete_user(
UserPoolId=self.user_pool_id, Username=self.username
)
def admin_reset_password(self, username, client_metadata=None):
self.client.admin_reset_user_password(
UserPoolId=self.user_pool_id,
Username=username,
ClientMetadata=client_metadata,
)
def confirm_forgot_password(self, confirmation_code, password, user_context_data=None):
"""
Allows a user to enter a code provided when they reset their password
to update their password.
:param confirmation_code: The confirmation code sent by a user's request
to retrieve a forgotten password
:param password: New password
"""
params = {
"ClientId": self.client_id,
"Username": self.username,
"ConfirmationCode": confirmation_code,
"Password": password,
"UserContextData": dict(EncodedData=user_context_data['EncodedData'])
}
self._add_secret_hash(params, "SecretHash")
response = self.client.confirm_forgot_password(**params)
self._set_attributes(response, {"password": password})
def change_password(self, previous_password, proposed_password):
"""
Change the User password
"""
self.check_token()
response = self.client.change_password(
PreviousPassword=previous_password,
ProposedPassword=proposed_password,
AccessToken=self.access_token,
)
self._set_attributes(response, {"password": proposed_password})
def _add_secret_hash(self, parameters, key):
"""
Helper function that computes SecretHash and adds it
to a parameters dictionary at a specified key
"""
if self.client_secret is not None:
secret_hash = AWSSRP.get_secret_hash(
self.username, self.client_id, self.client_secret
)
parameters[key] = secret_hash
def _set_tokens(self, tokens):
"""
Helper function to verify and set token attributes based on a Cognito
AuthenticationResult.
"""
self.verify_token(tokens["AuthenticationResult"]["IdToken"], "id_token", "id")
if "RefreshToken" in tokens["AuthenticationResult"]:
self.refresh_token = tokens["AuthenticationResult"]["RefreshToken"]
self.verify_token(
tokens["AuthenticationResult"]["AccessToken"], "access_token", "access"
)
self.token_type = tokens["AuthenticationResult"]["TokenType"]
def _set_attributes(self, response, attribute_dict):
"""
Set user attributes based on response code
:param response: HTTP response from Cognito
:attribute dict: Dictionary of attribute name and values
"""
status_code = response.get(
"HTTPStatusCode", response["ResponseMetadata"]["HTTPStatusCode"]
)
if status_code == 200:
for key, value in attribute_dict.items():
setattr(self, key, value)
def get_group(self, group_name):
"""
Get a group by a name
:param group_name: name of a group
:return: instance of the self.group_class
"""
response = self.client.get_group(
GroupName=group_name, UserPoolId=self.user_pool_id
)
return self.get_group_obj(response.get("Group"))
def get_groups(self):
"""
Returns all groups for a user pool. Returns instances of the
self.group_class.
:return: list of instances
"""
response = self.client.list_groups(UserPoolId=self.user_pool_id)
return [self.get_group_obj(group_data) for group_data in response.get("Groups")]
def admin_add_user_to_group(self, username, group_name):
"""
Add the user to the specified group
:param username: the username
:param group_name: the name of the group to add the user to
:return:
"""
self.client.admin_add_user_to_group(
UserPoolId=self.user_pool_id,
Username=username,
GroupName=group_name,
)
def admin_remove_user_from_group(self, username, group_name):
"""
Remove the user from the specified group
:param username: the username
:param group_name: the name of the group to remove the user from
:return:
"""
self.client.admin_remove_user_from_group(
UserPoolId=self.user_pool_id,
Username=username,
GroupName=group_name,
)
def admin_list_groups_for_user(self, username):
"""
Get the list of groups a user belongs to
:param username:
:return: List
"""
def process_groups_response(groups_response):
groups = []
for group_dict in groups_response["Groups"]:
groups.append(group_dict["GroupName"])
return groups
groups_response = self.client.admin_list_groups_for_user(
Username=username, UserPoolId=self.user_pool_id, Limit=60
)
user_groups = process_groups_response(groups_response)
while "NextToken" in groups_response.keys():
groups_response = self.client.admin_list_groups_for_user(
Username=username,
UserPoolId=self.user_pool_id,
Limit=60,
NextToken=groups_response["NextToken"],
)
new_groups = process_groups_response(groups_response)
user_groups.extend(new_groups)
return user_groups
def admin_enable_user(self, username):
"""
Enable a user
:param username:
:return:
"""
self.client.admin_enable_user(
UserPoolId=self.user_pool_id,
Username=username,
)
def admin_disable_user(self, username):
"""
Disable a user
:param username:
:return:
"""
self.client.admin_disable_user(
UserPoolId=self.user_pool_id,
            Username=username,
)
def admin_create_identity_provider(
self, pool_id, provider_name, provider_type, provider_details, **kwargs
):
"""
Creates an identity provider
:param pool_id: The user pool ID
:param provider_name: The identity provider name
:param provider_type: The identity provider type
:param provider_details: The identity provider details
:return:
"""
self.client.create_identity_provider(
UserPoolId=pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails=provider_details,
**kwargs,
)
def associate_software_token(self):
"""
Returns a unique generated shared secret key code for the user account.
The request takes an access token or a session string, but not both.
"""
response = self.client.associate_software_token(AccessToken=self.access_token)
return response["SecretCode"]
def verify_software_token(self, user_code):
"""
Use this API to register a user's entered TOTP code and mark the user's software token
MFA status as "verified" if successful.
The request takes an access token or a session string, but not both.
"""
params = {"AccessToken": self.access_token, "UserCode": user_code}
if self.session is not None:
params.update(**{"Session": self.session})
response = self.client.verify_software_token(**params)
return response["Status"]
def set_user_mfa_preference(self):
"""
Set the user's multi-factor authentication (MFA) method preference, including which MFA
factors are enabled and if any are preferred. Only one factor can be set as preferred.
The preferred MFA factor will be used to authenticate a user if multiple factors are
enabled. If multiple options are enabled and no preference is set, a challenge to choose
an MFA option will be returned during sign in. If an MFA type is enabled for a user,
the user will be prompted for MFA during all sign in attempts, unless device tracking is
turned on and the device has been trusted. If you would like MFA to be applied selectively
based on the assessed risk level of sign in attempts, disable MFA for users and turn on
Adaptive Authentication for the user pool.
"""
otp_settings = {"Enabled": True, "PreferredMfa": True}
self.client.set_user_mfa_preference(AccessToken=self.access_token, SoftwareTokenMfaSettings=otp_settings)
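# Illustrative usage sketch (not part of the original file; the pool id, client
# id and credentials below are placeholders).
# u = Cognito(user_pool_id="eu-west-1_EXAMPLE", client_id="exampleclientid",
#             username="alice", use_admin=False)
# u.authenticate(password="correct-horse-battery", username="alice")
# user = u.get_user(attr_map={"email": "email"})
# print(user.username, user.email)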
| 35.693333
| 127
| 0.622338
|
7310c13900a51bbaa125ff2cc4ac89c2525c9753
| 633
|
py
|
Python
|
model/hooks.py
|
peternara/graph-based-image-classification-gcn
|
60e93b47691e960b7f06f7a5dc11191efe881178
|
[
"MIT"
] | 44
|
2017-02-26T16:52:48.000Z
|
2022-02-17T18:50:02.000Z
|
model/hooks.py
|
hungerzs/graph-based-image-classification
|
d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8
|
[
"MIT"
] | 2
|
2018-11-14T05:11:25.000Z
|
2020-06-23T16:24:41.000Z
|
model/hooks.py
|
hungerzs/graph-based-image-classification
|
d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8
|
[
"MIT"
] | 13
|
2018-04-26T07:46:35.000Z
|
2022-02-28T15:38:53.000Z
|
import tensorflow as tf
from .logger import (StepLoggerHook,
LossLoggerHook,
AccuracyLoggerHook,
TimeLoggerHook,
EolLoggerHook)
def hooks(display_step, last_step, batch_size, loss, accuracy):
return [
tf.train.StopAtStepHook(last_step=last_step),
tf.train.NanTensorHook(loss),
StepLoggerHook(display_step, last_step),
LossLoggerHook(display_step, loss),
AccuracyLoggerHook(display_step, accuracy),
TimeLoggerHook(display_step, batch_size, last_step),
EolLoggerHook(display_step),
]
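# Illustrative usage sketch (not part of the original file; `loss`, `accuracy`
# and `train_op` are assumed to be defined by the surrounding TF1 graph).
# session_hooks = hooks(display_step=100, last_step=10000,
#                       batch_size=128, loss=loss, accuracy=accuracy)
# with tf.train.MonitoredTrainingSession(hooks=session_hooks) as sess:
#     while not sess.should_stop():
#         sess.run(train_op)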
| 31.65
| 63
| 0.635071
|
e46458e530b98d8d9c81e24bc9147d6ab9c7c6aa
| 1,074
|
py
|
Python
|
kubernetes_asyncio/test/test_extensions_v1beta1_scale_status.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_extensions_v1beta1_scale_status.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_extensions_v1beta1_scale_status.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.extensions_v1beta1_scale_status import ExtensionsV1beta1ScaleStatus # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestExtensionsV1beta1ScaleStatus(unittest.TestCase):
"""ExtensionsV1beta1ScaleStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExtensionsV1beta1ScaleStatus(self):
"""Test ExtensionsV1beta1ScaleStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.extensions_v1beta1_scale_status.ExtensionsV1beta1ScaleStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.85
| 127
| 0.753259
|
0d332c7ac2835cd8ade9734abd527d574b2894c3
| 4,520
|
py
|
Python
|
filer/admin/clipboardadmin.py
|
pbmbrands/django-filer
|
199903f4ab1ad58c2979737cf269e25347941f32
|
[
"BSD-3-Clause"
] | 1
|
2017-09-27T01:07:05.000Z
|
2017-09-27T01:07:05.000Z
|
filer/admin/clipboardadmin.py
|
fusionbox/django-filer
|
5980b07497cb0021f0933977a3c1bf3273f2557f
|
[
"BSD-3-Clause"
] | null | null | null |
filer/admin/clipboardadmin.py
|
fusionbox/django-filer
|
5980b07497cb0021f0933977a3c1bf3273f2557f
|
[
"BSD-3-Clause"
] | null | null | null |
#-*- coding: utf-8 -*-
import json as simplejson
from django.forms.models import modelform_factory
from django.contrib import admin
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from filer import settings as filer_settings
from filer.models import Clipboard, ClipboardItem
from filer.utils.files import handle_upload, UploadException
from filer.utils.loader import load_object
# ModelAdmins
class ClipboardItemInline(admin.TabularInline):
model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
model = Clipboard
inlines = [ClipboardItemInline]
filter_horizontal = ('files',)
raw_id_fields = ('user',)
verbose_name = "DEBUG Clipboard"
verbose_name_plural = "DEBUG Clipboards"
def get_urls(self):
try:
# django >=1.4
from django.conf.urls import patterns, url
except ImportError:
# django <1.4
from django.conf.urls.defaults import patterns, url
urls = super(ClipboardAdmin, self).get_urls()
from filer import views
url_patterns = patterns('',
url(r'^operations/paste_clipboard_to_folder/$',
self.admin_site.admin_view(views.paste_clipboard_to_folder),
name='filer-paste_clipboard_to_folder'),
url(r'^operations/discard_clipboard/$',
self.admin_site.admin_view(views.discard_clipboard),
name='filer-discard_clipboard'),
url(r'^operations/delete_clipboard/$',
self.admin_site.admin_view(views.delete_clipboard),
name='filer-delete_clipboard'),
            # upload does its own permission checks (because of the Flash
            # missing-cookie issue)
url(r'^operations/upload/$',
self.ajax_upload,
name='filer-ajax_upload'),
)
url_patterns.extend(urls)
return url_patterns
@csrf_exempt
def ajax_upload(self, request, folder_id=None):
"""
        Receives an upload from the uploader. Handles only one file at a time.
"""
mimetype = "application/json" if request.is_ajax() else "text/html"
try:
upload, filename, is_raw = handle_upload(request)
            # Get clipboard
clipboard = Clipboard.objects.get_or_create(user=request.user)[0]
# find the file type
for filer_class in filer_settings.FILER_FILE_MODELS:
FileSubClass = load_object(filer_class)
#TODO: What if there are more than one that qualify?
if FileSubClass.matches_file_type(filename, upload, request):
FileForm = modelform_factory(
model = FileSubClass,
fields = ('original_filename', 'owner', 'file')
)
break
uploadform = FileForm({'original_filename': filename,
'owner': request.user.pk},
{'file': upload})
if uploadform.is_valid():
file_obj = uploadform.save(commit=False)
# Enforce the FILER_IS_PUBLIC_DEFAULT
file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
file_obj.save()
clipboard_item = ClipboardItem(
clipboard=clipboard, file=file_obj)
clipboard_item.save()
json_response = {
'thumbnail': file_obj.icons['32'],
'alt_text': '',
'label': unicode(file_obj),
}
return HttpResponse(simplejson.dumps(json_response),
mimetype=mimetype)
else:
form_errors = '; '.join(['%s: %s' % (
field,
', '.join(errors)) for field, errors in uploadform.errors.items()
])
raise UploadException("AJAX request not valid: form invalid '%s'" % (form_errors,))
except UploadException, e:
return HttpResponse(simplejson.dumps({'error': unicode(e)}),
mimetype=mimetype)
def get_model_perms(self, request):
"""
It seems this is only used for the list view. NICE :-)
"""
return {
'add': False,
'change': False,
'delete': False,
}
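# Illustrative note (not part of the original file): on success ajax_upload
# responds with JSON of the form
#   {"thumbnail": "<32px icon url>", "alt_text": "", "label": "<file name>"}
# and on failure with {"error": "<message>"}, served as "application/json"
# for AJAX requests and "text/html" otherwise, as selected above.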
| 39.649123
| 99
| 0.571018
|
8e7011b180d3a41650793f900a23110c3f602844
| 8,679
|
py
|
Python
|
neo_finrl/config.py
|
revelation-space/NeoFinRL
|
5dae2d861abd0c9f105f01a448488ef4fb92a5d6
|
[
"MIT"
] | null | null | null |
neo_finrl/config.py
|
revelation-space/NeoFinRL
|
5dae2d861abd0c9f105f01a448488ef4fb92a5d6
|
[
"MIT"
] | null | null | null |
neo_finrl/config.py
|
revelation-space/NeoFinRL
|
5dae2d861abd0c9f105f01a448488ef4fb92a5d6
|
[
"MIT"
] | null | null | null |
TRAIN_START_DATE = '2019-01-01'
TRAIN_END_DATE = '2019-12-31'
TEST_START_DATE = '2020-01-01'
TEST_END_DATE = '2020-12-31'
TRADE_START_DATE = '2021-01-01'
TRADE_END_DATE = '2021-07-31'
TECHNICAL_INDICATORS_LIST = ['macd', 'boll_ub', 'boll_lb', 'rsi_30', 'dx_30',
'close_30_sma', 'close_60_sma']
FAANG_TICKER = ['FB', 'AMZN', 'AAPL', 'NFLX', 'GOOG']
# Dow 30 constituents in 2021/10
DOW_30_TICKER = [
"AXP",
"AMGN",
"AAPL",
"BA",
"CAT",
"CSCO",
"CVX",
"GS",
"HD",
"HON",
"IBM",
"INTC",
"JNJ",
"KO",
"JPM",
"MCD",
"MMM",
"MRK",
"MSFT",
"NKE",
"PG",
"TRV",
"UNH",
"CRM",
"VZ",
"V",
"WBA",
"WMT",
"DIS",
"DOW"
]
'''DOW_30_TICKER_2019 = [
"AAPL",
"MSFT",
"JPM",
"V",
"RTX",
"PG",
"GS",
"NKE",
"DIS",
"AXP",
"HD",
"INTC",
"WMT",
"IBM",
"MRK",
"UNH",
"KO",
"CAT",
"TRV",
"JNJ",
"CVX",
"MCD",
"VZ",
"CSCO",
"XOM",
"BA",
"MMM",
"PFE",
"WBA",
"DD",
]'''
# Nasdaq 100 constituents at 2019/01
NAS_100_TICKER = [
"AMGN",
"AAPL",
"AMAT",
"INTC",
"PCAR",
"PAYX",
"MSFT",
"ADBE",
"CSCO",
"XLNX",
"QCOM",
"COST",
"SBUX",
"FISV",
"CTXS",
"INTU",
"AMZN",
"EBAY",
"BIIB",
"CHKP",
"GILD",
"NLOK",
"CMCSA",
"FAST",
"ADSK",
"CTSH",
"NVDA",
"GOOGL",
"ISRG",
"VRTX",
"HSIC",
"BIDU",
"ATVI",
"ADP",
"ROST",
"ORLY",
"CERN",
"BKNG",
"MYL",
"MU",
"DLTR",
"ALXN",
"SIRI",
"MNST",
"AVGO",
"TXN",
"MDLZ",
"FB",
"ADI",
"WDC",
"REGN",
"LBTYK",
"VRSK",
"NFLX",
"TSLA",
"CHTR",
"MAR",
"ILMN",
"LRCX",
"EA",
"AAL",
"WBA",
"KHC",
"BMRN",
"JD",
"SWKS",
"INCY",
"PYPL",
"CDW",
"FOXA",
"MXIM",
"TMUS",
"EXPE",
"TCOM",
"ULTA",
"CSX",
"NTES",
"MCHP",
"CTAS",
"KLAC",
"HAS",
"JBHT",
"IDXX",
"WYNN",
"MELI",
"ALGN",
"CDNS",
"WDAY",
"SNPS",
"ASML",
"TTWO",
"PEP",
"NXPI",
"XEL",
"AMD",
"NTAP",
"VRSN",
"LULU",
"WLTW",
"UAL",
]
# SP 500 constituents at 2019
SP_500_TICKER = [
"A",
"AAL",
"AAP",
"AAPL",
"ABBV",
"ABC",
"ABMD",
"ABT",
"ACN",
"ADBE",
"ADI",
"ADM",
"ADP",
"ADS",
"ADSK",
"AEE",
"AEP",
"AES",
"AFL",
"AGN",
"AIG",
"AIV",
"AIZ",
"AJG",
"AKAM",
"ALB",
"ALGN",
"ALK",
"ALL",
"ALLE",
"ALXN",
"AMAT",
"AMCR",
"AMD",
"AME",
"AMG",
"AMGN",
"AMP",
"AMT",
"AMZN",
"ANET",
"ANSS",
"ANTM",
"AON",
"AOS",
"APA",
"APD",
"APH",
"APTV",
"ARE",
"ARNC",
"ATO",
"ATVI",
"AVB",
"AVGO",
"AVY",
"AWK",
"AXP",
"AZO",
"BA",
"BAC",
"BAX",
"BBT",
"BBY",
"BDX",
"BEN",
"BF.B",
"BHGE",
"BIIB",
"BK",
"BKNG",
"BLK",
"BLL",
"BMY",
"BR",
"BRK.B",
"BSX",
"BWA",
"BXP",
"C",
"CAG",
"CAH",
"CAT",
"CB",
"CBOE",
"CBRE",
"CBS",
"CCI",
"CCL",
"CDNS",
"CE",
"CELG",
"CERN",
"CF",
"CFG",
"CHD",
"CHRW",
"CHTR",
"CI",
"CINF",
"CL",
"CLX",
"CMA",
"CMCSA",
"CME",
"CMG",
"CMI",
"CMS",
"CNC",
"CNP",
"COF",
"COG",
"COO",
"COP",
"COST",
"COTY",
"CPB",
"CPRI",
"CPRT",
"CRM",
"CSCO",
"CSX",
"CTAS",
"CTL",
"CTSH",
"CTVA",
"CTXS",
"CVS",
"CVX",
"CXO",
"D",
"DAL",
"DD",
"DE",
"DFS",
"DG",
"DGX",
"DHI",
"DHR",
"DIS",
"DISCK",
"DISH",
"DLR",
"DLTR",
"DOV",
"DOW",
"DRE",
"DRI",
"DTE",
"DUK",
"DVA",
"DVN",
"DXC",
"EA",
"EBAY",
"ECL",
"ED",
"EFX",
"EIX",
"EL",
"EMN",
"EMR",
"EOG",
"EQIX",
"EQR",
"ES",
"ESS",
"ETFC",
"ETN",
"ETR",
"EVRG",
"EW",
"EXC",
"EXPD",
"EXPE",
"EXR",
"F",
"FANG",
"FAST",
"FB",
"FBHS",
"FCX",
"FDX",
"FE",
"FFIV",
"FIS",
"FISV",
"FITB",
"FLIR",
"FLS",
"FLT",
"FMC",
"FOXA",
"FRC",
"FRT",
"FTI",
"FTNT",
"FTV",
"GD",
"GE",
"GILD",
"GIS",
"GL",
"GLW",
"GM",
"GOOG",
"GPC",
"GPN",
"GPS",
"GRMN",
"GS",
"GWW",
"HAL",
"HAS",
"HBAN",
"HBI",
"HCA",
"HCP",
"HD",
"HES",
"HFC",
"HIG",
"HII",
"HLT",
"HOG",
"HOLX",
"HON",
"HP",
"HPE",
"HPQ",
"HRB",
"HRL",
"HSIC",
"HST",
"HSY",
"HUM",
"IBM",
"ICE",
"IDXX",
"IEX",
"IFF",
"ILMN",
"INCY",
"INFO",
"INTC",
"INTU",
"IP",
"IPG",
"IPGP",
"IQV",
"IR",
"IRM",
"ISRG",
"IT",
"ITW",
"IVZ",
"JBHT",
"JCI",
"JEC",
"JEF",
"JKHY",
"JNJ",
"JNPR",
"JPM",
"JWN",
"K",
"KEY",
"KEYS",
"KHC",
"KIM",
"KLAC",
"KMB",
"KMI",
"KMX",
"KO",
"KR",
"KSS",
"KSU",
"L",
"LB",
"LDOS",
"LEG",
"LEN",
"LH",
"LHX",
"LIN",
"LKQ",
"LLY",
"LMT",
"LNC",
"LNT",
"LOW",
"LRCX",
"LUV",
"LW",
"LYB",
"M",
"MA",
"MAA",
"MAC",
"MAR",
"MAS",
"MCD",
"MCHP",
"MCK",
"MCO",
"MDLZ",
"MDT",
"MET",
"MGM",
"MHK",
"MKC",
"MKTX",
"MLM",
"MMC",
"MMM",
"MNST",
"MO",
"MOS",
"MPC",
"MRK",
"MRO",
"MS",
"MSCI",
"MSFT",
"MSI",
"MTB",
"MTD",
"MU",
"MXIM",
"MYL",
"NBL",
"NCLH",
"NDAQ",
"NEE",
"NEM",
"NFLX",
"NI",
"NKE",
"NKTR",
"NLSN",
"NOC",
"NOV",
"NRG",
"NSC",
"NTAP",
"NTRS",
"NUE",
"NVDA",
"NWL",
"NWS",
"O",
"OI",
"OKE",
"OMC",
"ORCL",
"ORLY",
"OXY",
"PAYX",
"PBCT",
"PCAR",
"PEG",
"PEP",
"PFE",
"PFG",
"PG",
"PGR",
"PH",
"PHM",
"PKG",
"PKI",
"PLD",
"PM",
"PNC",
"PNR",
"PNW",
"PPG",
"PPL",
"PRGO",
"PRU",
"PSA",
"PSX",
"PVH",
"PWR",
"PXD",
"PYPL",
"QCOM",
"QRVO",
"RCL",
"RE",
"REG",
"REGN",
"RF",
"RHI",
"RJF",
"RL",
"RMD",
"ROK",
"ROL",
"ROP",
"ROST",
"RSG",
"RTN",
"SBAC",
"SBUX",
"SCHW",
"SEE",
"SHW",
"SIVB",
"SJM",
"SLB",
"SLG",
"SNA",
"SNPS",
"SO",
"SPG",
"SPGI",
"SRE",
"STI",
"STT",
"STX",
"STZ",
"SWK",
"SWKS",
"SYF",
"SYK",
"SYMC",
"SYY",
"T",
"TAP",
"TDG",
"TEL",
"TFX",
"TGT",
"TIF",
"TJX",
"TMO",
"TMUS",
"TPR",
"TRIP",
"TROW",
"TRV",
"TSCO",
"TSN",
"TSS",
"TTWO",
"TWTR",
"TXN",
"TXT",
"UA",
"UAL",
"UDR",
"UHS",
"ULTA",
"UNH",
"UNM",
"UNP",
"UPS",
"URI",
"USB",
"UTX",
"V",
"VAR",
"VFC",
"VIAB",
"VLO",
"VMC",
"VNO",
"VRSK",
"VRSN",
"VRTX",
"VTR",
"VZ",
"WAB",
"WAT",
"WBA",
"WCG",
"WDC",
"WEC",
"WELL",
"WFC",
"WHR",
"WLTW",
"WM",
"WMB",
"WMT",
"WRK",
"WU",
"WY",
"WYNN",
"XEC",
"XEL",
"XLNX",
"XOM",
"XRAY",
"XRX",
"XYL",
"YUM",
"ZBH",
"ZION",
"ZTS",
]
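# Illustrative usage sketch (not part of the original file; assumes the module
# lives at neo_finrl/config.py as in this repository).
# from neo_finrl.config import DOW_30_TICKER, TECHNICAL_INDICATORS_LIST
# print(len(DOW_30_TICKER), "tickers,", len(TECHNICAL_INDICATORS_LIST), "indicators")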
| 12.541908
| 78
| 0.287821
|
68c6eff56769928891e1b77da8c79b8a08c70d68
| 12,794
|
py
|
Python
|
selfdrive/controls/lib/lateral_planner.py
|
seanchen5716/op
|
f9e5e95cc98e822fa0651493ad809340db6d2f8a
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lateral_planner.py
|
seanchen5716/op
|
f9e5e95cc98e822fa0651493ad809340db6d2f8a
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lateral_planner.py
|
seanchen5716/op
|
f9e5e95cc98e822fa0651493ad809340db6d2f8a
|
[
"MIT"
] | null | null | null |
import os
import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LAT, MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
from common.params import Params
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', True)
LANE_CHANGE_SPEED_MIN = 60 * CV.KPH_TO_MS
LANE_CHANGE_TIME_MAX = 5.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
def __init__(self, CP):
self.LP = LanePlanner()
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_enabled = Params().get('LaneChangeEnabled') == b'1'
self.auto_lane_change_enabled = Params().get('AutoLaneChangeEnabled') == b'1'
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.lane_change_ll_prob = 1.0
self.prev_one_blinker = False
self.desire = log.LateralPlan.Desire.none
self.auto_lane_change_timer = 0.0
self.prev_torque_applied = False
self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].curvature = 0.0
self.angle_steers_des = 0.0
self.angle_steers_des_mpc = 0.0
self.angle_steers_des_time = 0.0
def update(self, sm, CP, VM):
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
steering_wheel_angle_offset_deg = sm['liveParameters'].angleOffsetDeg
steering_wheel_angle_deg = sm['carState'].steeringAngleDeg
lateral_control_pid = sm['controlsState'].lateralControlPid
lateral_control_indi = sm['controlsState'].lateralControlIndi
lateral_control_lqr = sm['controlsState'].lateralControlLqr
if lateral_control_pid == 1:
output_scale = sm['controlsState'].lateralControlState.pidState.output
elif lateral_control_indi == 1:
output_scale = sm['controlsState'].lateralControlState.indiState.output
elif lateral_control_lqr == 1:
output_scale = sm['controlsState'].lateralControlState.lqrState.output
# Update vehicle model
x = max(sm['liveParameters'].stiffnessFactor, 0.1)
sr = max(sm['liveParameters'].steerRatio, 0.1)
VM.update_params(x, sr)
curvature_factor = VM.curvature_factor(v_ego)
measured_curvature = -curvature_factor * math.radians(steering_wheel_angle_deg - steering_wheel_angle_offset_deg) / VM.sR
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not self.lane_change_enabled):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right)) or \
self.auto_lane_change_enabled and 3.25 > self.auto_lane_change_timer > 3.
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
# State transitions
# off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
# pre
elif self.lane_change_state == LaneChangeState.preLaneChange:
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and (not blindspot_detected or self.prev_torque_applied):
self.lane_change_state = LaneChangeState.laneChangeStarting
elif torque_applied and blindspot_detected and self.auto_lane_change_timer != 10.0:
self.auto_lane_change_timer = 10.0
elif not torque_applied and self.auto_lane_change_timer == 10.0 and not self.prev_torque_applied:
self.prev_torque_applied = True
# starting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
# fade out over .5s
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
# 98% certainty
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# finishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
# fade in laneline over 1s
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
if self.lane_change_state == LaneChangeState.off:
self.auto_lane_change_timer = 0.0
self.prev_torque_applied = False
    elif self.auto_lane_change_timer < 3.25:  # stop after ~3 s; resumes from 10 when torque is applied
self.auto_lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Turn off lanes during lane change
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
y_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
assert len(y_pts) == MPC_N + 1
assert len(heading_pts) == MPC_N + 1
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
# init state for next
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:MPC_N+1], self.mpc_solution.curvature)
# TODO this needs more thought, use .2s extra for now to estimate other delays
delay = CP.steerActuatorDelay + .2
current_curvature = self.mpc_solution.curvature[0]
psi = interp(delay, self.t_idxs[:MPC_N+1], self.mpc_solution.psi)
next_curvature_rate = self.mpc_solution.curvature_rate[0]
# MPC can plan to turn the wheel and turn back before t_delay. This means
# in high delay cases some corrections never even get commanded. So just use
# psi to calculate a simple linearization of desired curvature
curvature_diff_from_psi = psi/(max(v_ego, 1e-1) * delay) - current_curvature
next_curvature = current_curvature + 2*curvature_diff_from_psi
# reset to current steer angle if not active or overriding
if active:
curvature_desired = next_curvature
desired_curvature_rate = next_curvature_rate
else:
curvature_desired = measured_curvature
desired_curvature_rate = 0.0
# negative sign, controls uses different convention
self.desired_steering_wheel_angle_deg = -float(math.degrees(curvature_desired * VM.sR)/curvature_factor) + steering_wheel_angle_offset_deg
self.desired_steering_wheel_angle_rate_deg = -float(math.degrees(desired_curvature_rate * VM.sR)/curvature_factor)
    # Check for infeasible MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'liveParameters', 'modelV2'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.steeringAngleDeg = float(self.desired_steering_wheel_angle_deg)
plan_send.lateralPlan.steeringRateDeg = float(self.desired_steering_wheel_angle_rate_deg)
plan_send.lateralPlan.angleOffsetDeg = float(sm['liveParameters'].angleOffsetAverageDeg)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
plan_send.lateralPlan.autoLaneChangeEnabled = self.auto_lane_change_enabled
plan_send.lateralPlan.autoLaneChangeTimer = 3 - int(self.auto_lane_change_timer)
pm.send('lateralPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message('liveMpc')
dat.liveMpc.x = list(self.mpc_solution[0].x)
dat.liveMpc.y = list(self.mpc_solution[0].y)
dat.liveMpc.psi = list(self.mpc_solution[0].psi)
# dat.liveMpc.tire_angle = list(self.mpc_solution[0].tire_angle)
dat.liveMpc.curvature = list(self.mpc_solution[0].curvature)
dat.liveMpc.cost = self.mpc_solution[0].cost
pm.send('liveMpc', dat)
| 45.208481
| 142
| 0.735501
|
48e3c18a0bc01797e75b581834c2d9d13255bc22
| 2,609
|
py
|
Python
|
notcm.py
|
NKID00/NotCloudMusic
|
48bc45b5442b766cf9d4f93a7e3d902747bd637b
|
[
"MIT"
] | 1
|
2020-10-06T08:39:44.000Z
|
2020-10-06T08:39:44.000Z
|
notcm.py
|
NKID00/NotCloudMusic
|
48bc45b5442b766cf9d4f93a7e3d902747bd637b
|
[
"MIT"
] | null | null | null |
notcm.py
|
NKID00/NotCloudMusic
|
48bc45b5442b766cf9d4f93a7e3d902747bd637b
|
[
"MIT"
] | null | null | null |
'''NotCloudMusic
Yet another NetEase Cloud Music CUI client
https://github.com/NKID00/NotCloudMusic
MIT License
Copyright (c) 2020 NKID00
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from NeteaseCloudMusicApiPy.ncmapi import NeteaseCloudMusicApi
from sys import argv
from os import walk
from os.path import splitext
from importlib import import_module
from requests import ConnectionError, Timeout, HTTPError
USAGE = '''NotCloudMusic
Yet another NetEase Cloud Music CLI client
usage: notcm <command> [<args>...]
commands:'''
USAGE_COMMAND = ' %-11s %s'
VERSION = 'NotCloudMusic 0.1.0'
def load_plugins():
plugins = {}
for file in next(walk('./plugins/'))[2]:
name, ext = splitext(file)
if ext == '.py':
module = import_module(f'plugins.{name}')
try:
info = module.notcloudmusic_plugin_info
plugins[info['name']] = info
except AttributeError:
continue
commands = {}
for name, plugin in plugins.items():
try:
commands.update(plugin['commands'])
except KeyError:
continue
return plugins, commands
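# Illustrative sketch (not part of the original file) of the plugin contract
# that load_plugins() and main() rely on; the module and command names here are
# hypothetical:
#
#   # plugins/hello.py
#   def hello(api, argv):
#       print('hello from a plugin')
#
#   notcloudmusic_plugin_info = {
#       'name': 'hello',
#       'commands': {
#           'hello': {'description': 'print a greeting', 'callable': hello},
#       },
#   }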
def main():
plugins, commands = load_plugins()
if len(argv) < 2 or argv[1] not in commands.keys():
print(USAGE)
for name, command in sorted(commands.items()):
print(USAGE_COMMAND % (name, command['description']))
else:
api = NeteaseCloudMusicApi()
try:
commands[argv[1]]['callable'](api, argv)
except (ConnectionError, Timeout, HTTPError):
print('Network connection error. Check that the API process is running and connected to the internet.')
if __name__ == "__main__":
main()
| 32.209877
| 78
| 0.697969
|
de4aa87f6e1057eee513853a33ce1c691b577224
| 11,268
|
py
|
Python
|
droidlet/perception/craftassist/low_level_perception.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | null | null | null |
droidlet/perception/craftassist/low_level_perception.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | null | null | null |
droidlet/perception/craftassist/low_level_perception.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import sys
import numpy as np
from droidlet.lowlevel.minecraft.mc_util import XYZ, IDM, pos_to_np, euclid_dist, diag_adjacent
from droidlet.base_util import to_block_pos
from typing import Tuple, List
from droidlet.lowlevel.minecraft.craftassist_cuberite_utils.block_data import BORING_BLOCKS
from droidlet.memory.memory_nodes import PlayerNode, AttentionNode
from droidlet.memory.craftassist.mc_memory_nodes import BlockObjectNode
def capped_line_of_sight(agent, player_struct, cap=20):
"""Return the block directly in the entity's line of sight, or a point in the distance"""
xsect = agent.get_player_line_of_sight(player_struct)
if xsect is not None and euclid_dist(pos_to_np(xsect), pos_to_np(player_struct.pos)) <= cap:
return pos_to_np(xsect)
# default to cap blocks in front of entity
vec = agent.coordinate_transforms.look_vec(player_struct.look.yaw, player_struct.look.pitch)
return cap * np.array(vec) + to_block_pos(pos_to_np(player_struct.pos))
class LowLevelMCPerception:
"""Perceive the world at a given frequency and update agent
memory.
updates positions of other players, mobs, self, and changed blocks,
takes this information directly from the craftassist_cuberite_utils server
Args:
agent (LocoMCAgent): reference to the minecraft Agent
perceive_freq (int): if not forced, how many Agent steps between perception
"""
def __init__(self, agent, perceive_freq=5):
self.agent = agent
self.memory = agent.memory
self.pending_agent_placed_blocks = set()
self.perceive_freq = perceive_freq
def perceive(self, force=False):
"""
Every n agent_steps (defined by perceive_freq), update in agent memory
location/pose of all agents, players, mobs; item stack positions and
changed blocks.
Args:
force (boolean): set to True to run all perceptual heuristics right now,
as opposed to waiting for perceive_freq steps (default: False)
"""
# FIXME (low pri) remove these in code, get from sql
self.agent.pos = to_block_pos(pos_to_np(self.agent.get_player().pos))
if self.agent.count % self.perceive_freq == 0 or force:
for mob in self.agent.get_mobs():
if euclid_dist(self.agent.pos, pos_to_np(mob.pos)) < self.memory.perception_range:
self.memory.set_mob_position(mob)
item_stack_set = set()
for item_stack in self.agent.get_item_stacks():
item_stack_set.add(item_stack.entityId)
if (
euclid_dist(self.agent.pos, pos_to_np(item_stack.pos))
< self.memory.perception_range
):
self.memory.set_item_stack_position(item_stack)
old_item_stacks = self.memory.get_all_item_stacks()
if old_item_stacks:
for old_item_stack in old_item_stacks:
memid = old_item_stack[0]
eid = old_item_stack[1]
if eid not in item_stack_set:
self.memory.untag(memid, "_on_ground")
else:
self.memory.tag(memid, "_on_ground")
# note: no "force"; these run on every perceive call. assumed to be fast
self.update_self_memory()
self.update_other_players(self.agent.get_other_players())
# use safe_get_changed_blocks to deal with pointing
for (xyz, idm) in self.agent.safe_get_changed_blocks():
self.on_block_changed(xyz, idm)
def update_self_memory(self):
"""Update agent's current position and attributes in memory"""
p = self.agent.get_player()
memid = self.memory.get_player_by_eid(p.entityId).memid
cmd = "UPDATE ReferenceObjects SET eid=?, name=?, x=?, y=?, z=?, pitch=?, yaw=? WHERE "
cmd = cmd + "uuid=?"
self.memory.db_write(
cmd, p.entityId, p.name, p.pos.x, p.pos.y, p.pos.z, p.look.pitch, p.look.yaw, memid
)
def update_other_players(self, player_list: List, force=False):
"""Update other in-game players in agen't memory
Args:
player_list: a list of player_structs from the agent
"""
for p in player_list:
mem = self.memory.get_player_by_eid(p.entityId)
if mem is None:
memid = PlayerNode.create(self.memory, p)
else:
memid = mem.memid
cmd = (
"UPDATE ReferenceObjects SET eid=?, name=?, x=?, y=?, z=?, pitch=?, yaw=? WHERE "
)
cmd = cmd + "uuid=?"
self.memory.db_write(
cmd, p.entityId, p.name, p.pos.x, p.pos.y, p.pos.z, p.look.pitch, p.look.yaw, memid
)
loc = capped_line_of_sight(self.agent, p)
loc[1] += 1
memids = self.memory._db_read_one(
'SELECT uuid FROM ReferenceObjects WHERE ref_type="attention" AND type_name=?',
p.entityId,
)
if memids:
self.memory.db_write(
"UPDATE ReferenceObjects SET x=?, y=?, z=? WHERE uuid=?",
loc[0],
loc[1],
loc[2],
memids[0],
)
else:
AttentionNode.create(self.memory, loc, attender=p.entityId)
# TODO replace name by eid everywhere
def get_player_struct_by_name(self, name):
"""Get the raw player struct by player name
Returns:
a raw player struct, e.g. to use in agent.get_player_line_of_sight
"""
for p in self.agent.get_other_players():
if p.name == name:
return p
return None
def on_block_changed(self, xyz: XYZ, idm: IDM):
"""Update the state of the world when a block is changed."""
# TODO don't need to do this for far away blocks if this is slowing down bot
self.maybe_remove_inst_seg(xyz)
self.maybe_remove_block_from_memory(xyz, idm)
self.maybe_add_block_to_memory(xyz, idm)
def clear_air_surrounded_negatives(self):
pass
def maybe_remove_inst_seg(self, xyz):
"""if the block is changed, the old instance segmentation is considered no longer valid"""
# get all associated instseg nodes
# FIXME make this into a basic search
inst_seg_memids = self.memory.get_instseg_object_ids_by_xyz(xyz)
if inst_seg_memids:
# delete the InstSeg, they are ephemeral and should be recomputed
# TODO/FIXME more refined approach: if a block changes
# ask the models to recompute. if the tags are the same, keep it
for i in inst_seg_memids:
self.memory.forget(i[0])
# clean all this up...
# eventually some conditions for not committing air/negative blocks
def maybe_add_block_to_memory(self, xyz: XYZ, idm: IDM, agent_placed=False):
"""Update blocks to memory when any change in the environment
is caused either by agent or player"""
if not agent_placed:
interesting, player_placed, agent_placed = self.is_placed_block_interesting(
xyz, idm[0]
)
else:
interesting = True
player_placed = False
if not interesting:
return
# TODO remove this, clean up
if agent_placed:
try:
self.pending_agent_placed_blocks.remove(xyz)
except:
pass
adjacent = [
self.memory.get_object_info_by_xyz(a, "BlockObjects", just_memid=False)
for a in diag_adjacent(xyz)
]
if idm[0] == 0:
# block removed / air block added
adjacent_memids = [a[0][0] for a in adjacent if len(a) > 0 and a[0][1] == 0]
else:
# normal block added
adjacent_memids = [a[0][0] for a in adjacent if len(a) > 0 and a[0][1] > 0]
adjacent_memids = list(set(adjacent_memids))
if len(adjacent_memids) == 0:
# new block object
BlockObjectNode.create(self.agent.memory, [(xyz, idm)])
elif len(adjacent_memids) == 1:
# update block object
memid = adjacent_memids[0]
self.memory.upsert_block(
(xyz, idm), memid, "BlockObjects", player_placed, agent_placed
)
self.memory.set_memory_updated_time(memid)
self.memory.set_memory_attended_time(memid)
else:
chosen_memid = adjacent_memids[0]
self.memory.set_memory_updated_time(chosen_memid)
self.memory.set_memory_attended_time(chosen_memid)
# merge tags
where = " OR ".join(["subj=?"] * len(adjacent_memids))
self.memory.db_write(
"UPDATE Triples SET subj=? WHERE " + where, chosen_memid, *adjacent_memids
)
# merge multiple block objects (will delete old ones)
where = " OR ".join(["uuid=?"] * len(adjacent_memids))
cmd = "UPDATE VoxelObjects SET uuid=? WHERE "
self.memory.db_write(cmd + where, chosen_memid, *adjacent_memids)
# insert new block
self.memory.upsert_block(
(xyz, idm), chosen_memid, "BlockObjects", player_placed, agent_placed
)
def maybe_remove_block_from_memory(self, xyz: XYZ, idm: IDM):
"""Update agent's memory with blocks that have been destroyed."""
tables = ["BlockObjects"]
for table in tables:
info = self.memory.get_object_info_by_xyz(xyz, table, just_memid=False)
if not info or len(info) == 0:
continue
assert len(info) == 1
memid, b, m = info[0]
delete = (b == 0 and idm[0] > 0) or (b > 0 and idm[0] == 0)
if delete:
self.memory.remove_voxel(*xyz, table)
self.agent.areas_to_perceive.append((xyz, 3))
# FIXME move removal of block to parent
def is_placed_block_interesting(self, xyz: XYZ, bid: int) -> Tuple[bool, bool, bool]:
"""Return three values:
- bool: is the placed block interesting?
- bool: is it interesting because it was placed by a player?
- bool: is it interesting because it was placed by the agent?
"""
interesting = False
player_placed = False
agent_placed = False
# TODO record *which* player placed it
if xyz in self.pending_agent_placed_blocks:
interesting = True
agent_placed = True
for player_struct in self.agent.get_other_players():
if (
euclid_dist(pos_to_np(player_struct.pos), xyz) < 5
and player_struct.mainHand.id == bid
):
interesting = True
if not agent_placed:
player_placed = True
if bid not in BORING_BLOCKS:
interesting = True
return interesting, player_placed, agent_placed
| 41.426471
| 99
| 0.600639
|
336493d222803ddd7fe86cafd30d82fd7dd581dd
| 5,395
|
py
|
Python
|
getbaidutoken.py
|
DozingWolf/pybaiduapi
|
6bae0dceeaeb8520e6bfc1376cc8038984dace31
|
[
"MIT"
] | null | null | null |
getbaidutoken.py
|
DozingWolf/pybaiduapi
|
6bae0dceeaeb8520e6bfc1376cc8038984dace31
|
[
"MIT"
] | null | null | null |
getbaidutoken.py
|
DozingWolf/pybaiduapi
|
6bae0dceeaeb8520e6bfc1376cc8038984dace31
|
[
"MIT"
] | null | null | null |
# Get the token and AK/SK used by the Baidu APIs
from configparser import ConfigParser
import requests
import json
import sqlite3
import os.path
import time
def getBaiduToken(conf_path):
#todayis = time.strftime('%Y/%m/%d', time.localtime())
todayis = time.time()
paraLoader = ConfigParser()
paraLoader.read(conf_path)
ai_ak = paraLoader.get('baidu_ai','ak')
ai_sk = paraLoader.get('baidu_ai','sk')
map_ak = paraLoader.get('baidu_map','ak')
baseDir = os.path.dirname(os.path.abspath(__file__))
absPath = os.path.join(baseDir, 'storage', 'Token.db')
print('create db')
dbHandle = sqlite3.connect(absPath)
dbCursor = dbHandle.cursor()
countSql = 'select count(1) from sqlite_master where name = "Token"'
dbCursor.execute(countSql)
dbReturn = dbCursor.fetchall()
print('dbReturn[0][0]:',dbReturn[0][0])
if dbReturn[0][0] == 0:
print('create table')
dbCreateTable = 'CREATE TABLE Token (id integer PRIMARY KEY autoincrement,tokentype varchar(20),accesstoken varchar(500),accesskey varchar(500),expirestime numeric,gettokendate numeric)'
dbCursor.execute(dbCreateTable)
urlFlag,urlData = requestsAIToken(ai_ak,ai_sk)
print('urlflag : ',urlFlag)
print('urlData : ',urlData)
rtnToken = urlData[0]
insertSql = 'INSERT INTO Token(tokentype,accesstoken,accesskey,expirestime,gettokendate) VALUES(?,?,?,?,?)'
insertPara = ('BaiduAI',urlData[0],urlData[1],urlData[3],todayis)
try:
dbCursor.execute(insertSql,insertPara)
dbHandle.commit()
dbReturn = dbCursor.fetchall()
print(dbReturn)
except Exception as err:
print(err)
else:
dbSql = 'select count(1) from Token where tokentype = ?'
dbSqlParaTokenType = 'BaiduAI'
dbCursor.execute(dbSql,(dbSqlParaTokenType,))
dbReturn = dbCursor.fetchall()
if dbReturn[0][0] == 1:
print('DB has data')
selectSql = 'select accesstoken,accesskey,expirestime,gettokendate from Token where tokentype = ? '
dbCursor.execute(selectSql,(dbSqlParaTokenType,))
dbReturn = dbCursor.fetchall()
print('sql result = ',dbReturn)
rtnExptime = dbReturn[0][2]
rtnGetTokendate = dbReturn[0][3]
if (rtnGetTokendate + rtnExptime <= todayis):
# token has expired,u must get new token from baidu api service
print('token has expired,u must get new token from baidu api service')
urlFlag,urlData = requestsAIToken(ai_ak,ai_sk)
print('urlflag : ',urlFlag)
print('urlData : ',urlData)
rtnToken = urlData[0]
updateSql = 'update Token set accesstoken = ?,accesskey = ?,expirestime = ?,gettokendate = ? where tokentype = ?'
updatePara = (urlData[0],urlData[1],urlData[3],todayis,'BaiduAI')
try:
dbCursor.execute(updateSql,updatePara)
dbHandle.commit()
dbReturn = dbCursor.fetchall()
print(dbReturn)
except Exception as err:
print(err)
else:
# token was unexpired
print('goodluck,token was unexpired')
rtnToken = dbReturn[0][0]
else:
print('DB hasn`t data')
urlFlag,urlData = requestsAIToken(ai_ak,ai_sk)
if urlFlag == 0:
insertSql = 'INSERT INTO Token(tokentype,accesstoken,accesskey,expirestime,gettokendate) VALUES(?,?,?,?,?)'
insertPara = ('BaiduAI',urlData[0],urlData[1],urlData[3],todayis)
dbCursor.execute(insertSql,insertPara)
dbHandle.commit()
rtnToken = urlData[0]
if dbCursor.rowcount == 1:
pass
else:
raise Exception('update date error')
else:
raise Exception(''.join(urlData))
dbCursor.close()
dbHandle.commit()
return rtnToken
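# Illustrative usage sketch (not part of the original file); the config path is
# taken from the commented-out call at the bottom of this module:
#   token = getBaiduToken(conf_path='./conf/para.conf')
# The returned value is the cached access_token; it is refreshed automatically
# once gettokendate + expirestime lies in the past.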
def requestsAIToken(akkey,skkey):
baiduAITokenUrl = 'https://aip.baidubce.com/oauth/2.0/token'
baiduAITokenPara = {
'grant_type':'client_credentials',
'client_id':akkey,
'client_secret':skkey
}
r = requests.get(url=baiduAITokenUrl,params=baiduAITokenPara)
formatData = json.loads(r.text)
if formatData.get('access_token') is not None:
return 0,[formatData.get('access_token'),formatData.get('session_key'),formatData.get('session_secret'),formatData.get('expires_in')]
elif formatData.get('error') is not None:
return -1,['None Data']
# raise Exception(formatData.get('error'))
def delExpiredToken(ttype):
baseDir = os.path.dirname(os.path.abspath(__file__))
absPath = os.path.join(baseDir, 'storage', 'Token.db')
dbHandle = sqlite3.connect(absPath)
dbCursor = dbHandle.cursor()
deleteSql = 'delete from Token where tokentype = ?'
deleteSqlPara = (ttype,)
try:
dbCursor.execute(deleteSql,deleteSqlPara)
except Exception as err:
print(err)
dbCursor.close()
return -1,['err']
dbHandle.commit()
dbCursor.close()
return 0,['Delete Success']
#getBaiduToken(conf_path = './conf/para.conf')
#delExpiredToken(ttype='Baidu')
| 41.183206
| 194
| 0.609268
|
7aed3a4b895dcde6c1f7068f8c33c6ae232d4e70
| 7,660
|
py
|
Python
|
tools/wptrunner/wptrunner/browsers/safari.py
|
BasixKOR/wpt
|
aa27d567c10dcdb2aea6884d5155dfaaa177a800
|
[
"BSD-3-Clause"
] | null | null | null |
tools/wptrunner/wptrunner/browsers/safari.py
|
BasixKOR/wpt
|
aa27d567c10dcdb2aea6884d5155dfaaa177a800
|
[
"BSD-3-Clause"
] | 59
|
2022-01-19T21:35:57.000Z
|
2022-03-30T21:35:27.000Z
|
tools/wptrunner/wptrunner/browsers/safari.py
|
BasixKOR/wpt
|
aa27d567c10dcdb2aea6884d5155dfaaa177a800
|
[
"BSD-3-Clause"
] | null | null | null |
# mypy: allow-untyped-defs
import os
import plistlib
from distutils.spawn import find_executable
from distutils.version import LooseVersion
import psutil
from .base import WebDriverBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.base import WdspecExecutor # noqa: F401
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor, # noqa: F401
WebDriverCrashtestExecutor) # noqa: F401
__wptrunner__ = {"product": "safari",
"check_args": "check_args",
"browser": "SafariBrowser",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "WdspecExecutor",
"crashtest": "WebDriverCrashtestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"timeout_multiplier": "get_timeout_multiplier"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args"),
"kill_safari": kwargs.get("kill_safari", False)}
def executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs):
executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = {}
if test_type == "testharness":
executor_kwargs["capabilities"]["pageLoadStrategy"] = "eager"
if kwargs["binary"] is not None:
raise ValueError("Safari doesn't support setting executable location")
V = LooseVersion
browser_bundle_version = run_info_data["browser_bundle_version"]
if browser_bundle_version is not None and V(browser_bundle_version[2:]) >= V("613.1.7.1"):
logger.debug("using acceptInsecureCerts=True")
executor_kwargs["capabilities"]["acceptInsecureCerts"] = True
else:
logger.warning("not using acceptInsecureCerts, Safari will require certificates to be trusted")
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {}
def run_info_extras(**kwargs):
webdriver_binary = kwargs["webdriver_binary"]
rv = {}
safari_bundle, safari_info = get_safari_info(webdriver_binary)
if safari_info is not None:
assert safari_bundle is not None # if safari_info is not None, this can't be None either
_, webkit_info = get_webkit_info(safari_bundle)
if webkit_info is None:
webkit_info = {}
else:
safari_info = {}
webkit_info = {}
rv["browser_marketing_version"] = safari_info.get("CFBundleShortVersionString")
rv["browser_bundle_version"] = safari_info.get("CFBundleVersion")
rv["browser_webkit_bundle_version"] = webkit_info.get("CFBundleVersion")
with open("/System/Library/CoreServices/SystemVersion.plist", "rb") as fp:
system_version = plistlib.load(fp)
rv["os_build"] = system_version["ProductBuildVersion"]
return rv
def get_safari_info(wd_path):
bundle_paths = [
os.path.join(os.path.dirname(wd_path), "..", ".."), # bundled Safari (e.g. STP)
os.path.join(os.path.dirname(wd_path), "Safari.app"), # local Safari build
"/Applications/Safari.app", # system Safari
]
for bundle_path in bundle_paths:
info_path = os.path.join(bundle_path, "Contents", "Info.plist")
if not os.path.isfile(info_path):
continue
with open(info_path, "rb") as fp:
info = plistlib.load(fp)
# check we have a Safari family bundle
ident = info.get("CFBundleIdentifier")
if not isinstance(ident, str) or not ident.startswith("com.apple.Safari"):
continue
return (bundle_path, info)
return (None, None)
def get_webkit_info(safari_bundle_path):
framework_paths = [
os.path.join(os.path.dirname(safari_bundle_path), "Contents", "Frameworks"), # bundled Safari (e.g. STP)
os.path.join(os.path.dirname(safari_bundle_path), ".."), # local Safari build
"/System/Library/PrivateFrameworks",
"/Library/Frameworks",
"/System/Library/Frameworks",
]
for framework_path in framework_paths:
info_path = os.path.join(framework_path, "WebKit.framework", "Versions", "Current", "Resources", "Info.plist")
if not os.path.isfile(info_path):
continue
with open(info_path, "rb") as fp:
info = plistlib.load(fp)
return (framework_path, info)
return (None, None)
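# Illustrative sketch (not part of the original module) of how the two lookups
# above feed run_info_extras(); the concrete path and versions are assumptions:
#   bundle, info = get_safari_info('/usr/bin/safaridriver')
#   # bundle -> '/Applications/Safari.app', info['CFBundleVersion'] -> e.g. '613.1.7.1'
#   _, webkit_info = get_webkit_info(bundle)
#   # webkit_info['CFBundleVersion'] identifies the WebKit framework in use.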
class SafariBrowser(WebDriverBrowser):
"""Safari is backed by safaridriver, which is supplied through
``wptrunner.webdriver.SafariDriverServer``.
"""
def __init__(self, logger, binary=None, webdriver_binary=None, webdriver_args=None,
port=None, env=None, kill_safari=False, **kwargs):
"""Creates a new representation of Safari. The `webdriver_binary`
argument gives the WebDriver binary to use for testing. (The browser
binary location cannot be specified, as Safari and SafariDriver are
coupled.) If `kill_safari` is True, then `Browser.stop` will stop Safari."""
super().__init__(logger,
binary,
webdriver_binary,
webdriver_args=webdriver_args,
port=None,
env=env)
if "/" not in webdriver_binary:
wd_path = find_executable(webdriver_binary)
else:
wd_path = webdriver_binary
self.safari_path = self._find_safari_executable(wd_path)
logger.debug("WebDriver executable path: %s" % wd_path)
logger.debug("Safari executable path: %s" % self.safari_path)
self.kill_safari = kill_safari
def _find_safari_executable(self, wd_path):
bundle_path, info = get_safari_info(wd_path)
exe = info.get("CFBundleExecutable")
if not isinstance(exe, str):
return None
exe_path = os.path.join(bundle_path, "Contents", "MacOS", exe)
if not os.path.isfile(exe_path):
return None
return exe_path
def make_command(self):
return [self.webdriver_binary, f"--port={self.port}"] + self.webdriver_args
def stop(self, force=False):
super().stop(force)
if self.kill_safari:
self.logger.debug("Going to stop Safari")
for proc in psutil.process_iter(attrs=["exe"]):
if (proc.info["exe"] is not None and
os.path.samefile(proc.info["exe"], self.safari_path)):
self.logger.debug("Stopping Safari %s" % proc.pid)
try:
proc.terminate()
try:
proc.wait(10)
except psutil.TimeoutExpired:
proc.kill()
proc.wait(10)
except psutil.NoSuchProcess:
pass
| 37.004831
| 118
| 0.623107
|
ac97b9df7afb18c061c736422e11baeecefd1d82
| 8,891
|
py
|
Python
|
test/test_adb.py
|
anuraagbaishya/uiautomator
|
e74de3bb579f623c66c47e5b3cbc1efd69180122
|
[
"MIT"
] | 1,794
|
2015-01-05T08:15:20.000Z
|
2022-03-29T01:06:31.000Z
|
test/test_adb.py
|
anuraagbaishya/uiautomator
|
e74de3bb579f623c66c47e5b3cbc1efd69180122
|
[
"MIT"
] | 217
|
2015-01-08T20:11:32.000Z
|
2022-03-05T08:41:30.000Z
|
test/test_adb.py
|
anuraagbaishya/uiautomator
|
e74de3bb579f623c66c47e5b3cbc1efd69180122
|
[
"MIT"
] | 621
|
2015-01-06T13:36:09.000Z
|
2022-03-30T08:19:29.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from mock import MagicMock, patch
import os
import subprocess
from uiautomator import Adb
class TestAdb(unittest.TestCase):
def setUp(self):
self.os_name = os.name
def tearDown(self):
os.name = self.os_name
def test_serial(self):
serial = "abcdef1234567890"
adb = Adb(serial)
self.assertEqual(adb.default_serial, serial)
adb.devices = MagicMock()
adb.devices.return_value = [serial, "123456"]
self.assertEqual(adb.device_serial(), serial)
def test_adb_from_env(self):
home_dir = '/android/home'
with patch.dict('os.environ', {'ANDROID_HOME': home_dir}):
with patch('os.path.exists') as exists:
exists.return_value = True
os.name = "posix" # linux
adb_obj = Adb()
adb_path = os.path.join(home_dir, "platform-tools", "adb")
self.assertEqual(adb_obj.adb(), adb_path)
exists.assert_called_once_with(adb_path)
self.assertEqual(adb_obj.adb(), adb_path)
# the second call will return the __adb_cmd directly
exists.assert_called_once_with(adb_path)
os.name = "nt" # linux
adb_obj = Adb()
adb_path = os.path.join(home_dir, "platform-tools", "adb.exe")
self.assertEqual(adb_obj.adb(), adb_path)
exists.return_value = False
with self.assertRaises(EnvironmentError):
Adb().adb()
def test_adb_from_find(self):
with patch.dict('os.environ', {}, clear=True):
with patch("distutils.spawn.find_executable") as find_executable:
find_executable.return_value = "/usr/bin/adb"
with patch("os.path.realpath") as realpath:
realpath.return_value = "/home/user/android/platform-tools/adb"
self.assertEqual(realpath.return_value, Adb().adb())
find_executable.assert_called_once_with("adb") # find_exectable should be called once
realpath.assert_called_once_with(find_executable.return_value)
realpath.return_value = find_executable.return_value
self.assertEqual(find_executable.return_value, Adb().adb())
find_executable.return_value = None
call_count = find_executable.call_count
with self.assertRaises(EnvironmentError):
Adb().adb()
self.assertEqual(call_count + 1, find_executable.call_count)
def test_devices(self):
adb = Adb()
adb.raw_cmd = MagicMock()
adb.raw_cmd.return_value.communicate.return_value = (b"List of devices attached \r\n014E05DE0F02000E\tdevice\r\n489328DKFL7DF\tdevice", b"")
self.assertEqual(adb.devices(), {"014E05DE0F02000E": "device", "489328DKFL7DF": "device"})
adb.raw_cmd.assert_called_once_with("devices")
adb.raw_cmd.return_value.communicate.return_value = (b"List of devices attached \n\r014E05DE0F02000E\tdevice\n\r489328DKFL7DF\tdevice", b"")
self.assertEqual(adb.devices(), {"014E05DE0F02000E": "device", "489328DKFL7DF": "device"})
adb.raw_cmd.return_value.communicate.return_value = (b"List of devices attached \r014E05DE0F02000E\tdevice\r489328DKFL7DF\tdevice", b"")
self.assertEqual(adb.devices(), {"014E05DE0F02000E": "device", "489328DKFL7DF": "device"})
adb.raw_cmd.return_value.communicate.return_value = (b"List of devices attached \n014E05DE0F02000E\tdevice\n489328DKFL7DF\tdevice", b"")
self.assertEqual(adb.devices(), {"014E05DE0F02000E": "device", "489328DKFL7DF": "device"})
adb.raw_cmd.return_value.communicate.return_value = (b"not match", "")
with self.assertRaises(EnvironmentError):
adb.devices()
def test_forward(self):
adb = Adb()
adb.cmd = MagicMock()
adb.forward(90, 91)
adb.cmd.assert_called_once_with("forward", "tcp:90", "tcp:91")
adb.cmd.return_value.wait.assert_called_once_with()
def test_adb_raw_cmd(self):
import subprocess
adb = Adb()
adb.adb = MagicMock()
adb.adb.return_value = "adb"
args = ["a", "b", "c"]
with patch("subprocess.Popen") as Popen:
os.name = "posix"
adb.raw_cmd(*args)
Popen.assert_called_once_with(["%s %s" % (adb.adb(), " ".join(args))], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with patch("subprocess.Popen") as Popen:
os.name = "nt"
adb.raw_cmd(*args)
Popen.assert_called_once_with([adb.adb()] + list(args), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def test_adb_cmd(self):
adb = Adb()
adb.device_serial = MagicMock()
adb.device_serial.return_value = "ANDROID_SERIAL"
adb.raw_cmd = MagicMock()
args = ["a", "b", "c"]
adb.cmd(*args)
adb.raw_cmd.assert_called_once_with("-s", "%s" % adb.device_serial(), *args)
adb.device_serial.return_value = "ANDROID SERIAL"
adb.raw_cmd = MagicMock()
args = ["a", "b", "c"]
adb.cmd(*args)
adb.raw_cmd.assert_called_once_with("-s", "'%s'" % adb.device_serial(), *args)
def test_adb_cmd_server_host(self):
adb = Adb(adb_server_host="localhost", adb_server_port=5037)
adb.adb = MagicMock()
adb.adb.return_value = "adb"
adb.device_serial = MagicMock()
adb.device_serial.return_value = "ANDROID_SERIAL"
args = ["a", "b", "c"]
with patch("subprocess.Popen") as Popen:
os.name = "nt"
adb.raw_cmd(*args)
Popen.assert_called_once_with(
[adb.adb()] + args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
adb = Adb(adb_server_host="test.com", adb_server_port=1000)
adb.adb = MagicMock()
adb.adb.return_value = "adb"
adb.device_serial = MagicMock()
adb.device_serial.return_value = "ANDROID_SERIAL"
args = ["a", "b", "c"]
with patch("subprocess.Popen") as Popen:
os.name = "posix"
adb.raw_cmd(*args)
Popen.assert_called_once_with(
[" ".join([adb.adb()] + ["-H", "test.com", "-P", "1000"] + args)],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
def test_device_serial(self):
with patch.dict('os.environ', {'ANDROID_SERIAL': "ABCDEF123456"}):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {"ABCDEF123456": "device"}
self.assertEqual(adb.device_serial(), "ABCDEF123456")
with patch.dict('os.environ', {'ANDROID_SERIAL': "ABCDEF123456"}):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {"ABCDEF123456": "device", "123456ABCDEF": "device"}
self.assertEqual(adb.device_serial(), "ABCDEF123456")
with patch.dict('os.environ', {'ANDROID_SERIAL': "HIJKLMN098765"}):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {"ABCDEF123456": "device", "123456ABCDEF": "device"}
self.assertEqual(adb.device_serial(), "HIJKLMN098765")
with patch.dict('os.environ', {}, clear=True):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {"ABCDEF123456": "device", "123456ABCDEF": "device"}
with self.assertRaises(EnvironmentError):
adb.device_serial()
with patch.dict('os.environ', {}, clear=True):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {"ABCDEF123456": "device"}
self.assertEqual(adb.device_serial(), "ABCDEF123456")
with self.assertRaises(EnvironmentError):
adb = Adb()
adb.devices = MagicMock()
adb.devices.return_value = {}
adb.device_serial()
def test_forward_list(self):
adb = Adb()
adb.version = MagicMock()
adb.version.return_value = ['1.0.31', '1', '0', '31']
adb.raw_cmd = MagicMock()
adb.raw_cmd.return_value.communicate.return_value = (b"014E05DE0F02000E tcp:9008 tcp:9008\r\n489328DKFL7DF tcp:9008 tcp:9008", b"")
self.assertEqual(adb.forward_list(), [['014E05DE0F02000E', 'tcp:9008', 'tcp:9008'], ['489328DKFL7DF', 'tcp:9008', 'tcp:9008']])
adb.version.return_value = ['1.0.29', '1', '0', '29']
with self.assertRaises(EnvironmentError):
adb.forward_list()
| 44.455
| 151
| 0.595321
|
b5463ae25365b58d1c06b2583efdd3d2a87ed0c7
| 332
|
py
|
Python
|
certificados/migrations/0002_remove_certificados_estudiante.py
|
Yursksf1/DG_pulls20
|
d80b6dc37a1bf20341572b8850c21467554ea84e
|
[
"MIT"
] | null | null | null |
certificados/migrations/0002_remove_certificados_estudiante.py
|
Yursksf1/DG_pulls20
|
d80b6dc37a1bf20341572b8850c21467554ea84e
|
[
"MIT"
] | null | null | null |
certificados/migrations/0002_remove_certificados_estudiante.py
|
Yursksf1/DG_pulls20
|
d80b6dc37a1bf20341572b8850c21467554ea84e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-04-07 15:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('certificados', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='certificados',
name='estudiante',
),
]
| 18.444444
| 47
| 0.596386
|
8d1f82bcef57e409b15a80e965d419521ef4d9fb
| 1,055
|
py
|
Python
|
exercicios/ex093.py
|
RicardoAugusto-RCD/exercicios_python
|
8a803f9cbc8b2ad0b5a6d61f0e7b6c2bc615b5ff
|
[
"MIT"
] | null | null | null |
exercicios/ex093.py
|
RicardoAugusto-RCD/exercicios_python
|
8a803f9cbc8b2ad0b5a6d61f0e7b6c2bc615b5ff
|
[
"MIT"
] | null | null | null |
exercicios/ex093.py
|
RicardoAugusto-RCD/exercicios_python
|
8a803f9cbc8b2ad0b5a6d61f0e7b6c2bc615b5ff
|
[
"MIT"
] | null | null | null |
# Create a program that manages a soccer player's goal record. The program reads the player's name and
# how many matches he played, then reads the number of goals scored in each match. In the end, everything is
# stored in a dictionary, including the total number of goals scored during the championship.
jogador = dict()
jogador['nome'] = str(input("Player's name? "))
jogador['gols'] = list()
jogador['total'] = 0
partidas = int(input(f'How many matches did {jogador["nome"]} play? '))
for contador in range(0, partidas):
golPartida = int(input(f'How many goals in match {contador + 1}? '))
jogador['gols'].append(golPartida)
jogador['total'] += golPartida
print('—' * 50)
print(f'{jogador}')
print('—' * 50)
for k, v in jogador.items():
print(f'Field {k} has value {v}')
print('—' * 50)
print(f'Player {jogador["nome"]} played {partidas} matches')
for contador in range(0, partidas):
print(f'→ In match {contador + 1} he scored {jogador["gols"][contador]} goals')
print(f'That was a total of {jogador["total"]} goals')
| 34.032258
| 114
| 0.686256
|
cafbee031573ee989a340babebc18faaa1596f70
| 7,401
|
py
|
Python
|
t3f/approximate.py
|
towadroid/t3f
|
1ce4f6985037a6aac34dd64fbe68d75a9dc1474f
|
[
"MIT"
] | 217
|
2017-01-19T17:56:28.000Z
|
2022-03-04T08:05:55.000Z
|
t3f/approximate.py
|
Dean-Go-kr/t3f
|
b9694eb2aa3ada4e87b5e5ea6027770d669ce1ff
|
[
"MIT"
] | 155
|
2017-03-06T11:03:18.000Z
|
2022-03-21T17:52:54.000Z
|
t3f/approximate.py
|
Dean-Go-kr/t3f
|
b9694eb2aa3ada4e87b5e5ea6027770d669ce1ff
|
[
"MIT"
] | 61
|
2017-03-07T06:24:16.000Z
|
2022-03-29T09:17:23.000Z
|
import itertools
import numpy as np
import tensorflow as tf
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import decompositions
from t3f import batch_ops
def add_n(tt_objects, max_tt_rank, name='t3f_approximate_add_n'):
"""Adds a bunch of TT-object and round after each summation.
This version implements a slow-to-compile but fast-to-execute (at least on
a GPU) version: summing in a binary tree order.
I.e. it uses the following idea:
round(a + b + c + d) ~= round(round(a + b) + round(c + d))
and so is able to compute the answer in log(N) parallel adds/rounds.
Args:
tt_objects: a list of `TensorTrainBase` objects.
max_tt_rank: a number, TT-rank for each individual rounding.
name: string, name of the Op.
Returns:
Object of the same type as each input.
See Also:
t3f.approximate.reduce_sum_batch
"""
list_of_cores_lists = [tt.tt_cores for tt in tt_objects]
all_cores = tuple(itertools.chain.from_iterable(list_of_cores_lists))
with tf.name_scope(name):
prev_level = tt_objects
while len(prev_level) > 1:
next_level = []
for i in range(0, len(prev_level), 2):
curr = prev_level[i]
if i + 1 < len(prev_level):
curr = decompositions.round(curr + prev_level[i + 1], max_tt_rank)
next_level.append(curr)
prev_level = next_level
return prev_level[0]
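# Illustrative usage sketch (not part of the original module); a, b, c and d
# are assumed to be compatible t3f TensorTrain objects:
#   s = add_n([a, b, c, d], max_tt_rank=8)
# approximates round(a + b + c + d, 8) but is computed as
#   round(round(a + b, 8) + round(c + d, 8), 8)
# so only ceil(log2(4)) = 2 sequential rounding levels are required.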
def reduce_sum_batch(tt_batch, max_tt_rank, coef=None,
name='t3f_approximate_reduce_sum_batch'):
"""Sum of all TT-objects in the batch with rounding after each summation.
This version implements a slow-to-compile but fast-to-execute (at least on
a GPU) version: summing in a binary tree order.
I.e. it uses the following idea:
round(a + b + c + d) ~= round(round(a + b) + round(c + d))
and so is able to compute the answer in log(batch_size) parallel adds/rounds.
Args:
tt_batch: `TensorTrainBatch` object.
max_tt_rank: a number, TT-rank for each individual rounding.
coef: tf.Tensor, its shape is either batch_size, or batch_size x N.
If coef is a vector of size batch_size, the result will
be (approximate) weighted sum.
If coef is a matrix of shape batch_size x N, the result will be
a `TensorTrainBatch` res containing N TT-object such that
res[j] ~= sum_i tt_batch[i] coef[i, j]
name: string, name of the Op.
Returns:
If coefficients are absent or is a vector of numbers, returns
a `TensorTrain` object representing (approximate) element-wise sum of all
the objects in the batch, weighted if coef is provided.
If coefficients is a matrix, returns `TensorTrainBatch`.
See Also:
t3f.approximate.add_n
"""
ndims = tt_batch.ndims()
left_tt_rank_dim = tt_batch.left_tt_rank_dim
right_tt_rank_dim = tt_batch.right_tt_rank_dim
shape = tt_batch.get_raw_shape()
dtype = tt_batch.dtype
all_tensors = tt_batch.tt_cores
if coef is not None:
all_tensors += (coef, )
with tf.name_scope(name):
is_batch_output = False
if coef is not None:
coef = tf.convert_to_tensor(coef, dtype=tt_batch.dtype)
if len(coef.get_shape()) == 1:
tt_batch = batch_ops.multiply_along_batch_dim(tt_batch, coef)
elif len(coef.get_shape()) == 2:
is_batch_output = True
output_size = coef.get_shape().as_list()[1]
# Coef is of size batch_size x N, need to duplicate the batch
# dimension xN.
if coef.shape[0] != tt_batch.batch_size:
raise ValueError('If coef is a matrix, it should be of shape '
'batch_size x N, got %d x %d instead '
'(batch size is %d).' % (coef.shape[0], coef.shape[1],
tt_batch.batch_size))
tt_batch_cores = []
for core_idx in range(ndims):
curr_core = tt_batch.tt_cores[core_idx]
curr_shape = curr_core.get_shape().as_list()
new_shape = np.insert(curr_shape, 1, 1)
tiling = np.ones(len(new_shape), dtype=int)
tiling[1] = output_size
curr_core = tf.tile(tf.reshape(curr_core, new_shape), tiling)
if core_idx == 0:
# Multiply the first TT-core by the provided coefficients.
# TODO: add t3f.utils.expands_dims_like(coef, curr_core)
shaped_coef = coef
for _ in range(len(curr_core.get_shape()) - len(coef.shape)):
shaped_coef = tf.expand_dims(shaped_coef, -1)
curr_core = curr_core * shaped_coef
# Merge the first two dimensions back into one.
raveled_shape = np.array(curr_shape).copy()
raveled_shape[0] *= output_size
curr_core = tf.reshape(curr_core, raveled_shape)
tt_batch_cores.append(curr_core)
tt_batch = TensorTrainBatch(tt_batch_cores, shape,
tt_batch.get_tt_ranks())
else:
raise ValueError('Coef cannot be more than 2-d.')
if not is_batch_output:
output_size = 1
prev_level = tt_batch
while prev_level.batch_size > output_size:
current_level_cores = []
for core_idx in range(ndims):
curr_orig_core = prev_level.tt_cores[core_idx]
if is_batch_output:
# Split the first dimension into batch_size x N
unraveled_shape = curr_orig_core.get_shape().as_list()
unraveled_shape = np.array(unraveled_shape).copy()
unraveled_shape[0] /= output_size
unraveled_shape = np.insert(unraveled_shape, 1, output_size)
curr_orig_core = tf.reshape(curr_orig_core, unraveled_shape)
a_core = curr_orig_core[::2]
b_core = curr_orig_core[1::2]
if a_core.get_shape()[0] > b_core.get_shape()[0]:
# Odd number of elements in the batch, will have to add dummy
# TT-object with the tt-cores filled with zeros.
zeros_shape = b_core.get_shape().as_list()
zeros_shape[0] = 1
zeros = tf.zeros(zeros_shape, dtype)
b_core = tf.concat((b_core, zeros), axis=0)
if is_batch_output:
# Merge the first two dimensions back into one.
a_core_shape = a_core.get_shape().as_list()
a_core_shape[0] = a_core_shape[0] * a_core_shape[1]
a_core_shape = np.delete(a_core_shape, 1)
a_core = tf.reshape(a_core, a_core_shape)
b_core_shape = b_core.get_shape().as_list()
b_core_shape[0] = b_core_shape[0] * b_core_shape[1]
b_core_shape = np.delete(b_core_shape, 1)
b_core = tf.reshape(b_core, b_core_shape)
if core_idx == 0:
curr_sum_core = tf.concat((a_core, b_core), axis=right_tt_rank_dim)
elif core_idx == ndims - 1:
curr_sum_core = tf.concat((a_core, b_core), axis=left_tt_rank_dim)
else:
zeros = tf.zeros(b_core.get_shape(), dtype)
upper = tf.concat((a_core, zeros), axis=right_tt_rank_dim)
lower = tf.concat((zeros, b_core), axis=right_tt_rank_dim)
curr_sum_core = tf.concat((upper, lower), axis=left_tt_rank_dim)
current_level_cores.append(curr_sum_core)
current_level = TensorTrainBatch(current_level_cores, shape)
prev_level = decompositions.round(current_level, max_tt_rank)
if is_batch_output:
return prev_level
else:
return prev_level[0]
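# Illustrative usage sketch (not part of the original module); the shapes are
# assumptions. Given a TensorTrainBatch `tt` with batch_size 4 and a 4 x 2
# coefficient matrix `coef`:
#   res = reduce_sum_batch(tt, max_tt_rank=8, coef=coef)
# returns a TensorTrainBatch of 2 elements with res[j] ~= sum_i coef[i, j] * tt[i],
# while omitting coef (or passing a length-4 vector) collapses the batch to a
# single, optionally weighted, TensorTrain.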
| 41.346369
| 81
| 0.652615
|
68b1198125d7b425d2e9a177623ae9658ce6deb9
| 2,204
|
py
|
Python
|
src/darjeeling/coverage/coveragepy.py
|
UC-Mind-Lab/Darjeeling
|
61dc4fd3c62f43bb7fcfafce6f964f82144e293e
|
[
"Apache-2.0"
] | null | null | null |
src/darjeeling/coverage/coveragepy.py
|
UC-Mind-Lab/Darjeeling
|
61dc4fd3c62f43bb7fcfafce6f964f82144e293e
|
[
"Apache-2.0"
] | 1
|
2021-04-16T16:53:05.000Z
|
2021-07-23T15:21:52.000Z
|
src/darjeeling/coverage/coveragepy.py
|
UC-Mind-Lab/Darjeeling
|
61dc4fd3c62f43bb7fcfafce6f964f82144e293e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__all__ = ('CoveragePyCollector', 'CoveragePyCollectorConfig')
from typing import Any, ClassVar, Dict, Mapping, Optional, Set, FrozenSet
import json
import typing
import attr
from .collector import CoverageCollector, CoverageCollectorConfig
from ..core import FileLineSet
if typing.TYPE_CHECKING:
from ..container import ProgramContainer
from ..environment import Environment
from ..program import ProgramDescription
@attr.s(frozen=True)
class CoveragePyCollectorConfig(CoverageCollectorConfig):
NAME: ClassVar[str] = 'coverage.py'
@classmethod
def from_dict(cls,
dict_: Mapping[str, Any],
dir_: Optional[str] = None
) -> 'CoverageCollectorConfig':
assert dict_['type'] == cls.NAME
return CoveragePyCollectorConfig()
def build(self,
environment: 'Environment',
program: 'ProgramDescription',
ban_files: Optional[FrozenSet[str]]
) -> 'CoverageCollector':
return CoveragePyCollector(program=program)
@attr.s(frozen=True, slots=True, auto_attribs=True)
class CoveragePyCollector(CoverageCollector):
program: 'ProgramDescription'
def _read_report_json(self, json_: Mapping[str, Any]) -> FileLineSet:
filename_to_lines: Dict[str, Set[int]] = {}
filename_to_json_report = json_['files']
for filename, file_json in filename_to_json_report.items():
filename_to_lines[filename] = set(file_json['executed_lines'])
return FileLineSet(filename_to_lines)
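# Illustrative sketch (not part of the original module) of the report shape
# _read_report_json expects, matching coverage.py's `coverage json` output:
#   {"files": {"pkg/mod.py": {"executed_lines": [1, 2, 7], ...}, ...}}
# which is turned into FileLineSet({"pkg/mod.py": {1, 2, 7}}).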
def _read_report_text(self, text: str) -> FileLineSet:
json_ = json.loads(text)
return self._read_report_json(json_)
def _extract(self, container: 'ProgramContainer') -> FileLineSet:
files = container.filesystem
shell = container.shell
temporary_filename = files.mktemp()
command = (f'coverage json -o {temporary_filename} '
'--omit="tests/*" && coverage erase')
shell.check_call(command, cwd=self.program.source_directory)
report_text = files.read(temporary_filename)
return self._read_report_text(report_text)
| 34.984127
| 74
| 0.678312
|
4939ca68b9e18792c6f2aeb8b6247720c917ac7e
| 4,120
|
py
|
Python
|
tests/test_multidimensional_problem.py
|
dayyass/calculus_of_variations
|
d016c4824bfb5595569b156fd38f2a841c92d3ec
|
[
"Apache-2.0"
] | 10
|
2020-11-16T21:29:31.000Z
|
2021-09-05T15:02:55.000Z
|
tests/test_multidimensional_problem.py
|
dayyass/calculus_of_variations
|
d016c4824bfb5595569b156fd38f2a841c92d3ec
|
[
"Apache-2.0"
] | 15
|
2020-12-17T19:28:26.000Z
|
2021-08-06T12:01:22.000Z
|
tests/test_multidimensional_problem.py
|
dayyass/calculus_of_variations
|
d016c4824bfb5595569b156fd38f2a841c92d3ec
|
[
"Apache-2.0"
] | 1
|
2021-02-12T17:39:12.000Z
|
2021-02-12T17:39:12.000Z
|
import sys
import unittest
from parameterized import parameterized_class
# TODO: fix it
sys.path.append("./")
from calculus_of_variations import MultidimensionalSolver
from calculus_of_variations.utils import E, I, exp, t
C1 = MultidimensionalSolver.C1
C2 = MultidimensionalSolver.C2
C3 = MultidimensionalSolver.C3
C4 = MultidimensionalSolver.C4
def make_solution(L: str, t0: str, t1: str, x1_0: str, x1_1: str, x2_0: str, x2_1: str):
solution = MultidimensionalSolver(
L=L, t0=t0, t1=t1, x1_0=x1_0, x1_1=x1_1, x2_0=x2_0, x2_1=x2_1
)
solution.solve(verbose=False)
return solution
test_case_1 = {
"solution": make_solution(
L="x1_diff**2 + x2_diff**2",
t0="0",
t1="1",
x1_0="0",
x1_1="1",
x2_0="0",
x2_1="1",
),
"general_solution_1": C1 + C2 * t,
"general_solution_2": C3 + C4 * t,
"coefficients": {C1: 0, C2: 1, C3: 0, C4: 1},
"particular_solution_1": t,
"particular_solution_2": t,
"extrema_value": 2,
}
test_case_2 = {
"solution": make_solution(
L="x1_diff ** 2 + x2_diff ** 2",
t0="0",
t1="1",
x1_0="0",
x1_1="1",
x2_0="1",
x2_1="E",
),
"general_solution_1": C1 + C2 * t,
"general_solution_2": C3 + C4 * t,
"coefficients": {C1: 0, C2: 1, C3: 1, C4: E - 1},
"particular_solution_1": t,
"particular_solution_2": t * (E - 1) + 1,
"extrema_value": 1 + (E - 1) ** 2,
}
test_case_3 = {
"solution": make_solution(
L="x2 ** 2 + x1_diff ** 2 + x2_diff ** 2",
t0="0",
t1="1",
x1_0="0",
x1_1="1",
x2_0="1",
x2_1="E",
),
"general_solution_1": C1 + C2 * t,
"general_solution_2": C3 * exp(-t) + C4 * exp(t),
"coefficients": {C1: 0, C2: 1, C3: 0, C4: 1},
"particular_solution_1": t,
"particular_solution_2": exp(t),
"extrema_value": exp(2),
}
test_case_4 = {
"solution": make_solution(
L="x1_diff * x2_diff - x1 * x2",
t0="0",
t1="pi / 2",
x1_0="0",
x1_1="1",
x2_0="1",
x2_1="0",
),
"general_solution_1": 2 * I * C1 * exp(I * t) + 2 * I * C2 * exp(-I * t),
"general_solution_2": C3 * exp(I * t) + C4 * exp(-I * t),
"coefficients": {C1: -1 / 4, C2: 1 / 4, C3: 1 / 2, C4: 1 / 2},
"particular_solution_1": -I * exp(I * t) / 2 + I * exp(-I * t) / 2,
"particular_solution_2": exp(I * t) / 2 + exp(-I * t) / 2,
"extrema_value": -1,
}
test_case_5 = {
"solution": make_solution(
L="2 * x1 + x2 ** 2 + x1_diff ** 2 + x2_diff ** 2",
t0="0",
t1="1",
x1_0="0",
x1_1="0.5",
x2_0="1",
x2_1="exp(-1)",
),
"general_solution_1": C1 + C2 * t + t ** 2 / 2,
"general_solution_2": C3 * exp(-t) + C4 * exp(t),
"coefficients": {C1: 0, C2: 0, C3: 1, C4: 0},
"particular_solution_1": t ** 2 / 2,
"particular_solution_2": exp(-t),
"extrema_value": 5 / 3 - exp(-2),
}
@parameterized_class([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5])
class TestSolver(unittest.TestCase):
def test_general_solution_1(self):
self.assertAlmostEqual(
self.solution.general_solution_1, self.general_solution_1
)
def test_general_solution_2(self):
self.assertAlmostEqual(
self.solution.general_solution_2, self.general_solution_2
)
def test_coefficients(self):
for coef in self.coefficients.keys():
self.assertAlmostEqual(
self.solution.coefficients[coef], self.coefficients[coef]
)
def test_particular_solution_1(self):
self.assertAlmostEqual(
self.solution.particular_solution_1, self.particular_solution_1
)
def test_particular_solution_2(self):
self.assertAlmostEqual(
self.solution.particular_solution_2, self.particular_solution_2
)
def test_extrema_value(self):
self.assertAlmostEqual(self.solution.extrema_value, self.extrema_value)
if __name__ == "__main__":
unittest.main()
| 28.219178
| 88
| 0.571359
|
7ea68435dfadf5ce8fad3a38ff9f1920cbbf0587
| 95,693
|
py
|
Python
|
testsuite/driver/testlib.py
|
nrdmn/ghc-1
|
26a928b8fdb1b4ccb75e8edb620b8cf12cb38621
|
[
"BSD-3-Clause"
] | 1
|
2022-01-02T23:00:33.000Z
|
2022-01-02T23:00:33.000Z
|
testsuite/driver/testlib.py
|
abolfazl11fdfdf/ghc
|
bd877edd9499a351db947cd51ed583872b2facdf
|
[
"BSD-3-Clause"
] | null | null | null |
testsuite/driver/testlib.py
|
abolfazl11fdfdf/ghc
|
bd877edd9499a351db947cd51ed583872b2facdf
|
[
"BSD-3-Clause"
] | 2
|
2021-10-11T09:59:57.000Z
|
2022-01-02T22:59:50.000Z
|
# coding=utf8
#
# (c) Simon Marlow 2002
#
import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import Path, PurePath
import collections
import subprocess
from testglobals import config, ghc_env, default_testopts, brokens, t, \
TestRun, TestResult, TestOptions, PerfMetric
from testutil import strip_quotes, lndir, link_or_copy_file, passed, \
failBecause, testing_metrics, \
PassFail, memoize
from term_color import Color, colored
import testutil
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange, PerfStat, MetricOracles
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
from my_typing import *
global pool_sema
if config.use_threads:
import threading
pool_sema = threading.BoundedSemaphore(value=config.threads)
global wantToStop
wantToStop = False
# I have no idea what the type of this is
global thisdir_settings
thisdir_settings = None # type: ignore
def stopNow() -> None:
global wantToStop
wantToStop = True
def stopping() -> bool:
return wantToStop
_all_ways = None
def get_all_ways() -> Set[WayName]:
global _all_ways
if _all_ways is None:
_all_ways = set(config.way_flags.keys())
return _all_ways
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).
global testopts_local
if config.use_threads:
testopts_local = threading.local()
else:
class TestOpts_Local:
pass
testopts_local = TestOpts_Local() # type: ignore
def getTestOpts() -> TestOptions:
return testopts_local.x
def setLocalTestOpts(opts: TestOptions) -> None:
global testopts_local
testopts_local.x = opts
def isCompilerStatsTest() -> bool:
opts = getTestOpts()
return bool(opts.is_compiler_stats_test)
def isStatsTest() -> bool:
opts = getTestOpts()
return opts.is_stats_test
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
global thisdir_settings
thisdir_settings = [thisdir_settings, f]
# -----------------------------------------------------------------------------
# Canned setup functions for common cases. eg. for a test you might say
#
# test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
# test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()
def normal( name, opts ):
return;
def skip( name, opts ):
opts.skip = True
def expect_fail( name, opts ):
# The compiler, testdriver, OS or platform is missing a certain
# feature, and we don't plan to or can't fix it now or in the
# future.
opts.expect = 'fail';
def no_lint( name, opts ):
"""Disable Core, STG and Cmm lints. Useful when testing compiler perf."""
opts.compiler_always_flags = \
[opt for opt in opts.compiler_always_flags \
if opt not in ['-dcore-lint', '-dstg-lint', '-dcmm-lint']]
def reqlib( lib ):
return lambda name, opts, l=lib: _reqlib (name, opts, l )
def stage1(name, opts):
# See Note [Why is there no stage1 setup function?]
framework_fail(name, 'stage1 setup function does not exist',
'add your test to testsuite/tests/stage1 instead')
# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {} # type: Dict[str, bool]
def have_library(lib: str) -> bool:
""" Test whether the given library is available """
if lib in have_lib_cache:
got_it = have_lib_cache[lib]
else:
cmd = strip_quotes(config.ghc_pkg)
cmd_line = [cmd, '--no-user-package-db']
for db in config.test_package_db:
cmd_line.append("--package-db="+db)
cmd_line.extend(['describe', lib])
print(cmd_line)
p = subprocess.Popen(cmd_line,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=ghc_env)
# read from stdout and stderr to avoid blocking due to
# buffers filling
p.communicate()
r = p.wait()
got_it = r == 0
have_lib_cache[lib] = got_it
return got_it
def _reqlib( name, opts, lib ):
if not have_library(lib):
opts.expect = 'missing-lib'
else:
opts.extra_hc_opts = opts.extra_hc_opts + ' -package ' + lib + ' '
for db in config.test_package_db:
opts.extra_hc_opts = opts.extra_hc_opts + ' -package-db=' + db + ' '
def req_haddock( name, opts ):
if not config.haddock:
opts.expect = 'missing-lib'
def req_profiling( name, opts ):
'''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
if not config.have_profiling:
opts.expect = 'fail'
def req_shared_libs( name, opts ):
if not config.have_shared_libs:
opts.expect = 'fail'
def req_interp( name, opts ):
if not config.have_interp:
opts.expect = 'fail'
def req_rts_linker( name, opts ):
if not config.have_RTS_linker:
opts.expect = 'fail'
def req_th( name, opts ):
"""
Mark a test as requiring TemplateHaskell. In addition to having interpreter
support, currently this means that we don't run the test in the profasm way
when GHC is dynamically linked, since we can't load profiled objects in this
case.
"""
req_interp(name, opts)
if ghc_dynamic():
return _omit_ways(name, opts, ['profasm', 'profthreaded'])
def req_smp( name, opts ):
if not config.have_smp:
opts.expect = 'fail'
def ignore_stdout(name, opts):
opts.ignore_stdout = True
def ignore_stderr(name, opts):
opts.ignore_stderr = True
def combined_output( name, opts ):
opts.combined_output = True
def use_specs( specs ):
"""
use_specs allows one to override files based on suffixes. e.g. 'stdout',
'stderr', 'asm', 'prof.sample', etc.
Example use_specs({'stdout' : 'prof002.stdout'}) to make the test re-use
prof002.stdout.
Full Example:
test('T5889', [only_ways(['normal']), req_profiling,
extra_files(['T5889/A.hs', 'T5889/B.hs']),
use_specs({'stdout' : 'prof002.stdout'})],
multimod_compile,
['A B', '-O -prof -fno-prof-count-entries -v0'])
"""
assert isinstance(specs, dict)
return lambda name, opts, s=specs: _use_specs( name, opts, s )
def _use_specs( name, opts, specs ):
opts.extra_files.extend(specs.values ())
opts.use_specs = specs
# -----
def _lint_ways(name: TestName, ways: List[WayName]) -> None:
""" Check that all of the ways in a list are valid. """
unknown_ways = [way
for way in ways
if way not in get_all_ways()
]
if len(unknown_ways) > 0:
framework_fail(name, None, 'Unknown ways: %s' % (unknown_ways,))
def expect_fail_for( ways: List[WayName] ):
def helper( name: TestName, opts ):
_lint_ways(name, ways)
opts.expect_fail_for = ways
return helper
def expect_broken( bug: IssueNumber ):
"""
    This test is expected not to work due to the indicated issue number.
"""
def helper( name: TestName, opts ):
record_broken(name, opts, bug)
opts.expect = 'fail';
return helper
def expect_broken_for( bug: IssueNumber, ways: List[WayName] ):
def helper( name: TestName, opts ):
_lint_ways(name, ways)
record_broken(name, opts, bug)
opts.expect_fail_for = ways
return helper
def record_broken(name: TestName, opts, bug: IssueNumber):
me = (bug, opts.testdir, name)
if not me in brokens:
brokens.append(me)
def _expect_pass(way):
# Helper function. Not intended for use in .T files.
opts = getTestOpts()
return opts.expect == 'pass' and way not in opts.expect_fail_for
# -----
def fragile( bug: IssueNumber ):
"""
Indicates that failures of this test should be ignored due to fragility
documented in the given ticket.
"""
def helper( name, opts, bug=bug ):
record_broken(name, opts, bug)
opts.fragile_ways += config.way_flags.keys()
return helper
def fragile_for( bug: IssueNumber, ways: List[WayName] ):
"""
Indicates that failures of this test should be ignored due to fragility in
the given test ways as documented in the given ticket.
"""
def helper( name: TestName, opts ):
_lint_ways(name, ways)
record_broken(name, opts, bug)
opts.fragile_ways += ways
return helper
# -----
def omit_ways( ways: List[WayName] ):
return lambda name, opts: _omit_ways(name, opts, ways)
def _omit_ways( name: TestName, opts, ways: List[WayName] ):
_lint_ways(name, ways)
opts.omit_ways += ways
# -----
def only_ways( ways: List[WayName] ):
def helper( name: TestName, opts ):
_lint_ways(name, ways)
opts.only_ways = ways
return helper
# -----
def extra_ways( ways: List[WayName] ):
def helper( name: TestName, opts ):
_lint_ways(name, ways)
opts.extra_ways = ways
return helper
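# Illustrative usage of the way-selection helpers above (the test names are
# made up; the way names are the usual testsuite ways):
#
#   test('T9001', only_ways(['normal']), compile_and_run, [''])
#   test('T9002', omit_ways(['ghci']), compile_and_run, [''])
#   test('T9003', extra_ways(['threaded2']), compile_and_run, [''])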
# -----
def set_stdin( file ):
return lambda name, opts, f=file: _set_stdin(name, opts, f);
def _set_stdin( name, opts, f ):
opts.stdin = f
# -----
def exit_code( val: int ):
return lambda name, opts, v=val: _exit_code(name, opts, v);
def _exit_code( name, opts, v ):
opts.exit_code = v
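# Illustrative usage (hypothetical test name and file): expect exit code 2 and
# feed the program's stdin from a file copied into the test directory:
#
#   test('T9004', [exit_code(2), set_stdin('T9004-input.txt')],
#        compile_and_run, [''])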
def signal_exit_code( val: int ):
if opsys('solaris2'):
return exit_code( val )
else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves in the same way; at least the
        # Mac OS X builder's behaviour suggests this.
return exit_code( val+128 )
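# Worked example: on Linux a process killed by SIGSEGV (signal 11) exits with
# code 128 + 11 = 139, so a test expecting a segfault would use (hypothetical
# test name):
#
#   test('T9005', [signal_exit_code(11)], compile_and_run, [''])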
# -----
def compile_timeout_multiplier( val: float ):
return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
def _compile_timeout_multiplier( name, opts, v ):
opts.compile_timeout_multiplier = v
def run_timeout_multiplier( val: float ):
return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
def _run_timeout_multiplier( name, opts, v ):
opts.run_timeout_multiplier = v
# -----
def extra_run_opts( val ):
return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
def _extra_run_opts( name, opts, v ):
opts.extra_run_opts += " " + v
# -----
def extra_hc_opts( val ):
return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
def _extra_hc_opts( name, opts, v ):
opts.extra_hc_opts += " " + v
# -----
def extra_clean( files ):
# TODO. Remove all calls to extra_clean.
return lambda _name, _opts: None
def extra_files(files):
return lambda name, opts: _extra_files(name, opts, files)
def _extra_files(name, opts, files):
opts.extra_files.extend(files)
# -----
# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
# - 'all', in which case all 3 possible metrics are collected and compared.
# - The specific metric one wants to use in the test.
# - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all',deviation=20):
return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
def collect_stats(metric='all', deviation=20):
return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
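# Illustrative usage (hypothetical test names; 'bytes allocated' is one of the
# usual metrics, see testing_metrics()):
#
#   test('T9006', [collect_stats('bytes allocated', 5)], compile_and_run, ['-O'])
#   test('T9007', [collect_compiler_stats('bytes allocated', 10)], compile, [''])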
# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_test=False):
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
failBecause('This test has an invalid name.')
# Normalize metrics to a list of strings.
if isinstance(metrics, str):
if metrics == 'all':
metrics = testing_metrics()
else:
metrics = [metrics]
opts.is_stats_test = True
if is_compiler_stats_test:
opts.is_compiler_stats_test = True
tag = 'compile_time'
else:
tag = 'runtime'
# Compiler performance numbers change when debugging is on, making the results
# useless and confusing. Therefore, skip if debugging is on.
if config.compiler_debugged and is_compiler_stats_test:
opts.skip = True
for metric_name in metrics:
metric = '{}/{}'.format(tag, metric_name)
def baselineByWay(way, target_commit, metric=metric):
return Perf.baseline_metric( \
target_commit, name, config.test_env, metric, way, \
config.baseline_commit )
opts.stats_range_fields[metric] = MetricOracles(baseline=baselineByWay,
deviation=deviation)
# -----
def when(b: bool, f):
    # When config.list_broken is on, we want to see all expect_broken calls,
    # so we always do f
if b or config.list_broken:
return f
else:
return normal
def unless(b: bool, f):
return when(not b, f)
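# Illustrative usage (hypothetical test names, using the `skip` setup function
# defined elsewhere in this module):
#
#   test('T9008', [when(opsys('mingw32'), skip)], compile_and_run, [''])
#   test('T9009', [unless(have_dynamic(), skip)], compile, [''])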
def doing_ghci() -> bool:
return 'ghci' in config.run_ways
def ghc_dynamic() -> bool:
return config.ghc_dynamic
# Symbols have a leading underscore
def leading_underscore() -> bool:
return config.leading_underscore
def fast() -> bool:
return config.speed == 2
def platform( plat: str ) -> bool:
return config.platform == plat
KNOWN_OPERATING_SYSTEMS = set([
'mingw32',
'freebsd',
'openbsd',
'aix',
'linux',
'darwin',
'solaris2',
])
def opsys( os: str ) -> bool:
assert os in KNOWN_OPERATING_SYSTEMS
return config.os == os
def arch( arch: str ) -> bool:
return config.arch == arch
def wordsize( ws: int ) -> bool:
return config.wordsize == str(ws)
def msys( ) -> bool:
return config.msys
def cygwin( ) -> bool:
return config.cygwin
def have_vanilla( ) -> bool:
return config.have_vanilla
def have_ncg( ) -> bool:
return config.have_ncg
def have_dynamic( ) -> bool:
return config.have_dynamic
def have_profiling( ) -> bool:
return config.have_profiling
def in_tree_compiler( ) -> bool:
return config.in_tree_compiler
def unregisterised( ) -> bool:
return config.unregisterised
def compiler_profiled( ) -> bool:
return config.compiler_profiled
def compiler_debugged( ) -> bool:
return config.compiler_debugged
def have_gdb( ) -> bool:
return config.have_gdb
def have_readelf( ) -> bool:
return config.have_readelf
def have_fast_bignum( ) -> bool:
return config.have_fast_bignum
def have_slow_bignum( ) -> bool:
return not(have_fast_bignum())
def llvm_build ( ) -> bool:
return config.ghc_built_by_llvm
def have_thread_sanitizer( ) -> bool:
return config.have_thread_sanitizer
# ---
# Note [Measuring residency]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Residency (peak_megabytes_allocated and max_bytes_used) is sensitive
# to when the major GC runs, which makes it inherently inaccurate.
# Sometimes an innocuous change somewhere can shift things around such
# that the samples occur at a different time, and the residency
# appears to change (up or down) when the underlying profile hasn't
# really changed. To further minimize this effect we run with a single
# generation (meaning we get a residency sample on every GC) with a small
# allocation area (as suggested in #17387). That's what +RTS -h -i0 will do.
# If you find that a test is flaky, sampling frequency can be adjusted by
# shrinking the allocation area (+RTS -A64k, for example).
#
# However, please don't just ignore changes in residency. If you see
# a change in one of these figures, please check whether it is real or
# not as follows:
#
# * Run the test with old and new compilers, adding +RTS -h -i0.001
# (you don't need to compile anything for profiling or enable profiling
# libraries to get a heap profile).
# * view the heap profiles, read off the maximum residency. If it has
# really changed, then you know there's an issue.
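#
# A concrete (illustrative) way to do that check, assuming the test program is
# T9010.hs and `ghc-old`/`ghc-new` are the two compilers being compared:
#
#   $ ghc-old T9010.hs -O -rtsopts -o T9010-old
#   $ ./T9010-old +RTS -h -i0.001 -RTS && hp2ps T9010-old.hp
#
# then repeat with ghc-new and compare the maximum residency reported in the
# two heap profiles.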
RESIDENCY_OPTS = '+RTS -A256k -i0 -h -RTS'
# See Note [Measuring residency].
def collect_runtime_residency(tolerance_pct: float):
return [
collect_stats(['peak_megabytes_allocated', 'max_bytes_used'], tolerance_pct),
extra_run_opts(RESIDENCY_OPTS),
# The nonmoving collector does not support -G1
omit_ways([WayName(name) for name in ['nonmoving', 'nonmoving_thr', 'nonmoving_thr_ghc']])
]
# See Note [Measuring residency].
def collect_compiler_residency(tolerance_pct: float):
return [
collect_compiler_stats(['peak_megabytes_allocated', 'max_bytes_used'], tolerance_pct),
extra_hc_opts(RESIDENCY_OPTS),
# The nonmoving collector does not support -G1
omit_ways([WayName('nonmoving_thr_ghc')])
]
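# Illustrative usage (hypothetical test name): a 2% tolerance on the residency
# metrics, plus the RTS flags and way restrictions set up above:
#
#   test('T9011', [collect_runtime_residency(2)], compile_and_run, ['-O'])
#
# collect_compiler_residency is used the same way for tests that measure the
# compiler itself.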
# ---
def high_memory_usage(name, opts):
opts.alone = True
# ThreadSanitizer significantly increases memory footprint; skip
if have_thread_sanitizer():
opts.skip = True
# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
opts.alone = True
# ---
def literate( name, opts ):
opts.literate = True
def c_src( name, opts ):
opts.c_src = True
def objc_src( name, opts ):
opts.objc_src = True
def objcpp_src( name, opts ):
opts.objcpp_src = True
def cmm_src( name, opts ):
opts.cmm_src = True
def outputdir( odir ):
return lambda name, opts, d=odir: _outputdir(name, opts, d)
def _outputdir( name, opts, odir ):
opts.outputdir = odir;
# ----
def pre_cmd( cmd ):
return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
def _pre_cmd( name, opts, cmd ):
opts.pre_cmd = cmd
# ----
def cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
def _cmd_prefix( name, opts, prefix ):
opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
# ----
def cmd_wrapper( fun ):
return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
def _cmd_wrapper( name, opts, fun ):
opts.cmd_wrapper = fun
# ----
def compile_cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
def _compile_cmd_prefix( name, opts, prefix ):
opts.compile_cmd_prefix = prefix
# ----
def check_stdout( f ):
return lambda name, opts, f=f: _check_stdout(name, opts, f)
def _check_stdout( name, opts, f ):
opts.check_stdout = f
def no_check_hp(name, opts):
opts.check_hp = False
# ----
def filter_stdout_lines( regex ):
""" Filter lines of stdout with the given regular expression """
def f( name, opts ):
_normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
return f
def normalise_slashes( name, opts ):
_normalise_fun(name, opts, normalise_slashes_)
def normalise_exe( name, opts ):
_normalise_fun(name, opts, normalise_exe_)
def normalise_fun( *fs ):
return lambda name, opts: _normalise_fun(name, opts, fs)
def _normalise_fun( name, opts, *fs ):
opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
def normalise_errmsg_fun( *fs ):
return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
def _normalise_errmsg_fun( name, opts, *fs ):
opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
def check_errmsg(needle):
def norm(str):
if needle in str:
return "%s contained in -ddump-simpl\n" % needle
else:
return "%s not contained in -ddump-simpl\n" % needle
return normalise_errmsg_fun(norm)
def grep_errmsg(needle):
def norm(str):
return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
return normalise_errmsg_fun(norm)
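# Illustrative usage (hypothetical test name and needle): with a dump flag that
# writes to stderr, check_errmsg reduces the stderr comparison to a single
# "<needle> (not) contained in -ddump-simpl" line, which the expected .stderr
# file then matches:
#
#   test('T9012', [check_errmsg('SpecConstr')], compile, ['-O -ddump-simpl'])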
def normalise_whitespace_fun(f):
return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
def _normalise_whitespace_fun(name, opts, f):
opts.whitespace_normaliser = f
def normalise_win32_io_errors(name, opts):
"""
On Windows we currently have two IO manager implementations: both WinIO IO
manager and the old POSIX-emulated implementation. These currently differ
slightly in the error messages that they provide. Normalise these
differences away, preferring the new WinIO errors.
This normalization can be dropped when the old IO manager is removed.
"""
SUBS = [
('Bad file descriptor', 'The handle is invalid.'),
('Permission denied', 'Access is denied.'),
('No such file or directory', 'The system cannot find the file specified.'),
]
def normalizer(s: str) -> str:
for old,new in SUBS:
s = s.replace(old, new)
return s
if opsys('mingw32'):
_normalise_fun(name, opts, normalizer)
_normalise_errmsg_fun(name, opts, normalizer)
def normalise_version_( *pkgs ):
def normalise_version__( str ):
return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
'\\1-<VERSION>', str)
return normalise_version__
def normalise_version( *pkgs ):
def normalise_version__( name, opts ):
_normalise_fun(name, opts, normalise_version_(*pkgs))
_normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
return normalise_version__
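# Illustrative usage (hypothetical test name): strip the version numbers of the
# named packages from the output, so expected files don't churn on every
# release:
#
#   test('T9013', [normalise_version('base', 'ghc-prim')], compile_fail, [''])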
def normalise_drive_letter(name, opts):
# Windows only. Change D:\\ to C:\\.
_normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
def keep_prof_callstacks(name, opts):
"""Keep profiling callstacks.
Use together with `only_ways(prof_ways)`.
"""
opts.keep_prof_callstacks = True
def join_normalisers(*a):
"""
Compose functions, flattening sequences.
join_normalisers(f1,[f2,f3],f4)
is the same as
lambda x: f1(f2(f3(f4(x))))
"""
def flatten(l):
"""
Taken from http://stackoverflow.com/a/2158532/946226
"""
for el in l:
            # collections.Iterable was removed in Python 3.10; use the ABC from
            # collections.abc instead.
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
for sub in flatten(el):
yield sub
else:
yield el
a = flatten(a)
fn = lambda x:x # identity function
for f in a:
assert callable(f)
fn = lambda x,f=f,fn=fn: fn(f(x))
return fn
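# For example (toy normalisers; nesting is flattened as described above):
#
#   up    = lambda s: s.upper()
#   strip = lambda s: s.strip()
#   norm  = join_normalisers(strip, [up])
#   norm('  hi  ')   # == 'HI'  (applies `up` first, then `strip`)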
# ----
# Function for composing two opt-fns together
def executeSetups(fs, name, opts):
if type(fs) is list:
# If we have a list of setups, then execute each one
for f in fs:
executeSetups(f, name, opts)
else:
# fs is a single function, so just apply it
fs(name, opts)
# -----------------------------------------------------------------------------
# The current directory of tests
def newTestDir(tempdir, dir):
global thisdir_settings
# reset the options for this test directory
def settings(name, opts, tempdir=tempdir, dir=dir):
return _newTestDir(name, opts, tempdir, dir)
thisdir_settings = settings
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'
def _newTestDir(name: TestName, opts: TestOptions, tempdir, dir):
testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
opts.srcdir = Path.cwd() / dir
opts.testdir = Path(os.path.join(tempdir, testdir, name + testdir_suffix))
opts.compiler_always_flags = config.compiler_always_flags
# -----------------------------------------------------------------------------
# Actually doing tests
parallelTests = []
aloneTests = []
allTestNames = set([]) # type: Set[TestName]
def runTest(watcher, opts, name: TestName, func, args):
if config.use_threads:
pool_sema.acquire()
t = threading.Thread(target=test_common_thread,
name=name,
args=(watcher, name, opts, func, args))
t.daemon = False
t.start()
else:
test_common_work(watcher, name, opts, func, args)
# name :: String
# setup :: [TestOpt] -> IO ()
def test(name: TestName,
setup: "Callable[[List[TestOptions]], None]",
func, args) -> None:
global aloneTests
global parallelTests
global allTestNames
global thisdir_settings
if name in allTestNames:
framework_fail(name, None, 'There are multiple tests with this name')
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
framework_fail(name, None, 'This test has an invalid name')
if config.run_only_some_tests:
if name not in config.only:
return
else:
# Note [Mutating config.only]
# config.only is initially the set of tests requested by
# the user (via 'make TEST='). We then remove all tests that
# we've already seen (in .T files), so that we can later
# report on any tests we couldn't find and error out.
config.only.remove(name)
# Make a deep copy of the default_testopts, as we need our own copy
# of any dictionaries etc inside it. Otherwise, if one test modifies
# them, all tests will see the modified version!
myTestOpts = copy.deepcopy(default_testopts)
executeSetups([thisdir_settings, setup], name, myTestOpts)
if name in config.broken_tests:
myTestOpts.expect = 'fail'
thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
if myTestOpts.alone:
aloneTests.append(thisTest)
else:
parallelTests.append(thisTest)
allTestNames.add(name)
if config.use_threads:
def test_common_thread(watcher, name, opts, func, args):
try:
test_common_work(watcher, name, opts, func, args)
finally:
pool_sema.release()
def get_package_cache_timestamp() -> float:
if config.package_conf_cache_file is None:
return 0.0
else:
try:
return config.package_conf_cache_file.stat().st_mtime
except:
return 0.0
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
def test_common_work(watcher: testutil.Watcher,
name: TestName, opts,
func, args) -> None:
try:
t.total_tests += 1
setLocalTestOpts(opts)
package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
# All the ways we might run this test
if func == compile or func == multimod_compile:
all_ways = config.compile_ways
elif func in [compile_and_run, multi_compile_and_run, multimod_compile_and_run]:
all_ways = config.run_ways
elif func == ghci_script:
if WayName('ghci') in config.run_ways:
all_ways = [WayName('ghci')]
else:
all_ways = []
elif func in [makefile_test, run_command]:
# makefile tests aren't necessarily runtime or compile-time
# specific. Assume we can run them in all ways. See #16042 for what
# happened previously.
all_ways = config.compile_ways + config.run_ways
else:
all_ways = [WayName('normal')]
# A test itself can request extra ways by setting opts.extra_ways
all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
t.total_test_cases += len(all_ways)
only_ways = getTestOpts().only_ways
ok_way = lambda way: \
not getTestOpts().skip \
and (only_ways is None
or (only_ways is not None and way in only_ways)) \
and (config.cmdline_ways == [] or way in config.cmdline_ways) \
and (not (config.skip_perf_tests and isStatsTest())) \
and (not (config.only_perf_tests and not isStatsTest())) \
and way not in getTestOpts().omit_ways
        # The ways we will actually run, after filtering out the ways we were
        # asked to skip
        do_ways = list(filter(ok_way, all_ways))
# Only run all ways in slow mode.
# See Note [validate and testsuite speed] in toplevel Makefile.
if config.accept:
# Only ever run one way
do_ways = do_ways[:1]
elif config.speed > 0:
# However, if we EXPLICITLY asked for a way (with extra_ways)
# please test it!
explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
do_ways = other_ways[:1] + explicit_ways
# Find all files in the source directory that this test
# depends on. Do this only once for all ways.
# Generously add all filenames that start with the name of
# the test to this set, as a convenience to test authors.
# They will have to use the `extra_files` setup function to
# specify all other files that their test depends on (but
# this seems to be necessary for only about 10% of all
# tests).
files = set(f for f in os.listdir(str(opts.srcdir))
if f.startswith(name) and not f == name and
not f.endswith(testdir_suffix) and
not os.path.splitext(f)[1] in do_not_copy)
for filename in (opts.extra_files + extra_src_files.get(name, [])):
if filename.startswith('/'):
framework_fail(name, None,
'no absolute paths in extra_files please: ' + filename)
elif '*' in filename:
# Don't use wildcards in extra_files too much, as
# globbing is slow.
files.update(str(Path(f).relative_to(opts.srcdir))
for f in glob.iglob(str(in_srcdir(filename))))
elif filename:
files.add(filename)
else:
framework_fail(name, None, 'extra_file is empty string')
# Run the required tests...
for way in do_ways:
if stopping():
break
try:
do_test(name, way, func, args, files)
except KeyboardInterrupt:
stopNow()
except Exception as e:
traceback.print_exc()
framework_fail(name, way, traceback.format_exc())
t.n_tests_skipped += len(set(all_ways) - set(do_ways))
if config.cleanup and do_ways:
try:
cleanup()
except Exception as e:
framework_fail(name, None, 'Unhandled exception during cleanup: ' + str(e))
package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
framework_fail(name, None, 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
except Exception as e:
framework_fail(name, None, 'Unhandled exception: ' + str(e))
finally:
watcher.notify()
def do_test(name: TestName,
way: WayName,
func: Callable[..., PassFail],
args,
files: Set[str]
) -> None:
opts = getTestOpts()
full_name = name + '(' + way + ')'
progress_args = [ full_name, t.total_tests, len(allTestNames),
[len(t.unexpected_passes),
len(t.unexpected_failures),
len(t.framework_failures)]]
if_verbose(2, "=====> {0} {1} of {2} {3}".format(*progress_args))
# Update terminal title
# useful progress indicator even when make test VERBOSE=1
if config.supports_colors:
print("\033]0;{0} {1} of {2} {3}\007".format(*progress_args), end="")
sys.stdout.flush()
# Clean up prior to the test, so that we can't spuriously conclude
# that it passed on the basis of old run outputs.
cleanup()
os.makedirs(str(opts.testdir))
# Link all source files for this test into a new directory in
# /tmp, and run the test in that directory. This makes it
# possible to run tests in parallel, without modification, that
# would otherwise (accidentally) write to the same output file.
# It also makes it easier to keep the testsuite clean.
for extra_file in files:
src = in_srcdir(extra_file)
dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
if src.is_file():
link_or_copy_file(src, dst)
elif src.is_dir():
if dst.exists():
shutil.rmtree(str(dst))
dst.mkdir()
lndir(src, dst)
else:
if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
# When using a ghc built without haddock support, .t
# files are rightfully missing. Don't
# framework_fail. Test will be skipped later.
pass
else:
framework_fail(name, way,
'extra_file does not exist: ' + extra_file)
if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
# When running 'MAKE' make sure 'TOP' still points to the
# root of the testsuite.
src_makefile = in_srcdir('Makefile')
dst_makefile = in_testdir('Makefile')
if src_makefile.exists():
makefile = src_makefile.read_text(encoding='UTF-8')
makefile = re.sub('TOP=.*', 'TOP=%s' % config.top, makefile, 1)
dst_makefile.write_text(makefile, encoding='UTF-8')
if opts.pre_cmd:
exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
stderr = subprocess.STDOUT,
print_output = config.verbose >= 3)
# If user used expect_broken then don't record failures of pre_cmd
if exit_code != 0 and opts.expect not in ['fail']:
framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
result = func(*[name,way] + args)
if opts.expect not in ['pass', 'fail', 'missing-lib']:
framework_fail(name, way, 'bad expected ' + opts.expect)
try:
passFail = result.passFail
except (KeyError, TypeError):
passFail = 'No passFail found'
directory = re.sub('^\\.[/\\\\]', '', str(opts.testdir))
if way in opts.fragile_ways:
if_verbose(1, '*** fragile test %s resulted in %s' % (full_name, passFail))
if passFail == 'pass':
t.fragile_passes.append(TestResult(directory, name, 'fragile', way))
else:
t.fragile_failures.append(TestResult(directory, name, 'fragile', way,
stdout=result.stdout,
stderr=result.stderr))
elif passFail == 'pass':
if _expect_pass(way):
t.expected_passes.append(TestResult(directory, name, "", way))
t.n_expected_passes += 1
else:
if_verbose(1, '*** unexpected pass for %s' % full_name)
t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
elif passFail == 'fail':
if _expect_pass(way):
reason = result.reason
tag = result.tag
if tag == 'stat':
if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
else:
if_verbose(1, '*** unexpected failure for %s' % full_name)
tr = TestResult(directory, name, reason, way,
stdout=result.stdout,
stderr=result.stderr)
t.unexpected_failures.append(tr)
else:
if opts.expect == 'missing-lib':
t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
else:
t.n_expected_failures += 1
else:
framework_fail(name, way, 'bad result ' + passFail)
# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
if config.verbose >= 5 and bool(re.match('\$make', pre_cmd, re.I)):
return pre_cmd.replace('-s' , '') \
.replace('--silent', '') \
.replace('--quiet' , '')
return pre_cmd
def framework_fail(name: Optional[TestName], way: Optional[WayName], reason: str) -> None:
opts = getTestOpts()
directory = re.sub('^\\.[/\\\\]', '', str(opts.testdir))
full_name = '%s(%s)' % (name, way)
if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
name2 = name if name is not None else TestName('none')
way2 = way if way is not None else WayName('none')
t.framework_failures.append(TestResult(directory, name2, reason, way2))
def framework_warn(name: TestName, way: WayName, reason: str) -> None:
opts = getTestOpts()
directory = re.sub('^\\.[/\\\\]', '', str(opts.testdir))
full_name = name + '(' + way + ')'
if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
t.framework_warnings.append(TestResult(directory, name, reason, way))
def badResult(result: PassFail) -> bool:
try:
if result.passFail == 'pass':
return False
return True
except (KeyError, TypeError):
return True
# -----------------------------------------------------------------------------
# Generic command tests
# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup functions ignore_stdout and
# ignore_stderr.
def run_command( name, way, cmd ):
return simple_run( name, '', override_options(cmd), '' )
def makefile_test( name, way, target=None ):
if target is None:
target = name
cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
return run_command(name, way, cmd)
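# Illustrative usage (made-up test names): a Makefile-driven test (the target
# defaults to the test name) and a plain shell-command test whose expected
# output lives in T9015.stdout:
#
#   test('T9014', [], makefile_test, [])
#   test('T9015', [], run_command, ['echo hello'])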
# -----------------------------------------------------------------------------
# GHCi tests
def ghci_script( name, way, script):
flags = ' '.join(get_compiler_flags())
way_flags = ' '.join(config.way_flags[way])
# We pass HC and HC_OPTS as environment variables, so that the
# script can invoke the correct compiler by using ':! $HC $HC_OPTS'
cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
).format(flags=flags, way_flags=way_flags)
# NB: put way_flags before flags so that flags in all.T can override others
getTestOpts().stdin = script
return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
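# Illustrative usage (hypothetical test name): the script file holds the GHCi
# commands to run; expected output goes in the usual .stdout/.stderr files:
#
#   test('T9016', [], ghci_script, ['T9016.script'])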
# -----------------------------------------------------------------------------
# Compile-only tests
def compile( name, way, extra_hc_opts ):
return do_compile( name, way, False, None, [], extra_hc_opts )
def compile_fail( name, way, extra_hc_opts ):
return do_compile( name, way, True, None, [], extra_hc_opts )
def backpack_typecheck( name, way, extra_hc_opts ):
return do_compile( name, way, False, None, [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
def backpack_typecheck_fail( name, way, extra_hc_opts ):
return do_compile( name, way, True, None, [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
def backpack_compile( name, way, extra_hc_opts ):
return do_compile( name, way, False, None, [], extra_hc_opts, backpack=True )
def backpack_compile_fail( name, way, extra_hc_opts ):
return do_compile( name, way, True, None, [], extra_hc_opts, backpack=True )
def backpack_run( name, way, extra_hc_opts ):
return compile_and_run__( name, way, None, [], extra_hc_opts, backpack=True )
def multimod_compile( name, way, top_mod, extra_hc_opts ):
return do_compile( name, way, False, top_mod, [], extra_hc_opts )
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
return do_compile( name, way, True, top_mod, [], extra_hc_opts )
def multimod_compile_filter( name, way, top_mod, extra_hc_opts, filter_with, suppress_stdout=True ):
return do_compile( name, way, False, top_mod, [], extra_hc_opts, filter_with=filter_with, suppress_stdout=suppress_stdout )
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
return do_compile( name, way, False, top_mod, extra_mods, extra_hc_opts)
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
return do_compile( name, way, True, top_mod, extra_mods, extra_hc_opts)
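# Illustrative usage (hypothetical test names):
#
#   test('T9017', normal, compile, [''])                      # must compile
#   test('T9018', normal, compile_fail, [''])                 # must be rejected
#   test('T9019', normal, multimod_compile, ['T9019', '-v0']) # multi-module build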
def do_compile(name: TestName,
way: WayName,
should_fail: bool,
top_mod: Optional[Path],
extra_mods: List[str],
extra_hc_opts: str,
**kwargs
) -> PassFail:
# print 'Compile only, extra args = ', extra_hc_opts
result = extras_build( way, extra_mods, extra_hc_opts )
if badResult(result):
return result
extra_hc_opts = result.hc_opts
result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, False, True, **kwargs)
if badResult(result):
return result
# the actual stderr should always match the expected, regardless
# of whether we expected the compilation to fail or not (successful
# compilations may generate warnings).
expected_stderr_file = find_expected_file(name, 'stderr')
actual_stderr_file = add_suffix(name, 'comp.stderr')
diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))
if not compare_outputs(way, 'stderr',
join_normalisers(getTestOpts().extra_errmsg_normaliser,
normalise_errmsg),
expected_stderr_file, actual_stderr_file,
diff_file=diff_file_name,
whitespace_normaliser=getattr(getTestOpts(),
"whitespace_normaliser",
normalise_whitespace)):
stderr = diff_file_name.read_text()
diff_file_name.unlink()
return failBecause('stderr mismatch', stderr=stderr)
# no problems found, this test passed
return passed()
def compile_cmp_asm(name: TestName,
way: WayName,
ext: str,
extra_hc_opts: str
) -> PassFail:
print('Compile only, extra args = ', extra_hc_opts)
result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, False, None, False, False)
if badResult(result):
return result
    # The actual assembly output should always match the expected output,
    # up to the normalisations applied below.
expected_asm_file = find_expected_file(name, 'asm')
actual_asm_file = add_suffix(name, 's')
if not compare_outputs(way, 'asm',
join_normalisers(normalise_errmsg, normalise_asm),
expected_asm_file, actual_asm_file):
return failBecause('asm mismatch')
# no problems found, this test passed
return passed()
def compile_grep_asm(name: TestName,
way: WayName,
ext: str,
is_substring: bool,
extra_hc_opts: str
) -> PassFail:
print('Compile only, extra args = ', extra_hc_opts)
result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, False, None, False, False)
if badResult(result):
return result
expected_pat_file = find_expected_file(name, 'asm')
actual_asm_file = add_suffix(name, 's')
if not grep_output(join_normalisers(normalise_errmsg),
expected_pat_file, actual_asm_file,
is_substring):
return failBecause('asm mismatch')
# no problems found, this test passed
return passed()
def compile_grep_core(name: TestName,
way: WayName,
extra_hc_opts: str
) -> PassFail:
print('Compile only, extra args = ', extra_hc_opts)
result = simple_build(name + '.hs', way, '-ddump-to-file -dsuppress-all -ddump-simpl -O ' + extra_hc_opts, False, None, False, False)
if badResult(result):
return result
expected_pat_file = find_expected_file(name, 'substr-simpl')
actual_core_file = add_suffix(name, 'dump-simpl')
if not grep_output(join_normalisers(normalise_errmsg),
expected_pat_file, actual_core_file):
return failBecause('simplified core mismatch')
# no problems found, this test passed
return passed()
# -----------------------------------------------------------------------------
# Compile-and-run tests
def compile_and_run__(name: TestName,
way: WayName,
top_mod: Path,
extra_mods: List[str],
extra_hc_opts: str,
backpack: bool=False
) -> PassFail:
# print 'Compile and run, extra args = ', extra_hc_opts
result = extras_build( way, extra_mods, extra_hc_opts )
if badResult(result):
return result
extra_hc_opts = result.hc_opts
assert extra_hc_opts is not None
if way.startswith('ghci'): # interpreted...
return interpreter_run(name, way, extra_hc_opts, top_mod)
else: # compiled...
result = simple_build(name, way, extra_hc_opts, False, top_mod, True, True, backpack = backpack)
if badResult(result):
return result
cmd = './' + name;
# we don't check the compiler's stderr for a compile-and-run test
return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
def compile_and_run( name, way, extra_hc_opts ):
return compile_and_run__( name, way, None, [], extra_hc_opts)
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
def stats( name, way, stats_file ):
opts = getTestOpts()
return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)
def static_stats( name, way, stats_file ):
opts = getTestOpts()
return check_stats(name, way, in_statsdir(stats_file), opts.stats_range_fields)
def metric_dict(name, way, metric, value) -> PerfStat:
return Perf.PerfStat(
test_env = config.test_env,
test = name,
way = way,
metric = metric,
value = value)
# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: see TestOptions.stats_range_fields
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
# This prints the results for the user.
def check_stats(name: TestName,
way: WayName,
stats_file: Path,
range_fields: Dict[MetricName, MetricOracles]
) -> PassFail:
head_commit = Perf.commit_hash(GitRef('HEAD')) if Perf.inside_git_repo() else None
if head_commit is None:
return passed()
result = passed()
if range_fields:
try:
stats_file_contents = stats_file.read_text()
except IOError as e:
return failBecause(str(e))
for (metric, baseline_and_dev) in range_fields.items():
# Remove any metric prefix e.g. "runtime/" and "compile_time/"
stat_file_metric = metric.split("/")[-1]
field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)', stats_file_contents)
if field_match is None:
print('Failed to find metric: ', stat_file_metric)
metric_result = failBecause('no such stats metric')
else:
val = field_match.group(1)
assert val is not None
actual_val = int(val)
# Store the metric so it can later be stored in a git note.
perf_stat = metric_dict(name, way, metric, actual_val)
change = None
# If this is the first time running the benchmark, then pass.
baseline = baseline_and_dev.baseline(way, head_commit) \
if Perf.inside_git_repo() else None
if baseline is None:
metric_result = passed()
change = MetricChange.NewMetric
else:
tolerance_dev = baseline_and_dev.deviation
(change, metric_result) = Perf.check_stats_change(
perf_stat,
baseline,
tolerance_dev,
config.allowed_perf_changes,
config.verbose >= 4)
t.metrics.append(PerfMetric(change=change, stat=perf_stat, baseline=baseline))
# If any metric fails then the test fails.
# Note, the remaining metrics are still run so that
# a complete list of changes can be presented to the user.
if metric_result.passFail == 'fail':
result = metric_result
return result
# -----------------------------------------------------------------------------
# Build a single-module program
def extras_build( way, extra_mods, extra_hc_opts ):
for mod, opts in extra_mods:
result = simple_build(mod, way, opts + ' ' + extra_hc_opts, False, None, False, False)
if not (mod.endswith('.hs') or mod.endswith('.lhs')):
extra_hc_opts += ' %s' % Path(mod).with_suffix('.o')
if badResult(result):
return result
return passed(hc_opts=extra_hc_opts)
def simple_build(name: Union[TestName, str],
way: WayName,
extra_hc_opts: str,
should_fail: bool,
top_mod: Optional[Path],
link: bool,
addsuf: bool,
backpack: bool = False,
suppress_stdout: bool = False,
filter_with: str = '') -> Any:
opts = getTestOpts()
# Redirect stdout and stderr to the same file
stdout = in_testdir(name, 'comp.stderr')
stderr = subprocess.STDOUT if not suppress_stdout else None
if top_mod is not None:
srcname = top_mod
elif addsuf:
if backpack:
srcname = add_suffix(name, 'bkp')
else:
srcname = add_hs_lhs_suffix(name)
else:
srcname = Path(name)
if top_mod is not None:
to_do = '--make '
if link:
to_do = to_do + '-o ' + name
elif backpack:
if link:
to_do = '-o ' + name + ' '
else:
to_do = ''
to_do = to_do + '--backpack '
elif link:
to_do = '-o ' + name
else:
to_do = '-c' # just compile
stats_file = name + '.comp.stats'
if isCompilerStatsTest():
extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
if backpack:
extra_hc_opts += ' -outputdir ' + name + '.out'
# Required by GHC 7.3+, harmless for earlier versions:
if (getTestOpts().c_src or
getTestOpts().objc_src or
getTestOpts().objcpp_src):
extra_hc_opts += ' -no-hs-main '
if getTestOpts().compile_cmd_prefix == '':
cmd_prefix = ''
else:
cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
flags = ' '.join(get_compiler_flags() + config.way_flags[way])
cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
'{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
).format(**locals())
if filter_with != '':
cmd = cmd + ' | ' + filter_with
exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
actual_stderr_path = in_testdir(name, 'comp.stderr')
if exit_code != 0 and not should_fail:
if config.verbose >= 1 and _expect_pass(way):
print('Compile failed (exit code {0}) errors were:'.format(exit_code))
dump_file(actual_stderr_path)
# ToDo: if the sub-shell was killed by ^C, then exit
if isCompilerStatsTest():
statsResult = check_stats(TestName(name), way, in_testdir(stats_file), opts.stats_range_fields)
if badResult(statsResult):
return statsResult
if should_fail:
if exit_code == 0:
stderr_contents = actual_stderr_path.read_text(encoding='UTF-8', errors='replace')
return failBecause('exit code 0', stderr=stderr_contents)
else:
if exit_code != 0:
stderr_contents = actual_stderr_path.read_text(encoding='UTF-8', errors='replace')
return failBecause('exit code non-0', stderr=stderr_contents)
return passed()
# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.
def simple_run(name: TestName, way: WayName, prog: str, extra_run_opts: str) -> Any:
opts = getTestOpts()
# figure out what to use for stdin
if opts.stdin:
stdin_arg = in_testdir(opts.stdin) # type: Optional[Path]
elif in_testdir(name, 'stdin').exists():
stdin_arg = in_testdir(name, 'stdin')
else:
stdin_arg = None
stdout_arg = in_testdir(name, 'run.stdout')
if opts.combined_output:
stderr_arg = subprocess.STDOUT # type: Union[int, Path]
else:
stderr_arg = in_testdir(name, 'run.stderr')
my_rts_flags = rts_flags(way)
# Collect runtime stats if necessary:
# isStatsTest and not isCompilerStatsTest():
# assume we are running a ghc compiled program. Collect stats.
# isStatsTest and way == 'ghci':
# assume we are running a program via ghci. Collect stats
stats_file = None # type: Optional[str]
if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
stats_file = name + '.stats'
stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
else:
stats_args = ''
# Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
cmd = ' '.join([prog, stats_args, my_rts_flags, extra_run_opts])
if opts.cmd_wrapper is not None:
cmd = opts.cmd_wrapper(cmd)
cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
# run the command
exit_code = runCmd(cmd, stdin_arg, stdout_arg, stderr_arg, opts.run_timeout_multiplier)
# check the exit code
if exit_code != opts.exit_code:
if config.verbose >= 1 and _expect_pass(way):
print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
dump_stdout(name)
dump_stderr(name)
return failBecause('bad exit code (%d)' % exit_code)
if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
return failBecause('bad stderr',
stderr=read_stderr(name),
stdout=read_stdout(name))
if not (opts.ignore_stdout or stdout_ok(name, way)):
return failBecause('bad stdout',
stderr=read_stderr(name),
stdout=read_stdout(name))
check_hp = '-h' in my_rts_flags and opts.check_hp
check_prof = '-p' in my_rts_flags
# exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
return failBecause('bad heap profile')
if check_prof and not check_prof_ok(name, way):
return failBecause('bad profile')
# Check runtime stats if desired.
if stats_file is not None:
return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)
else:
return passed()
def rts_flags(way: WayName) -> str:
args = config.way_rts_flags.get(way, [])
return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output
def interpreter_run(name: TestName,
way: WayName,
extra_hc_opts: str,
top_mod: Path
) -> PassFail:
opts = getTestOpts()
stdout = in_testdir(name, 'interp.stdout')
stderr = in_testdir(name, 'interp.stderr')
script = in_testdir(name, 'genscript')
if opts.combined_output:
framework_fail(name, WayName('unsupported'),
'WAY=ghci and combined_output together is not supported')
if top_mod is None:
srcname = add_hs_lhs_suffix(name)
else:
srcname = Path(top_mod)
delimiter = '===== program output begins here\n'
with script.open('w', encoding='UTF-8') as f:
# set the prog name and command-line args to match the compiled
# environment.
f.write(':set prog ' + name + '\n')
f.write(':set args ' + opts.extra_run_opts + '\n')
# Add marker lines to the stdout and stderr output files, so we
# can separate GHCi's output from the program's.
f.write(':! echo ' + delimiter)
f.write(':! echo 1>&2 ' + delimiter)
# Set stdout to be line-buffered to match the compiled environment.
f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
# wrapping in GHC.TopHandler.runIO ensures we get the same output
# in the event of an exception as for the compiled program.
f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
if stdin.exists():
os.system('cat "{0}" >> "{1}"'.format(stdin, script))
flags = ' '.join(get_compiler_flags() + config.way_flags[way])
cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
).format(**locals())
if opts.cmd_wrapper is not None:
cmd = opts.cmd_wrapper(cmd);
cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
# split the stdout into compilation/program output
split_file(stdout, delimiter,
in_testdir(name, 'comp.stdout'),
in_testdir(name, 'run.stdout'))
split_file(stderr, delimiter,
in_testdir(name, 'comp.stderr'),
in_testdir(name, 'run.stderr'))
# check the exit code
if exit_code != getTestOpts().exit_code:
print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
dump_stdout(name)
dump_stderr(name)
return failBecause('bad exit code (%d)' % exit_code,
stderr=read_stderr(name),
stdout=read_stdout(name))
# ToDo: if the sub-shell was killed by ^C, then exit
if not (opts.ignore_stderr or stderr_ok(name, way)):
return failBecause('bad stderr',
stderr=read_stderr(name),
stdout=read_stdout(name))
elif not (opts.ignore_stdout or stdout_ok(name, way)):
return failBecause('bad stdout',
stderr=read_stderr(name),
stdout=read_stdout(name))
else:
return passed()
def split_file(in_fn: Path, delimiter: str, out1_fn: Path, out2_fn: Path):
# See Note [Universal newlines].
with in_fn.open('r', encoding='utf8', errors='replace', newline=None) as infile:
with out1_fn.open('w', encoding='utf8', newline='') as out1:
with out2_fn.open('w', encoding='utf8', newline='') as out2:
line = infile.readline()
while re.sub('^\s*','',line) != delimiter and line != '':
out1.write(line)
line = infile.readline()
line = infile.readline()
while line != '':
out2.write(line)
line = infile.readline()
# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags() -> List[str]:
opts = getTestOpts()
flags = copy.copy(opts.compiler_always_flags)
flags.append(opts.extra_hc_opts)
if opts.outputdir is not None:
flags.extend(["-outputdir", opts.outputdir])
return flags
def stdout_ok(name: TestName, way: WayName) -> bool:
actual_stdout_file = add_suffix(name, 'run.stdout')
expected_stdout_file = find_expected_file(name, 'stdout')
extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
check_stdout = getTestOpts().check_stdout
if check_stdout is not None:
actual_stdout_path = in_testdir(actual_stdout_file)
return check_stdout(actual_stdout_path, extra_norm)
return compare_outputs(way, 'stdout', extra_norm,
expected_stdout_file, actual_stdout_file)
def read_stdout( name: TestName ) -> str:
path = in_testdir(name, 'run.stdout')
if path.exists():
return path.read_text(encoding='UTF-8')
else:
return ''
def dump_stdout( name: TestName ) -> None:
s = read_stdout(name).strip()
if s:
print("Stdout (", name, "):")
safe_print(s)
def stderr_ok(name: TestName, way: WayName) -> bool:
actual_stderr_file = add_suffix(name, 'run.stderr')
expected_stderr_file = find_expected_file(name, 'stderr')
return compare_outputs(way, 'stderr',
join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
expected_stderr_file, actual_stderr_file,
whitespace_normaliser=normalise_whitespace)
def read_stderr( name: TestName ) -> str:
path = in_testdir(name, 'run.stderr')
if path.exists():
return path.read_text(encoding='UTF-8')
else:
return ''
def dump_stderr( name: TestName ) -> None:
s = read_stderr(name).strip()
if s:
print("Stderr (", name, "):")
safe_print(s)
def read_no_crs(f: Path) -> str:
s = ''
try:
# See Note [Universal newlines].
with f.open('r', encoding='utf8', errors='replace', newline=None) as h:
s = h.read()
except Exception:
# On Windows, if the program fails very early, it seems the
# files stdout/stderr are redirected to may not get created
pass
return s
def write_file(f: Path, s: str) -> None:
# See Note [Universal newlines].
with f.open('w', encoding='utf8', newline='') as h:
h.write(s)
# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `Path.open` instead of `open`
# * when reading: use newline=None to translate '\r\n' to '\n'
# * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/3/library/pathlib.html#pathlib.Path.open
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that Path.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.
def check_hp_ok(name: TestName) -> bool:
opts = getTestOpts()
    # No need to qualify the file passed to hp2ps; we should already be in the right directory
hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
hp2psResult = runCmd(hp2psCmd, print_output=True)
actual_ps_path = in_testdir(name, 'ps')
if hp2psResult == 0:
if actual_ps_path.exists():
if does_ghostscript_work():
gsResult = runCmd(genGSCmd(actual_ps_path))
if (gsResult == 0):
return True
else:
print("hp2ps output for " + name + " is not valid PostScript")
return False
else:
return True # assume postscript is valid without ghostscript
else:
print("hp2ps did not generate PostScript for " + name)
return False
else:
print("hp2ps error when processing heap profile for " + name)
return False
def check_prof_ok(name: TestName, way: WayName) -> bool:
expected_prof_file = find_expected_file(name, 'prof.sample')
expected_prof_path = in_testdir(expected_prof_file)
# Check actual prof file only if we have an expected prof file to
# compare it with.
if not expected_prof_path.exists():
return True
actual_prof_file = add_suffix(name, 'prof')
actual_prof_path = in_testdir(actual_prof_file)
if not actual_prof_path.exists():
print("%s does not exist" % actual_prof_path)
return(False)
if actual_prof_path.stat().st_size == 0:
print("%s is empty" % actual_prof_path)
return(False)
return compare_outputs(way, 'prof', normalise_prof,
expected_prof_file, actual_prof_file,
whitespace_normaliser=normalise_whitespace)
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way: WayName,
kind: str,
normaliser: OutputNormalizer,
expected_file, actual_file, diff_file=None,
whitespace_normaliser: OutputNormalizer=lambda x:x) -> bool:
expected_path = in_srcdir(expected_file)
actual_path = in_testdir(actual_file)
if expected_path.exists():
expected_str = normaliser(read_no_crs(expected_path))
# Create the .normalised file in the testdir, not in the srcdir.
expected_normalised_file = add_suffix(expected_file, 'normalised')
expected_normalised_path = in_testdir(expected_normalised_file)
else:
expected_str = ''
# See Note [Null device handling]
expected_normalised_path = Path(os.devnull)
actual_raw = read_no_crs(actual_path)
actual_str = normaliser(actual_raw)
# See Note [Output comparison].
if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
return True
else:
if config.verbose >= 1 and _expect_pass(way):
print('Actual ' + kind + ' output differs from expected:')
# See Note [Null device handling]
if expected_normalised_path != Path(os.devnull):
write_file(expected_normalised_path, expected_str)
actual_normalised_path = add_suffix(actual_path, 'normalised')
write_file(actual_normalised_path, actual_str)
if config.verbose >= 1 and _expect_pass(way):
# See Note [Output comparison].
r = runCmd('diff -uw "{0}" "{1}"'.format(null2unix_null(expected_normalised_path),
actual_normalised_path),
stdout=diff_file,
print_output=True)
# If for some reason there were no non-whitespace differences,
# then do a full diff
if r == 0:
r = runCmd('diff -u "{0}" "{1}"'.format(null2unix_null(expected_normalised_path),
actual_normalised_path),
stdout=diff_file,
print_output=True)
        elif diff_file:
            # Make sure the file still exists, as we will try to read it later
            diff_file.open('ab').close()
if config.accept and (getTestOpts().expect == 'fail' or
way in getTestOpts().expect_fail_for):
if_verbose(1, 'Test is expected to fail. Not accepting new output.')
return False
elif config.accept and actual_raw:
if config.accept_platform:
if_verbose(1, 'Accepting new output for platform "'
+ config.platform + '".')
            # Path does not support '+=': rebuild the path with the suffix appended
            expected_path = Path(str(expected_path) + '-' + config.platform)
elif config.accept_os:
if_verbose(1, 'Accepting new output for os "'
+ config.os + '".')
            expected_path = Path(str(expected_path) + '-' + config.os)
else:
if_verbose(1, 'Accepting new output.')
write_file(expected_path, actual_raw)
return True
elif config.accept:
if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
expected_path.unlink()
return True
else:
return False
# Checks that each line from pattern_file is present in actual_file as
# a substring or regex pattern depending on is_substring.
def grep_output(normaliser: OutputNormalizer, pattern_file, actual_file, is_substring: bool=True):
expected_path = in_srcdir(pattern_file)
actual_path = in_testdir(actual_file)
expected_patterns = read_no_crs(expected_path).strip().split('\n')
actual_raw = read_no_crs(actual_path)
actual_str = normaliser(actual_raw)
success = True
failed_patterns = []
def regex_match(pat, actual):
return re.search(pat, actual) is not None
def substring_match(pat, actual):
return pat in actual
def is_match(pat, actual):
if is_substring:
return substring_match(pat, actual)
else:
return regex_match(pat, actual)
for pat in expected_patterns:
if not is_match(pat, actual_str):
success = False
failed_patterns.append(pat)
if not success:
print('Actual output does not contain the following patterns:')
for pat in failed_patterns:
print(pat)
return success
# Note [Output comparison]
#
# We do two types of output comparison:
#
# 1. To decide whether a test has failed. We apply a `normaliser` and an
# optional `whitespace_normaliser` to the expected and the actual
# output, before comparing the two.
#
# 2. To show as a diff to the user when the test indeed failed. We apply
# the same `normaliser` function to the outputs, to make the diff as
# small as possible (only showing the actual problem). But we don't
# apply the `whitespace_normaliser` here, because it might completely
# squash all whitespace, making the diff unreadable. Instead we rely
# on the `diff` program to ignore whitespace changes as much as
# possible (#10152).
# Note [Null device handling]
#
# On windows the null device is 'nul' instead of '/dev/null'.
# This can in principle be easily solved by using os.devnull.
# Not doing so causes issues when python tries to read/write/open
# the null device.
#
# However, this still leads to a problem when executing shell
# commands in the msys environment, which again expects '/dev/null'.
#
# So what we do is use os.devnull and convert it to the string
# '/dev/null' for shell commands which are bound to run in a
# unix-like environment.
def null2unix_null(f: Path) -> str:
if f == Path(os.devnull):
return ('/dev/null')
else:
return f.as_posix()
def normalise_whitespace(s: str) -> str:
# Merge contiguous whitespace characters into a single space.
return ' '.join(s.split())
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
def normalise_callstacks(s: str) -> str:
opts = getTestOpts()
def repl(matches):
location = matches.group(1)
location = normalise_slashes_(location)
return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
# Ignore line number differences in call stacks (#10834).
s = re.sub(callSite_re, repl, s)
# Ignore the change in how we identify implicit call-stacks
s = s.replace('from ImplicitParams', 'from HasCallStack')
if not opts.keep_prof_callstacks:
# Don't output prof callstacks. Test output should be
# independent from the WAY we run the test.
s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
return s
tyCon_re = re.compile(r'TyCon\s*\d+\#\#\d?\d?\s*\d+\#\#\d?\d?\s*', flags=re.MULTILINE)
def normalise_type_reps(s: str) -> str:
""" Normalise out fingerprints from Typeable TyCon representations """
return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', s)
def normalise_errmsg(s: str) -> str:
"""Normalise error-messages emitted via stderr"""
# IBM AIX's `ld` is a bit chatty
if opsys('aix'):
s = s.replace('ld: 0706-027 The -x flag is ignored.\n', '')
# remove " error:" and lower-case " Warning:" to make patch for
# trac issue #10021 smaller
s = modify_lines(s, lambda l: re.sub(' error:', '', l))
s = modify_lines(s, lambda l: re.sub(' Warning:', ' warning:', l))
s = normalise_callstacks(s)
s = normalise_type_reps(s)
# If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
# the colon is there because it appears in error messages; this
# hacky solution is used in place of more sophisticated filename
# mangling
s = re.sub('([^\\s])\\.exe', '\\1', s)
# normalise slashes, minimise Windows/Unix filename differences
s = re.sub('\\\\', '/', s)
# The inplace ghc's are called ghc-stage[123] to avoid filename
# collisions, so we need to normalise that to just "ghc"
s = re.sub('ghc-stage[123]', 'ghc', s)
# Error messages sometimes contain ghc-bignum implementation package
s = re.sub('ghc-bignum-[0-9.]+', 'ghc-bignum-<VERSION>', s)
# Error messages sometimes contain this blurb which can vary
# spuriously depending upon build configuration (e.g. based on bignum
# backend)
s = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
'...plus N instances involving out-of-scope types', s)
# Also filter out bullet characters. This is because bullets are used to
# separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
bullet = '•'.encode('utf8') if isinstance(s, bytes) else '•'
s = s.replace(bullet, '')
# Windows only, this is a bug in hsc2hs but it is preventing
# stable output for the testsuite. See #9775. For now we filter out this
# warning message to get clean output.
if config.msys:
s = re.sub('Failed to remove file (.*); error= (.*)$', '', s)
s = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', s)
# filter out unsupported GNU_PROPERTY_TYPE (5), which is emitted by LLVM10
# and not understood by older binutils (ar, ranlib, ...)
s = modify_lines(s, lambda l: re.sub('^(.+)warning: (.+): unsupported GNU_PROPERTY_TYPE \(5\) type: 0xc000000(.*)$', '', l))
# filter out nix garbage, that just keeps on showing up as errors on darwin
s = modify_lines(s, lambda l: re.sub('^(.+)\.dylib, ignoring unexpected dylib file$','', l))
return s
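# For example (illustrative): a stderr line such as
#   "ghc-stage2.exe: warning: foo\bar is unusual"
# comes out of the rules above as
#   "ghc: warning: foo/bar is unusual"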
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (s: str) -> str:
    # strip everything up to the line beginning "COST CENTRE"
s = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',s)
    # strip results for CAFs, these tend to change unpredictably
s = re.sub('[ \t]*(CAF|IDLE).*\n','',s)
# XXX Ignore Main.main. Sometimes this appears under CAF, and
# sometimes under MAIN.
s = re.sub('[ \t]*main[ \t]+Main.*\n','',s)
# We have something like this:
#
# MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
# CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
# readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
# readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
# main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
# == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
# == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
# showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
# showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
# readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
# readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
#
# then we remove all the specific profiling data, leaving only the cost
# centre name, module, src, and entries, to end up with this: (modulo
# whitespace between columns)
#
# MAIN MAIN <built-in> 0
# readPrec Main Main_1.hs:7:13-16 1
# readPrec Main Main_1.hs:4:13-16 1
# == Main Main_1.hs:7:25-26 1
# == Main Main_1.hs:4:25-26 1
# showsPrec Main Main_1.hs:7:19-22 2
# showsPrec Main Main_1.hs:4:19-22 2
# readPrec Main Main_1.hs:7:13-16 0
# readPrec Main Main_1.hs:4:13-16 0
# Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
# (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
# this works fine.
s = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
'\\1 \\2 \\3 \\5\n', s)
return s
def normalise_slashes_( s: str ) -> str:
s = re.sub('\\\\', '/', s)
s = re.sub('//', '/', s)
return s
def normalise_exe_( s: str ) -> str:
s = re.sub('\.exe', '', s)
return s
def normalise_output( s: str ) -> str:
# remove " error:" and lower-case " Warning:" to make patch for
# trac issue #10021 smaller
s = modify_lines(s, lambda l: re.sub(' error:', '', l))
s = modify_lines(s, lambda l: re.sub(' Warning:', ' warning:', l))
# Remove a .exe extension (for Windows)
# This can occur in error messages generated by the program.
s = re.sub('([^\\s])\\.exe', '\\1', s)
s = normalise_callstacks(s)
s = normalise_type_reps(s)
# ghci outputs are pretty unstable with -fexternal-dynamic-refs, which is
    # required for -fPIC
s = re.sub(' -fexternal-dynamic-refs\n','',s)
return s
def normalise_asm( s: str ) -> str:
lines = s.split('\n')
    # Only keep instructions and labels not starting with a dot.
metadata = re.compile('^[ \t]*\\..*$')
out = []
for line in lines:
# Drop metadata directives (e.g. ".type")
if not metadata.match(line):
line = re.sub('@plt', '', line)
ins = line.lstrip().split()
# Drop empty lines.
if not ins:
continue
        # Drop operands, except for call instructions.
elif ins[0] == 'call':
out.append(ins[0] + ' ' + ins[1])
else:
out.append(ins[0])
return '\n'.join(out)
def safe_print(s: str) -> None:
s2 = s.encode(sys.stdout.encoding, errors='replace').decode(sys.stdout.encoding)
print(s2)
def if_verbose( n: int, s: str ) -> None:
if config.verbose >= n:
safe_print(s)
def dump_file(f: Path):
try:
with f.open() as file:
safe_print(file.read())
except Exception:
print('')
def runCmd(cmd: str,
stdin: Union[None, Path]=None,
stdout: Union[None, Path]=None,
stderr: Union[None, int, Path]=None,
timeout_multiplier=1.0,
print_output=False) -> int:
timeout_prog = strip_quotes(config.timeout_prog)
timeout = str(int(ceil(config.timeout * timeout_multiplier)))
# Format cmd using config. Example: cmd='{hpc} report A.tix'
cmd = cmd.format(**config.__dict__)
if_verbose(3, '%s< %s' % (cmd, stdin.name if isinstance(stdin, Path) else ''))
stdin_file = stdin.open('rb') if stdin is not None else None
stdout_buffer = b''
stderr_buffer = b''
hStdErr = subprocess.PIPE
if stderr is subprocess.STDOUT:
hStdErr = subprocess.STDOUT
try:
# cmd is a complex command in Bourne-shell syntax
# e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
# Hence it must ultimately be run by a Bourne shell. It's timeout's job
# to invoke the Bourne shell
r = subprocess.Popen([timeout_prog, timeout, cmd],
stdin=stdin_file,
stdout=subprocess.PIPE,
stderr=hStdErr,
env=ghc_env)
stdout_buffer, stderr_buffer = r.communicate()
finally:
if stdin_file:
stdin_file.close()
if config.verbose >= 1 and print_output:
if stdout_buffer:
sys.stdout.buffer.write(stdout_buffer)
if stderr_buffer:
sys.stderr.buffer.write(stderr_buffer)
if stdout is not None:
if isinstance(stdout, Path):
stdout.write_bytes(stdout_buffer)
else:
with io.open(stdout, 'wb') as f:
f.write(stdout_buffer)
if stderr is not None:
if isinstance(stderr, Path):
stderr.write_bytes(stderr_buffer)
if r.returncode == 98:
# The python timeout program uses 98 to signal that ^C was pressed
stopNow()
if r.returncode == 99 and getTestOpts().exit_code != 99:
# Only print a message when timeout killed the process unexpectedly.
if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
return r.returncode
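# Typical use (illustrative), reusing the '{hpc} report A.tix' example above:
#   exit_code = runCmd('{hpc} report A.tix', stdout=in_testdir('hpc.report'))
# The braces are filled in from `config` and the whole command line is run under
# the timeout wrapper, with stdout captured into the test directory.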
# -----------------------------------------------------------------------------
# checking if ghostscript is available for checking the output of hp2ps
def genGSCmd(psfile: Path) -> str:
return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
@memoize
def does_ghostscript_work() -> bool:
"""
Detect whether Ghostscript is functional.
"""
def gsNotWorking(reason: str) -> None:
print("GhostScript not available for hp2ps tests:", reason)
if config.gs is None:
return False
try:
if runCmd(genGSCmd(config.top / 'config' / 'good.ps')) != 0:
gsNotWorking("gs can't process good input")
return False
except Exception as e:
        gsNotWorking('error invoking gs on good input: %s' % e)
return False
try:
cmd = genGSCmd(config.top / 'config' / 'bad.ps') + ' >/dev/null 2>&1'
if runCmd(cmd) == 0:
gsNotWorking('gs accepts bad input')
return False
except Exception as e:
gsNotWorking('error invoking gs on bad input: %s' % e)
return False
return True
def add_suffix( name: Union[str, Path], suffix: str ) -> Path:
if suffix == '':
return Path(name)
else:
return Path(str(name) + '.' + suffix)
def add_hs_lhs_suffix(name: str) -> Path:
if getTestOpts().c_src:
return add_suffix(name, 'c')
elif getTestOpts().cmm_src:
return add_suffix(name, 'cmm')
elif getTestOpts().objc_src:
return add_suffix(name, 'm')
elif getTestOpts().objcpp_src:
return add_suffix(name, 'mm')
elif getTestOpts().literate:
return add_suffix(name, 'lhs')
else:
return add_suffix(name, 'hs')
def in_testdir(name: Union[Path, str], suffix: str='') -> Path:
return getTestOpts().testdir / add_suffix(name, suffix)
def in_srcdir(name: Union[Path, str], suffix: str='') -> Path:
srcdir = getTestOpts().srcdir
if srcdir is None:
return add_suffix(name, suffix)
else:
return srcdir / add_suffix(name, suffix)
def in_statsdir(name: Union[Path, str], suffix: str='') -> Path:
dir = config.stats_files_dir
if dir is None:
raise TypeError('stats_files_dir is not set')
return dir / add_suffix(name, suffix)
# Finding the sample output. The filename is of the form
#
# <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
#
def find_expected_file(name: TestName, suff: str) -> Path:
basename = add_suffix(name, suff)
# Override the basename if the user has specified one, this will then be
# subjected to the same name mangling scheme as normal to allow platform
# specific overrides to work.
basename = getTestOpts().use_specs.get(suff, basename)
files = [str(basename) + ws + plat
for plat in ['-' + config.platform, '-' + config.os, '']
for ws in ['-ws-' + config.wordsize, '']]
for f in files:
if in_srcdir(f).exists():
return f
return basename
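# For example (illustrative): for test T123 with suff 'stdout' on a 64-bit
# x86_64 Linux build, the candidates are tried in this order and the first one
# that exists in the source directory wins:
#   T123.stdout-ws-64-x86_64-unknown-linux
#   T123.stdout-x86_64-unknown-linux
#   T123.stdout-ws-64-linux
#   T123.stdout-linux
#   T123.stdout-ws-64
#   T123.stdout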
if config.msys:
import stat
def cleanup() -> None:
testdir = getTestOpts().testdir # type: Path
max_attempts = 5
retries = max_attempts
def on_error(function, path: str, excinfo):
# At least one test (T11489) removes the write bit from a file it
# produces. Windows refuses to delete read-only files with a
# permission error. Try setting the write bit and try again.
Path(path).chmod(stat.S_IWRITE)
function(path)
# On Windows we have to retry the delete a couple of times.
# The reason for this is that a FileDelete command just marks a
# file for deletion. The file is really only removed when the last
# handle to the file is closed. Unfortunately there are a lot of
# system services that can have a file temporarily opened using a shared
# readonly lock, such as the built in AV and search indexer.
#
        # We can't really guarantee that these are all off, so whenever the folder
        # still exists after an rmtree we try again and wait a bit.
        #
        # Based on what I've seen from the tests on the CI server, this is
        # relatively rare, so overall we won't be retrying a lot. If the folder is
        # still locked after a reasonable amount of time, abort the current test by
        # throwing an exception, so it won't fail later with an even more cryptic
        # error.
#
# See #13162
exception = None
while retries > 0 and testdir.exists():
time.sleep((max_attempts-retries)*6)
try:
shutil.rmtree(str(testdir), onerror=on_error, ignore_errors=False)
except Exception as e:
exception = e
retries -= 1
if retries == 0 and testdir.exists():
raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
% (testdir, exception))
else:
def cleanup() -> None:
testdir = getTestOpts().testdir
if testdir.exists():
shutil.rmtree(str(testdir), ignore_errors=False)
# -----------------------------------------------------------------------------
# Return a list of all the files ending in '.T' below directories roots.
def findTFiles(roots: List[str]) -> Iterator[str]:
for root in roots:
for path, dirs, files in os.walk(root, topdown=True):
# Never pick up .T files in uncleaned .run directories.
dirs[:] = [dir for dir in sorted(dirs)
if not dir.endswith(testdir_suffix)]
for filename in files:
if filename.endswith('.T'):
yield os.path.join(path, filename)
# -----------------------------------------------------------------------------
# Output a test summary to the specified file object
def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
file.write('\n')
printUnexpectedTests(file,
[t.unexpected_passes, t.unexpected_failures,
t.unexpected_stat_failures, t.framework_failures])
if short:
# Only print the list of unexpected tests above.
return
if len(t.unexpected_failures) > 0 or \
len(t.unexpected_stat_failures) > 0 or \
len(t.unexpected_passes) > 0 or \
len(t.framework_failures) > 0:
summary_color = Color.RED
else:
summary_color = Color.GREEN
assert t.start_time is not None
file.write(colored(summary_color, 'SUMMARY') + ' for test run started at '
+ t.start_time.strftime("%c %Z") + '\n'
+ str(datetime.datetime.now() - t.start_time).rjust(8)
+ ' spent to go through\n'
+ repr(t.total_tests).rjust(8)
+ ' total tests, which gave rise to\n'
+ repr(t.total_test_cases).rjust(8)
+ ' test cases, of which\n'
+ repr(t.n_tests_skipped).rjust(8)
+ ' were skipped\n'
+ '\n'
+ repr(len(t.missing_libs)).rjust(8)
+ ' had missing libraries\n'
+ repr(t.n_expected_passes).rjust(8)
+ ' expected passes\n'
+ repr(t.n_expected_failures).rjust(8)
+ ' expected failures\n'
+ '\n'
+ repr(len(t.framework_failures)).rjust(8)
+ ' caused framework failures\n'
+ repr(len(t.framework_warnings)).rjust(8)
+ ' caused framework warnings\n'
+ repr(len(t.unexpected_passes)).rjust(8)
+ ' unexpected passes\n'
+ repr(len(t.unexpected_failures)).rjust(8)
+ ' unexpected failures\n'
+ repr(len(t.unexpected_stat_failures)).rjust(8)
+ ' unexpected stat failures\n'
+ repr(len(t.fragile_failures) + len(t.fragile_passes)).rjust(8)
+ ' fragile tests\n'
+ '\n')
if t.unexpected_passes:
file.write('Unexpected passes:\n')
printTestInfosSummary(file, t.unexpected_passes)
if t.unexpected_failures:
file.write('Unexpected failures:\n')
printTestInfosSummary(file, t.unexpected_failures)
if t.unexpected_stat_failures:
file.write('Unexpected stat failures:\n')
printTestInfosSummary(file, t.unexpected_stat_failures)
if t.framework_failures:
file.write('Framework failures:\n')
printTestInfosSummary(file, t.framework_failures)
if t.framework_warnings:
file.write('Framework warnings:\n')
printTestInfosSummary(file, t.framework_warnings)
if t.fragile_passes:
file.write('Fragile test passes:\n')
printTestInfosSummary(file, t.fragile_passes)
if t.fragile_failures:
file.write('Fragile test failures:\n')
printTestInfosSummary(file, t.fragile_failures)
if stopping():
file.write('WARNING: Testsuite run was terminated early\n')
def printUnexpectedTests(file: TextIO, testInfoss):
unexpected = set(result.testname
for testInfos in testInfoss
for result in testInfos
if not result.testname.endswith('.T'))
if unexpected:
file.write('Unexpected results from:\n')
file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
file.write('\n')
def printTestInfosSummary(file: TextIO, testInfos):
maxDirLen = max(len(tr.directory) for tr in testInfos)
for result in sorted(testInfos, key=lambda r: (r.testname.lower(), r.way, r.directory)):
directory = result.directory.ljust(maxDirLen)
file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
r = result,
directory = directory))
file.write('\n')
def modify_lines(s: str, f: Callable[[str], str]) -> str:
s = '\n'.join([f(l) for l in s.splitlines()])
if s and s[-1] != '\n':
# Prevent '\ No newline at end of file' warnings when diffing.
s += '\n'
return s
| 35.961293
| 179
| 0.615677
|
3530a345a9c0a2157c4bbf25ddca61b5b689b283
| 3,631
|
py
|
Python
|
GCL/losses/triplet.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 361
|
2021-07-09T15:15:23.000Z
|
2022-03-30T07:08:10.000Z
|
GCL/losses/triplet.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 25
|
2021-08-21T11:06:26.000Z
|
2022-03-29T02:51:56.000Z
|
GCL/losses/triplet.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 45
|
2021-08-07T02:59:45.000Z
|
2022-03-29T05:07:17.000Z
|
import torch
from .losses import Loss
class TripletMarginSP(Loss):
def __init__(self, margin: float = 1.0, p: float = 2, *args, **kwargs):
super(TripletMarginSP, self).__init__()
self.loss_fn = torch.nn.TripletMarginLoss(margin=margin, p=p, reduction='none')
self.margin = margin
def compute(self, anchor, sample, pos_mask, neg_mask=None, *args, **kwargs):
neg_mask = 1. - pos_mask
num_pos = pos_mask.to(torch.long).sum(dim=1)
num_neg = neg_mask.to(torch.long).sum(dim=1)
dist = torch.cdist(anchor, sample, p=2) # [num_anchors, num_samples]
pos_dist = pos_mask * dist
neg_dist = neg_mask * dist
pos_dist, neg_dist = pos_dist.sum(dim=1), neg_dist.sum(dim=1)
loss = pos_dist / num_pos - neg_dist / num_neg + self.margin
loss = torch.where(loss > 0, loss, torch.zeros_like(loss))
return loss.mean()
class TripletMargin(Loss):
def __init__(self, margin: float = 1.0, p: float = 2, *args, **kwargs):
super(TripletMargin, self).__init__()
self.loss_fn = torch.nn.TripletMarginLoss(margin=margin, p=p, reduction='none')
self.margin = margin
def compute(self, anchor, sample, pos_mask, neg_mask=None, *args, **kwargs):
num_anchors = anchor.size()[0]
num_samples = sample.size()[0]
# Key idea here:
# (1) Use all possible triples (will be num_anchors * num_positives * num_negatives triples in total)
# (2) Use PyTorch's TripletMarginLoss to compute the marginal loss for each triple
# (3) Since TripletMarginLoss accepts input tensors of shape (B, D), where B is the batch size,
# we have to manually construct all triples and flatten them as an input tensor in the
# shape of (num_triples, D).
# (4) We first compute loss for all triples (including those that are not anchor - positive - negative), which
# will be num_anchors * num_samples * num_samples triples, and then filter them with masks.
# compute negative mask
neg_mask = 1. - pos_mask if neg_mask is None else neg_mask
anchor = torch.unsqueeze(anchor, dim=1) # [N, 1, D]
anchor = torch.unsqueeze(anchor, dim=1) # [N, 1, 1, D]
anchor = anchor.expand(-1, num_samples, num_samples, -1) # [N, M, M, D]
anchor = torch.flatten(anchor, end_dim=1) # [N * M * M, D]
pos_sample = torch.unsqueeze(sample, dim=0) # [1, M, D]
pos_sample = torch.unsqueeze(pos_sample, dim=2) # [1, M, 1, D]
pos_sample = pos_sample.expand(num_anchors, -1, num_samples, -1) # [N, M, M, D]
pos_sample = torch.flatten(pos_sample, end_dim=1) # [N * M * M, D]
neg_sample = torch.unsqueeze(sample, dim=0) # [1, M, D]
neg_sample = torch.unsqueeze(neg_sample, dim=0) # [1, 1, M, D]
        neg_sample = neg_sample.expand(num_anchors, num_samples, -1, -1)  # [N, M, M, D]
neg_sample = torch.flatten(neg_sample, end_dim=1) # [N * M * M, D]
loss = self.loss_fn(anchor, pos_sample, neg_sample) # [N, M, M]
loss = loss.view(num_anchors, num_samples, num_samples)
pos_mask1 = torch.unsqueeze(pos_mask, dim=2) # [N, M, 1]
pos_mask1 = pos_mask1.expand(-1, -1, num_samples) # [N, M, M]
neg_mask1 = torch.unsqueeze(neg_mask, dim=1) # [N, 1, M]
neg_mask1 = neg_mask1.expand(-1, num_samples, -1) # [N, M, M]
pair_mask = pos_mask1 * neg_mask1 # [N, M, M]
num_pairs = pair_mask.sum()
loss = loss * pair_mask
loss = loss.sum()
return loss / num_pairs
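if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the library API):
    # random embeddings and a fixed positive mask, just to show the expected
    # tensor shapes. Run as a module so the relative import above resolves.
    _anchor = torch.randn(4, 16)      # 4 anchors, 16-dimensional embeddings
    _sample = torch.randn(6, 16)      # 6 candidate samples
    _pos_mask = torch.zeros(4, 6)
    _pos_mask[:, :2] = 1.             # first two samples are positives for every anchor
    print(TripletMargin(margin=1.0).compute(_anchor, _sample, _pos_mask))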
| 44.280488
| 119
| 0.61691
|
4de6376c5e77b1485a921660489df5a8490d5d2c
| 3,169
|
py
|
Python
|
scripts/nnedi3_rpow2.py
|
darcyg/vapoursynth-plugins
|
5aaf090d3523cb8c53841949f2da286688ba33bb
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/nnedi3_rpow2.py
|
darcyg/vapoursynth-plugins
|
5aaf090d3523cb8c53841949f2da286688ba33bb
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/nnedi3_rpow2.py
|
darcyg/vapoursynth-plugins
|
5aaf090d3523cb8c53841949f2da286688ba33bb
|
[
"BSD-2-Clause"
] | 1
|
2020-04-06T16:52:59.000Z
|
2020-04-06T16:52:59.000Z
|
import vapoursynth as vs
def nnedi3_rpow2(clip, rfactor=2, width=None, height=None, correct_shift=True,
kernel="spline36", nsize=0, nns=3, qual=None, etype=None,
pscrn=None, opt=True, fapprox=None):
"""nnedi3_rpow2 is for enlarging images by powers of 2.
Args:
rfactor (int): Image enlargement factor.
Must be a power of 2 in the range [2 to 1024].
correct_shift (bool): If False, the shift is not corrected.
The correction is accomplished by using the subpixel
cropping capability of fmtc's resizers.
width (int): If correcting the image center shift by using the
"correct_shift" parameter, width/height allow you to set a
new output resolution.
kernel (string): Sets the resizer used for correcting the image
center shift that nnedi3_rpow2 introduces. This can be any of
fmtc kernels, such as "cubic", "spline36", etc.
spline36 is the default one.
nnedi3_args (mixed): For help with nnedi3 args
            refer to the nnedi3 documentation.
"""
core = vs.get_core()
# Setting up variables
plugins = core.get_plugins()
if width is None:
width = clip.width*rfactor
if height is None:
height = clip.height*rfactor
hshift = 0.0
vshift = -0.5
pkdnnedi = dict(dh=True, nsize=nsize, nns=nns, qual=qual,
etype=etype, pscrn=pscrn, opt=opt, fapprox=fapprox)
pkdchroma = dict(kernel=kernel, sy=-0.5, planes=[2, 3, 3])
tmp = 1
times = 0
while tmp < rfactor:
tmp *= 2
times += 1
# Checks
if rfactor < 2 or rfactor > 1024:
raise ValueError("nnedi3_rpow2: rfactor must be between 2 and 1024")
if tmp != rfactor:
raise ValueError("nnedi3_rpow2: rfactor must be a power of 2")
if 'com.deinterlace.nnedi3' not in plugins:
raise RuntimeError("nnedi3_rpow2: nnedi3 plugin is required")
if correct_shift or clip.format.subsampling_h:
if 'fmtconv' not in plugins:
raise RuntimeError("nnedi3_rpow2: fmtconv plugin is required")
# Processing
last = clip
for i in range(times):
field = 1 if i == 0 else 0
last = core.nnedi3.nnedi3(last, field=field, **pkdnnedi)
last = core.std.Transpose(last)
if last.format.subsampling_w:
# Apparently always using field=1 for the horizontal pass somehow
# keeps luma/chroma alignment.
field = 1
hshift = hshift*2 - 0.5
else:
hshift = -0.5
last = core.nnedi3.nnedi3(last, field=field, **pkdnnedi)
last = core.std.Transpose(last)
# Correct vertical shift of the chroma.
if clip.format.subsampling_h:
last = core.fmtc.resample(last, w=last.width, h=last.height, **pkdchroma)
if correct_shift is True:
last = core.fmtc.resample(last, w=width, h=height, kernel=kernel,
sx=hshift, sy=vshift)
if last.format.id != clip.format.id:
last = core.fmtc.bitdepth(last, csp=clip.format.id)
return last
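# Example usage (illustrative; assumes `clip` is an existing VapourSynth clip):
#   up2 = nnedi3_rpow2(clip, rfactor=2, kernel="spline36")
#   up4 = nnedi3_rpow2(clip, rfactor=4, width=2560, height=1440)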
| 34.445652
| 81
| 0.615652
|
e51a1a5f528561ef494de5c4975cdc2d8dc53084
| 7,500
|
py
|
Python
|
homeassistant/components/fritzbox/config_flow.py
|
sangwe11/home-assistant-core
|
51ebfade5260e6f8c36ffc0e3e1d130a80a21740
|
[
"Apache-2.0"
] | 1
|
2021-11-30T07:12:06.000Z
|
2021-11-30T07:12:06.000Z
|
homeassistant/components/fritzbox/config_flow.py
|
sangwe11/home-assistant-core
|
51ebfade5260e6f8c36ffc0e3e1d130a80a21740
|
[
"Apache-2.0"
] | 19
|
2021-11-30T03:40:59.000Z
|
2022-03-31T06:34:57.000Z
|
homeassistant/components/fritzbox/config_flow.py
|
sangwe11/home-assistant-core
|
51ebfade5260e6f8c36ffc0e3e1d130a80a21740
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow for AVM FRITZ!SmartHome."""
from __future__ import annotations
from typing import Any
from urllib.parse import urlparse
from pyfritzhome import Fritzhome, LoginError
from requests.exceptions import HTTPError
import voluptuous as vol
from homeassistant.components import ssdp
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.config_entries import ConfigEntry, ConfigFlow
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.data_entry_flow import FlowResult
from .const import DEFAULT_HOST, DEFAULT_USERNAME, DOMAIN
DATA_SCHEMA_USER = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_HOST): str,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
DATA_SCHEMA_CONFIRM = vol.Schema(
{
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
RESULT_INVALID_AUTH = "invalid_auth"
RESULT_NO_DEVICES_FOUND = "no_devices_found"
RESULT_NOT_SUPPORTED = "not_supported"
RESULT_SUCCESS = "success"
class FritzboxConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle an AVM FRITZ!SmartHome config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize flow."""
self._entry: ConfigEntry | None = None
self._host: str | None = None
self._name: str | None = None
self._password: str | None = None
self._username: str | None = None
def _get_entry(self, name: str) -> FlowResult:
return self.async_create_entry(
title=name,
data={
CONF_HOST: self._host,
CONF_PASSWORD: self._password,
CONF_USERNAME: self._username,
},
)
async def _update_entry(self) -> None:
assert self._entry is not None
self.hass.config_entries.async_update_entry(
self._entry,
data={
CONF_HOST: self._host,
CONF_PASSWORD: self._password,
CONF_USERNAME: self._username,
},
)
await self.hass.config_entries.async_reload(self._entry.entry_id)
def _try_connect(self) -> str:
"""Try to connect and check auth."""
fritzbox = Fritzhome(
host=self._host, user=self._username, password=self._password
)
try:
fritzbox.login()
fritzbox.get_device_elements()
fritzbox.logout()
return RESULT_SUCCESS
except LoginError:
return RESULT_INVALID_AUTH
except HTTPError:
return RESULT_NOT_SUPPORTED
except OSError:
return RESULT_NO_DEVICES_FOUND
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
self._host = user_input[CONF_HOST]
self._name = str(user_input[CONF_HOST])
self._password = user_input[CONF_PASSWORD]
self._username = user_input[CONF_USERNAME]
result = await self.hass.async_add_executor_job(self._try_connect)
if result == RESULT_SUCCESS:
return self._get_entry(self._name)
if result != RESULT_INVALID_AUTH:
return self.async_abort(reason=result)
errors["base"] = result
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
"""Handle a flow initialized by discovery."""
host = urlparse(discovery_info[ATTR_SSDP_LOCATION]).hostname
assert isinstance(host, str)
self.context[CONF_HOST] = host
if uuid := discovery_info.get(ATTR_UPNP_UDN):
if uuid.startswith("uuid:"):
uuid = uuid[5:]
await self.async_set_unique_id(uuid)
self._abort_if_unique_id_configured({CONF_HOST: host})
for progress in self._async_in_progress():
if progress.get("context", {}).get(CONF_HOST) == host:
return self.async_abort(reason="already_in_progress")
# update old and user-configured config entries
for entry in self._async_current_entries():
if entry.data[CONF_HOST] == host:
if uuid and not entry.unique_id:
self.hass.config_entries.async_update_entry(entry, unique_id=uuid)
return self.async_abort(reason="already_configured")
self._host = host
self._name = str(discovery_info.get(ATTR_UPNP_FRIENDLY_NAME) or host)
self.context["title_placeholders"] = {"name": self._name}
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle user-confirmation of discovered node."""
errors = {}
if user_input is not None:
self._password = user_input[CONF_PASSWORD]
self._username = user_input[CONF_USERNAME]
result = await self.hass.async_add_executor_job(self._try_connect)
if result == RESULT_SUCCESS:
assert self._name is not None
return self._get_entry(self._name)
if result != RESULT_INVALID_AUTH:
return self.async_abort(reason=result)
errors["base"] = result
return self.async_show_form(
step_id="confirm",
data_schema=DATA_SCHEMA_CONFIRM,
description_placeholders={"name": self._name},
errors=errors,
)
async def async_step_reauth(self, data: dict[str, str]) -> FlowResult:
"""Trigger a reauthentication flow."""
entry = self.hass.config_entries.async_get_entry(self.context["entry_id"])
assert entry is not None
self._entry = entry
self._host = data[CONF_HOST]
self._name = str(data[CONF_HOST])
self._username = data[CONF_USERNAME]
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle reauthorization flow."""
errors = {}
if user_input is not None:
self._password = user_input[CONF_PASSWORD]
self._username = user_input[CONF_USERNAME]
result = await self.hass.async_add_executor_job(self._try_connect)
if result == RESULT_SUCCESS:
await self._update_entry()
return self.async_abort(reason="reauth_successful")
if result != RESULT_INVALID_AUTH:
return self.async_abort(reason=result)
errors["base"] = result
return self.async_show_form(
step_id="reauth_confirm",
data_schema=vol.Schema(
{
vol.Required(CONF_USERNAME, default=self._username): str,
vol.Required(CONF_PASSWORD): str,
}
),
description_placeholders={"name": self._name},
errors=errors,
)
| 34.562212
| 88
| 0.626667
|
21631a77d5bda1442a11b6de4f1b20562b13cd33
| 1,399
|
py
|
Python
|
def_count/test_def_count.py
|
a-domingu/def_count
|
b3692912c4ba576d2144b1e3248231cdd4598d62
|
[
"MIT"
] | null | null | null |
def_count/test_def_count.py
|
a-domingu/def_count
|
b3692912c4ba576d2144b1e3248231cdd4598d62
|
[
"MIT"
] | null | null | null |
def_count/test_def_count.py
|
a-domingu/def_count
|
b3692912c4ba576d2144b1e3248231cdd4598d62
|
[
"MIT"
] | null | null | null |
import os, shutil
import git, pytest
from pyspark import SparkContext
from def_count import get_files, count
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
From https://stackoverflow.com/a/2656405
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise Exception("Cannot delete dir with shutil.rmtree")
@pytest.fixture
def workspace():
path = os.path.join(os.getcwd(), '__downloaded__')
if not os.path.exists(path):
os.mkdir(path)
try:
git.Repo.clone_from('https://github.com/a-domingu/download_repos', path, depth = 1)
except Exception:
pass
yield path
try:
shutil.rmtree(path, onerror=onerror)
except Exception as ex:
print(ex)
def test_get_files(workspace):
assert len(get_files(workspace)) == 3
assert os.path.join(os.getcwd(), '__downloaded__', 'get_repos', 'repos.py') in get_files(workspace)
def test_count(workspace):
ls_files = get_files(workspace)
num = count(ls_files)
assert num == 11
| 25.436364
| 103
| 0.663331
|
cef002cb87099f87fe7c0ee03d47e9c6ae7e7031
| 268
|
py
|
Python
|
celery/celeryconfig.py
|
jpnewman/python_ocr_api
|
1063a03c273438a8df55d6296b87940bf143aa60
|
[
"MIT"
] | null | null | null |
celery/celeryconfig.py
|
jpnewman/python_ocr_api
|
1063a03c273438a8df55d6296b87940bf143aa60
|
[
"MIT"
] | null | null | null |
celery/celeryconfig.py
|
jpnewman/python_ocr_api
|
1063a03c273438a8df55d6296b87940bf143aa60
|
[
"MIT"
] | null | null | null |
# Celery configuration file
BROKER_URL = 'pyamqp://guest@localhost//'
CELERY_RESULT_BACKEND = 'amqp://'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Europe/London'
CELERY_ENABLE_UTC = True
CELERY_CREATE_MISSING_QUEUES = True
| 22.333333
| 41
| 0.791045
|
81de18220c096da5c06dc5cfb447333a59dcd46d
| 18,255
|
py
|
Python
|
jans-linux-setup/jans_setup/setup_app/installers/jetty.py
|
duttarnab/jans
|
b4ae02f9cb60433a44a2b889268525532d82a247
|
[
"Apache-2.0"
] | null | null | null |
jans-linux-setup/jans_setup/setup_app/installers/jetty.py
|
duttarnab/jans
|
b4ae02f9cb60433a44a2b889268525532d82a247
|
[
"Apache-2.0"
] | null | null | null |
jans-linux-setup/jans_setup/setup_app/installers/jetty.py
|
duttarnab/jans
|
b4ae02f9cb60433a44a2b889268525532d82a247
|
[
"Apache-2.0"
] | null | null | null |
import os
import glob
import re
import shutil
import xml.etree.ElementTree as ET
from setup_app import paths
from setup_app.utils import base
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils.setup_utils import SetupUtils
from setup_app.installers.base import BaseInstaller
class JettyInstaller(BaseInstaller, SetupUtils):
# let's borrow these variables from Config
jetty_home = Config.jetty_home
jetty_base = Config.jetty_base
jetty_app_configuration = base.readJsonFile(os.path.join(paths.DATA_DIR, 'jetty_app_configuration.json'), ordered=True)
def __init__(self):
setattr(base.current_app, self.__class__.__name__, self)
self.service_name = 'jetty'
self.needdb = False # we don't need backend connection in this class
self.install_var = 'installJetty'
self.app_type = AppType.APPLICATION
self.install_type = InstallOption.MONDATORY
if not base.snap:
self.register_progess()
self.jetty_user_home = '/home/jetty'
self.jetty_user_home_lib = os.path.join(self.jetty_user_home, 'lib')
self.app_custom_changes = {
'jetty' : {
'name' : 'jetty',
'files' : [
{
'path' : os.path.join(self.jetty_home, 'etc/webdefault.xml'),
'replace' : [
{
'pattern' : r'(\<param-name\>dirAllowed<\/param-name\>)(\s*)(\<param-value\>)true(\<\/param-value\>)',
'update' : r'\1\2\3false\4'
}
]
},
{
'path' : os.path.join(self.jetty_home, 'etc/jetty.xml'),
'replace' : [
{
'pattern' : '<New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"/>',
'update' : '<New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler">\n\t\t\t\t <Set name="showContexts">false</Set>\n\t\t\t </New>'
}
]
}
]
}
}
def install(self):
self.createUser('jetty', self.jetty_user_home)
self.addUserToGroup('jans', 'jetty')
self.run([paths.cmd_mkdir, '-p', self.jetty_user_home_lib])
jettyArchive, jetty_dist = self.get_jetty_info()
jettyTemp = os.path.join(jetty_dist, 'temp')
self.run([paths.cmd_mkdir, '-p', jettyTemp])
self.run([paths.cmd_chown, '-R', 'jetty:jetty', jettyTemp])
try:
self.logIt("Extracting %s into /opt/jetty" % jettyArchive)
self.run(['tar', '-xzf', jettyArchive, '-C', jetty_dist, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
except:
self.logIt("Error encountered while extracting archive %s" % jettyArchive)
jettyDestinationPath = max(glob.glob(os.path.join(jetty_dist, '{}-*'.format(self.jetty_dist_string))))
self.run([paths.cmd_ln, '-sf', jettyDestinationPath, self.jetty_home])
self.run([paths.cmd_chmod, '-R', "755", "%s/bin/" % jettyDestinationPath])
self.applyChangesInFiles(self.app_custom_changes['jetty'])
self.run([paths.cmd_chown, '-R', 'jetty:jetty', jettyDestinationPath])
self.run([paths.cmd_chown, '-h', 'jetty:jetty', self.jetty_home])
self.run([paths.cmd_mkdir, '-p', self.jetty_base])
self.run([paths.cmd_chown, '-R', 'jetty:jetty', self.jetty_base])
jettyRunFolder = '/var/run/jetty'
self.run([paths.cmd_mkdir, '-p', jettyRunFolder])
self.run([paths.cmd_chmod, '-R', '775', jettyRunFolder])
self.run([paths.cmd_chgrp, '-R', 'jetty', jettyRunFolder])
self.run(['rm', '-rf', '/opt/jetty/bin/jetty.sh'])
self.copyFile("%s/system/initd/jetty.sh" % Config.staticFolder, "%s/bin/jetty.sh" % self.jetty_home)
self.run([paths.cmd_chown, '-R', 'jetty:jetty', "%s/bin/jetty.sh" % self.jetty_home])
self.run([paths.cmd_chmod, '-R', '755', "%s/bin/jetty.sh" % self.jetty_home])
def get_jetty_info(self):
self.jetty_dist_string = 'jetty-home'
# first try latest versions
jetty_archive_list = glob.glob(os.path.join(Config.distAppFolder, 'jetty-home-*.tar.gz'))
if not jetty_archive_list:
jetty_archive_list = glob.glob(os.path.join(Config.distAppFolder, 'jetty-distribution-*.tar.gz'))
self.jetty_dist_string = 'jetty-distribution'
if not jetty_archive_list:
self.logIt("Jetty archive not found in {}. Exiting...".format(Config.distAppFolder), True, True)
jettyArchive = max(jetty_archive_list)
jettyArchive_fn = os.path.basename(jettyArchive)
jetty_regex = re.search('{}-(\d*\.\d*)'.format(self.jetty_dist_string), jettyArchive_fn)
if not jetty_regex:
self.logIt("Can't determine Jetty version", True, True)
jetty_dist = '/opt/jetty-' + jetty_regex.groups()[0]
Config.templateRenderingDict['jetty_dist'] = jetty_dist
self.jetty_version_string = jetty_regex.groups()[0]
return jettyArchive, jetty_dist
def installJettyService(self, serviceConfiguration, supportCustomizations=False, supportOnlyPageCustomizations=False):
serviceName = serviceConfiguration['name']
self.logIt("Installing jetty service %s..." % serviceName)
self.get_jetty_info()
jettyServiceBase = '%s/%s' % (self.jetty_base, serviceName)
jettyModules = serviceConfiguration['jetty']['modules']
jettyModulesList = jettyModules.split(',')
self.web_app_xml_fn = os.path.join(self.jetty_base, serviceName, 'webapps', serviceName+'.xml')
jettyModulesList = [m.strip() for m in jettyModules.split(',')]
if self.jetty_dist_string == 'jetty-home':
if not 'cdi-decorate' in jettyModulesList:
jettyModulesList.append('cdi-decorate')
jettyModules = ','.join(jettyModulesList)
if base.snap:
Config.templateRenderingDict['jetty_dist'] = self.jetty_base
else:
# we need this, because this method may be called externally
jettyArchive, jetty_dist = self.get_jetty_info()
self.logIt("Preparing %s service base folders" % serviceName)
self.run([paths.cmd_mkdir, '-p', jettyServiceBase])
        # Create ./ext/lib folder for custom libraries, but only if the Jetty "ext" module is installed
if "ext" in jettyModulesList:
self.run([paths.cmd_mkdir, '-p', "%s/lib/ext" % jettyServiceBase])
# Create ./custom/pages and ./custom/static folders for custom pages and static resources, only if application supports them
if supportCustomizations:
if not os.path.exists("%s/custom" % jettyServiceBase):
self.run([paths.cmd_mkdir, '-p', "%s/custom" % jettyServiceBase])
self.run([paths.cmd_mkdir, '-p', "%s/custom/pages" % jettyServiceBase])
if not supportOnlyPageCustomizations:
self.run([paths.cmd_mkdir, '-p', "%s/custom/i18n" % jettyServiceBase])
self.run([paths.cmd_mkdir, '-p', "%s/custom/static" % jettyServiceBase])
self.run([paths.cmd_mkdir, '-p', "%s/custom/libs" % jettyServiceBase])
self.logIt("Preparing %s service base configuration" % serviceName)
jettyEnv = os.environ.copy()
jettyEnv['PATH'] = '%s/bin:' % Config.jre_home + jettyEnv['PATH']
self.run([Config.cmd_java, '-jar', '%s/start.jar' % self.jetty_home, 'jetty.home=%s' % self.jetty_home, 'jetty.base=%s' % jettyServiceBase, '--add-to-start=%s' % jettyModules], None, jettyEnv)
self.run([paths.cmd_chown, '-R', 'jetty:jetty', jettyServiceBase])
        # make variables of this class accessible from Config
self.update_rendering_dict()
try:
self.renderTemplateInOut(serviceName, '%s/jetty' % Config.templateFolder, '%s/jetty' % Config.outputFolder)
except:
self.logIt("Error rendering service '%s' defaults" % serviceName, True)
jettyServiceConfiguration = '%s/jetty/%s' % (Config.outputFolder, serviceName)
self.copyFile(jettyServiceConfiguration, Config.osDefault)
self.run([paths.cmd_chown, 'root:root', os.path.join(Config.osDefault, serviceName)])
        # Render web resources file
try:
web_resources = '%s_web_resources.xml' % serviceName
if os.path.exists('%s/jetty/%s' % (Config.templateFolder, web_resources)):
self.renderTemplateInOut(web_resources, '%s/jetty' % Config.templateFolder, '%s/jetty' % Config.outputFolder)
self.copyFile('%s/jetty/%s' % (Config.outputFolder, web_resources), "%s/%s/webapps" % (self.jetty_base, serviceName))
except:
self.logIt("Error rendering service '%s' web_resources.xml" % serviceName, True)
# Render web context file
try:
web_context = '%s.xml' % serviceName
if os.path.exists('%s/jetty/%s' % (Config.templateFolder, web_context)):
self.renderTemplateInOut(web_context, '%s/jetty' % Config.templateFolder, '%s/jetty' % Config.outputFolder)
self.copyFile('%s/jetty/%s' % (Config.outputFolder, web_context), "%s/%s/webapps" % (self.jetty_base, serviceName))
except:
self.logIt("Error rendering service '%s' context xml" % serviceName, True)
initscript_fn = os.path.join(self.jetty_home, 'bin/jetty.sh')
self.fix_init_scripts(serviceName, initscript_fn)
if not base.snap:
tmpfiles_base = '/usr/lib/tmpfiles.d'
if Config.os_initdaemon == 'systemd' and os.path.exists(tmpfiles_base):
self.logIt("Creating 'jetty.conf' tmpfiles daemon file")
jetty_tmpfiles_src = '%s/jetty.conf.tmpfiles.d' % Config.templateFolder
jetty_tmpfiles_dst = '%s/jetty.conf' % tmpfiles_base
self.copyFile(jetty_tmpfiles_src, jetty_tmpfiles_dst)
self.run([paths.cmd_chown, 'root:root', jetty_tmpfiles_dst])
self.run([paths.cmd_chmod, '644', jetty_tmpfiles_dst])
self.copyFile(os.path.join(self.jetty_home, 'bin/jetty.sh'), os.path.join(Config.distFolder, 'scripts', serviceName), backup=False)
serviceConfiguration['installed'] = True
# don't send header to server
inifile = 'http.ini' if self.jetty_dist_string == 'jetty-home' else 'start.ini'
self.set_jetty_param(serviceName, 'jetty.httpConfig.sendServerVersion', 'false', inifile=inifile)
if base.snap:
run_dir = os.path.join(jettyServiceBase, 'run')
if not os.path.exists(run_dir):
self.run([paths.cmd_mkdir, '-p', run_dir])
self.write_webapps_xml()
def set_jetty_param(self, jettyServiceName, jetty_param, jetty_val, inifile='start.ini'):
        self.logIt("Setting jetty parameter {0}={1} for service {2}".format(jetty_param, jetty_val, jettyServiceName))
path_list = [self.jetty_base, jettyServiceName, inifile]
if inifile != 'start.ini':
path_list.insert(-1, 'start.d')
service_fn = os.path.join(*tuple(path_list))
start_ini = self.readFile(service_fn)
start_ini_list = start_ini.splitlines()
param_ln = jetty_param + '=' + jetty_val
for i, l in enumerate(start_ini_list[:]):
if jetty_param in l and l[0]=='#':
start_ini_list[i] = param_ln
break
elif l.strip().startswith(jetty_param):
start_ini_list[i] = param_ln
break
else:
start_ini_list.append(param_ln)
self.writeFile(service_fn, '\n'.join(start_ini_list), backup=False)
def calculate_aplications_memory(self, application_max_ram, jetty_app_configuration, installedComponents):
self.logIt("Calculating memory setting for applications")
allowedApplicationsMemory = {}
application_max_ram = int(application_max_ram)
application_max_ram -= len(installedComponents) * 128
retVal = True
usedRatio = 0.001
for installedComponent in installedComponents:
usedRatio += installedComponent['memory']['ratio']
ratioMultiplier = 1.0 + (1.0 - usedRatio)/usedRatio
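        # For example (illustrative): if the installed components' ratios sum to
        # roughly 0.5, ratioMultiplier comes out at about 2.0, so each allowed
        # share below is scaled up until the shares together consume the whole
        # remaining application_max_ram budget (subject to the max_allowed_mb cap).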
for installedComponent in installedComponents:
allowedRatio = installedComponent['memory']['ratio'] * ratioMultiplier
allowedMemory = int(round(allowedRatio * int(application_max_ram)))
if allowedMemory > installedComponent['memory']['max_allowed_mb']:
allowedMemory = installedComponent['memory']['max_allowed_mb']
allowedApplicationsMemory[installedComponent['name']] = allowedMemory
        # Iterate through all components in order to prepare all keys
for applicationName, applicationConfiguration in jetty_app_configuration.items():
if applicationName in allowedApplicationsMemory:
applicationMemory = allowedApplicationsMemory.get(applicationName)
else:
                # We use this dummy value to render the template properly for applications that are not installed
applicationMemory = 256
Config.templateRenderingDict["%s_max_mem" % applicationName] = applicationMemory
if 'jvm_heap_ration' in applicationConfiguration['memory']:
jvmHeapRation = applicationConfiguration['memory']['jvm_heap_ration']
minHeapMem = 256
maxHeapMem = int(applicationMemory * jvmHeapRation)
if maxHeapMem < minHeapMem:
minHeapMem = maxHeapMem
Config.templateRenderingDict["%s_max_heap_mem" % applicationName] = maxHeapMem
Config.templateRenderingDict["%s_min_heap_mem" % applicationName] = minHeapMem
if maxHeapMem < 256 and applicationName in allowedApplicationsMemory:
retVal = False
return retVal
def write_webapps_xml(self, jans_app_path=None, jans_apps=None):
if not jans_app_path:
jans_app_path = '/'+self.service_name
if not jans_apps:
jans_apps = self.service_name+'.war'
web_apps_xml_fn = os.path.join(Config.templateFolder, 'jetty/jans-app.xml')
web_apps_xml = self.readFile(web_apps_xml_fn)
web_apps_xml = self.fomatWithDict(web_apps_xml, {'jans_app_path': jans_app_path, 'jans_apps': jans_apps})
self.writeFile(self.web_app_xml_fn, web_apps_xml)
def calculate_selected_aplications_memory(self):
Config.pbar.progress("jans", "Calculating application memory")
installedComponents = []
# Jetty apps
for config_var, service in [('installOxAuth', 'jans-auth'),
('installScimServer', 'jans-scim'),
('installFido2', 'jans-fido2'),
('installConfigApi', 'jans-config-api'),
('installEleven', 'jans-eleven')]:
if Config.get(config_var) and service in self.jetty_app_configuration:
installedComponents.append(self.jetty_app_configuration[service])
return self.calculate_aplications_memory(Config.application_max_ram, self.jetty_app_configuration, installedComponents)
def war_for_jetty10(self, war_file):
if self.jetty_dist_string == 'jetty-home':
tmp_dir = '/tmp/war_{}'.format(os.urandom(6).hex())
shutil.unpack_archive(war_file, tmp_dir, format='zip')
jetty_env_fn = os.path.join(tmp_dir, 'WEB-INF/jetty-env.xml')
tree = ET.parse(jetty_env_fn)
root = tree.getroot()
for new in root.findall("New"):
for arg in new.findall("Arg"):
for ref in arg.findall("Ref"):
if ref.attrib.get('id') == 'webAppCtx':
ref.set('refid', 'webAppCtx')
ref.attrib.pop('id')
jetty_web_fn = os.path.join(tmp_dir, 'WEB-INF/jetty-web.xml')
if os.path.exists(jetty_web_fn):
os.remove(jetty_web_fn)
xml_header = '<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "https://www.eclipse.org/jetty/configure_{}.dtd">\n\n'.format(self.jetty_version_string.replace('.', '_'))
with open(jetty_env_fn, 'wb') as f:
f.write(b'<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(xml_header.encode())
f.write(ET.tostring(root,method='xml'))
tmp_war_fn = '/tmp/{}.war'.format(os.urandom(6).hex())
shutil.make_archive(tmp_war_fn, format='zip', root_dir=tmp_dir)
shutil.rmtree(tmp_dir)
os.remove(war_file)
shutil.move(tmp_war_fn+'.zip', war_file)
def add_extra_class(self, class_path, xml_fn=None):
if not xml_fn:
xml_fn = self.web_app_xml_fn
tree = ET.parse(xml_fn)
root = tree.getroot()
for app_set in root.findall("Set"):
if app_set.get('name') == 'extraClasspath' and app_set.text.endswith(os.path.basename(class_path)):
break
else:
child = ET.Element("Set")
child.set('name', 'extraClasspath')
child.text = class_path
root.append(child)
with open(xml_fn, 'wb') as f:
f.write(b'<?xml version="1.0" encoding="ISO-8859-1"?>\n')
f.write(b'<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">\n')
f.write(ET.tostring(root, method='xml'))
def installed(self):
return os.path.exists(os.path.join(Config.jetty_base, self.service_name, 'start.ini')) or os.path.exists(os.path.join(Config.jetty_base, self.service_name, 'start.d/server.ini'))
| 47.048969
| 200
| 0.616653
|
7094bed02cbc481b3bd2fe9697c5d3b75c966246
| 4,940
|
py
|
Python
|
Ryven/packages/auto_generated/sre_parse/nodes.py
|
tfroehlich82/Ryven
|
cb57c91d13949712844a4410a9302c4a90d28dcd
|
[
"MIT"
] | 2,872
|
2020-07-01T09:06:34.000Z
|
2022-03-31T05:52:32.000Z
|
Ryven/packages/auto_generated/sre_parse/nodes.py
|
dhf327/Ryven
|
a11e361528d982a9dd3c489dd536f8b05ffd56e1
|
[
"MIT"
] | 59
|
2020-06-28T12:50:50.000Z
|
2022-03-27T19:07:54.000Z
|
Ryven/packages/auto_generated/sre_parse/nodes.py
|
dhf327/Ryven
|
a11e361528d982a9dd3c489dd536f8b05ffd56e1
|
[
"MIT"
] | 339
|
2020-07-05T04:36:20.000Z
|
2022-03-24T07:25:18.000Z
|
from NENV import *
import sre_parse
class NodeBase(Node):
pass
class _Class_Escape_Node(NodeBase):
"""
"""
title = '_class_escape'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='escape'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._class_escape(self.input(0), self.input(1)))
class _Escape_Node(NodeBase):
"""
"""
title = '_escape'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='escape'),
NodeInputBP(label='state'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._escape(self.input(0), self.input(1), self.input(2)))
class _Parse_Node(NodeBase):
"""
"""
title = '_parse'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='state'),
NodeInputBP(label='verbose'),
NodeInputBP(label='nested'),
NodeInputBP(label='first', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._parse(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4)))
class _Parse_Flags_Node(NodeBase):
"""
"""
title = '_parse_flags'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='state'),
NodeInputBP(label='char'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._parse_flags(self.input(0), self.input(1), self.input(2)))
class _Parse_Sub_Node(NodeBase):
"""
"""
title = '_parse_sub'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='state'),
NodeInputBP(label='verbose'),
NodeInputBP(label='nested'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._parse_sub(self.input(0), self.input(1), self.input(2), self.input(3)))
class _Uniq_Node(NodeBase):
"""
"""
title = '_uniq'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='items'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse._uniq(self.input(0)))
class Expand_Template_Node(NodeBase):
"""
"""
title = 'expand_template'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='template'),
NodeInputBP(label='match'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse.expand_template(self.input(0), self.input(1)))
class Fix_Flags_Node(NodeBase):
"""
"""
title = 'fix_flags'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='src'),
NodeInputBP(label='flags'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse.fix_flags(self.input(0), self.input(1)))
class Parse_Node(NodeBase):
"""
"""
title = 'parse'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='str'),
NodeInputBP(label='flags', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='state', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse.parse(self.input(0), self.input(1), self.input(2)))
class Parse_Template_Node(NodeBase):
"""
"""
title = 'parse_template'
type_ = 'sre_parse'
init_inputs = [
NodeInputBP(label='source'),
NodeInputBP(label='state'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, sre_parse.parse_template(self.input(0), self.input(1)))
export_nodes(
_Class_Escape_Node,
_Escape_Node,
_Parse_Node,
_Parse_Flags_Node,
_Parse_Sub_Node,
_Uniq_Node,
Expand_Template_Node,
Fix_Flags_Node,
Parse_Node,
Parse_Template_Node,
)
| 22.352941
| 123
| 0.580162
|
87bee7c35786a8a9594726ebc2004116383c2a9e
| 1,991
|
py
|
Python
|
doc/tutorial/modes_solution_1.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | 1
|
2020-12-30T19:12:52.000Z
|
2020-12-30T19:12:52.000Z
|
doc/tutorial/modes_solution_1.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | null | null | null |
doc/tutorial/modes_solution_1.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | 1
|
2020-08-15T17:09:10.000Z
|
2020-08-15T17:09:10.000Z
|
#!/usr/bin/env python
# Theano tutorial
# Solution to Exercise in section 'Configuration Settings and Compiling Modes'
import numpy as np
import theano
import theano.tensor as tt
theano.config.floatX = 'float32'
rng = np.random
N = 400
feats = 784
D = (rng.randn(N, feats).astype(theano.config.floatX),
rng.randint(size=N, low=0, high=2).astype(theano.config.floatX))
training_steps = 10000
# Declare Theano symbolic variables
x = tt.matrix("x")
y = tt.vector("y")
w = theano.shared(rng.randn(feats).astype(theano.config.floatX), name="w")
b = theano.shared(np.asarray(0., dtype=theano.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy
cost = tt.cast(xent.mean(), 'float32') + \
0.01 * (w ** 2).sum() # The cost to optimize
gw, gb = tt.grad(cost, [w, b])
# Compile expressions to functions
train = theano.function(
inputs=[x, y],
outputs=[prediction, xent],
updates={w: w - 0.01 * gw, b: b - 0.01 * gb},
name="train")
predict = theano.function(inputs=[x], outputs=prediction,
name="predict")
if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
train.maker.fgraph.toposort()]):
print('Used the cpu')
elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
train.maker.fgraph.toposort()]):
print('Used the gpu')
else:
print('ERROR, not able to tell if theano used the cpu or the gpu')
print(train.maker.fgraph.toposort())
for i in range(training_steps):
pred, err = train(D[0], D[1])
#print "Final model:"
#print w.get_value(), b.get_value()
print("target values for D")
print(D[1])
print("prediction on D")
print(predict(D[0]))
| 29.716418
| 78
| 0.655952
|
5f09b985c3c53792f2f43a261dcb0f4756e56220
| 2,777
|
py
|
Python
|
anima/env/mayaEnv/hierarchy_instancer.py
|
MehmetErer/anima
|
f92ae599b5a4c181fc8e131a9ccdde537e635303
|
[
"MIT"
] | 101
|
2015-02-08T22:20:11.000Z
|
2022-03-21T18:56:42.000Z
|
anima/env/mayaEnv/hierarchy_instancer.py
|
Khosiyat/anima
|
f631c08400547f49ac5f1feeb730f22c255eb771
|
[
"MIT"
] | 23
|
2016-11-30T08:33:21.000Z
|
2021-01-26T12:11:12.000Z
|
anima/env/mayaEnv/hierarchy_instancer.py
|
Khosiyat/anima
|
f631c08400547f49ac5f1feeb730f22c255eb771
|
[
"MIT"
] | 27
|
2015-01-03T06:49:45.000Z
|
2021-12-28T03:30:54.000Z
|
# -*- coding: utf-8 -*-
"""
hierarchy_instancer by
v10.6.17
Given a group, it instances the hierarchy. It is written to avoid having
instanced groups in the scene. Thus multi instancing is avoided.
ChangeLog:
----------
10.6.17
- the script now works with single object hierarchies
- after the script finishes its job, it selects the created top most node
10.6.12
- initial working version
"""
import pymel.core as pm
__version__ = '10.6.12'
class HierarchyInstancer(object):
"""the hierarchy object
"""
def __init__(self):
self._instantiable_types = []
self.add_instantiable(pm.nodetypes.Mesh)
self.add_instantiable(pm.nodetypes.NurbsCurve)
self.add_instantiable(pm.nodetypes.NurbsSurface)
self.add_instantiable(pm.nodetypes.Subdiv)
self.add_instantiable(pm.nodetypes.Locator)
def add_instantiable(self, node_type):
"""Adds new instantiable node type, the type should be a
pymel.core.nodeType class
"""
self._instantiable_types.append(node_type)
def walk_hierarchy(self, node):
"""for the given dag node, walks through the hierarchy
"""
assert(isinstance(node, pm.nodetypes.Transform))
nodes = []
for node in node.getChildren():
# try to get children if it is a transform node
if isinstance(node, pm.nodetypes.Transform):
child_nodes = self.walk_hierarchy(node)
nodes.append(node)
nodes += child_nodes
return nodes
def instance(self, source_transform_node):
"""instances the given nodes hierarchy
"""
# duplicate the given node
# then replace the instantiable nodes with instances
# find instantiable nodes in the node and dupNode
source_hierarchy = self.walk_hierarchy(source_transform_node)
# if there is no node in the sourceHierarchy just return
# the instance of the given node
if len(source_hierarchy) < 1:
dup_node = pm.duplicate(source_transform_node, ilf=1, rc=True)[0]
pm.select(dup_node)
return dup_node
dup_node = pm.duplicate(source_transform_node, rc=True)[0]
dup_hierarchy = self.walk_hierarchy(dup_node)
for i, node in enumerate(dup_hierarchy):
shape = node.getShape()
if shape is not None and isinstance(shape, tuple(self._instantiable_types)):
# instance the corresponding sourceNode
source_node = source_hierarchy[i]
new_instance_node = pm.duplicate(source_node, ilf=True)[0]
pm.parent(new_instance_node, node.getParent(), r=False)
pm.delete(node)
return dup_node
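# Usage sketch (illustrative; the group name 'sourceGrp' is hypothetical and
# assumes an open Maya scene with pymel initialized):
#
#   instancer = HierarchyInstancer()
#   top_node = instancer.instance(pm.PyNode('sourceGrp'))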
| 29.542553
| 88
| 0.647101
|
c802d959a43a9179c38f2265cda3b2d327e9aeac
| 550
|
py
|
Python
|
pbs_util/qdel_name.py
|
Clyde-fare/pbs_util
|
1c1ed93773a9a020f9216056d2ae49cc0cd589d1
|
[
"BSD-3-Clause"
] | 1
|
2015-08-24T02:48:00.000Z
|
2015-08-24T02:48:00.000Z
|
pbs_util/qdel_name.py
|
Clyde-fare/pbs_util
|
1c1ed93773a9a020f9216056d2ae49cc0cd589d1
|
[
"BSD-3-Clause"
] | null | null | null |
pbs_util/qdel_name.py
|
Clyde-fare/pbs_util
|
1c1ed93773a9a020f9216056d2ae49cc0cd589d1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import argparse
import pbs
def kill_all_jobs_named(username, name):
for job in pbs.qstat(user=username):
if job.name.find(name) >= 0:
print 'Killing ', job
pbs.qdel(job)
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description="Kill all jobs which contain name as a substring.")
parser.add_argument('name')
args = parser.parse_args(argv)
kill_all_jobs_named(os.getenv('USER'), args.name)
if __name__ == "__main__":
main()
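# Example invocation (the job-name substring is hypothetical):
#
#   python qdel_name.py my_job_prefix
#
# which qdel's every job owned by $USER whose name contains "my_job_prefix".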
| 21.153846
| 100
| 0.661818
|
2057a56d8f76e815446912e9e5d782798c0af940
| 555
|
py
|
Python
|
ch2_5/blender01.py
|
o-kei/design-computing-aij
|
954b46fb5f2192ab79fc003a2ca3a259e41dc7a4
|
[
"MIT"
] | 11
|
2017-11-11T05:09:45.000Z
|
2022-02-14T05:01:22.000Z
|
ch2_5/blender01.py
|
o-kei/design-computing-aij
|
954b46fb5f2192ab79fc003a2ca3a259e41dc7a4
|
[
"MIT"
] | 3
|
2017-02-09T18:20:25.000Z
|
2017-11-10T01:32:40.000Z
|
ch2_5/blender01.py
|
o-kei/design-computing-aij
|
954b46fb5f2192ab79fc003a2ca3a259e41dc7a4
|
[
"MIT"
] | 6
|
2016-12-17T03:06:57.000Z
|
2021-04-28T15:37:06.000Z
|
import bpy
bpy.ops.mesh.primitive_circle_add(location=(5.0, 5.0, 0.0))
bpy.ops.mesh.primitive_cone_add(location=(0.0, 5.0, 0.0))
bpy.ops.mesh.primitive_cube_add(location=(-5.0, 5.0, 0.0))
bpy.ops.mesh.primitive_cylinder_add(location=(5.0, 0.0, 0.0))
bpy.ops.mesh.primitive_grid_add(location=(0.0, 0.0, 0.0))
bpy.ops.mesh.primitive_ico_sphere_add(location=(-5.0, 0.0, 0.0))
bpy.ops.mesh.primitive_monkey_add(location=(5.0, -5.0, 0.0))
bpy.ops.mesh.primitive_plane_add(location=(0.0, -5.0, 0.0))
bpy.ops.mesh.primitive_torus_add(location=(-5.0, -5.0, 0.0))
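# The script is intended to run inside Blender; a headless run would look
# roughly like this (the file path is assumed):
#
#   blender --background --python ch2_5/blender01.py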
| 50.454545
| 64
| 0.726126
|
21500a4737178a81728ea036558b7591195c2be2
| 4,058
|
py
|
Python
|
src/tasks/templatetags/task_extras.py
|
Stdubic/Track
|
853df13178967ab9b5c1918d6d56fa7fe2831b0f
|
[
"MIT"
] | 1
|
2015-09-14T19:54:56.000Z
|
2015-09-14T19:54:56.000Z
|
src/tasks/templatetags/task_extras.py
|
Stdubic/Track
|
853df13178967ab9b5c1918d6d56fa7fe2831b0f
|
[
"MIT"
] | null | null | null |
src/tasks/templatetags/task_extras.py
|
Stdubic/Track
|
853df13178967ab9b5c1918d6d56fa7fe2831b0f
|
[
"MIT"
] | null | null | null |
'''
Created on Jan 2, 2015
@author: Milos
'''
from django import template
from django.core.urlresolvers import reverse
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def commnets(milestone):
"""Filtrira milestone tako da nadje samo broj komentara za svaki pojedinacni"""
comments = milestone.event_set.filter(event_kind="K")
size = comments.count()
return size
@register.filter
def closedtasks(milestone):
"""Filtrira taskove, uzimajuci samo one koji su zatvoreni i vraca njihov broj"""
closed_tasks = milestone.task_set.filter(state_kind="Z")
size = closed_tasks.count()
return size
@register.filter
def percentage(milestone):
"""Filter koji racuina procentualno koliko je posla uradjeno"""
closed_tasks = milestone.task_set.filter(state_kind="Z")
part = closed_tasks.count()
all_tasks = milestone.task_set.all()
whole = all_tasks.count()
if(part != 0 and whole != 0):
return round(100 * float(part)/float(whole),2)
return 0
@register.filter
def showname(keyvalue):
"""filter koji za neku od prosledjenih kljuceva vraca vrednost"""
key_dict ={'P':'Accepted','C': 'Created','Z': 'Closed','O': 'On Wait'}
return key_dict[keyvalue]
@register.filter
def paintborder(priority):
"""filter koji dodaje boju za vaznost"""
key_dict ={'C':'#ce2b37','H': '#ee6c3a','M': '#41783f','L': '#3d70b6'}
return key_dict[priority]
@register.filter
def event_glyphicon_style(event):
return {'K':"glyphicon-comment",
'C':"glyphicon-record",
'S':"glyphicon-cog",
'A':"glyphicon-plus-sign",
'P':"glyphicon-exclamation-sign",
'R':"glyphicon-ok-sign",
}[event.event_kind]
@register.filter
def task_priority_style(task):
if task.state_kind == 'Z':
return ""
style_prefix = "bs-callout-"
return style_prefix + {'L':"success",
'M':"info",
'H':"warning",
'C':"danger",
}[task.priority_lvl]
@register.filter
def event_summary(event):
if event.requirement_task:
if hasattr(event.requirement_task, 'task'):
summary="Task "
elif hasattr(event.requirement_task, 'requirement'):
summary="Requirement "
else:
summary=''
summary += '"'+event.requirement_task.name+'": '
elif event.milestone:
summary = '"Milestone "'+event.milestone.name+'": '
else:
summary = ''
if event.event_kind == 'K':
summary += event.comment.content
elif event.event_kind == 'C':
summary += event.commit.message
elif event.event_kind == 'S':
summary += event.statechange.getstate()
max_length = 100
if len(summary) > max_length:
summary = summary[:max_length-3]+"..."
else:
summary = summary[:max_length]
return summary
def do_escape(to_escape, autoescape):
return conditional_escape(to_escape) if autoescape else to_escape
@register.filter
def event_user(event, autoescape=None):
if event.event_kind == 'C':
if event.commit.committer_user:
user = event.commit.committer_user
ret = """<a href="{author_url}"><span class="glyphicon glyphicon-user"></span> {user_name}</a>""".format(author_url=reverse('author', kwargs={'pk':user.pk}), user_name=do_escape(user.username, autoescape))
else:
ret = """<span class="glyphicon glyphicon-user"></span> {user_name}""".format(user_name=do_escape(event.commit.committer_name, autoescape))
else:
ret = """<a href="{author_url}"><span class="glyphicon glyphicon-user"></span> {user_name}</a>""".format(author_url=reverse('author', kwargs={'pk':event.event_user.pk}), user_name=do_escape(event.event_user.username, autoescape))
return mark_safe(ret)
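# Template usage sketch (the surrounding markup and variable names are
# hypothetical):
#
#   {% load task_extras %}
#   {{ milestone|closedtasks }} closed, {{ milestone|commnets }} comments,
#   {{ milestone|percentage }}% done
#   <div class="bs-callout {{ task|task_priority_style }}">...</div>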
| 30.977099
| 237
| 0.627649
|
dd5223e6d7b077aafc7a9b58df6980df83bc9fc9
| 2,930
|
py
|
Python
|
gym_arc/envs/rendering.py
|
MitrofanovDmitry/gym-arc
|
e39f320d4bb153881f2d36f6bf3f5591a0225e4f
|
[
"MIT"
] | 1
|
2020-04-04T17:47:03.000Z
|
2020-04-04T17:47:03.000Z
|
gym_arc/envs/rendering.py
|
dimbasamster/gym-arc
|
e39f320d4bb153881f2d36f6bf3f5591a0225e4f
|
[
"MIT"
] | null | null | null |
gym_arc/envs/rendering.py
|
dimbasamster/gym-arc
|
e39f320d4bb153881f2d36f6bf3f5591a0225e4f
|
[
"MIT"
] | 1
|
2020-05-07T09:38:24.000Z
|
2020-05-07T09:38:24.000Z
|
from __future__ import division
import os
import six
import sys
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
from gym import error
try:
import pyglet
except ImportError as e:
raise ImportError('''
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet'.
But if you really just want to install all Gym dependencies and not have to think about it,
'pip install -e .[all]' or 'pip install gym[all]' will do it.
''')
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError('''
Error occurred while running `from pyglet.gl import *`
HINT: make sure you have OpenGL installed. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
''')
import math
import numpy as np
class SimpleImageViewer(object):
def __init__(self, display=None, maxwidth=500):
self.window = None
self.isopen = False
self.display = display
self.maxwidth = maxwidth
def imshow(self, arr):
if self.window is None:
height, width, _channels = arr.shape
if width > self.maxwidth:
scale = self.maxwidth / width
width = int(scale * width)
height = int(scale * height)
self.window = pyglet.window.Window(width=width, height=height,
display=self.display, vsync=False, resizable=True)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
self.width = width
self.height = height
@self.window.event
def on_close():
self.isopen = False
assert len(arr.shape) == 3, "You passed in an image with the wrong number of dimensions"
image = pyglet.image.ImageData(arr.shape[1], arr.shape[0],
'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
texture = image.get_texture()
texture.width = self.width
texture.height = self.height
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
texture.blit(0, 0) # draw
self.window.flip()
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def __del__(self):
self.close()
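# Usage sketch (the frame contents are illustrative):
#
#   viewer = SimpleImageViewer(maxwidth=500)
#   frame = np.zeros((64, 64, 3), dtype=np.uint8)  # HxWx3 RGB array
#   viewer.imshow(frame)
#   viewer.close()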
| 34.069767
| 115
| 0.617065
|
e923ab60d86655d07b9c76e73f530b3a88ff92b7
| 2,925
|
py
|
Python
|
src/recipes/models.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | null | null | null |
src/recipes/models.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | 4
|
2020-06-06T01:17:45.000Z
|
2021-09-08T02:04:22.000Z
|
src/recipes/models.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | null | null | null |
from random import randint
from django.db import models
from django.contrib.auth import get_user_model
from django.db.models import Q
from django_measurement.models import MeasurementField
from measurement.measures import Volume
from model_utils.models import TimeStampedModel
User = get_user_model()
class Recipe(TimeStampedModel):
title = models.CharField('Title', max_length=255, help_text='Title of the recipe.')
description = models.TextField('Description', blank=True, null=True)
owner = models.ForeignKey(User, blank=True, null=True)
expected_production = MeasurementField(verbose_name='Expected Production', measurement=Volume, blank=True, null=True)
og = models.DecimalField('OG', max_digits=4, decimal_places=3, blank=True, null=True, help_text='Original Gravity')
fg = models.DecimalField('FG', max_digits=4, decimal_places=3, blank=True, null=True, help_text='Final Gravity')
ibu = models.PositiveIntegerField('IBU', blank=True, null=True, help_text='International Bitterness Unit')
srm = models.PositiveIntegerField('SRV', blank=True, null=True, help_text='Standard Reference Method')
abv = models.DecimalField('ABV', max_digits=4, decimal_places=2, blank=True, null=True,
help_text='Alcohol by Volume')
steps = models.TextField('Steps')
observations = models.TextField('Observations', blank=True, null=True)
def __str__(self):
return self.title
@staticmethod
def get_random():
recipes = Recipe.objects.filter(owner=None)
recipes_ids = recipes.values_list('id', flat=True)
if not len(recipes_ids):
return None
index = randint(0, recipes.count() - 1)
return recipes[index]
@staticmethod
def get_suggestion(user):
from src.ingredients.models import UserIngredient
user_ingredients = UserIngredient.objects.filter(user=user)
if not user_ingredients.count():
return None
user_ingredients_ids = sorted(list(user_ingredients.values_list('ingredient_type', flat=True)))
found_recipes = Recipe.objects.filter(Q(owner=None) | Q(owner=user)).filter(ingredients__ingredient_type__in=user_ingredients_ids)
suggestions = []
for recipe in found_recipes:
found_ingredients_ids = sorted(list(recipe.ingredients.all().values_list('ingredient_type', flat=True)))
if len(found_ingredients_ids) > len(user_ingredients_ids):
continue
if user_ingredients_ids == found_ingredients_ids:
suggestions.append(recipe)
continue
if not set(found_ingredients_ids).difference(set(user_ingredients_ids)):
suggestions.append(recipe)
continue
if not suggestions:
return None
index = randint(0, len(suggestions) - 1)
return suggestions[index]
| 39.527027
| 138
| 0.694701
|
00353dbe7ab5ee3f9429770c0b8768f9b5820550
| 48,772
|
py
|
Python
|
tools/project-creator/Python2.6.6/Lib/subprocess.py
|
gohopo/nineck.ca
|
9601f5ae4c20f8a3ea27b06551556fa5e1eecce3
|
[
"MIT"
] | 81
|
2017-03-13T08:24:01.000Z
|
2021-04-02T09:48:38.000Z
|
tools/project-creator/Python2.6.6/Lib/subprocess.py
|
gohopo/nineck.ca
|
9601f5ae4c20f8a3ea27b06551556fa5e1eecce3
|
[
"MIT"
] | 6
|
2017-04-30T08:36:55.000Z
|
2017-09-22T01:37:28.000Z
|
tools/project-creator/Python2.6.6/Lib/subprocess.py
|
gohopo/nineck.ca
|
9601f5ae4c20f8a3ea27b06551556fa5e1eecce3
|
[
"MIT"
] | 41
|
2017-03-18T14:11:58.000Z
|
2021-04-14T05:06:09.000Z
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is a file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc != None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 basically work as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
from _subprocess import CREATE_NEW_CONSOLE
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
import errno
import fcntl
import pickle
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
if mswindows:
__all__.append("CREATE_NEW_CONSOLE")
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# True/False does not exist on 2.2.0
#try:
# False
#except NameError:
# False = 0
# True = 1
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxint)
if res is not None and res >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
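# Illustrative behaviour of the two helpers (assumes the POSIX 'true'/'false'
# utilities are on PATH):
#
#   call(["true"])          # returns 0
#   call(["false"])         # returns 1
#   check_call(["false"])   # raises CalledProcessError(1, ['false'])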
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
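# Worked example of the quoting rules above (added for illustration, not part
# of the original module):
#
#   list2cmdline(["ab c", "d", 'e"f'])  ->  "ab c" d e\"f
#
# "ab c" is quoted because it contains a space, and the embedded double quote
# in 'e"f' is escaped with a backslash.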
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (_subprocess.GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. The cost is that Ctrl+C won't
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
else:
raise ValueError("Only SIGTERM is supported on Windows")
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
try:
try:
self._set_cloexec_flag(errpipe_write)
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread is not None:
os.dup2(p2cread, 0)
if c2pwrite is not None:
os.dup2(c2pwrite, 1)
if errwrite is not None:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
if p2cread is not None and p2cread not in (0,):
os.close(p2cread)
if c2pwrite is not None and c2pwrite not in (p2cread, 1):
os.close(c2pwrite)
if errwrite is not None and errwrite not in (p2cread, c2pwrite, 2):
os.close(errwrite)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread is not None and p2cwrite is not None:
os.close(p2cread)
if c2pwrite is not None and c2pread is not None:
os.close(c2pwrite)
if errwrite is not None and errread is not None:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
# Exception limited to 1M
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
_eintr_retry_call(os.waitpid, self.pid, 0)
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None:
os.close(fd)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk of
# blocking. POSIX defines PIPE_BUF >= 512
chunk = input[input_offset : input_offset + 512]
bytes_written = os.write(self.stdin.fileno(), chunk)
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
| 36.397015
| 129
| 0.551833
|
01195f5971e5a8559ed8a74584eac654976a24c4
| 4,513
|
py
|
Python
|
jacdac/packet.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | 1
|
2022-02-15T21:30:36.000Z
|
2022-02-15T21:30:36.000Z
|
jacdac/packet.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | null | null | null |
jacdac/packet.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | 1
|
2022-02-08T19:32:45.000Z
|
2022-02-08T19:32:45.000Z
|
from typing import Any, Optional
from .pack import PackType, jdpack, jdunpack
from .constants import *
from .system.constants import JD_CMD_COMMAND_NOT_IMPLEMENTED
import jacdac.util as util
class JDPacket:
"""A Jacdac packet
"""
def __init__(self, *, cmd: Optional[int] = None, size: int = 0, frombytes: Optional[bytes] = None, data: Optional[bytes] = None, sender: Any = None) -> None:
self.timestamp = util.now()
if frombytes is None:
self._header = bytearray(JD_SERIAL_HEADER_SIZE)
self.data = bytearray(data or size)
else:
self._header = bytearray(frombytes[0:JD_SERIAL_HEADER_SIZE])
self.data = bytearray(frombytes[JD_SERIAL_HEADER_SIZE:])
if cmd is not None:
self.service_command = cmd
self.sender = sender
@staticmethod
def packed(cmd: int, fmt: str, *args: PackType):
return JDPacket(cmd=cmd, data=jdpack(fmt, *args))
def unpack(self, fmt: str):
return jdunpack(self.data, fmt)
@property
def service_command(self):
return util.u16(self._header, 14)
@service_command.setter
def service_command(self, cmd: int):
util.set_u16(self._header, 14, cmd)
@property
def device_id(self) -> str:
return util.buf2hex(self._header[4:12])
@device_id.setter
def device_id(self, id_str: str):
id = util.hex2buf(id_str)
if len(id) != 8:
raise ValueError()
self._header[4:12] = id
@property
def packet_flags(self):
return self._header[3]
@property
def multicommand_class(self):
if self.packet_flags & JD_FRAME_FLAG_IDENTIFIER_IS_SERVICE_CLASS:
return util.u32(self._header, 4)
else:
return None
@property
def size(self):
return self._header[12]
@property
def requires_ack(self):
return (self.packet_flags & JD_FRAME_FLAG_ACK_REQUESTED) != 0
@requires_ack.setter
def requires_ack(self, val: bool):
if val != self.requires_ack:
self._header[3] ^= JD_FRAME_FLAG_ACK_REQUESTED
@property
def service_index(self):
return self._header[13] & JD_SERVICE_INDEX_MASK
@property
def is_regular_service(self):
return self.service_index <= 58
@service_index.setter
def service_index(self, val: Optional[int]):
if val is None:
raise ValueError("service_index not set")
self._header[13] = (self._header[13] & JD_SERVICE_INDEX_INV_MASK) | val
@property
def crc(self):
return util.u16(self._header, 0)
@property
def is_event(self):
return self.is_report and self.is_regular_service and (self.service_command & CMD_EVENT_MASK) != 0
@property
def event_code(self):
assert self.is_event
return self.service_command & CMD_EVENT_CODE_MASK
@property
def event_counter(self):
assert self.is_event
return (self.service_command >> CMD_EVENT_COUNTER_POS) & CMD_EVENT_COUNTER_MASK
@property
def is_reg_set(self):
return self.is_regular_service and self.service_command >> 12 == CMD_SET_REG >> 12
@property
def is_reg_get(self):
return self.is_regular_service and self.service_command >> 12 == CMD_GET_REG >> 12
@property
def reg_code(self):
return self.service_command & CMD_REG_MASK
@property
def data(self):
return self._data
@data.setter
def data(self, buf: bytearray):
if len(buf) > JD_SERIAL_MAX_PAYLOAD_SIZE:
raise ValueError("Too big")
self._header[12] = len(buf)
self._data = buf
@property
def is_command(self):
return (self.packet_flags & JD_FRAME_FLAG_COMMAND) != 0
@property
def is_report(self):
return (self.packet_flags & JD_FRAME_FLAG_COMMAND) == 0
def to_string(self):
msg = "{}/{}[{}]: {} sz={}".format(
util.short_id(self._header[4:12]),
self.service_index,
self.packet_flags,
util.hex_num(self.service_command, 4),
self.size)
if self.size < 20:
msg += ": " + util.buf2hex(self.data)
else:
msg += ": " + util.buf2hex(self.data[0:20]) + "..."
return msg
def __str__(self):
return "<JDPacket {}>".format(self.to_string())
def not_implemented(self):
return JDPacket.packed(JD_CMD_COMMAND_NOT_IMPLEMENTED, "u16 u16", self.service_command, self.crc)
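# Usage sketch (command code, pack format and payload values are hypothetical):
#
#   pkt = JDPacket.packed(0x80, "u8 u16", 1, 500)
#   flag, value = pkt.unpack("u8 u16")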
| 28.929487
| 131
| 0.62885
|
564a50a2fba0a95f699b65fccdbcd19dc6c8b33e
| 26,420
|
py
|
Python
|
assignments/textureHist.py
|
Upward-Spiral-Science/claritycontrol
|
3da44a35f4eb8746c408ad34e7f433d14c031323
|
[
"Apache-2.0"
] | 2
|
2016-02-04T20:32:20.000Z
|
2016-02-21T15:44:01.000Z
|
code/textureHist.py
|
Upward-Spiral-Science/claritycontrol
|
3da44a35f4eb8746c408ad34e7f433d14c031323
|
[
"Apache-2.0"
] | 6
|
2016-02-04T20:24:34.000Z
|
2016-04-28T10:08:32.000Z
|
code/textureHist.py
|
Upward-Spiral-Science/claritycontrol
|
3da44a35f4eb8746c408ad34e7f433d14c031323
|
[
"Apache-2.0"
] | null | null | null |
#IMPORTANT: THE ABOVE PORTION IS A SCRIPT FROM SOLEM'S VISION BLOG
# http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
import SimpleITK as sitk
import clarity as cl # I wrote this module for easier operations on data
import clarity.resources as rs
import csv,gc # garbage memory collection :)
import numpy as np
import matplotlib.pyplot as plt
import jgraph as ig
from ipywidgets import interact, fixed
from IPython.display import clear_output
def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):
# Create a figure with two subplots and the specified size.
plt.subplots(1,2,figsize=(10,8))
# Draw the fixed image in the first subplot.
plt.subplot(1,2,1)
plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('fixed image')
plt.axis('off')
# Draw the moving image in the second subplot.
plt.subplot(1,2,2)
plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('moving image')
plt.axis('off')
plt.show()
# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending
# of an image stack of two images that occupy the same physical space.
def display_images_with_alpha(image_z, alpha, fixed, moving):
img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z]
plt.imshow(sitk.GetArrayFromImage(img),cmap=plt.cm.Greys_r);
plt.axis('off')
plt.show()
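# Notebook usage sketch (fixed_image and moving_image are assumed to be
# SimpleITK images loaded elsewhere):
#
#   interact(display_images_with_alpha,
#            image_z=(0, fixed_image.GetSize()[2] - 1),
#            alpha=(0.0, 1.0, 0.05),
#            fixed=fixed(fixed_image), moving=fixed(moving_image))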
# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
global metric_values, multires_iterations
metric_values = []
multires_iterations = []
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
global metric_values, multires_iterations
del metric_values
del multires_iterations
# Close figure, we don't want to get a duplicate of the plot latter on.
plt.close()
# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
global metric_values, multires_iterations
metric_values.append(registration_method.GetMetricValue())
# Clear the output area (wait=True, to reduce flickering), and plot current data
clear_output(wait=True)
# Plot the similarity metric values
plt.plot(metric_values, 'r')
plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
plt.xlabel('Iteration Number',fontsize=12)
plt.ylabel('Metric Value',fontsize=12)
plt.show()
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
global metric_values, multires_iterations
multires_iterations.append(len(metric_values))
def histeq(im,nbr_bins=256):
#get image histogram
imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)
cdf = imhist.cumsum() #cumulative distribution function
cdf = 255 * cdf / cdf[-1] #normalize
#use linear interpolation of cdf to find new pixel values
im2 = interp(im.flatten(),bins[:-1],cdf)
return im2.reshape(im.shape), cdf
from PIL import Image
from numpy import *
import nibabel as nb
nii = nb.load('../data/raw/Fear199.hdr')
im = nii.get_data()
img = im[:,:,:,0]
im2,cdf = histeq(img)
print im
print im2
# save the equalized volume (im2) rather than the raw input array; wrap it in
# a nibabel image object so nb.save() can serialize it
nb.save(nb.Nifti1Image(im2, nii.affine), '../data/raw/HistFear199.hdr')
| 197.164179
| 880
| 0.094663
|
a4830359bd5426aa1ab19d657d47cac0e2a69ea1
| 5,278
|
py
|
Python
|
query_builder.py
|
filchyboy/gol
|
e2730152c36fb085acb63adde83216a9da083d7b
|
[
"MIT"
] | null | null | null |
query_builder.py
|
filchyboy/gol
|
e2730152c36fb085acb63adde83216a9da083d7b
|
[
"MIT"
] | null | null | null |
query_builder.py
|
filchyboy/gol
|
e2730152c36fb085acb63adde83216a9da083d7b
|
[
"MIT"
] | null | null | null |
# Query Builder
# length = 625
# for each in range(length):
# print(f"c{each} INT NOT NULL,")
# string = ""
# for each in range(length):
# string = string + "c" + str(each) + ", "
# print(string)
# string2 = ""
# for each in range(length):
# string2 = string2 + "%s, "
# print(string2)
list = ["Date", "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11", "c12", "c13", "c14", "c15", "c16", "c17", "c18", "c19", "c20", "c21", "c22", "c23", "c24", "c25", "c26", "c27", "c28", "c29", "c30", "c31", "c32", "c33", "c34", "c35", "c36", "c37", "c38", "c39", "c40", "c41", "c42", "c43", "c44", "c45", "c46", "c47", "c48", "c49", "c50", "c51", "c52", "c53", "c54", "c55", "c56", "c57", "c58", "c59", "c60", "c61", "c62", "c63", "c64", "c65", "c66", "c67", "c68", "c69", "c70", "c71", "c72", "c73", "c74", "c75", "c76", "c77", "c78", "c79", "c80", "c81", "c82", "c83", "c84", "c85", "c86", "c87", "c88", "c89", "c90", "c91", "c92", "c93", "c94", "c95", "c96", "c97", "c98", "c99", "c100", "c101", "c102", "c103", "c104", "c105", "c106", "c107", "c108", "c109", "c110", "c111", "c112", "c113", "c114", "c115", "c116", "c117", "c118", "c119", "c120", "c121", "c122", "c123", "c124", "c125", "c126", "c127", "c128", "c129", "c130", "c131", "c132", "c133", "c134", "c135", "c136", "c137", "c138", "c139", "c140", "c141", "c142", "c143", "c144", "c145", "c146", "c147", "c148", "c149", "c150", "c151", "c152", "c153", "c154", "c155", "c156", "c157", "c158", "c159", "c160", "c161", "c162", "c163", "c164", "c165", "c166", "c167", "c168", "c169", "c170", "c171", "c172", "c173", "c174", "c175", "c176", "c177", "c178", "c179", "c180", "c181", "c182", "c183", "c184", "c185", "c186", "c187", "c188", "c189", "c190", "c191", "c192", "c193", "c194", "c195", "c196", "c197", "c198", "c199", "c200", "c201", "c202", "c203", "c204", "c205", "c206", "c207", "c208", "c209", "c210", "c211", "c212", "c213", "c214", "c215", "c216", "c217", "c218", "c219", "c220", "c221", "c222", "c223", "c224", "c225", "c226", "c227", "c228", "c229", "c230", "c231", "c232", "c233", "c234", "c235", "c236", "c237", "c238", "c239", "c240", "c241", "c242", "c243", "c244", "c245", "c246", "c247", "c248", "c249", "c250", "c251", "c252", "c253", "c254", "c255", "c256", "c257", "c258", "c259", "c260", "c261", "c262", "c263", "c264", "c265", "c266", "c267", "c268", "c269", "c270", "c271", "c272", "c273", "c274", "c275", "c276", "c277", "c278", "c279", "c280", "c281", "c282", "c283", "c284", "c285", "c286", "c287", "c288", "c289", "c290", "c291", "c292", "c293", "c294", "c295", "c296", "c297", "c298", "c299", "c300", "c301", "c302", "c303", "c304", "c305", "c306", "c307", "c308", "c309", "c310", "c311", "c312", "c313", "c314", "c315", "c316", "c317", "c318", "c319", "c320", "c321", "c322", "c323", "c324", "c325", "c326", "c327", "c328", "c329", "c330", "c331", "c332", "c333", "c334", "c335", "c336", "c337", "c338", "c339", "c340", "c341", "c342", "c343", "c344", "c345", "c346", "c347", "c348", "c349", "c350", "c351", "c352", "c353", "c354", "c355", "c356", "c357", "c358", "c359", "c360", "c361", "c362", "c363", "c364", "c365", "c366", "c367", "c368", "c369", "c370", "c371", "c372", "c373", "c374", "c375", "c376", "c377", "c378", "c379", "c380", "c381", "c382", "c383", "c384", "c385", "c386", "c387", "c388", "c389", "c390", "c391", "c392", "c393", "c394", "c395", "c396", "c397", "c398", "c399", "c400", "c401", "c402", "c403", "c404", "c405", "c406", "c407", "c408", "c409", "c410", "c411", "c412", "c413", "c414", "c415", "c416", "c417", "c418", "c419", "c420", "c421", "c422", "c423", "c424", "c425", "c426", "c427", "c428", "c429", "c430", "c431", "c432", "c433", "c434", "c435", "c436", "c437", "c438", "c439", "c440", "c441", "c442", "c443", "c444", "c445", "c446", "c447", "c448", "c449", "c450", "c451", "c452", "c453", "c454", "c455", 
"c456", "c457", "c458", "c459", "c460", "c461", "c462", "c463", "c464", "c465", "c466", "c467", "c468", "c469", "c470", "c471", "c472", "c473", "c474", "c475", "c476", "c477", "c478", "c479", "c480", "c481", "c482", "c483", "c484", "c485", "c486", "c487", "c488", "c489", "c490", "c491", "c492", "c493", "c494", "c495", "c496", "c497", "c498", "c499", "c500", "c501", "c502", "c503", "c504", "c505", "c506", "c507", "c508", "c509", "c510", "c511", "c512", "c513", "c514", "c515", "c516", "c517", "c518", "c519", "c520", "c521", "c522", "c523", "c524", "c525", "c526", "c527", "c528", "c529", "c530", "c531", "c532", "c533", "c534", "c535", "c536", "c537", "c538", "c539", "c540", "c541", "c542", "c543", "c544", "c545", "c546", "c547", "c548", "c549", "c550", "c551", "c552", "c553", "c554", "c555", "c556", "c557", "c558", "c559", "c560", "c561", "c562", "c563", "c564", "c565", "c566", "c567", "c568", "c569", "c570", "c571", "c572", "c573", "c574", "c575", "c576", "c577", "c578", "c579", "c580", "c581", "c582", "c583", "c584", "c585", "c586", "c587", "c588", "c589", "c590", "c591", "c592", "c593", "c594", "c595", "c596", "c597", "c598", "c599", "c600", "c601", "c602", "c603", "c604", "c605", "c606", "c607", "c608", "c609", "c610", "c611", "c612", "c613", "c614", "c615", "c616", "c617", "c618", "c619", "c620", "c621", "c622", "c623", "c624"]
for each in range(len(list)):
print(f"'c{each}': [c{each}], ")
| 229.478261
| 4,905
| 0.494127
|
8f3fb9a601667118de857545c8fe43327eb7c0df
| 2,475
|
py
|
Python
|
src/wavestate/control/statespace/dense/test/test_matrix.py
|
wavestate/wavestate-control
|
6c683643d33fa45b5240894a5f89119761edb4e5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/wavestate/control/statespace/dense/test/test_matrix.py
|
wavestate/wavestate-control
|
6c683643d33fa45b5240894a5f89119761edb4e5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/wavestate/control/statespace/dense/test/test_matrix.py
|
wavestate/wavestate-control
|
6c683643d33fa45b5240894a5f89119761edb4e5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
import copy
from wavestate import declarative
import pytest
from wavestate.utilities.np import logspaced
from wavestate.utilities.mpl import mplfigB
from wavestate.control.statespace.dense import matrix_algorithms
import numpy.testing
pytestmark = pytest.mark.xfail(reason="Need to revisit these")
def test_QRH():
N = 10
M = np.random.rand(N, N)
M = np.array(
[
[1, 0, 0],
[1, 1, 0],
[0, 0, 0],
],
float,
)
eye = np.eye(M.shape[0], M.shape[1])
R, [Q], [QT] = matrix_algorithms.QR(
mat=M,
mshadow=None,
qmul=[eye],
qAmul=[eye],
pivoting=False,
# method = 'Householder',
method="Givens",
Rexact=False,
)
R2, [Q], [QT] = matrix_algorithms.QR(
mat=M,
mshadow=None,
qmul=[eye],
qAmul=[eye],
pivoting=False,
# method = 'Householder',
method="Givens",
Rexact=True,
)
import tabulate
print("near", tabulate.tabulate(R))
print("exact", tabulate.tabulate(R2))
print(tabulate.tabulate(Q))
print(tabulate.tabulate(QT))
numpy.testing.assert_almost_equal(Q @ Q.T, eye)
numpy.testing.assert_almost_equal(Q @ QT, eye)
numpy.testing.assert_almost_equal(Q @ R2, M)
def test_QRHpivot():
N = 10
M = np.random.rand(N, N)
M = np.array(
[
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
],
float,
)
eye = np.eye(M.shape[0], M.shape[1])
R, [Q], [QT], P = matrix_algorithms.QR(
mat=M,
mshadow=None,
qmul=[eye],
qAmul=[eye],
pivoting=True,
# method = 'Householder',
method="Givens",
Rexact=True,
)
import tabulate
print(P)
print(tabulate.tabulate(R))
# print(tabulate.tabulate(Q))
# print(tabulate.tabulate(QT))
numpy.testing.assert_almost_equal(Q @ Q.T, eye)
numpy.testing.assert_almost_equal(Q @ QT, eye)
numpy.testing.assert_almost_equal((Q @ R)[:, P], M)
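# --- Hedged sketch (not part of the original tests): the same Q @ R == M and
# orthogonality identities checked against NumPy's built-in QR, as a reference
# for what the Givens-based routine above is expected to satisfy.
def numpy_qr_reference_sketch():
    M = np.array(
        [
            [1, 0, 0],
            [1, 1, 0],
            [0, 0, 0],
        ],
        float,
    )
    Q, R = np.linalg.qr(M)
    numpy.testing.assert_almost_equal(Q @ R, M)
    numpy.testing.assert_almost_equal(Q.T @ Q, np.eye(3))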
| 22.5
| 76
| 0.57899
|
a991181f0973c484e2411c28c3a9d60867e3d0a0
| 752
|
py
|
Python
|
src/atcoder/abc185/e/sol_0.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 1
|
2021-07-11T03:20:10.000Z
|
2021-07-11T03:20:10.000Z
|
src/atcoder/abc185/e/sol_0.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 39
|
2021-07-10T05:21:09.000Z
|
2021-12-15T06:10:12.000Z
|
src/atcoder/abc185/e/sol_0.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | null | null | null |
import typing
import sys
import numpy as np
import numba as nb
@nb.njit((nb.i8[:], nb.i8[:]), cache=True)
def solve(
a: np.ndarray,
b: np.ndarray,
) -> typing.NoReturn:
n, m = a.size, b.size
cost = np.empty((n + 1, m + 1), np.int64)
cost[0] = np.arange(m + 1)
cost[:, 0] = np.arange(n + 1)
for i in range(n):
for j in range(m):
cost[i + 1][j + 1] = min(
cost[i + 1][j] + 1,
cost[i][j + 1] + 1,
cost[i][j] + (a[i] != b[j]),
)
print(cost[n][m])
def main() -> typing.NoReturn:
n, m = map(int, input().split())
a = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
b = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
solve(a, b)
main()
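# --- Hedged usage sketch (not part of the submitted solution): the numba
# kernel can also be called directly with int64 arrays; this prints 1, the
# cost of deleting the middle element of the first sequence.
def _demo() -> typing.NoReturn:
    solve(
        np.array([1, 2, 3], dtype=np.int64),
        np.array([1, 3], dtype=np.int64),
    )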
| 17.904762
| 43
| 0.510638
|
932deaac5600b45ae9582602e0c024e19b113e29
| 4,168
|
py
|
Python
|
libs/configs/DOTA1.0/vanilla_csl/cfgs_res50_dota_v40.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 187
|
2020-03-11T05:41:59.000Z
|
2022-03-28T04:44:03.000Z
|
libs/configs/DOTA1.0/vanilla_csl/cfgs_res50_dota_v40.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 13
|
2020-07-16T09:00:11.000Z
|
2021-11-05T12:15:35.000Z
|
libs/configs/DOTA1.0/vanilla_csl/cfgs_res50_dota_v40.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 31
|
2020-03-21T08:11:36.000Z
|
2022-03-16T09:18:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
gaussian label, omega=6
This is your result for task 1:
mAP: 0.6251804082959679
ap of each class: plane:0.8788620522554762,
baseball-diamond:0.7108527765896498,
bridge:0.39326363063833736,
ground-track-field:0.5983420562893361,
small-vehicle:0.6164282912414312,
large-vehicle:0.516536657230959,
ship:0.6725772964964745,
tennis-court:0.9082415824440556,
basketball-court:0.7557320334025829,
storage-tank:0.7563441062866568,
soccer-ball-field:0.475666226892059,
roundabout:0.5541213292307309,
harbor:0.5095408687967674,
swimming-pool:0.6332644710885142,
helicopter:0.39793274555648517
The submitted information is :
Description: RetinaNet_DOTA_2x_20200719_70.2w
Username: yangxue
Institute: DetectionTeamUCAS
Emailadress: yangxue16@mails.ucas.ac.cn
TeamMembers: yangxue, yangjirui
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_2x_20200719'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 27000 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must be in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 2.0
REG_LOSS_MODE = None
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTA' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
LABEL_TYPE = 0
RADUIUS = 1
OMEGA = 6
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180 # 90 or 180
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
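# --- Hedged sketch (not part of the original config): a quick sanity check of
# how many anchors the settings above imply per feature-map location
# (scales x ratios, times angles if rotated anchors were generated).
ANCHORS_PER_LOCATION = len(ANCHOR_SCALES) * len(ANCHOR_RATIOS)            # 3 * 7 = 21
ROTATED_ANCHORS_PER_LOCATION = ANCHORS_PER_LOCATION * len(ANCHOR_ANGLES)  # 21 * 6 = 126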
| 29.560284
| 105
| 0.693618
|
f08b0a385fd5b75f4391c95cc4fe8cfe5b4616db
| 7,644
|
py
|
Python
|
scripts/cut_release_branch.py
|
netajik/oppia
|
d3780352d615db7438e010c5aa5eb60588bb7de6
|
[
"Apache-2.0"
] | null | null | null |
scripts/cut_release_branch.py
|
netajik/oppia
|
d3780352d615db7438e010c5aa5eb60588bb7de6
|
[
"Apache-2.0"
] | null | null | null |
scripts/cut_release_branch.py
|
netajik/oppia
|
d3780352d615db7438e010c5aa5eb60588bb7de6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper script used for creating a new release branch on GitHub.
ONLY RELEASE COORDINATORS SHOULD USE THIS SCRIPT.
Usage: Run this script from your oppia root folder:
    python scripts/cut_release_branch.py --new_version="x.y.z"
where x.y.z is the new version of Oppia, e.g. 2.5.3.
"""
import argparse
import json
import os
import re
import subprocess
import sys
import urllib
import common # pylint: disable=relative-import
def new_version_type(arg, pattern=re.compile(r'\d\.\d\.\d')):
"""Checks that the new version name matches the expected pattern.
Args:
arg: str. The new version name.
pattern: RegularExpression. The pattern that release version should
match.
Raises:
argparse.ArgumentTypeError: The new version name does not match
the pattern.
Returns:
str. The new version name with correct pattern.
"""
if not pattern.match(arg):
raise argparse.ArgumentTypeError(
'The format of "new_version" should be: x.x.x')
return arg
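# --- Hedged sketch (not in the original script): how the validator above
# behaves on well-formed and malformed version strings.
def _version_validation_examples():
    assert new_version_type('2.5.3') == '2.5.3'
    try:
        new_version_type('2.5')
    except argparse.ArgumentTypeError:
        pass  # strings not matching x.y.z are rejected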
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--new_version', help='new version to be released', type=new_version_type)
PARSED_ARGS = _PARSER.parse_args()
if PARSED_ARGS.new_version:
TARGET_VERSION = PARSED_ARGS.new_version
else:
raise Exception('ERROR: A "new_version" arg must be specified.')
# Construct the new branch name.
NEW_BRANCH_NAME = 'release-%s' % TARGET_VERSION
NEW_APP_YAML_VERSION = TARGET_VERSION.replace('.', '-')
assert '.' not in NEW_APP_YAML_VERSION
def _verify_target_branch_does_not_already_exist(remote_alias):
"""Checks that the new release branch doesn't already exist locally or
remotely.
Args:
remote_alias: str. The alias that points to the remote oppia
repository. Example: When calling git remote -v, you get:
upstream https://github.com/oppia/oppia.git (fetch),
where 'upstream' is the alias that points to the remote oppia
repository.
Raises:
Exception: The target branch name already exists locally.
Exception: The target branch name already exists on the remote
oppia repository.
"""
git_branch_output = subprocess.check_output(['git', 'branch'])
if NEW_BRANCH_NAME in git_branch_output:
raise Exception(
'ERROR: The target branch name already exists locally. '
'Run "git branch -D %s" to delete it.' % NEW_BRANCH_NAME)
git_ls_remote_output = subprocess.check_output(
['git', 'ls-remote', '--heads', remote_alias])
remote_branch_ref = 'refs/heads/%s' % NEW_BRANCH_NAME
if remote_branch_ref in git_ls_remote_output:
raise Exception(
'ERROR: The target branch name already exists on the remote repo.')
def _verify_target_version_is_consistent_with_latest_released_version():
"""Checks that the target version is consistent with the latest released
version on GitHub.
Raises:
Exception: Failed to fetch latest release info from GitHub.
Exception: Could not parse version number of latest GitHub release.
AssertionError: The previous and the current major version are not the
same.
AssertionError: The current patch version is not equal to previous patch
version plus one.
AssertionError: The current patch version is greater or equal to 10.
AssertionError: The current minor version is not equal to previous
minor version plus one.
AssertionError: The current patch version is different than 0.
"""
response = urllib.urlopen(
'https://api.github.com/repos/oppia/oppia/releases/latest')
if response.getcode() != 200:
raise Exception(
'ERROR: Failed to fetch latest release info from GitHub')
data = json.load(response)
latest_release_tag_name = data['tag_name']
match_result = re.match(r'v(\d)\.(\d)\.(\d)', latest_release_tag_name)
if match_result is None:
raise Exception(
'ERROR: Could not parse version number of latest GitHub release.')
prev_major, prev_minor, prev_patch = match_result.group(1, 2, 3)
match_result = re.match(r'(\d)\.(\d)\.(\d)', TARGET_VERSION)
curr_major, curr_minor, curr_patch = match_result.group(1, 2, 3)
# This will need to be overridden if the major version changes.
assert prev_major == curr_major, 'Unexpected major version change.'
if prev_minor == curr_minor:
assert int(curr_patch) == int(prev_patch) + 1
assert int(curr_patch) < 10
else:
assert int(curr_minor) == int(prev_minor) + 1
assert int(curr_patch) == 0
def _execute_branch_cut():
"""Pushes the new release branch to Github.
Raises:
AssertionError: 'version: default' was not found in app.yaml
"""
# Do prerequisite checks.
common.require_cwd_to_be_oppia()
common.verify_local_repo_is_clean()
common.verify_current_branch_name('develop')
# Update the local repo.
remote_alias = common.get_remote_alias('https://github.com/oppia/oppia')
subprocess.call(['git', 'pull', remote_alias])
_verify_target_branch_does_not_already_exist(remote_alias)
_verify_target_version_is_consistent_with_latest_released_version()
# The release coordinator should verify that tests are passing on develop
# before checking out the release branch.
common.open_new_tab_in_browser_if_possible(
'https://github.com/oppia/oppia#oppia---')
while True:
print (
'Please confirm: are Travis checks passing on develop? (y/n) ')
answer = raw_input().lower()
if answer in ['y', 'ye', 'yes']:
break
elif answer:
print (
'Tests should pass on develop before this script is run. '
'Exiting.')
sys.exit()
# Cut a new release branch.
print 'Cutting a new release branch: %s' % NEW_BRANCH_NAME
subprocess.call(['git', 'checkout', '-b', NEW_BRANCH_NAME])
# Update the version in app.yaml.
print 'Updating the version number in app.yaml ...'
with open('app.yaml', 'r') as f:
content = f.read()
assert content.count('version: default') == 1
os.remove('app.yaml')
content = content.replace(
'version: default', 'version: %s' % NEW_APP_YAML_VERSION)
with open('app.yaml', 'w+') as f:
f.write(content)
print 'Version number updated.'
# Make a commit.
print 'Committing the change.'
subprocess.call([
'git', 'commit', '-a', '-m',
'"Update version number to %s"' % TARGET_VERSION])
# Push the new release branch to GitHub.
print 'Pushing new release branch to GitHub.'
subprocess.call(['git', 'push', remote_alias, NEW_BRANCH_NAME])
print ''
print (
'New release branch successfully cut. You are now on branch %s' %
NEW_BRANCH_NAME)
print 'Done!'
if __name__ == '__main__':
_execute_branch_cut()
| 35.225806
| 80
| 0.677656
|
ea134b7ea08b932242b46c7f2268680dcefa8e33
| 8,895
|
py
|
Python
|
stumpy/stumped.py
|
abbasnikbakht/stumpy
|
3d0f5e165ade7dfdd4dad19ba5f71026345c551c
|
[
"BSD-3-Clause"
] | 1
|
2020-08-10T20:22:44.000Z
|
2020-08-10T20:22:44.000Z
|
stumpy/stumped.py
|
abbasnikbakht/stumpy
|
3d0f5e165ade7dfdd4dad19ba5f71026345c551c
|
[
"BSD-3-Clause"
] | null | null | null |
stumpy/stumped.py
|
abbasnikbakht/stumpy
|
3d0f5e165ade7dfdd4dad19ba5f71026345c551c
|
[
"BSD-3-Clause"
] | null | null | null |
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import logging
import numpy as np
from . import core, _stump, _count_diagonal_ndist, config
logger = logging.getLogger(__name__)
def stumped(dask_client, T_A, m, T_B=None, ignore_trivial=True):
"""
Compute the matrix profile with parallelized and distributed STOMPopt with
Pearson correlations.
This is highly distributed implementation around the Numba JIT-compiled
parallelized `_stump` function which computes the matrix profile according
to STOMPopt with Pearson correlations.
Parameters
----------
dask_client : client
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
T_A : ndarray
The time series or sequence for which to compute the matrix profile
m : int
Window size
T_B : ndarray
The time series or sequence that contain your query subsequences
of interest. Default is `None` which corresponds to a self-join.
ignore_trivial : bool
Set to `True` if this is a self-join. Otherwise, for AB-join, set this
to `False`. Default is `True`.
Returns
-------
out : ndarray
The first column consists of the matrix profile, the second column
consists of the matrix profile indices, the third column consists of
the left matrix profile indices, and the fourth column consists of
the right matrix profile indices.
Notes
-----
`DOI: 10.1007/s10115-017-1138-x \
<https://www.cs.ucr.edu/~eamonn/ten_quadrillion.pdf>`__
See Section 4.5
The above reference outlines a general approach for traversing the distance
matrix in a diagonal fashion rather than in a row-wise fashion.
`DOI: 10.1145/3357223.3362721 \
<https://www.cs.ucr.edu/~eamonn/public/GPU_Matrix_profile_VLDB_30DraftOnly.pdf>`__
See Section 3.1 and Section 3.3
The above reference outlines the use of the Pearson correlation via Welford's
centered sum-of-products along each diagonal of the distance matrix in place of the
sliding window dot product found in the original STOMP method.
`DOI: 10.1109/ICDM.2016.0085 \
<https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__
See Table II
This is a Dask distributed implementation of stump that scales
across multiple servers and is a convenience wrapper around the
parallelized `stump._stump` function
Timeseries, T_B, will be annotated with the distance location
(or index) of all its subsequences in another times series, T_A.
Return: For every subsequence, Q, in T_B, you will get a distance
and index for the closest subsequence in T_A. Thus, the array
returned will have length T_B.shape[0]-m+1. Additionally, the
left and right matrix profiles are also returned.
Note: Unlike in the Table II where T_A.shape is expected to be equal
to T_B.shape, this implementation is generalized so that the shapes of
T_A and T_B can be different. In the case where T_A.shape == T_B.shape,
then our algorithm reduces down to the same algorithm found in Table II.
Additionally, unlike STAMP where the exclusion zone is m/2, the default
exclusion zone for STOMP is m/4 (See Definition 3 and Figure 3).
For self-joins, set `ignore_trivial = True` in order to avoid the
trivial match.
Note that left and right matrix profiles are only available for self-joins.
"""
T_A = np.asarray(T_A)
T_A = T_A.copy()
if T_B is None:
T_B = T_A.copy()
ignore_trivial = True
else:
T_B = np.asarray(T_B)
T_B = T_B.copy()
ignore_trivial = False
if T_A.ndim != 1: # pragma: no cover
raise ValueError(
f"T_A is {T_A.ndim}-dimensional and must be 1-dimensional. "
"For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
)
if T_B.ndim != 1: # pragma: no cover
raise ValueError(
f"T_B is {T_B.ndim}-dimensional and must be 1-dimensional. "
"For multidimensional STUMP use `stumpy.mstump` or `stumpy.mstumped`"
)
core.check_dtype(T_A)
core.check_dtype(T_B)
core.check_window_size(m)
if ignore_trivial is False and core.are_arrays_equal(T_A, T_B): # pragma: no cover
logger.warning("Arrays T_A, T_B are equal, which implies a self-join.")
logger.warning("Try setting `ignore_trivial = True`.")
if ignore_trivial and core.are_arrays_equal(T_A, T_B) is False: # pragma: no cover
logger.warning("Arrays T_A, T_B are not equal, which implies an AB-join.")
logger.warning("Try setting `ignore_trivial = False`.")
T_A[np.isinf(T_A)] = np.nan
T_B[np.isinf(T_B)] = np.nan
T_A_subseq_isfinite = np.all(np.isfinite(core.rolling_window(T_A, m)), axis=1)
T_B_subseq_isfinite = np.all(np.isfinite(core.rolling_window(T_B, m)), axis=1)
T_A[np.isnan(T_A)] = 0
T_B[np.isnan(T_B)] = 0
M_T, Σ_T = core.compute_mean_std(T_A, m)
μ_Q, σ_Q = core.compute_mean_std(T_B, m)
T_A_subseq_isconstant = Σ_T < config.STUMPY_STDDEV_THRESHOLD
T_B_subseq_isconstant = σ_Q < config.STUMPY_STDDEV_THRESHOLD
# Avoid divide by zero
Σ_T[T_A_subseq_isconstant] = 1.0
σ_Q[T_B_subseq_isconstant] = 1.0
Σ_T_inverse = 1.0 / Σ_T
σ_Q_inverse = 1.0 / σ_Q
M_T_m_1, _ = core.compute_mean_std(T_A, m - 1)
μ_Q_m_1, _ = core.compute_mean_std(T_B, m - 1)
n_A = T_A.shape[0]
n_B = T_B.shape[0]
l = n_B - m + 1
excl_zone = int(np.ceil(m / 4))
out = np.empty((l, 4), dtype=object)
hosts = list(dask_client.ncores().keys())
nworkers = len(hosts)
if ignore_trivial:
diags = np.arange(excl_zone + 1, n_B - m + 1)
else:
diags = np.arange(-(n_B - m + 1) + 1, n_A - m + 1)
ndist_counts = _count_diagonal_ndist(diags, m, n_A, n_B)
diags_ranges = core._get_array_ranges(ndist_counts, nworkers)
diags_ranges += diags[0]
# Scatter data to Dask cluster
T_A_future = dask_client.scatter(T_A, broadcast=True)
T_B_future = dask_client.scatter(T_B, broadcast=True)
M_T_future = dask_client.scatter(M_T, broadcast=True)
μ_Q_future = dask_client.scatter(μ_Q, broadcast=True)
Σ_T_inverse_future = dask_client.scatter(Σ_T_inverse, broadcast=True)
σ_Q_inverse_future = dask_client.scatter(σ_Q_inverse, broadcast=True)
M_T_m_1_future = dask_client.scatter(M_T_m_1, broadcast=True)
μ_Q_m_1_future = dask_client.scatter(μ_Q_m_1, broadcast=True)
T_A_subseq_isfinite_future = dask_client.scatter(
T_A_subseq_isfinite, broadcast=True
)
T_B_subseq_isfinite_future = dask_client.scatter(
T_B_subseq_isfinite, broadcast=True
)
T_A_subseq_isconstant_future = dask_client.scatter(
T_A_subseq_isconstant, broadcast=True
)
T_B_subseq_isconstant_future = dask_client.scatter(
T_B_subseq_isconstant, broadcast=True
)
diags_futures = []
for i, host in enumerate(hosts):
diags_future = dask_client.scatter(
np.arange(diags_ranges[i, 0], diags_ranges[i, 1]), workers=[host]
)
diags_futures.append(diags_future)
futures = []
for i in range(len(hosts)):
futures.append(
dask_client.submit(
_stump,
T_A_future,
T_B_future,
m,
M_T_future,
μ_Q_future,
Σ_T_inverse_future,
σ_Q_inverse_future,
M_T_m_1_future,
μ_Q_m_1_future,
T_A_subseq_isfinite_future,
T_B_subseq_isfinite_future,
T_A_subseq_isconstant_future,
T_B_subseq_isconstant_future,
diags_futures[i],
ignore_trivial,
)
)
results = dask_client.gather(futures)
profile, indices = results[0]
for i in range(1, len(hosts)):
P, I = results[i]
for col in range(P.shape[1]): # pragma: no cover
cond = P[:, col] < profile[:, col]
profile[:, col] = np.where(cond, P[:, col], profile[:, col])
indices[:, col] = np.where(cond, I[:, col], indices[:, col])
out[:, 0] = profile[:, 0]
out[:, 1:4] = indices
threshold = 10e-6
if core.are_distances_too_small(out[:, 0], threshold=threshold): # pragma: no cover
logger.warning(f"A large number of values are smaller than {threshold}.")
logger.warning("For a self-join, try setting `ignore_trivial = True`.")
return out
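# --- Hedged usage sketch (not part of the library): running `stumped` end to
# end on a throwaway local Dask cluster with made-up data.
def _stumped_local_demo():  # pragma: no cover
    from dask.distributed import Client, LocalCluster

    with Client(LocalCluster(n_workers=2, threads_per_worker=1)) as dask_client:
        T = np.random.rand(10000)
        mp = stumped(dask_client, T, m=50)  # self-join with window size 50
        return mp[:, 0]  # nearest-neighbor distance for every subsequence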
| 35.866935
| 88
| 0.664306
|
de37d8c5692387b6262a18aaa9eabde15aa1ef86
| 10,251
|
py
|
Python
|
deep_rl/network/network_heads.py
|
runxuanjiang/DeepRL
|
f5c47c52d4db50577fbada17b09d739da3da67cc
|
[
"MIT"
] | null | null | null |
deep_rl/network/network_heads.py
|
runxuanjiang/DeepRL
|
f5c47c52d4db50577fbada17b09d739da3da67cc
|
[
"MIT"
] | null | null | null |
deep_rl/network/network_heads.py
|
runxuanjiang/DeepRL
|
f5c47c52d4db50577fbada17b09d739da3da67cc
|
[
"MIT"
] | 1
|
2020-02-09T08:05:48.000Z
|
2020-02-09T08:05:48.000Z
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from .network_utils import *
from .network_bodies import *
class VanillaNet(nn.Module, BaseNet):
def __init__(self, output_dim, body):
super(VanillaNet, self).__init__()
self.fc_head = layer_init(nn.Linear(body.feature_dim, output_dim))
self.body = body
self.to(Config.DEVICE)
def forward(self, x):
phi = self.body(tensor(x))
y = self.fc_head(phi)
return y
class DuelingNet(nn.Module, BaseNet):
def __init__(self, action_dim, body):
super(DuelingNet, self).__init__()
self.fc_value = layer_init(nn.Linear(body.feature_dim, 1))
self.fc_advantage = layer_init(nn.Linear(body.feature_dim, action_dim))
self.body = body
self.to(Config.DEVICE)
def forward(self, x, to_numpy=False):
phi = self.body(tensor(x))
value = self.fc_value(phi)
        advantage = self.fc_advantage(phi)
        q = value.expand_as(advantage) + (advantage - advantage.mean(1, keepdim=True).expand_as(advantage))
return q
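# --- Hedged sketch (not part of the original file): the dueling aggregation
# used in DuelingNet.forward, spelled out on a tiny hand-made tensor.
def _dueling_aggregation_demo():
    value = torch.tensor([[2.0]])                 # V(s)
    advantage = torch.tensor([[1.0, -1.0, 0.0]])  # A(s, a)
    q = value.expand_as(advantage) + (advantage - advantage.mean(1, keepdim=True))
    return q  # tensor([[3., 1., 2.]])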
class CategoricalNet(nn.Module, BaseNet):
def __init__(self, action_dim, num_atoms, body):
super(CategoricalNet, self).__init__()
self.fc_categorical = layer_init(nn.Linear(body.feature_dim, action_dim * num_atoms))
self.action_dim = action_dim
self.num_atoms = num_atoms
self.body = body
self.to(Config.DEVICE)
def forward(self, x):
phi = self.body(tensor(x))
pre_prob = self.fc_categorical(phi).view((-1, self.action_dim, self.num_atoms))
prob = F.softmax(pre_prob, dim=-1)
log_prob = F.log_softmax(pre_prob, dim=-1)
return prob, log_prob
class QuantileNet(nn.Module, BaseNet):
def __init__(self, action_dim, num_quantiles, body):
super(QuantileNet, self).__init__()
self.fc_quantiles = layer_init(nn.Linear(body.feature_dim, action_dim * num_quantiles))
self.action_dim = action_dim
self.num_quantiles = num_quantiles
self.body = body
self.to(Config.DEVICE)
def forward(self, x):
phi = self.body(tensor(x))
quantiles = self.fc_quantiles(phi)
quantiles = quantiles.view((-1, self.action_dim, self.num_quantiles))
return quantiles
class OptionCriticNet(nn.Module, BaseNet):
def __init__(self, body, action_dim, num_options):
super(OptionCriticNet, self).__init__()
self.fc_q = layer_init(nn.Linear(body.feature_dim, num_options))
self.fc_pi = layer_init(nn.Linear(body.feature_dim, num_options * action_dim))
self.fc_beta = layer_init(nn.Linear(body.feature_dim, num_options))
self.num_options = num_options
self.action_dim = action_dim
self.body = body
self.to(Config.DEVICE)
def forward(self, x):
phi = self.body(tensor(x))
q = self.fc_q(phi)
        beta = torch.sigmoid(self.fc_beta(phi))
pi = self.fc_pi(phi)
pi = pi.view(-1, self.num_options, self.action_dim)
log_pi = F.log_softmax(pi, dim=-1)
pi = F.softmax(pi, dim=-1)
return {'q': q,
'beta': beta,
'log_pi': log_pi,
'pi': pi}
class DeterministicActorCriticNet(nn.Module, BaseNet):
def __init__(self,
state_dim,
action_dim,
actor_opt_fn,
critic_opt_fn,
phi_body=None,
actor_body=None,
critic_body=None):
super(DeterministicActorCriticNet, self).__init__()
if phi_body is None: phi_body = DummyBody(state_dim)
if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
self.phi_body = phi_body
self.actor_body = actor_body
self.critic_body = critic_body
self.fc_action = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
self.fc_critic = layer_init(nn.Linear(critic_body.feature_dim, 1), 1e-3)
self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters())
self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters())
self.phi_params = list(self.phi_body.parameters())
self.actor_opt = actor_opt_fn(self.actor_params + self.phi_params)
self.critic_opt = critic_opt_fn(self.critic_params + self.phi_params)
self.to(Config.DEVICE)
def forward(self, obs):
phi = self.feature(obs)
action = self.actor(phi)
return action
def feature(self, obs):
obs = tensor(obs)
return self.phi_body(obs)
def actor(self, phi):
return torch.tanh(self.fc_action(self.actor_body(phi)))
def critic(self, phi, a):
return self.fc_critic(self.critic_body(phi, a))
class GaussianActorCriticNet(nn.Module, BaseNet):
def __init__(self,
state_dim,
action_dim,
phi_body=None,
actor_body=None,
critic_body=None):
super(GaussianActorCriticNet, self).__init__()
if phi_body is None: phi_body = DummyBody(state_dim)
if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
self.phi_body = phi_body
self.actor_body = actor_body
self.critic_body = critic_body
self.fc_action = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
self.fc_critic = layer_init(nn.Linear(critic_body.feature_dim, 1), 1e-3)
self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters())
self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters())
self.phi_params = list(self.phi_body.parameters())
self.std = nn.Parameter(torch.zeros(action_dim))
self.to(Config.DEVICE)
def forward(self, obs, action=None):
obs = tensor(obs)
phi = self.phi_body(obs)
phi_a = self.actor_body(phi)
phi_v = self.critic_body(phi)
mean = torch.tanh(self.fc_action(phi_a))
v = self.fc_critic(phi_v)
dist = torch.distributions.Normal(mean, F.softplus(self.std))
if action is None:
action = dist.sample()
log_prob = dist.log_prob(action).sum(-1).unsqueeze(-1)
entropy = dist.entropy().sum(-1).unsqueeze(-1)
return {'a': action,
'log_pi_a': log_prob,
'ent': entropy,
'mean': mean,
'v': v}
class CategoricalActorCriticNet(nn.Module, BaseNet):
def __init__(self,
state_dim,
action_dim,
phi_body=None,
actor_body=None,
critic_body=None):
super(CategoricalActorCriticNet, self).__init__()
if phi_body is None: phi_body = DummyBody(state_dim)
if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
self.phi_body = phi_body
self.actor_body = actor_body
self.critic_body = critic_body
self.fc_action = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
self.fc_critic = layer_init(nn.Linear(critic_body.feature_dim, 1), 1e-3)
self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters())
self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters())
self.phi_params = list(self.phi_body.parameters())
self.to(Config.DEVICE)
def forward(self, obs, action=None):
obs = tensor(obs)
print('obs', obs.device)
phi = self.phi_body(obs)
phi_a = self.actor_body(phi)
phi_v = self.critic_body(phi)
logits = self.fc_action(phi_a)
v = self.fc_critic(phi_v)
dist = torch.distributions.Categorical(logits=logits)
if action is None:
action = dist.sample()
log_prob = dist.log_prob(action).unsqueeze(-1)
entropy = dist.entropy().unsqueeze(-1)
return {'a': action,
'log_pi_a': log_prob,
'ent': entropy,
'v': v}
class TD3Net(nn.Module, BaseNet):
def __init__(self,
action_dim,
actor_body_fn,
critic_body_fn,
actor_opt_fn,
critic_opt_fn,
):
super(TD3Net, self).__init__()
self.actor_body = actor_body_fn()
self.critic_body_1 = critic_body_fn()
self.critic_body_2 = critic_body_fn()
self.fc_action = layer_init(nn.Linear(self.actor_body.feature_dim, action_dim), 1e-3)
self.fc_critic_1 = layer_init(nn.Linear(self.critic_body_1.feature_dim, 1), 1e-3)
self.fc_critic_2 = layer_init(nn.Linear(self.critic_body_2.feature_dim, 1), 1e-3)
self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters())
self.critic_params = list(self.critic_body_1.parameters()) + list(self.fc_critic_1.parameters()) +\
list(self.critic_body_2.parameters()) + list(self.fc_critic_2.parameters())
self.actor_opt = actor_opt_fn(self.actor_params)
self.critic_opt = critic_opt_fn(self.critic_params)
self.to(Config.DEVICE)
def forward(self, obs):
obs = tensor(obs)
return torch.tanh(self.fc_action(self.actor_body(obs)))
def q(self, obs, a):
obs = tensor(obs)
a = tensor(a)
x = torch.cat([obs, a], dim=1)
q_1 = self.fc_critic_1(self.critic_body_1(x))
q_2 = self.fc_critic_2(self.critic_body_2(x))
return q_1, q_2
| 39.125954
| 111
| 0.616135
|
f59d2002e3fc7e53aaed1a8846060d9fcdd55137
| 360
|
py
|
Python
|
rec_to_nwb/processing/nwb/components/mda/fl_mda_builder.py
|
jihyunbak/rec_to_nwb
|
6e65f8bf0a4faa4d986483ec2442ba19d70c92a9
|
[
"Apache-2.0"
] | 1
|
2021-01-20T00:26:30.000Z
|
2021-01-20T00:26:30.000Z
|
rec_to_nwb/processing/nwb/components/mda/fl_mda_builder.py
|
jihyunbak/rec_to_nwb
|
6e65f8bf0a4faa4d986483ec2442ba19d70c92a9
|
[
"Apache-2.0"
] | 12
|
2020-11-13T01:36:32.000Z
|
2022-01-23T20:35:55.000Z
|
rec_to_nwb/processing/nwb/components/mda/fl_mda_builder.py
|
jihyunbak/rec_to_nwb
|
6e65f8bf0a4faa4d986483ec2442ba19d70c92a9
|
[
"Apache-2.0"
] | 3
|
2020-10-20T06:52:45.000Z
|
2021-07-06T23:00:53.000Z
|
from rec_to_nwb.processing.nwb.components.mda.fl_mda import FlMda
class FlMdaBuilder:
def __init__(self, sampling_rate, conversion):
self.sampling_rate = sampling_rate
self.conversion = conversion
def build(self, electrode_table_region, data):
return FlMda(self.sampling_rate, self.conversion, electrode_table_region, data)
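# --- Hedged usage sketch (not part of the original module): typical use of the
# builder; `electrode_table_region` and `data` are placeholders produced
# elsewhere in the rec_to_nwb pipeline, and the numbers are made up.
def _example_build(electrode_table_region, data):
    return FlMdaBuilder(sampling_rate=30000, conversion=0.195).build(
        electrode_table_region, data)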
| 30
| 87
| 0.755556
|
ea0aae517ca3659ec4db0f252dd94e7b1c34b4cb
| 27,065
|
py
|
Python
|
backend/tests/baserow/contrib/database/field/test_link_row_field_type.py
|
calvinchengx/baserow
|
0340d5abf0a3b48154d41fd05cd2e1e05814cd66
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/contrib/database/field/test_link_row_field_type.py
|
calvinchengx/baserow
|
0340d5abf0a3b48154d41fd05cd2e1e05814cd66
|
[
"MIT"
] | 6
|
2021-04-08T22:03:06.000Z
|
2022-01-13T03:38:17.000Z
|
backend/tests/baserow/contrib/database/field/test_link_row_field_type.py
|
calvinchengx/baserow
|
0340d5abf0a3b48154d41fd05cd2e1e05814cd66
|
[
"MIT"
] | null | null | null |
import pytest
from rest_framework.status import HTTP_200_OK, HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST
from django.shortcuts import reverse
from django.db import connections
from baserow.contrib.database.fields.models import Field
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import LinkRowField
from baserow.contrib.database.fields.exceptions import (
LinkRowTableNotInSameDatabase, LinkRowTableNotProvided
)
from baserow.contrib.database.rows.handler import RowHandler
@pytest.mark.django_db
def test_link_row_field_type(data_fixture):
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user, name='Placeholder')
table = data_fixture.create_database_table(name='Example', database=database)
customers_table = data_fixture.create_database_table(name='Customers',
database=database)
cars_table = data_fixture.create_database_table(name='Cars', database=database)
unrelated_table_1 = data_fixture.create_database_table(name='Unrelated')
field_handler = FieldHandler()
row_handler = RowHandler()
# Create a primary field and some example data for the customers table.
customers_primary_field = field_handler.create_field(
user=user, table=customers_table, type_name='text', name='Name', primary=True
)
customers_row_1 = row_handler.create_row(user=user, table=customers_table, values={
f'field_{customers_primary_field.id}': 'John'
})
customers_row_2 = row_handler.create_row(user=user, table=customers_table, values={
f'field_{customers_primary_field.id}': 'Jane'
})
# Create a primary field and some example data for the cars table.
cars_primary_field = field_handler.create_field(
user=user, table=cars_table, type_name='text', name='Name', primary=True
)
row_handler.create_row(user=user, table=cars_table, values={
f'field_{cars_primary_field.id}': 'BMW'
})
row_handler.create_row(user=user, table=cars_table, values={
f'field_{cars_primary_field.id}': 'Audi'
})
with pytest.raises(LinkRowTableNotProvided):
field_handler.create_field(
user=user, table=table, type_name='link_row', name='Without table'
)
with pytest.raises(LinkRowTableNotInSameDatabase):
field_handler.create_field(
user=user, table=table, type_name='link_row', name='Unrelated',
link_row_table=unrelated_table_1
)
link_field_1 = field_handler.create_field(
user=user, table=table, type_name='link_row', name='Customer',
link_row_table=customers_table
)
link_field_2 = field_handler.create_field(
user=user, table=table, type_name='link_row', name='Customer',
link_row_table=customers_table
)
assert link_field_1.link_row_related_field.name == 'Example'
assert link_field_2.link_row_related_field.name == 'Example'
connection = connections['default']
tables = connection.introspection.table_names()
assert (
link_field_1.through_table_name ==
link_field_1.link_row_related_field.through_table_name
)
assert (
link_field_2.through_table_name ==
link_field_2.link_row_related_field.through_table_name
)
assert link_field_1.through_table_name in tables
assert link_field_2.through_table_name in tables
model = table.get_model()
table_row = model.objects.create()
getattr(table_row, f'field_{link_field_1.id}').add(customers_row_1.id)
results = getattr(table_row, f'field_{link_field_1.id}').all()
assert len(results) == 1
assert getattr(results[0], f'field_{customers_primary_field.id}') == 'John'
getattr(table_row, f'field_{link_field_2.id}').add(
customers_row_1.id, customers_row_2.id
)
results = getattr(table_row, f'field_{link_field_2.id}').all()
assert len(results) == 2
assert getattr(results[0], f'field_{customers_primary_field.id}') == 'John'
assert getattr(results[1], f'field_{customers_primary_field.id}') == 'Jane'
table_row_2 = model.objects.create()
getattr(table_row_2, f'field_{link_field_1.id}').add(customers_row_2.id)
results = getattr(table_row_2, f'field_{link_field_1.id}').all()
assert len(results) == 1
assert getattr(results[0], f'field_{customers_primary_field.id}') == 'Jane'
    # Going to change only the name of the field. This should not result in any errors
    # or schema changes.
link_field_1 = field_handler.update_field(
user, link_field_1, name='Customer 2'
)
with pytest.raises(LinkRowTableNotInSameDatabase):
field_handler.update_field(
user, link_field_1, link_row_table=unrelated_table_1
)
model = table.get_model()
assert model.objects.all().count() == 2
# Change the table, this should destroy all relations.
old_link_field_1_relation_id = link_field_1.link_row_relation_id
link_field_1 = field_handler.update_field(
user, link_field_1, link_row_table=cars_table
)
model = table.get_model()
table_rows = model.objects.all()
table_row = table_rows[0]
table_row_2 = table_rows[1]
assert link_field_1.link_row_table.id == cars_table.id
assert link_field_1.link_row_relation_id == old_link_field_1_relation_id
assert getattr(table_row, f'field_{link_field_1.id}').all().count() == 0
assert getattr(table_row, f'field_{link_field_2.id}').all().count() == 2
assert getattr(table_row_2, f'field_{link_field_1.id}').all().count() == 0
assert getattr(table_row_2, f'field_{link_field_2.id}').all().count() == 0
link_field_2 = field_handler.update_field(
user, link_field_2, new_type_name='text'
)
model = table.get_model()
table_row = model.objects.all().first()
assert getattr(table_row, f'field_{link_field_2.id}') is None
assert LinkRowField.objects.all().count() == 2
setattr(table_row, f'field_{link_field_2.id}', 'Text value')
table_row.save()
assert getattr(table_row, f'field_{link_field_2.id}') == 'Text value'
    # Delete the existing field. After that, the related field should be deleted and
    # no table named _relation_ should exist.
field_handler.delete_field(user, link_field_1)
assert LinkRowField.objects.all().count() == 0
for t in connection.introspection.table_names():
if '_relation_' in t:
assert False
    # Change the text field back into a link row field.
link_field_2 = field_handler.update_field(
user, link_field_2, new_type_name='link_row', link_row_table=customers_table
)
assert link_field_2.link_row_related_field.name == 'Example'
assert (
link_field_2.through_table_name ==
link_field_2.link_row_related_field.through_table_name
)
assert link_field_2.through_table_name in connection.introspection.table_names()
assert LinkRowField.objects.all().count() == 2
model = table.get_model()
table_row = model.objects.all().first()
getattr(table_row, f'field_{link_field_2.id}').add(
customers_row_1.id, customers_row_2.id
)
results = getattr(table_row, f'field_{link_field_2.id}').all()
assert len(results) == 2
assert getattr(results[0], f'field_{customers_primary_field.id}') == 'John'
assert getattr(results[1], f'field_{customers_primary_field.id}') == 'Jane'
@pytest.mark.django_db
def test_link_row_field_type_rows(data_fixture):
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user, name='Placeholder')
example_table = data_fixture.create_database_table(name='Example',
database=database)
customers_table = data_fixture.create_database_table(name='Customers',
database=database)
users_table = data_fixture.create_database_table(name='Users',
database=database)
field_handler = FieldHandler()
row_handler = RowHandler()
link_row_field = field_handler.create_field(
user=user,
table=example_table,
type_name='link_row',
link_row_table=customers_table
)
customers_row_1 = row_handler.create_row(user=user, table=customers_table)
customers_row_2 = row_handler.create_row(user=user, table=customers_table)
customers_row_3 = row_handler.create_row(user=user, table=customers_table)
row = row_handler.create_row(user=user, table=example_table, values={
f'field_{link_row_field.id}': [customers_row_1.id, customers_row_2.id],
})
row_2 = row_handler.create_row(user=user, table=example_table, values={
f'field_{link_row_field.id}': [customers_row_1.id],
})
example_table.name = 'Example2'
example_table.save()
customers_table.name = 'Customers2'
customers_table.save()
row_1_all = getattr(row, f'field_{link_row_field.id}').all()
row_2_all = getattr(row_2, f'field_{link_row_field.id}').all()
row_1_ids = [i.id for i in row_1_all]
row_2_ids = [i.id for i in row_2_all]
assert row_1_all.count() == 2
assert row_2_all.count() == 1
assert customers_row_1.id in row_1_ids
assert customers_row_2.id in row_1_ids
assert customers_row_1.id in row_2_ids
row = row_handler.update_row(
user=user,
table=example_table,
row_id=row.id,
values={
f'field_{link_row_field.id}': [customers_row_3.id]
}
)
row_2 = row_handler.update_row(
user=user,
table=example_table,
row_id=row_2.id,
values={
f'field_{link_row_field.id}': [customers_row_2.id, customers_row_1.id]
}
)
row_1_all = getattr(row, f'field_{link_row_field.id}').all()
row_2_all = getattr(row_2, f'field_{link_row_field.id}').all()
row_1_ids = [i.id for i in row_1_all]
row_2_ids = [i.id for i in row_2_all]
assert row_1_all.count() == 1
assert row_2_all.count() == 2
assert customers_row_3.id in row_1_ids
assert customers_row_1.id in row_2_ids
assert customers_row_2.id in row_2_ids
# Check if the relations are there via the customers table.
customers_table.refresh_from_db()
customers_model = customers_table.get_model()
related_field = link_row_field.link_row_related_field
customer_rows = customers_model.objects.all()
assert customer_rows.count() == 3
customers_row_1 = customer_rows[0]
customers_row_2 = customer_rows[1]
customers_row_3 = customer_rows[2]
customer_row_1_all = getattr(customers_row_1, f'field_{related_field.id}').all()
customer_row_2_all = getattr(customers_row_2, f'field_{related_field.id}').all()
customer_row_3_all = getattr(customers_row_3, f'field_{related_field.id}').all()
assert customer_row_1_all.count() == 1
assert customer_row_2_all.count() == 1
assert customer_row_3_all.count() == 1
customers_row_1_ids = [i.id for i in customer_row_1_all]
customers_row_2_ids = [i.id for i in customer_row_2_all]
customers_row_3_ids = [i.id for i in customer_row_3_all]
assert row_2.id in customers_row_1_ids
assert row_2.id in customers_row_2_ids
assert row.id in customers_row_3_ids
    # When changing the link row table, all the existing relations should be
    # deleted.
link_row_field = field_handler.update_field(
user=user,
field=link_row_field,
type_name='link_row',
link_row_table=users_table
)
example_table.refresh_from_db()
model = example_table.get_model()
rows = model.objects.all()
row = rows[0]
row_2 = rows[1]
assert getattr(row, f'field_{link_row_field.id}').all().count() == 0
assert getattr(row_2, f'field_{link_row_field.id}').all().count() == 0
    # Just check that the field can be deleted.
field_handler.delete_field(user=user, field=link_row_field)
assert Field.objects.all().count() == 0
@pytest.mark.django_db
def test_link_row_enhance_queryset(data_fixture, django_assert_num_queries):
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user, name='Placeholder')
example_table = data_fixture.create_database_table(name='Example',
database=database)
customers_table = data_fixture.create_database_table(name='Customers',
database=database)
field_handler = FieldHandler()
row_handler = RowHandler()
link_row_field = field_handler.create_field(
user=user,
table=example_table,
type_name='link_row',
link_row_table=customers_table
)
customers_row_1 = row_handler.create_row(user=user, table=customers_table)
customers_row_2 = row_handler.create_row(user=user, table=customers_table)
customers_row_3 = row_handler.create_row(user=user, table=customers_table)
row_handler.create_row(user=user, table=example_table, values={
f'field_{link_row_field.id}': [customers_row_1.id, customers_row_2.id],
})
row_handler.create_row(user=user, table=example_table, values={
f'field_{link_row_field.id}': [customers_row_1.id],
})
row_handler.create_row(user=user, table=example_table, values={
f'field_{link_row_field.id}': [customers_row_3.id],
})
model = example_table.get_model()
rows = list(model.objects.all().enhance_by_fields())
with django_assert_num_queries(0):
for row in rows:
list(getattr(row, f'field_{link_row_field.id}').all())
@pytest.mark.django_db
def test_link_row_field_type_api_views(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email='test@test.nl', password='password', first_name='Test1')
database = data_fixture.create_database_application(user=user, name='Placeholder')
unrelated_database = data_fixture.create_database_application(
user=user, name='Unrelated')
table = data_fixture.create_database_table(name='Example', database=database)
customers_table = data_fixture.create_database_table(
name='Customers', database=database)
cars_table = data_fixture.create_database_table(name='Cars', database=database)
unrelated_table_1 = data_fixture.create_database_table(
name='Unrelated', database=unrelated_database
)
unrelated_table_2 = data_fixture.create_database_table(name='Unrelated 2')
# Try to make a relation with a table from another database
response = api_client.post(
reverse('api:database:fields:list', kwargs={'table_id': table.id}),
{
'name': 'Link',
'type': 'link_row',
'link_row_table': unrelated_table_1.id
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json['error'] == 'ERROR_LINK_ROW_TABLE_NOT_IN_SAME_DATABASE'
assert LinkRowField.objects.all().count() == 0
# Try to make a relation with a table that we don't have access to.
response = api_client.post(
reverse('api:database:fields:list', kwargs={'table_id': table.id}),
{
'name': 'Link',
'type': 'link_row',
'link_row_table': unrelated_table_2.id
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json['error'] == 'ERROR_USER_NOT_IN_GROUP'
assert LinkRowField.objects.all().count() == 0
# Try to make a relation without providing the table
response = api_client.post(
reverse('api:database:fields:list', kwargs={'table_id': table.id}),
{
'name': 'Link',
'type': 'link_row'
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json['error'] == 'ERROR_LINK_ROW_TABLE_NOT_PROVIDED'
assert LinkRowField.objects.all().count() == 0
# Create new link row field type.
response = api_client.post(
reverse('api:database:fields:list', kwargs={'table_id': table.id}),
{
'name': 'Link 1',
'type': 'link_row',
'link_row_table': customers_table.id,
# The `link_row_related_field` is a read_only field so we deliberately set
# an unknown id to see if it is ignored.
'link_row_related_field': 999999,
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Link 1'
assert response_json['type'] == 'link_row'
assert response_json['link_row_table'] == customers_table.id
assert LinkRowField.objects.all().count() == 2
field_id = response_json['id']
field = LinkRowField.objects.all().order_by('id').first()
related_field = LinkRowField.objects.all().order_by('id').last()
assert response_json['link_row_related_field'] == related_field.id
assert response_json['link_row_related_field'] != 999999
# Check if the correct fields are correctly linked.
assert field.table.id == table.id
assert field.link_row_table.id == customers_table.id
assert related_field.table.id == customers_table.id
assert related_field.link_row_table.id == table.id
assert field.link_row_relation_id == related_field.link_row_relation_id
    # Just fetch the field and check if it has the correct values.
response = api_client.get(
reverse('api:database:fields:item', kwargs={'field_id': field_id}),
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Link 1'
assert response_json['type'] == 'link_row'
assert response_json['link_row_table'] == customers_table.id
assert response_json['link_row_related_field'] == related_field.id
    # Just fetch the related field and check if it has the correct values.
response = api_client.get(
reverse('api:database:fields:item', kwargs={'field_id': related_field.id}),
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Example'
assert response_json['link_row_table'] == table.id
assert response_json['link_row_related_field'] == field.id
# Only updating the name of the field without changing anything else
response = api_client.patch(
reverse('api:database:fields:item', kwargs={'field_id': field_id}),
{'name': 'Link new name'},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Link new name'
assert response_json['type'] == 'link_row'
assert response_json['link_row_table'] == customers_table.id
assert response_json['link_row_related_field'] == related_field.id
# Only try to update the link_row_related_field, but this is a read only field so
# nothing should happen.
response = api_client.patch(
reverse('api:database:fields:item', kwargs={'field_id': field_id}),
{'link_row_related_field': 9999},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Link new name'
assert response_json['type'] == 'link_row'
assert response_json['link_row_table'] == customers_table.id
assert response_json['link_row_related_field'] == related_field.id
response = api_client.patch(
reverse('api:database:fields:item', kwargs={'field_id': field_id}),
{'link_row_table': cars_table.id},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['name'] == 'Link new name'
assert response_json['type'] == 'link_row'
assert response_json['link_row_table'] == cars_table.id
assert response_json['link_row_related_field'] == related_field.id
field.refresh_from_db()
related_field.refresh_from_db()
# Check if the correct fields are still linked.
assert field.table.id == table.id
assert field.link_row_table.id == cars_table.id
assert related_field.table.id == cars_table.id
assert related_field.link_row_table.id == table.id
url = reverse('api:database:fields:item', kwargs={'field_id': field_id})
response = api_client.delete(url, HTTP_AUTHORIZATION=f'JWT {token}')
assert response.status_code == HTTP_204_NO_CONTENT
assert LinkRowField.objects.all().count() == 0
@pytest.mark.django_db
def test_link_row_field_type_api_row_views(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
database = data_fixture.create_database_application(user=user, name='Placeholder')
example_table = data_fixture.create_database_table(name='Example',
database=database)
customers_table = data_fixture.create_database_table(name='Customers',
database=database)
grid = data_fixture.create_grid_view(table=example_table)
data_fixture.create_text_field(
name='Name',
table=example_table,
primary=True
)
customers_primary = data_fixture.create_text_field(
name='Customer name',
table=customers_table,
primary=True
)
field_handler = FieldHandler()
row_handler = RowHandler()
link_row_field = field_handler.create_field(
user=user,
table=example_table,
type_name='link_row',
link_row_table=customers_table
)
customers_row_1 = row_handler.create_row(
user=user, table=customers_table,
values={f'field_{customers_primary.id}': 'John Doe'}
)
customers_row_2 = row_handler.create_row(
user=user, table=customers_table,
values={f'field_{customers_primary.id}': 'Jane Doe'}
)
customers_row_3 = row_handler.create_row(user=user, table=customers_table)
response = api_client.post(
reverse('api:database:rows:list', kwargs={'table_id': example_table.id}),
{
f'field_{link_row_field.id}': 'Random',
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json['error'] == 'ERROR_REQUEST_BODY_VALIDATION'
assert (
response_json['detail'][f'field_{link_row_field.id}'][0]['code'] ==
'not_a_list'
)
response = api_client.post(
reverse('api:database:rows:list', kwargs={'table_id': example_table.id}),
{
f'field_{link_row_field.id}': ['a'],
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json['error'] == 'ERROR_REQUEST_BODY_VALIDATION'
assert (
response_json['detail'][f'field_{link_row_field.id}']['0'][0]['code'] ==
'invalid'
)
response = api_client.post(
reverse('api:database:rows:list', kwargs={'table_id': example_table.id}),
{
f'field_{link_row_field.id}': [customers_row_1.id, customers_row_2.id, 999],
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
row_id = response_json['id']
assert response.status_code == HTTP_200_OK
assert len(response_json[f'field_{link_row_field.id}']) == 2
assert response_json[f'field_{link_row_field.id}'][0]['id'] == customers_row_1.id
assert response_json[f'field_{link_row_field.id}'][0]['value'] == 'John Doe'
assert response_json[f'field_{link_row_field.id}'][1]['id'] == customers_row_2.id
assert response_json[f'field_{link_row_field.id}'][1]['value'] == 'Jane Doe'
model = example_table.get_model()
assert model.objects.all().count() == 1
url = reverse('api:database:rows:item', kwargs={
'table_id': example_table.id,
'row_id': row_id
})
response = api_client.patch(
url,
{
f'field_{link_row_field.id}': [],
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json[f'field_{link_row_field.id}']) == 0
url = reverse('api:database:rows:item', kwargs={
'table_id': example_table.id,
'row_id': row_id
})
response = api_client.patch(
url,
{
f'field_{link_row_field.id}': [customers_row_2.id, customers_row_3.id],
},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json[f'field_{link_row_field.id}']) == 2
assert response_json[f'field_{link_row_field.id}'][0]['id'] == customers_row_2.id
assert response_json[f'field_{link_row_field.id}'][0]['value'] == 'Jane Doe'
assert response_json[f'field_{link_row_field.id}'][1]['id'] == customers_row_3.id
assert not response_json[f'field_{link_row_field.id}'][1]['value']
url = reverse('api:database:views:grid:list', kwargs={'view_id': grid.id})
response = api_client.get(
url,
**{'HTTP_AUTHORIZATION': f'JWT {token}'}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json['count'] == 1
assert response_json['results'][0]['id'] == row_id
assert len(response_json['results'][0][f'field_{link_row_field.id}']) == 2
url = reverse('api:database:rows:item', kwargs={
'table_id': example_table.id,
'row_id': row_id
})
response = api_client.delete(url, HTTP_AUTHORIZATION=f'JWT {token}')
assert response.status_code == HTTP_204_NO_CONTENT
assert model.objects.all().count() == 0
response = api_client.post(
reverse('api:database:rows:list', kwargs={'table_id': example_table.id}),
{},
format='json',
HTTP_AUTHORIZATION=f'JWT {token}'
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json[f'field_{link_row_field.id}']) == 0
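# A minimal sketch (comments only, not part of the test run) of the payload shape
# the row endpoints above expect for a link_row field: the value is always a list
# of related row ids, and ids that do not exist (such as 999 above) are silently
# dropped rather than rejected, e.g.
#   api_client.patch(
#       reverse('api:database:rows:item',
#               kwargs={'table_id': example_table.id, 'row_id': row_id}),
#       {f'field_{link_row_field.id}': [customers_row_1.id]},
#       format='json',
#       HTTP_AUTHORIZATION=f'JWT {token}',
#   )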
| 38.942446
| 88
| 0.682505
|
5073ded6297b26ddf4760eed26880cdbcafe22bf
| 101,572
|
py
|
Python
|
salt/config.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
salt/config.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
salt/config.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
All salt configuration loading and defaults should be in this module
'''
from __future__ import absolute_import, generators
# Import python libs
import os
import re
import sys
import glob
import time
import codecs
import logging
from copy import deepcopy
# Import third party libs
import yaml
try:
yaml.Loader = yaml.CLoader
yaml.Dumper = yaml.CDumper
except Exception:
pass
# pylint: disable=import-error,no-name-in-module
import salt.ext.six as six
from salt.ext.six import string_types, text_type
from salt.ext.six.moves.urllib.parse import urlparse
# pylint: enable=import-error,no-name-in-module
# Import salt libs
import salt.utils
import salt.utils.network
import salt.syspaths
import salt.utils.validate.path
import salt.utils.xdg
import salt.exceptions
import salt.utils.sdb
log = logging.getLogger(__name__)
_DFLT_LOG_DATEFMT = '%H:%M:%S'
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
_DFLT_LOG_FMT_LOGFILE = (
'%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
)
if salt.utils.is_windows():
# Since an 'ipc_mode' of 'ipc' will never work on Windows due to lack of
# support in ZeroMQ, we want the default to be something that has a
# chance of working.
_DFLT_IPC_MODE = 'tcp'
_DFLT_MULTIPROCESSING_MODE = False
else:
_DFLT_IPC_MODE = 'ipc'
_DFLT_MULTIPROCESSING_MODE = True
FLO_DIR = os.path.join(
os.path.dirname(__file__),
'daemons', 'flo')
VALID_OPTS = {
# The address of the salt master. May be specified as IP address or hostname
'master': str,
# The TCP/UDP port of the master to connect to in order to listen to publications
'master_port': int,
# The behaviour of the minion when connecting to a master. Can specify 'failover',
# or 'func'. If 'func' is specified, the 'master' option should be set to an exec
# module function to run to determine the master hostname.
'master_type': str,
# Specify the format in which the master address will be specified. Can
# specify 'default' or 'ip_only'. If 'ip_only' is specified, then the
# master address will not be split into IP and PORT.
'master_uri_format': str,
# The fingerprint of the master key may be specified to increase security. Generate
# a master fingerprint with `salt-key -F master`
'master_finger': str,
# Selects a random master when starting a minion up in multi-master mode
'master_shuffle': bool,
    # When in multi-master mode, temporarily remove a master from the list if a connection
    # is interrupted and try another master in the list.
'master_alive_interval': int,
# The name of the signing key-pair
'master_sign_key_name': str,
# Sign the master auth-replies with a cryptographic signature of the masters public key.
'master_sign_pubkey': bool,
# Enables verification of the master-public-signature returned by the master in auth-replies.
# Must also set master_sign_pubkey for this to work
'verify_master_pubkey_sign': bool,
# If verify_master_pubkey_sign is enabled, the signature is only verified, if the public-key of the master changes.
# If the signature should always be verified, this can be set to True.
'always_verify_signature': bool,
# The name of the file in the masters pki-directory that holds the pre-calculated signature of the masters public-key.
'master_pubkey_signature': str,
# Instead of computing the signature for each auth-reply, use a pre-calculated signature.
# The master_pubkey_signature must also be set for this.
'master_use_pubkey_signature': bool,
# The key fingerprint of the higher-level master for the syndic to verify it is talking to the intended
# master
'syndic_finger': str,
# The user under which the daemon should run
'user': str,
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
'root_dir': str,
# The directory used to store public key data
'pki_dir': str,
# A unique identifier for this daemon
'id': str,
# The directory to store all cache files.
'cachedir': str,
# Flag to cache jobs locally.
'cache_jobs': bool,
# The path to the salt configuration file
'conf_file': str,
# The directory containing unix sockets for things like the event bus
'sock_dir': str,
# Specifies how the file server should backup files, if enabled. The backups
# live in the cache dir.
'backup_mode': str,
# A default renderer for all operations on this host
'renderer': str,
# A flag indicating that a highstate run should immediately cease if a failure occurs.
'failhard': bool,
# A flag to indicate that highstate runs should force refresh the modules prior to execution
'autoload_dynamic_modules': bool,
# Force the minion into a single environment when it fetches files from the master
'environment': str,
# Force the minion into a single pillar root when it fetches pillar data from the master
'pillarenv': str,
# Allows a user to provide an alternate name for top.sls
'state_top': str,
# States to run when a minion starts up
'startup_states': str,
# List of startup states
'sls_list': list,
# A top file to execute if startup_states == 'top'
'top_file': str,
# Location of the files a minion should look for. Set to 'local' to never ask the master.
'file_client': str,
# When using a local file_client, this parameter is used to allow the client to connect to
# a master for remote execution.
'use_master_when_local': bool,
# A map of saltenvs and fileserver backend locations
'file_roots': dict,
# A map of saltenvs and fileserver backend locations
'pillar_roots': dict,
# The type of hashing algorithm to use when doing file comparisons
'hash_type': str,
# FIXME Does not appear to be implemented
'disable_modules': list,
# FIXME Does not appear to be implemented
'disable_returners': list,
# Tell the loader to only load modules in this list
'whitelist_modules': list,
# A list of additional directories to search for salt modules in
'module_dirs': list,
# A list of additional directories to search for salt returners in
'returner_dirs': list,
# A list of additional directories to search for salt states in
'states_dirs': list,
# A list of additional directories to search for salt grains in
'grains_dirs': list,
# A list of additional directories to search for salt renderers in
'render_dirs': list,
# A list of additional directories to search for salt outputters in
'outputter_dirs': list,
# A list of additional directories to search for salt utilities in. (Used by the loader
# to populate __utils__)
'utils_dirs': list,
# salt cloud providers
'providers': dict,
# First remove all modules during any sync operation
'clean_dynamic_modules': bool,
# A flag indicating that a master should accept any minion connection without any authentication
'open_mode': bool,
    # Whether or not processes should be forked when needed. The alternative is to use threading.
'multiprocessing': bool,
# Schedule a mine update every n number of seconds
'mine_interval': int,
# The ipc strategy. (i.e., sockets versus tcp, etc)
'ipc_mode': str,
    # Enable ipv6 support for daemons
'ipv6': bool,
# The chunk size to use when streaming files with the file server
'file_buffer_size': int,
# The TCP port on which minion events should be published if ipc_mode is TCP
'tcp_pub_port': int,
# The TCP port on which minion events should be pulled if ipc_mode is TCP
'tcp_pull_port': int,
# The TCP port on which events for the master should be pulled if ipc_mode is TCP
'tcp_master_pub_port': int,
# The TCP port on which events for the master should be pulled if ipc_mode is TCP
'tcp_master_pull_port': int,
    # The TCP port on which events for the master should be pulled and then republished onto
# the event bus on the master
'tcp_master_publish_pull': int,
# The TCP port for mworkers to connect to on the master
'tcp_master_workers': int,
# The file to send logging data to
'log_file': str,
# The level of verbosity at which to log
'log_level': bool,
# The log level to log to a given file
'log_level_logfile': bool,
# The format to construct dates in log files
'log_datefmt': str,
# The dateformat for a given logfile
'log_datefmt_logfile': str,
# The format for console logs
'log_fmt_console': str,
# The format for a given log file
'log_fmt_logfile': tuple,
# A dictionary of logging levels
'log_granular_levels': dict,
# If an event is above this size, it will be trimmed before putting it on the event bus
'max_event_size': int,
# Always execute states with test=True if this flag is set
'test': bool,
# Tell the loader to attempt to import *.pyx cython files if cython is available
'cython_enable': bool,
# Tell the client to show minions that have timed out
'show_timeout': bool,
# Tell the client to display the jid when a job is published
'show_jid': bool,
# Tells the highstate outputter to show successful states. False will omit successes.
'state_verbose': bool,
# Specify the format for state outputs. See highstate outputter for additional details.
'state_output': str,
# When true, states run in the order defined in an SLS file, unless requisites re-order them
'state_auto_order': bool,
# Fire events as state chunks are processed by the state compiler
'state_events': bool,
# The number of seconds a minion should wait before retry when attempting authentication
'acceptance_wait_time': float,
# The number of seconds a minion should wait before giving up during authentication
'acceptance_wait_time_max': float,
# Retry a connection attempt if the master rejects a minion's public key
'rejected_retry': bool,
# The interval in which a daemon's main loop should attempt to perform all necessary tasks
# for normal operation
'loop_interval': float,
# Perform pre-flight verification steps before daemon startup, such as checking configuration
# files and certain directories.
'verify_env': bool,
# The grains dictionary for a minion, containing specific "facts" about the minion
'grains': dict,
    # Allow a daemon to function even if the key directories are not secured
'permissive_pki_access': bool,
# The path to a directory to pull in configuration file includes
'default_include': str,
# If a minion is running an esky build of salt, upgrades can be performed using the url
# defined here. See saltutil.update() for additional information
'update_url': bool,
# If using update_url with saltutil.update(), provide a list of services to be restarted
# post-install
'update_restart_services': list,
# The number of seconds to sleep between retrying an attempt to resolve the hostname of a
# salt master
'retry_dns': float,
# set the zeromq_reconnect_ivl option on the minion.
# http://lists.zeromq.org/pipermail/zeromq-dev/2011-January/008845.html
'recon_max': float,
# If recon_randomize is set, this specifies the lower bound for the randomized period
'recon_default': float,
# Tells the minion to choose a bounded, random interval to have zeromq attempt to reconnect
# in the event of a disconnect event
'recon_randomize': float, # FIXME This should really be a bool, according to the implementation
# Specify a returner in which all events will be sent to. Requires that the returner in question
# have an event_return(event) function!
'event_return': str,
# The number of events to queue up in memory before pushing them down the pipe to an event returner
# specified by 'event_return'
'event_return_queue': int,
# Only forward events to an event returner if it matches one of the tags in this list
'event_return_whitelist': list,
# Events matching a tag in this list should never be sent to an event returner.
'event_return_blacklist': list,
# The source location for the winrepo sls files
'win_repo_source_dir': str,
    # The pidfile to write out to when a daemon starts
'pidfile': str,
# Used with the SECO range master tops system
'range_server': str,
# The tcp keepalive interval to set on TCP ports. This setting can be used to tune salt connectivity
    # issues in messy network environments with misbehaving firewalls
'tcp_keepalive': bool,
# Sets zeromq TCP keepalive idle. May be used to tune issues with minion disconnects
'tcp_keepalive_idle': float,
# Sets zeromq TCP keepalive count. May be used to tune issues with minion disconnects
'tcp_keepalive_cnt': float,
# Sets zeromq TCP keepalive interval. May be used to tune issues with minion disconnects.
'tcp_keepalive_intvl': float,
# The network interface for a daemon to bind to
'interface': str,
# The port for a salt master to broadcast publications on. This will also be the port minions
    # connect to in order to listen for publications.
'publish_port': int,
# TODO unknown option!
'auth_mode': int,
# Set the zeromq high water mark on the publisher interface.
# http://api.zeromq.org/3-2:zmq-setsockopt
'pub_hwm': int,
# The number of MWorker processes for a master to startup. This number needs to scale up as
# the number of connected minions increases.
'worker_threads': int,
# The port for the master to listen to returns on. The minion needs to connect to this port
# to send returns.
'ret_port': int,
# The number of hours to keep jobs around in the job cache on the master
'keep_jobs': int,
# A master-only copy of the file_roots dictionary, used by the state compiler
'master_roots': dict,
'gitfs_remotes': list,
'gitfs_mountpoint': str,
'gitfs_root': str,
'gitfs_base': str,
'gitfs_user': str,
'gitfs_password': str,
'gitfs_insecure_auth': bool,
'gitfs_privkey': str,
'gitfs_pubkey': str,
'gitfs_passphrase': str,
'gitfs_env_whitelist': list,
'gitfs_env_blacklist': list,
'hgfs_remotes': list,
'hgfs_mountpoint': str,
'hgfs_root': str,
'hgfs_base': str,
'hgfs_branch_method': str,
'hgfs_env_whitelist': list,
'hgfs_env_blacklist': list,
'svnfs_remotes': list,
'svnfs_mountpoint': str,
'svnfs_root': str,
'svnfs_trunk': str,
'svnfs_branches': str,
'svnfs_tags': str,
'svnfs_env_whitelist': list,
'svnfs_env_blacklist': list,
'minionfs_env': str,
'minionfs_mountpoint': str,
'minionfs_whitelist': list,
'minionfs_blacklist': list,
# Specify a list of external pillar systems to use
'ext_pillar': list,
# Reserved for future use to version the pillar structure
'pillar_version': int,
# Whether or not a copy of the master opts dict should be rendered into minion pillars
'pillar_opts': bool,
'pillar_safe_render_error': bool,
'pillar_source_merging_strategy': str,
'ping_on_rotate': bool,
'peer': dict,
'preserve_minion_cache': bool,
'syndic_master': str,
'runner_dirs': list,
'client_acl': dict,
'client_acl_blacklist': dict,
'sudo_acl': bool,
'external_auth': dict,
'token_expire': int,
'file_recv': bool,
'file_recv_max_size': int,
'file_ignore_regex': bool,
'file_ignore_glob': bool,
'fileserver_backend': list,
'fileserver_followsymlinks': bool,
'fileserver_ignoresymlinks': bool,
'fileserver_limit_traversal': bool,
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
# higher than the system default in order to account for the way zeromq consumes file handles.
'max_open_files': int,
# Automatically accept any key provided to the master. Implies that the key will be preserved
# so that subsequent connections will be authenticated even if this option has later been
# turned off.
'auto_accept': bool,
'autosign_timeout': int,
# A mapping of external systems that can be used to generate topfile data.
'master_tops': bool, # FIXME Should be dict?
# A flag that should be set on a top-level master when it is ordering around subordinate masters
# via the use of a salt syndic
'order_masters': bool,
# Whether or not to cache jobs so that they can be examined later on
'job_cache': bool,
# Define a returner to be used as an external job caching storage backend
'ext_job_cache': str,
# Specify a returner for the master to use as a backend storage system to cache jobs returns
# that it receives
'master_job_cache': str,
# The minion data cache is a cache of information about the minions stored on the master.
# This information is primarily the pillar and grains data. The data is cached in the master
# cachedir under the name of the minion and used to predetermine what minions are expected to
# reply from executions.
'minion_data_cache': bool,
# The number of seconds between AES key rotations on the master
'publish_session': int,
# Defines a salt reactor. See http://docs.saltstack.com/en/latest/topics/reactor/
'reactor': list,
# The TTL for the cache of the reactor configuration
'reactor_refresh_interval': int,
# The number of workers for the runner/wheel in the reactor
'reactor_worker_threads': int,
# The queue size for workers in the reactor
'reactor_worker_hwm': int,
'serial': str,
'search': str,
# The update interval, in seconds, for the master maintenance process to update the search
# index
'search_index_interval': int,
# A compound target definition. See: http://docs.saltstack.com/en/latest/topics/targeting/nodegroups.html
'nodegroups': dict,
# The logfile location for salt-key
'key_logfile': str,
'win_repo': str,
'win_repo_mastercachefile': str,
'win_gitrepos': list,
# Set a hard limit for the amount of memory modules can consume on a minion.
'modules_max_memory': int,
# The number of minutes between the minion refreshing its cache of grains
'grains_refresh_every': int,
# Use lspci to gather system data for grains on a minion
'enable_lspci': bool,
# The number of seconds for the salt client to wait for additional syndics to
# check in with their lists of expected minions before giving up
'syndic_wait': int,
# If this is set to True leading spaces and tabs are stripped from the start
# of a line to a block.
'jinja_lstrip_blocks': bool,
# If this is set to True the first newline after a Jinja block is removed
'jinja_trim_blocks': bool,
# FIXME Appears to be unused
'minion_id_caching': bool,
# If set, the master will sign all publications before they are sent out
'sign_pub_messages': bool,
# The size of key that should be generated when creating new keys
'keysize': int,
    # The transport system for this daemon. (i.e. zeromq, raet, etc)
'transport': str,
# FIXME Appears to be unused
'enumerate_proxy_minions': bool,
# The number of seconds to wait when the client is requesting information about running jobs
'gather_job_timeout': int,
# The number of seconds to wait before timing out an authentication request
'auth_timeout': int,
# The number of attempts to authenticate to a master before giving up
'auth_tries': int,
# Never give up when trying to authenticate to a master
'auth_safemode': bool,
'random_master': bool,
# An upper bound for the amount of time for a minion to sleep before attempting to
# reauth after a restart.
'random_reauth_delay': int,
# The number of seconds for a syndic to poll for new messages that need to be forwarded
'syndic_event_forward_timeout': float,
# The number of seconds for the syndic to spend polling the event bus
'syndic_max_event_process_time': float,
# The length that the syndic event queue must hit before events are popped off and forwarded
'syndic_jid_forward_cache_hwm': int,
'ssh_passwd': str,
'ssh_port': str,
'ssh_sudo': bool,
'ssh_timeout': float,
'ssh_user': str,
'ssh_scan_ports': str,
'ssh_scan_timeout': float,
'ssh_identities_only': bool,
# Enable ioflo verbose logging. Warning! Very verbose!
'ioflo_verbose': int,
'ioflo_period': float,
# Set ioflo to realtime. Useful only for testing/debugging to simulate many ioflo periods very quickly.
'ioflo_realtime': bool,
# Location for ioflo logs
'ioflo_console_logdir': str,
# The port to bind to when bringing up a RAET daemon
'raet_port': int,
'raet_alt_port': int,
'raet_mutable': bool,
'raet_main': bool,
'raet_clear_remotes': bool,
'raet_clear_remote_masters': bool,
'raet_road_bufcnt': int,
'raet_lane_bufcnt': int,
'cluster_mode': bool,
'cluster_masters': list,
'sqlite_queue_dir': str,
'queue_dirs': list,
    # Instructs the minion to ping its master(s) every n seconds. Used
# primarily as a mitigation technique against minion disconnects.
'ping_interval': int,
    # Instructs the salt CLI to print a summary of minion responses before returning
'cli_summary': bool,
# The number of minions the master should allow to connect. Can have performance implications
# in large setups.
'max_minions': int,
'username': str,
'password': str,
    # Use zmq.SUBSCRIBE to limit listening sockets to only process messages bound for them
'zmq_filtering': bool,
# Connection caching. Can greatly speed up salt performance.
'con_cache': bool,
'rotate_aes_key': bool,
# Cache ZeroMQ connections. Can greatly improve salt performance.
'cache_sreqs': bool,
# Can be set to override the python_shell=False default in the cmd module
'cmd_safe': bool,
# Used strictly for performance testing in RAET.
'dummy_publisher': bool,
# Used by salt-api for master requests timeout
'rest_timeout': int,
'sudo_user': str,
}
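# An illustrative reading of the VALID_OPTS map above (comments only, not
# executed): each key names a configuration option and each value is the expected
# Python type, which _validate_opts() further down uses to coerce or flag
# mismatched values, e.g.
#   VALID_OPTS['master_port']          # -> int
#   VALID_OPTS['master_port']('4506')  # -> 4506, so string ports still validate
#   VALID_OPTS['file_roots']           # -> dict, checked via isinstance()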
# default configurations
DEFAULT_MINION_OPTS = {
'interface': '0.0.0.0',
'master': 'salt',
'master_type': 'str',
'master_uri_format': 'default',
'master_port': '4506',
'master_finger': '',
'master_shuffle': False,
'master_alive_interval': 0,
'verify_master_pubkey_sign': False,
'always_verify_signature': False,
'master_sign_key_name': 'master_sign',
'syndic_finger': '',
'user': 'root',
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
'id': None,
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
'cache_jobs': False,
'grains_cache': False,
'grains_cache_expiration': 300,
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'backup_mode': '',
'renderer': 'yaml_jinja',
'failhard': False,
'autoload_dynamic_modules': True,
'environment': None,
'pillarenv': None,
'extension_modules': '',
'state_top': 'top.sls',
'startup_states': '',
'sls_list': [],
'top_file': '',
'file_client': 'remote',
'use_master_when_local': False,
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
},
'fileserver_limit_traversal': False,
'file_recv': False,
'file_recv_max_size': 100,
'file_ignore_regex': None,
'file_ignore_glob': None,
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
},
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'hash_type': 'md5',
'disable_modules': [],
'disable_returners': [],
'whitelist_modules': [],
'module_dirs': [],
'returner_dirs': [],
'grains_dirs': [],
'states_dirs': [],
'render_dirs': [],
'outputter_dirs': [],
'utils_dirs': [],
'providers': {},
'clean_dynamic_modules': True,
'open_mode': False,
'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': _DFLT_MULTIPROCESSING_MODE,
'mine_interval': 60,
'ipc_mode': _DFLT_IPC_MODE,
'ipv6': False,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
'tcp_pull_port': 4511,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'minion'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
'max_event_size': 1048576,
'test': False,
'ext_job_cache': '',
'cython_enable': False,
'state_verbose': True,
'state_output': 'full',
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'acceptance_wait_time': 10,
'acceptance_wait_time_max': 0,
'rejected_retry': False,
'loop_interval': 1,
'verify_env': True,
'grains': {},
'permissive_pki_access': False,
'default_include': 'minion.d/*.conf',
'update_url': False,
'update_restart_services': [],
'retry_dns': 30,
'recon_max': 10000,
'recon_default': 1000,
'recon_randomize': True,
'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
'random_reauth_delay': 10,
'win_repo_source_dir': 'salt://win/repo/',
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-minion.pid'),
'range_server': 'range:80',
'tcp_keepalive': True,
'tcp_keepalive_idle': 300,
'tcp_keepalive_cnt': -1,
'tcp_keepalive_intvl': -1,
'modules_max_memory': -1,
'grains_refresh_every': 0,
'minion_id_caching': True,
'keysize': 2048,
'transport': 'zeromq',
'auth_timeout': 60,
'auth_tries': 7,
'auth_safemode': False,
'random_master': False,
'minion_floscript': os.path.join(FLO_DIR, 'minion.flo'),
'caller_floscript': os.path.join(FLO_DIR, 'caller.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.1,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4510,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': False,
'raet_clear_remotes': True,
'raet_clear_remote_masters': True,
'raet_road_bufcnt': 2,
'raet_lane_bufcnt': 100,
'cluster_mode': False,
'cluster_masters': [],
'restart_on_error': False,
'ping_interval': 0,
'username': None,
'password': None,
'zmq_filtering': False,
'zmq_monitor': False,
'cache_sreqs': True,
'cmd_safe': True,
'sudo_user': '',
}
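# A rough orientation sketch (hypothetical values, not executed): the minion
# defaults above are merged with whatever the config file provides, with the
# file's values winning. Conceptually:
#   opts = DEFAULT_MINION_OPTS.copy()
#   opts.update({'master': 'salt01.example.com', 'hash_type': 'sha256'})
# The real merge, plus additional fix-ups, happens in apply_minion_config(),
# which is defined later in this module.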
DEFAULT_MASTER_OPTS = {
'interface': '0.0.0.0',
'publish_port': '4505',
'pub_hwm': 1000,
'auth_mode': 1,
'user': 'root',
'worker_threads': 5,
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
'ret_port': '4506',
'timeout': 5,
'keep_jobs': 24,
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'),
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'),
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
},
'master_roots': {
'base': [salt.syspaths.BASE_MASTER_ROOTS_DIR],
},
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
},
'file_client': 'local',
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'hgfs_remotes': [],
'hgfs_mountpoint': '',
'hgfs_root': '',
'hgfs_base': 'default',
'hgfs_branch_method': 'branches',
'hgfs_env_whitelist': [],
'hgfs_env_blacklist': [],
'show_timeout': True,
'show_jid': False,
'svnfs_remotes': [],
'svnfs_mountpoint': '',
'svnfs_root': '',
'svnfs_trunk': 'trunk',
'svnfs_branches': 'branches',
'svnfs_tags': 'tags',
'svnfs_env_whitelist': [],
'svnfs_env_blacklist': [],
'max_event_size': 1048576,
'minionfs_env': 'base',
'minionfs_mountpoint': '',
'minionfs_whitelist': [],
'minionfs_blacklist': [],
'ext_pillar': [],
'pillar_version': 2,
'pillar_opts': False,
'pillar_safe_render_error': True,
'pillar_source_merging_strategy': 'smart',
'ping_on_rotate': False,
'peer': {},
'preserve_minion_cache': False,
'syndic_master': '',
'runner_dirs': [],
'outputter_dirs': [],
'client_acl': {},
'client_acl_blacklist': {},
'sudo_acl': False,
'external_auth': {},
'token_expire': 43200,
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'extmods'),
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
'file_ignore_regex': None,
'file_ignore_glob': None,
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'fileserver_limit_traversal': False,
'max_open_files': 100000,
'hash_type': 'md5',
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'master'),
'open_mode': False,
'auto_accept': False,
'renderer': 'yaml_jinja',
'failhard': False,
'state_top': 'top.sls',
'master_tops': {},
'order_masters': False,
'job_cache': True,
'ext_job_cache': '',
'master_job_cache': 'local_cache',
'minion_data_cache': True,
'enforce_mine_cache': False,
'ipc_mode': _DFLT_IPC_MODE,
'ipv6': False,
'tcp_master_pub_port': 4512,
'tcp_master_pull_port': 4513,
'tcp_master_publish_pull': 4514,
'tcp_master_workers': 4515,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'master'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-master.pid'),
'publish_session': 86400,
'range_server': 'range:80',
'reactor': [],
'reactor_refresh_interval': 60,
'reactor_worker_threads': 10,
'reactor_worker_hwm': 10000,
'event_return': '',
'event_return_queue': 0,
'event_return_whitelist': [],
'event_return_blacklist': [],
'serial': 'msgpack',
'state_verbose': True,
'state_output': 'full',
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'search': '',
'search_index_interval': 3600,
'loop_interval': 60,
'nodegroups': {},
'cython_enable': False,
'enable_gpu_grains': False,
# XXX: Remove 'key_logfile' support in 2014.1.0
'key_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'key'),
'verify_env': True,
'permissive_pki_access': False,
'default_include': 'master.d/*.conf',
'win_repo': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
'win_repo_mastercachefile': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR,
'win', 'repo', 'winrepo.p'),
'win_gitrepos': ['https://github.com/saltstack/salt-winrepo.git'],
'syndic_wait': 5,
'jinja_lstrip_blocks': False,
'jinja_trim_blocks': False,
'sign_pub_messages': False,
'keysize': 2048,
'transport': 'zeromq',
'enumerate_proxy_minions': False,
'gather_job_timeout': 5,
'syndic_event_forward_timeout': 0.5,
'syndic_max_event_process_time': 0.5,
'syndic_jid_forward_cache_hwm': 100,
'ssh_passwd': '',
'ssh_port': '22',
'ssh_sudo': False,
'ssh_timeout': 60,
'ssh_user': 'root',
'ssh_scan_ports': '22',
'ssh_scan_timeout': 0.01,
'ssh_identities_only': False,
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.01,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4506,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': True,
'raet_clear_remotes': False,
'raet_clear_remote_masters': True,
'raet_road_bufcnt': 2,
'raet_lane_bufcnt': 100,
'cluster_mode': False,
'cluster_masters': [],
'sqlite_queue_dir': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'queues'),
'queue_dirs': [],
'cli_summary': False,
'max_minions': 0,
'master_sign_key_name': 'master_sign',
'master_sign_pubkey': False,
'master_pubkey_signature': 'master_pubkey_signature',
'master_use_pubkey_signature': False,
'zmq_filtering': False,
'zmq_monitor': False,
'con_cache': False,
'rotate_aes_key': True,
'cache_sreqs': True,
'dummy_pub': False,
}
# ----- Salt Cloud Configuration Defaults ----------------------------------->
CLOUD_CONFIG_DEFAULTS = {
'verify_env': True,
'default_include': 'cloud.conf.d/*.conf',
# Global defaults
'ssh_auth': '',
'keysize': 4096,
'os': '',
'script': 'bootstrap-salt',
'start_action': None,
'enable_hard_maps': False,
'delete_sshkeys': False,
# Custom deploy scripts
'deploy_scripts_search_path': 'cloud.deploy.d',
# Logging defaults
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'cloud'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
}
DEFAULT_API_OPTS = {
# ----- Salt master settings overridden by Salt-API --------------------->
'pidfile': '/var/run/salt-api.pid',
'logfile': '/var/log/salt/api',
'rest_timeout': 300,
# <---- Salt master settings overridden by Salt-API ----------------------
}
DEFAULT_SPM_OPTS = {
# ----- Salt master settings overridden by SPM --------------------->
'reactor_roots': '/srv/reactor',
'spm_logfile': '/var/log/salt/spm',
# spm_repos_config also includes a .d/ directory
'spm_repos_config': '/etc/salt/spm.repos',
'spm_cache_dir': os.path.join(salt.syspaths.CACHE_DIR, 'spm'),
'spm_build_dir': '/srv/spm',
'spm_build_exclude': ['.git'],
'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
# <---- Salt master settings overridden by SPM ----------------------
}
VM_CONFIG_DEFAULTS = {
'default_include': 'cloud.profiles.d/*.conf',
}
PROVIDER_CONFIG_DEFAULTS = {
'default_include': 'cloud.providers.d/*.conf',
}
# <---- Salt Cloud Configuration Defaults ------------------------------------
def _validate_file_roots(opts):
'''
    If a key in the file_roots option has a value that is not a list (for
    example None), replace it with an empty list instead of erroring out
'''
if not isinstance(opts['file_roots'], dict):
log.warning('The file_roots parameter is not properly formatted,'
' using defaults')
return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
for saltenv, dirs in six.iteritems(opts['file_roots']):
if not isinstance(dirs, (list, tuple)):
opts['file_roots'][saltenv] = []
opts['file_roots'][saltenv] = _expand_glob_path(opts['file_roots'][saltenv])
return opts['file_roots']
def _expand_glob_path(file_roots):
'''
Applies shell globbing to a set of directories and returns
the expanded paths
'''
unglobbed_path = []
for path in file_roots:
if glob.has_magic(path):
unglobbed_path.extend(glob.glob(path))
else:
unglobbed_path.append(path)
return unglobbed_path
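# A small illustrative call (hypothetical paths), showing how _expand_glob_path()
# treats file_roots entries: plain paths pass through untouched, while globbed
# paths are replaced by whatever currently matches on disk.
#   _expand_glob_path(['/srv/salt', '/srv/formulas/*'])
#   # -> ['/srv/salt', '/srv/formulas/apache', '/srv/formulas/nginx', ...]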
def _validate_opts(opts):
'''
Check that all of the types of values passed into the config are
of the right types
'''
errors = []
err = ('Key {0} with value {1} has an invalid type of {2}, a {3} is '
'required for this value')
for key, val in six.iteritems(opts):
if key in VALID_OPTS:
if isinstance(VALID_OPTS[key](), list):
if isinstance(val, VALID_OPTS[key]):
continue
else:
errors.append(err.format(key, val, type(val), 'list'))
if isinstance(VALID_OPTS[key](), dict):
if isinstance(val, VALID_OPTS[key]):
continue
else:
errors.append(err.format(key, val, type(val), 'dict'))
else:
try:
VALID_OPTS[key](val)
except ValueError:
errors.append(
err.format(key, val, type(val), VALID_OPTS[key])
)
except TypeError:
errors.append(
err.format(key, val, type(val), VALID_OPTS[key])
)
# RAET on Windows uses 'win32file.CreateMailslot()' for IPC. Due to this,
# sock_dirs must start with '\\.\mailslot\' and not contain any colons.
# We don't expect the user to know this, so we will fix up their path for
# them if it isn't compliant.
if (salt.utils.is_windows() and opts.get('transport') == 'raet' and
'sock_dir' in opts and
not opts['sock_dir'].startswith('\\\\.\\mailslot\\')):
opts['sock_dir'] = (
'\\\\.\\mailslot\\' + opts['sock_dir'].replace(':', ''))
for error in errors:
log.warning(error)
if errors:
return False
return True
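# Illustrative calls (hypothetical values): coercible values pass validation,
# while a value of the wrong container type is logged as a warning and makes the
# function return False.
#   _validate_opts({'master_port': '4506'})   # -> True, int('4506') succeeds
#   _validate_opts({'file_roots': 'oops'})    # -> False, a dict is required here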
def _append_domain(opts):
'''
Append a domain to the existing id if it doesn't already exist
'''
# Domain already exists
if opts['id'].endswith(opts['append_domain']):
return opts['id']
# Trailing dot should mean an FQDN that is terminated, leave it alone.
if opts['id'].endswith('.'):
return opts['id']
return '{0[id]}.{0[append_domain]}'.format(opts)
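# A minimal example (hypothetical id and domain):
#   _append_domain({'id': 'web01', 'append_domain': 'example.com'})
#   # -> 'web01.example.com'
#   _append_domain({'id': 'web01.example.com', 'append_domain': 'example.com'})
#   # -> 'web01.example.com' (the domain is already there, so it is left alone)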
def _read_conf_file(path):
'''
Read in a config file from a given path and process it into a dictionary
'''
log.debug('Reading configuration from {0}'.format(path))
with salt.utils.fopen(path, 'r') as conf_file:
try:
conf_opts = yaml.safe_load(conf_file.read()) or {}
except yaml.YAMLError as err:
log.error(
'Error parsing configuration file: {0} - {1}'.format(path, err)
)
conf_opts = {}
# only interpret documents as a valid conf, not things like strings,
# which might have been caused by invalid yaml syntax
if not isinstance(conf_opts, dict):
log.error(
'Error parsing configuration file: {0} - conf should be a '
'document, not {1}.'.format(path, type(conf_opts))
)
conf_opts = {}
# allow using numeric ids: convert int to string
if 'id' in conf_opts:
conf_opts['id'] = str(conf_opts['id'])
for key, value in six.iteritems(conf_opts.copy()):
if isinstance(value, text_type) and six.PY2:
# We do not want unicode settings
conf_opts[key] = value.encode('utf-8')
return conf_opts
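# A short sketch of what _read_conf_file() returns, assuming a hypothetical
# /etc/salt/minion containing the two lines "master: salt01" and "id: 42":
#   _read_conf_file('/etc/salt/minion')
#   # -> {'master': 'salt01', 'id': '42'}   (numeric ids are converted to str)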
def _absolute_path(path, relative_to=None):
'''
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
    not an absolute path, we try to prepend ``relative_to`` to ``path`` and if
that path exists, return that one
'''
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path {0!r} converted to existing absolute path {1!r}'.format(
path, _abspath
)
)
return _abspath
return path
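# Illustrative behaviour (hypothetical paths): a relative path is only rewritten
# when the joined candidate exists as a file.
#   _absolute_path('/etc/salt/master')                 # absolute, returned as-is
#   _absolute_path('master', relative_to='/etc/salt')  # '/etc/salt/master' if that
#                                                      # file exists, else 'master'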
def load_config(path, env_var, default_path=None):
'''
Returns configuration dict from parsing either the file described by
``path`` or the environment variable described by ``env_var`` as YAML.
'''
if path is None:
# When the passed path is None, we just want the configuration
# defaults, not actually loading the whole configuration.
return {}
if default_path is None:
# This is most likely not being used from salt, i.e., could be salt-cloud
# or salt-api which have not yet migrated to the new default_path
# argument. Let's issue a warning message that the environ vars won't
# work.
import inspect
previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
log.warning(
'The function \'{0}()\' defined in {1!r} is not yet using the '
'new \'default_path\' argument to `salt.config.load_config()`. '
'As such, the {2!r} environment variable will be ignored'.format(
previous_frame.function, previous_frame.filename, env_var
)
)
# In this case, maintain old behavior
default_path = DEFAULT_MASTER_OPTS['conf_file']
# Default to the environment variable path, if it exists
env_path = os.environ.get(env_var, path)
if not env_path or not os.path.isfile(env_path):
env_path = path
# If non-default path from `-c`, use that over the env variable
if path != default_path:
env_path = path
path = env_path
# If the configuration file is missing, attempt to copy the template,
# after removing the first header line.
if not os.path.isfile(path):
template = '{0}.template'.format(path)
if os.path.isfile(template):
log.debug('Writing {0} based on {1}'.format(path, template))
with salt.utils.fopen(path, 'w') as out:
with salt.utils.fopen(template, 'r') as ifile:
ifile.readline() # skip first line
out.write(ifile.read())
if salt.utils.validate.path.is_readable(path):
opts = _read_conf_file(path)
opts['conf_file'] = path
return opts
log.debug('Missing configuration file: {0}'.format(path))
return {}
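# A minimal usage sketch (hypothetical paths), mirroring how the *_config()
# helpers below call this function: the environment variable may redirect the
# lookup, an explicit non-default path wins over it, and a missing file is seeded
# from an adjacent '<path>.template' when one exists.
#   overrides = load_config('/etc/salt/minion', 'SALT_MINION_CONFIG',
#                           DEFAULT_MINION_OPTS['conf_file'])
#   overrides.get('conf_file')   # the path that was actually read, if readable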
def include_config(include, orig_path, verbose):
'''
Parses extra configuration file(s) specified in an include list in the
main config file.
'''
# Protect against empty option
if not include:
return {}
if orig_path is None:
# When the passed path is None, we just want the configuration
# defaults, not actually loading the whole configuration.
return {}
if isinstance(include, str):
include = [include]
configuration = {}
for path in include:
# Allow for includes like ~/foo
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(orig_path), path)
        # Catch the situation where the user mistyped the path in the
        # configuration; also warn for an empty include directory (which might
        # be by design)
if len(glob.glob(path)) == 0:
if verbose:
log.warn(
'Warning parsing configuration file: "include" path/glob '
'{0!r} matches no files'.format(path)
)
for fn_ in sorted(glob.glob(path)):
log.debug('Including configuration from {0!r}'.format(fn_))
configuration.update(_read_conf_file(fn_))
return configuration
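# An illustrative call (hypothetical include glob): relative globs are resolved
# against the directory of the main config file and the matching files are merged
# in sorted order, with later files overriding earlier keys.
#   include_config('minion.d/*.conf', '/etc/salt/minion', verbose=False)
#   # -> merged dict of all options found under /etc/salt/minion.d/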
def prepend_root_dir(opts, path_options):
'''
Prepends the options that represent filesystem paths with value of the
'root_dir' option.
'''
root_dir = os.path.abspath(opts['root_dir'])
root_opt = opts['root_dir'].rstrip(os.sep)
for path_option in path_options:
if path_option in opts:
path = opts[path_option]
if path == root_opt or path.startswith(root_opt + os.sep):
path = path[len(root_opt):]
opts[path_option] = salt.utils.path_join(root_dir, path)
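# A small worked example (hypothetical opts), showing the in-place rewrite that
# prepend_root_dir() performs via salt.utils.path_join():
#   opts = {'root_dir': '/tmp/salt-root', 'pki_dir': '/etc/salt/pki/minion'}
#   prepend_root_dir(opts, ['pki_dir'])
#   opts['pki_dir']   # -> '/tmp/salt-root/etc/salt/pki/minion'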
def insert_system_path(opts, paths):
'''
Inserts path into python path taking into consideration 'root_dir' option.
'''
if isinstance(paths, str):
paths = [paths]
for path in paths:
path_options = {'path': path, 'root_dir': opts['root_dir']}
prepend_root_dir(path_options, path_options)
if (os.path.isdir(path_options['path'])
and path_options['path'] not in sys.path):
sys.path.insert(0, path_options['path'])
def minion_config(path,
env_var='SALT_MINION_CONFIG',
defaults=None,
cache_minion_id=False):
'''
Reads in the minion configuration file and sets up special options
This is useful for Minion-side operations, such as the
:py:class:`~salt.client.Caller` class, and manually running the loader
interface.
.. code-block:: python
import salt.client
minion_opts = salt.config.minion_config('/etc/salt/minion')
'''
if defaults is None:
defaults = DEFAULT_MINION_OPTS
if not os.environ.get(env_var, None):
# No valid setting was given using the configuration variable.
        # Let's see if SALT_CONFIG_DIR is of any use
salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
if salt_config_dir:
env_config_file_path = os.path.join(salt_config_dir, 'minion')
if salt_config_dir and os.path.isfile(env_config_file_path):
# We can get a configuration file using SALT_CONFIG_DIR, let's
# update the environment with this information
os.environ[env_var] = env_config_file_path
overrides = load_config(path, env_var, DEFAULT_MINION_OPTS['conf_file'])
default_include = overrides.get('default_include',
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False))
overrides.update(include_config(include, path, verbose=True))
opts = apply_minion_config(overrides, defaults, cache_minion_id=cache_minion_id)
_validate_opts(opts)
return opts
def syndic_config(master_config_path,
minion_config_path,
master_env_var='SALT_MASTER_CONFIG',
minion_env_var='SALT_MINION_CONFIG',
minion_defaults=None,
master_defaults=None):
if minion_defaults is None:
minion_defaults = DEFAULT_MINION_OPTS
if master_defaults is None:
master_defaults = DEFAULT_MASTER_OPTS
opts = {}
master_opts = master_config(
master_config_path, master_env_var, master_defaults
)
minion_opts = minion_config(
minion_config_path, minion_env_var, minion_defaults
)
    opts['_master_conf_file'] = master_opts['conf_file']
    opts['_minion_conf_file'] = minion_opts['conf_file']
opts.update(master_opts)
opts.update(minion_opts)
syndic_opts = {
'__role': 'syndic',
'root_dir': opts.get('root_dir', salt.syspaths.ROOT_DIR),
'pidfile': opts.get('syndic_pidfile', 'salt-syndic.pid'),
'log_file': opts.get('syndic_log_file', 'salt-syndic.log'),
'id': minion_opts['id'],
'pki_dir': minion_opts['pki_dir'],
'master': opts['syndic_master'],
'interface': master_opts['interface'],
'master_port': int(
opts.get(
# The user has explicitly defined the syndic master port
'syndic_master_port',
opts.get(
# No syndic_master_port, grab master_port from opts
'master_port',
# No master_opts, grab from the provided minion defaults
minion_defaults.get(
'master_port',
# Not on the provided minion defaults, load from the
# static minion defaults
DEFAULT_MINION_OPTS['master_port']
)
)
)
),
'user': opts.get('syndic_user', opts['user']),
'sock_dir': os.path.join(
opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
),
'cachedir': master_opts['cachedir'],
}
opts.update(syndic_opts)
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir'
]
for config_key in ('log_file', 'key_logfile'):
# If this is not a URI and instead a local path
if urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
return opts
# ----- Salt Cloud Configuration Functions ---------------------------------->
def apply_sdb(opts, sdb_opts=None):
'''
Recurse for sdb:// links for opts
'''
if sdb_opts is None:
sdb_opts = opts
if isinstance(sdb_opts, string_types) and sdb_opts.startswith('sdb://'):
return salt.utils.sdb.sdb_get(sdb_opts, opts)
elif isinstance(sdb_opts, dict):
for key, value in six.iteritems(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
elif isinstance(sdb_opts, list):
for key, value in enumerate(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
return sdb_opts
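# A hedged sketch of the sdb:// resolution above (the 'osenv' sdb profile is
# hypothetical): any string value starting with sdb:// is replaced by whatever
# the configured sdb backend returns, recursing into nested dicts and lists.
#   opts = {'password': 'sdb://osenv/DB_PASS', 'retries': 3}
#   apply_sdb(opts)
#   # -> {'password': '<value returned by the osenv backend>', 'retries': 3}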
def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
master_config_path=None, master_config=None,
providers_config_path=None, providers_config=None,
profiles_config_path=None, profiles_config=None):
'''
Read in the salt cloud config and return the dict
'''
# Load the cloud configuration
overrides = load_config(
path,
env_var,
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud')
)
if path:
config_dir = os.path.dirname(path)
else:
config_dir = salt.syspaths.CONFIG_DIR
if defaults is None:
defaults = CLOUD_CONFIG_DEFAULTS
# Load cloud configuration from any default or provided includes
default_include = overrides.get(
'default_include', defaults['default_include']
)
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
# The includes have been evaluated, let's see if master, providers and
# profiles configuration settings have been included and if not, set the
# default value
if 'master_config' in overrides and master_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
master_config_path = overrides['master_config']
elif 'master_config' not in overrides and not master_config \
and not master_config_path:
        # The configuration setting is not being provided in the main cloud
        # configuration file, so default to the 'master' file in the same
        # config directory
master_config_path = os.path.join(config_dir, 'master')
# Convert relative to absolute paths if necessary
master_config_path = _absolute_path(master_config_path, config_dir)
if 'providers_config' in overrides and providers_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
providers_config_path = overrides['providers_config']
elif 'providers_config' not in overrides and not providers_config \
and not providers_config_path:
providers_config_path = os.path.join(config_dir, 'cloud.providers')
# Convert relative to absolute paths if necessary
providers_config_path = _absolute_path(providers_config_path, config_dir)
if 'profiles_config' in overrides and profiles_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
profiles_config_path = overrides['profiles_config']
elif 'profiles_config' not in overrides and not profiles_config \
and not profiles_config_path:
profiles_config_path = os.path.join(config_dir, 'cloud.profiles')
# Convert relative to absolute paths if necessary
profiles_config_path = _absolute_path(profiles_config_path, config_dir)
# Prepare the deploy scripts search path
deploy_scripts_search_path = overrides.get(
'deploy_scripts_search_path',
defaults.get('deploy_scripts_search_path', 'cloud.deploy.d')
)
if isinstance(deploy_scripts_search_path, string_types):
deploy_scripts_search_path = [deploy_scripts_search_path]
    # Check the provided deploy scripts search path, removing any non-existent
# entries.
for idx, entry in enumerate(deploy_scripts_search_path[:]):
if not os.path.isabs(entry):
            # Let's see if prepending the main config file's directory turns the
            # entry into a proper directory
entry = os.path.join(os.path.dirname(path), entry)
if os.path.isdir(entry):
# Path exists, let's update the entry (its path might have been
# made absolute)
deploy_scripts_search_path[idx] = entry
continue
# It's not a directory? Remove it from the search path
deploy_scripts_search_path.pop(idx)
# Add the built-in scripts directory to the search path (last resort)
deploy_scripts_search_path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'cloud',
'deploy'
)
)
)
# Let's make the search path a tuple and add it to the overrides.
overrides.update(
deploy_scripts_search_path=tuple(deploy_scripts_search_path)
)
# Grab data from the 4 sources
# 1st - Master config
if master_config_path is not None and master_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `master_config` or `master_config_path`, not both.'
)
elif master_config_path is None and master_config is None:
master_config = salt.config.master_config(
overrides.get(
# use the value from the cloud config file
'master_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'master')
)
)
elif master_config_path is not None and master_config is None:
master_config = salt.config.master_config(master_config_path)
# 2nd - salt-cloud configuration which was loaded before so we could
# extract the master configuration file if needed.
    # Override the master configuration with the salt cloud config (the current overrides)
master_config.update(overrides)
# We now set the overridden master_config as the overrides
overrides = master_config
if providers_config_path is not None and providers_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `providers_config` or `providers_config_path`, '
'not both.'
)
elif providers_config_path is None and providers_config is None:
providers_config_path = overrides.get(
# use the value from the cloud config file
'providers_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
if profiles_config_path is not None and profiles_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `profiles_config` or `profiles_config_path`, not both.'
)
elif profiles_config_path is None and profiles_config is None:
profiles_config_path = overrides.get(
# use the value from the cloud config file
'profiles_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
)
# Apply the salt-cloud configuration
opts = apply_cloud_config(overrides, defaults)
# 3rd - Include Cloud Providers
if 'providers' in opts:
if providers_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Do not mix the old cloud providers configuration with '
                'passing a pre-configured providers configuration '
'dictionary.'
)
if providers_config_path is not None:
providers_confd = os.path.join(
os.path.dirname(providers_config_path),
'cloud.providers.d', '*'
)
if (os.path.isfile(providers_config_path) or
glob.glob(providers_confd)):
raise salt.exceptions.SaltCloudConfigError(
'Do not mix the old cloud providers configuration with '
'the new one. The providers configuration should now go '
'in the file `{0}` or a separate `*.conf` file within '
'`cloud.providers.d/` which is relative to `{0}`.'.format(
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
)
# No exception was raised? It's the old configuration alone
providers_config = opts['providers']
elif providers_config_path is not None:
        # Load from the configuration file, even if that file does not exist since
# it will be populated with defaults.
providers_config = cloud_providers_config(providers_config_path)
# Let's assign back the computed providers configuration
opts['providers'] = providers_config
# 4th - Include VM profiles config
if profiles_config is None:
# Load profiles configuration from the provided file
profiles_config = vm_profiles_config(profiles_config_path,
providers_config)
opts['profiles'] = profiles_config
# recurse opts for sdb configs
apply_sdb(opts)
# Return the final options
return opts
def apply_cloud_config(overrides, defaults=None):
'''
Return a cloud config
'''
if defaults is None:
defaults = CLOUD_CONFIG_DEFAULTS
config = defaults.copy()
if overrides:
config.update(overrides)
    # If the user defined providers in salt cloud's main configuration file, we
    # need to make sure the data is in the proper and expected format.
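    # Rough sketch of the reshaping performed below (the alias and driver
    # names are hypothetical):
    #
    #   {'my-ec2': [{'driver': 'ec2'}]}
    #
    # becomes a two-level mapping keyed by alias and then by driver:
    #
    #   {'my-ec2': {'ec2': {'driver': 'ec2', 'provider': 'my-ec2:ec2'}}}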
if 'providers' in config:
# Keep a copy of the defined providers
providers = config['providers'].copy()
# Reset the providers dictionary
config['providers'] = {}
# Populate the providers dictionary
for alias, details in six.iteritems(providers):
if isinstance(details, list):
for detail in details:
if 'provider' not in detail and 'driver' not in detail:
raise salt.exceptions.SaltCloudConfigError(
                            'The cloud provider alias {0!r} has an entry missing the required setting of either '
'\'provider\' or \'driver\'. Note that \'provider\' has been deprecated, so you should '
'use the \'driver\' notation.'.format(
alias
)
)
elif 'provider' in detail:
salt.utils.warn_until(
'Nitrogen',
'The term \'provider\' is being deprecated in favor of \'driver\'. Support for '
'\'provider\' will be removed in Salt Nitrogen. Please convert your cloud provider '
'configuration files to use \'driver\'.'
)
driver = detail['provider']
elif 'driver' in detail:
driver = detail['driver']
if ':' in driver:
# Weird, but...
alias, driver = driver.split(':')
if alias not in config['providers']:
config['providers'][alias] = {}
detail['provider'] = '{0}:{1}'.format(alias, driver)
config['providers'][alias][driver] = detail
elif isinstance(details, dict):
if 'provider' not in details and 'driver' not in details:
raise salt.exceptions.SaltCloudConfigError(
                        'The cloud provider alias {0!r} has an entry missing the required setting of either '
'\'provider\' or \'driver\''.format(
alias
)
)
elif 'provider' in details:
salt.utils.warn_until(
'Nitrogen',
'The term \'provider\' is being deprecated in favor of \'driver\' and support for '
                        '\'provider\' will be removed in Salt Nitrogen. Please convert your cloud provider '
'configuration files to use \'driver\'.'
)
driver = details['provider']
elif 'driver' in details:
driver = details['driver']
if ':' in driver:
# Weird, but...
alias, driver = driver.split(':')
if alias not in config['providers']:
config['providers'][alias] = {}
details['provider'] = '{0}:{1}'.format(alias, driver)
config['providers'][alias][driver] = details
# Migrate old configuration
config = old_to_new(config)
return config
def old_to_new(opts):
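    '''
    Migrate old-style ``<PROVIDER>.<setting>`` top-level options into the
    nested ``providers`` dictionary (keyed by the lower-cased provider name).
    '''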
providers = (
'AWS',
'CLOUDSTACK',
'DIGITAL_OCEAN',
'EC2',
'GOGRID',
'IBMSCE',
'JOYENT',
'LINODE',
'OPENSTACK',
        'PARALLELS',
        'RACKSPACE',
'SALTIFY'
)
for provider in providers:
provider_config = {}
for opt, val in opts.items():
if provider in opt:
value = val
name = opt.split('.', 1)[1]
provider_config[name] = value
lprovider = provider.lower()
if provider_config:
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in provider_config:
provider_config['driver'] = provider_config.pop('provider')
provider_config['provider'] = lprovider
opts.setdefault('providers', {})
# provider alias
opts['providers'][lprovider] = {}
# provider alias, provider driver
opts['providers'][lprovider][lprovider] = provider_config
return opts
def vm_profiles_config(path,
providers,
env_var='SALT_CLOUDVM_CONFIG',
defaults=None):
'''
Read in the salt cloud VM config file
'''
if defaults is None:
defaults = VM_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_vm_profiles_config(providers, overrides, defaults)
def apply_vm_profiles_config(providers, overrides, defaults=None):
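    '''
    Apply the loaded VM profiles configuration: attach each profile to its
    provider entry and resolve any ``extends`` references between profiles.
    '''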
if defaults is None:
defaults = VM_CONFIG_DEFAULTS
config = defaults.copy()
if overrides:
config.update(overrides)
vms = {}
for key, val in six.iteritems(config):
if key in ('conf_file', 'include', 'default_include', 'user'):
continue
if not isinstance(val, dict):
raise salt.exceptions.SaltCloudConfigError(
'The VM profiles configuration found in {0[conf_file]!r} is '
'not in the proper format'.format(config)
)
val['profile'] = key
vms[key] = val
# Is any VM profile extending data!?
for profile, details in six.iteritems(vms.copy()):
if 'extends' not in details:
if ':' in details['provider']:
alias, driver = details['provider'].split(':')
if alias not in providers or driver not in providers[alias]:
log.trace(
'The profile {0!r} is defining {1[provider]!r} as the '
'provider. Since there\'s no valid configuration for '
'that provider, the profile will be removed from the '
'available listing'.format(profile, details)
)
vms.pop(profile)
continue
if 'profiles' not in providers[alias][driver]:
providers[alias][driver]['profiles'] = {}
providers[alias][driver]['profiles'][profile] = details
if details['provider'] not in providers:
log.trace(
'The profile {0!r} is defining {1[provider]!r} as the '
'provider. Since there\'s no valid configuration for '
'that provider, the profile will be removed from the '
'available listing'.format(profile, details)
)
vms.pop(profile)
continue
driver = next(iter(list(providers[details['provider']].keys())))
providers[details['provider']][driver].setdefault(
'profiles', {}).update({profile: details})
details['provider'] = '{0[provider]}:{1}'.format(details, driver)
vms[profile] = details
continue
extends = details.pop('extends')
if extends not in vms:
log.error(
'The {0!r} profile is trying to extend data from {1!r} '
'though {1!r} is not defined in the salt profiles loaded '
'data. Not extending and removing from listing!'.format(
profile, extends
)
)
vms.pop(profile)
continue
extended = vms.get(extends).copy()
extended.pop('profile')
extended.update(details)
if ':' not in extended['provider']:
if extended['provider'] not in providers:
log.trace(
'The profile {0!r} is defining {1[provider]!r} as the '
'provider. Since there\'s no valid configuration for '
'that provider, the profile will be removed from the '
'available listing'.format(profile, extended)
)
vms.pop(profile)
continue
driver = next(iter(list(providers[extended['provider']].keys())))
providers[extended['provider']][driver].setdefault(
'profiles', {}).update({profile: extended})
extended['provider'] = '{0[provider]}:{1}'.format(extended, driver)
else:
alias, driver = extended['provider'].split(':')
if alias not in providers or driver not in providers[alias]:
log.trace(
'The profile {0!r} is defining {1[provider]!r} as the '
'provider. Since there\'s no valid configuration for '
'that provider, the profile will be removed from the '
'available listing'.format(profile, extended)
)
vms.pop(profile)
continue
providers[alias][driver].setdefault('profiles', {}).update(
{profile: extended}
)
# Update the profile's entry with the extended data
vms[profile] = extended
return vms
def cloud_providers_config(path,
env_var='SALT_CLOUD_PROVIDERS_CONFIG',
defaults=None):
'''
Read in the salt cloud providers configuration file
'''
if defaults is None:
defaults = PROVIDER_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_cloud_providers_config(overrides, defaults)
def apply_cloud_providers_config(overrides, defaults=None):
'''
Apply the loaded cloud providers configuration.
'''
if defaults is None:
defaults = PROVIDER_CONFIG_DEFAULTS
config = defaults.copy()
if overrides:
config.update(overrides)
# Is the user still using the old format in the new configuration file?!
for name, settings in six.iteritems(config.copy()):
if '.' in name:
log.warn(
'Please switch to the new providers configuration syntax'
)
# Let's help out and migrate the data
config = old_to_new(config)
# old_to_new will migrate the old data into the 'providers' key of
# the config dictionary. Let's map it correctly
for prov_name, prov_settings in six.iteritems(config.pop('providers')):
config[prov_name] = prov_settings
break
providers = {}
ext_count = 0
for key, val in six.iteritems(config):
if key in ('conf_file', 'include', 'default_include', 'user'):
continue
if not isinstance(val, (list, tuple)):
val = [val]
else:
# Need to check for duplicate cloud provider entries per "alias" or
# we won't be able to properly reference it.
handled_providers = set()
for details in val:
if 'provider' not in details and 'driver' not in details:
if 'extends' not in details:
log.error(
'Please check your cloud providers configuration. '
'There\'s no \'driver\', \'provider\', nor \'extends\' '
'definition referenced.'
)
continue
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in details:
details['driver'] = details.pop('provider')
if details['driver'] in handled_providers:
log.error(
'You can only have one entry per cloud provider. For '
'example, if you have a cloud provider configuration '
'section named, \'production\', you can only have a '
'single entry for EC2, Joyent, Openstack, and so '
'forth.'
)
raise salt.exceptions.SaltCloudConfigError(
'The cloud provider alias {0!r} has multiple entries '
'for the {1[driver]!r} driver.'.format(key, details)
)
handled_providers.add(details['driver'])
for entry in val:
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in entry:
salt.utils.warn_until(
'Nitrogen',
'The term \'provider\' is being deprecated in favor of \'driver\'. Support for '
'\'provider\' will be removed in Salt Nitrogen. Please convert your cloud provider '
'configuration files to use \'driver\'.'
)
entry['driver'] = entry.pop('provider')
if 'driver' not in entry:
entry['driver'] = '-only-extendable-{0}'.format(ext_count)
ext_count += 1
if key not in providers:
providers[key] = {}
provider = entry['driver']
if provider not in providers[key]:
providers[key][provider] = entry
# Is any provider extending data!?
while True:
keep_looping = False
for provider_alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries):
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in details:
details['driver'] = details.pop('provider')
# Set a holder for the defined profiles
providers[provider_alias][driver]['profiles'] = {}
if 'extends' not in details:
continue
extends = details.pop('extends')
if ':' in extends:
alias, provider = extends.split(':')
if alias not in providers:
raise salt.exceptions.SaltCloudConfigError(
'The {0!r} cloud provider entry in {1!r} is '
'trying to extend data from {2!r} though {2!r} '
'is not defined in the salt cloud providers '
'loaded data.'.format(
details['driver'],
provider_alias,
alias
)
)
if provider not in providers.get(alias):
raise salt.exceptions.SaltCloudConfigError(
'The {0!r} cloud provider entry in {1!r} is '
'trying to extend data from \'{2}:{3}\' though '
'{3!r} is not defined in {1!r}'.format(
details['driver'],
provider_alias,
alias,
provider
)
)
details['extends'] = '{0}:{1}'.format(alias, provider)
# change provider details '-only-extendable-' to extended provider name
details['driver'] = provider
elif providers.get(extends):
raise salt.exceptions.SaltCloudConfigError(
'The {0!r} cloud provider entry in {1!r} is trying '
'to extend from {2!r} and no provider was specified. '
'Not extending!'.format(
details['driver'], provider_alias, extends
)
)
elif extends not in providers:
raise salt.exceptions.SaltCloudConfigError(
'The {0!r} cloud provider entry in {1!r} is trying '
'to extend data from {2!r} though {2!r} is not '
'defined in the salt cloud providers loaded '
'data.'.format(
details['driver'], provider_alias, extends
)
)
else:
if driver in providers.get(extends):
details['extends'] = '{0}:{1}'.format(extends, driver)
elif '-only-extendable-' in providers.get(extends):
details['extends'] = '{0}:{1}'.format(
extends, '-only-extendable-{0}'.format(ext_count)
)
else:
# We're still not aware of what we're trying to extend
# from. Let's try on next iteration
details['extends'] = extends
keep_looping = True
if not keep_looping:
break
while True:
# Merge provided extends
keep_looping = False
for alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries):
if 'extends' not in details:
# Extends resolved or non existing, continue!
continue
if 'extends' in details['extends']:
# Since there's a nested extends, resolve this one in the
# next iteration
keep_looping = True
continue
# Let's get a reference to what we're supposed to extend
extends = details.pop('extends')
# Split the setting in (alias, driver)
ext_alias, ext_driver = extends.split(':')
# Grab a copy of what should be extended
extended = providers.get(ext_alias).get(ext_driver).copy()
# Merge the data to extend with the details
extended.update(details)
# Update the providers dictionary with the merged data
providers[alias][driver] = extended
# Update name of the driver, now that it's populated with extended information
if driver.startswith('-only-extendable-'):
providers[alias][ext_driver] = providers[alias][driver]
# Delete driver with old name to maintain dictionary size
del providers[alias][driver]
if not keep_looping:
break
    # Now clean up any provider entries that were only used as a data tree to
    # extend from
for provider_alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries.copy()):
if not driver.startswith('-only-extendable-'):
continue
log.info(
'There\'s at least one cloud driver under the {0!r} '
'cloud provider alias which does not have the required '
'\'driver\' setting. Removing it from the available '
'providers listing.'.format(
provider_alias
)
)
providers[provider_alias].pop(driver)
if not providers[provider_alias]:
providers.pop(provider_alias)
return providers
def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
'''
Search and return a setting in a known order:
1. In the virtual machine's configuration
2. In the virtual machine's profile configuration
3. In the virtual machine's provider configuration
4. In the salt cloud configuration if global searching is enabled
5. Return the provided default
'''
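    # Illustrative sketch of the lookup order (the opts/vm_ values below are
    # hypothetical, not taken from a real configuration):
    #
    #   opts = {'image': 'global-img',
    #           'profiles': {'web': {'image': 'profile-img'}},
    #           'providers': {'ec2-prod': {'ec2': {'image': 'provider-img'}}}}
    #   vm_ = {'profile': 'web', 'driver': 'ec2-prod:ec2', 'image': 'vm-img'}
    #
    #   get_cloud_config_value('image', vm_, opts)  # -> 'vm-img'
    #
    # because the VM configuration is consulted last and therefore wins.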
# As a last resort, return the default
value = default
if search_global is True and opts.get(name, None) is not None:
# The setting name exists in the cloud(global) configuration
value = deepcopy(opts[name])
if vm_ and name:
# Let's get the value from the profile, if present
if 'profile' in vm_ and vm_['profile'] is not None:
if name in opts['profiles'][vm_['profile']]:
if isinstance(value, dict):
value.update(opts['profiles'][vm_['profile']][name].copy())
else:
value = deepcopy(opts['profiles'][vm_['profile']][name])
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
# Let's get the value from the provider, if present.
if ':' in vm_['driver']:
# The provider is defined as <provider-alias>:<driver-name>
alias, driver = vm_['driver'].split(':')
if alias in opts['providers'] and \
driver in opts['providers'][alias]:
details = opts['providers'][alias][driver]
if name in details:
if isinstance(value, dict):
value.update(details[name].copy())
else:
value = deepcopy(details[name])
elif len(opts['providers'].get(vm_['driver'], ())) > 1:
# The provider is NOT defined as <provider-alias>:<driver-name>
# and there's more than one entry under the alias.
# WARN the user!!!!
log.error(
'The {0!r} cloud provider definition has more than one '
'entry. Your VM configuration should be specifying the '
'provider as \'driver: {0}:<driver-engine>\'. Since '
'it\'s not, we\'re returning the first definition which '
'might not be what you intended.'.format(
vm_['driver']
)
)
if vm_['driver'] in opts['providers']:
# There's only one driver defined for this provider. This is safe.
alias_defs = opts['providers'].get(vm_['driver'])
provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]
if name in provider_driver_defs:
# The setting name exists in the VM's provider configuration.
# Return it!
if isinstance(value, dict):
value.update(provider_driver_defs[name].copy())
else:
value = deepcopy(provider_driver_defs[name])
if name and vm_ and name in vm_:
# The setting name exists in VM configuration.
if isinstance(value, dict):
value.update(vm_[name].copy())
else:
value = deepcopy(vm_[name])
return value
def is_provider_configured(opts, provider, required_keys=()):
'''
Check and return the first matching and fully configured cloud provider
configuration.
'''
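    # Example (hypothetical alias/driver names):
    #
    #   is_provider_configured(opts, 'my-ec2:ec2', ('id', 'key'))
    #
    # returns the matching provider dictionary when both 'id' and 'key' are
    # set for that driver, and False otherwise.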
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
                # There's at least one required configuration key which is not set.
log.warning(
'The required {0!r} configuration setting is missing from '
'the {1!r} driver, which is configured under the {2!r} '
'alias.'.format(
key, provider, alias
)
)
return False
# If we reached this far, there's a properly configured provider. Return it!
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
continue
# If we reached this far, we have a matching provider, let's see if
# all required configuration keys are present and not None.
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
log.warning(
'The required {0!r} configuration setting is missing '
'from the {1!r} driver, which is configured under the '
'{2!r} alias.'.format(
key, provider, alias
)
)
skip_provider = True
break
if skip_provider:
continue
# If we reached this far, the provider included all required keys
return provider_details
# If we reached this point, the provider is not configured.
return False
def is_profile_configured(opts, provider, profile_name):
'''
Check if the requested profile contains the minimum required parameters for
a profile.
Required parameters include image, provider, and size keys.
.. versionadded:: Beryllium
'''
required_keys = ['image', 'provider', 'size']
alias, driver = provider.split(':')
provider_key = opts['providers'][alias][driver]
profile_key = opts['providers'][alias][driver]['profiles'][profile_name]
# Check if image and/or size are supplied in the provider config. If either
# one is present, remove it from the required_keys list.
for item in required_keys:
if item in provider_key:
required_keys.remove(item)
# Check for remaining required parameters in the profile config.
for item in required_keys:
if profile_key.get(item, None) is None:
# There's at least one required configuration item which is not set.
log.error(
'The required {0!r} configuration setting is missing from the '
'{1!r} profile, which is configured '
'under the {2!r} alias.'.format(
item, profile_name, alias
)
)
return False
return True
# <---- Salt Cloud Configuration Functions -----------------------------------
def _cache_id(minion_id, cache_file):
'''
Helper function, writes minion id to a cache file.
'''
try:
with salt.utils.fopen(cache_file, 'w') as idf:
idf.write(minion_id)
except (IOError, OSError) as exc:
log.error('Could not cache minion ID: {0}'.format(exc))
def get_id(opts, cache_minion_id=False):
'''
Guess the id of the minion.
If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
    If no minion id is configured, use multiple sources to find an FQDN.
    If no FQDN is found you may get an IP address.
Returns two values: the detected ID, and a boolean value noting whether or
not an IP address is being used for the ID.
'''
if opts['root_dir'] is None:
root_dir = salt.syspaths.ROOT_DIR
else:
root_dir = opts['root_dir']
config_dir = salt.syspaths.CONFIG_DIR
if config_dir.startswith(salt.syspaths.ROOT_DIR):
config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1]
# Check for cached minion ID
id_cache = os.path.join(root_dir,
config_dir.lstrip(os.path.sep),
'minion_id')
if opts.get('minion_id_caching', True):
try:
with salt.utils.fopen(id_cache) as idf:
name = idf.readline().strip()
bname = salt.utils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.to_str(bname.replace(codecs.BOM, '', 1))
if name:
log.debug('Using cached minion ID from {0}: {1}'.format(id_cache, name))
return name, False
except (IOError, OSError):
pass
if '__role' in opts and opts.get('__role') == 'minion':
log.debug('Guessing ID. The id can be explicitly set in {0}'
.format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
newid = salt.utils.network.generate_minion_id()
if '__role' in opts and opts.get('__role') == 'minion':
log.info('Found minion id from generate_minion_id(): {0}'.format(newid))
if cache_minion_id and opts.get('minion_id_caching', True):
_cache_id(newid, id_cache)
is_ipv4 = newid.count('.') == 3 and not any(c.isalpha() for c in newid)
return newid, is_ipv4
def apply_minion_config(overrides=None,
defaults=None,
cache_minion_id=False):
'''
Returns minion configurations dict.
'''
if defaults is None:
defaults = DEFAULT_MINION_OPTS
opts = defaults.copy()
opts['__role'] = 'minion'
if overrides:
opts.update(overrides)
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
# No ID provided. Will getfqdn save us?
using_ip_for_id = False
if opts['id'] is None:
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=cache_minion_id)
# it does not make sense to append a domain to an IP based id
if not using_ip_for_id and 'append_domain' in opts:
opts['id'] = _append_domain(opts)
# Enabling open mode requires that the value be set to True, and
# nothing else!
opts['open_mode'] = opts['open_mode'] is True
# set up the extension_modules location from the cachedir
opts['extension_modules'] = (
opts.get('extension_modules') or
os.path.join(opts['cachedir'], 'extmods')
)
# Set up the utils_dirs location from the extension_modules location
opts['utils_dirs'] = (
opts.get('utils_dirs') or
[os.path.join(opts['extension_modules'], 'utils')]
)
# Insert all 'utils_dirs' directories to the system path
insert_system_path(opts, opts['utils_dirs'])
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'sock_dir', 'extension_modules', 'pidfile',
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('log_file', 'key_logfile'):
if urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
# if there is no beacons option yet, add an empty beacons dict
if 'beacons' not in opts:
opts['beacons'] = {}
# if there is no schedule option yet, add an empty scheduler
if 'schedule' not in opts:
opts['schedule'] = {}
return opts
def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None):
'''
Reads in the master configuration file and sets up default options
This is useful for running the actual master daemon. For running
Master-side client interfaces that need the master opts see
:py:func:`salt.client.client_config`.
'''
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
if not os.environ.get(env_var, None):
# No valid setting was given using the configuration variable.
        # Let's see if SALT_CONFIG_DIR is of any use
salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
if salt_config_dir:
env_config_file_path = os.path.join(salt_config_dir, 'master')
if salt_config_dir and os.path.isfile(env_config_file_path):
# We can get a configuration file using SALT_CONFIG_DIR, let's
# update the environment with this information
os.environ[env_var] = env_config_file_path
overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file'])
default_include = overrides.get('default_include',
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False))
overrides.update(include_config(include, path, verbose=True))
opts = apply_master_config(overrides, defaults)
_validate_opts(opts)
# If 'nodegroups:' is uncommented in the master config file, and there are
# no nodegroups defined, opts['nodegroups'] will be None. Fix this by
# reverting this value to the default, as if 'nodegroups:' was commented
# out or not present.
if opts.get('nodegroups') is None:
opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
if opts.get('transport') == 'raet' and 'aes' in opts:
opts.pop('aes')
return opts
def apply_master_config(overrides=None, defaults=None):
'''
Returns master configurations dict.
'''
import salt.crypt
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
opts = defaults.copy()
opts['__role'] = 'master'
if overrides:
opts.update(overrides)
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
opts['extension_modules'] = (
opts.get('extension_modules') or
os.path.join(opts['cachedir'], 'extmods')
)
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
using_ip_for_id = False
append_master = False
if opts.get('id') is None:
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=None)
append_master = True
# it does not make sense to append a domain to an IP based id
if not using_ip_for_id and 'append_domain' in opts:
opts['id'] = _append_domain(opts)
if append_master:
opts['id'] += '_master'
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir',
'sqlite_queue_dir'
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('log_file', 'key_logfile'):
log_setting = opts.get(config_key, '')
if log_setting is None:
continue
if urlparse(log_setting).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
# Enabling open mode requires that the value be set to True, and
# nothing else!
opts['open_mode'] = opts['open_mode'] is True
opts['auto_accept'] = opts['auto_accept'] is True
opts['file_roots'] = _validate_file_roots(opts)
if opts['file_ignore_regex']:
# If file_ignore_regex was given, make sure it's wrapped in a list.
# Only keep valid regex entries for improved performance later on.
if isinstance(opts['file_ignore_regex'], str):
ignore_regex = [opts['file_ignore_regex']]
elif isinstance(opts['file_ignore_regex'], list):
ignore_regex = opts['file_ignore_regex']
opts['file_ignore_regex'] = []
for regex in ignore_regex:
try:
# Can't store compiled regex itself in opts (breaks
# serialization)
re.compile(regex)
opts['file_ignore_regex'].append(regex)
except Exception:
log.warning(
'Unable to parse file_ignore_regex. Skipping: {0}'.format(
regex
)
)
if opts['file_ignore_glob']:
# If file_ignore_glob was given, make sure it's wrapped in a list.
if isinstance(opts['file_ignore_glob'], str):
opts['file_ignore_glob'] = [opts['file_ignore_glob']]
# Let's make sure `worker_threads` does not drop below 3 which has proven
# to make `salt.modules.publish` not work under the test-suite.
if opts['worker_threads'] < 3 and opts.get('peer', None):
log.warning(
'The \'worker_threads\' setting on {0!r} cannot be lower than 3. '
'Resetting it to the default value of 3.'.format(
opts['conf_file']
)
)
opts['worker_threads'] = 3
opts.setdefault('pillar_source_merging_strategy', 'smart')
return opts
def client_config(path, env_var='SALT_CLIENT_CONFIG', defaults=None):
'''
Load Master configuration data
Usage:
.. code-block:: python
import salt.config
master_opts = salt.config.client_config('/etc/salt/master')
Returns a dictionary of the Salt Master configuration file with necessary
options needed to communicate with a locally-running Salt Master daemon.
This function searches for client specific configurations and adds them to
the data from the master configuration.
This is useful for master-side operations like
:py:class:`~salt.client.LocalClient`.
'''
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
xdg_dir = salt.utils.xdg.xdg_config_dir()
if os.path.isdir(xdg_dir):
client_config_dir = xdg_dir
saltrc_config_file = 'saltrc'
else:
client_config_dir = os.path.expanduser('~')
saltrc_config_file = '.saltrc'
# Get the token file path from the provided defaults. If not found, specify
# our own, sane, default
opts = {
'token_file': defaults.get(
'token_file',
os.path.join(client_config_dir, 'salt_token')
)
}
# Update options with the master configuration, either from the provided
# path, salt's defaults or provided defaults
opts.update(
master_config(path, defaults=defaults)
)
    # Update with the user's salt dot file or with the environment variable
saltrc_config = os.path.join(client_config_dir, saltrc_config_file)
opts.update(
load_config(
saltrc_config,
env_var,
saltrc_config
)
)
# Make sure we have a proper and absolute path to the token file
if 'token_file' in opts:
opts['token_file'] = os.path.abspath(
os.path.expanduser(
opts['token_file']
)
)
# If the token file exists, read and store the contained token
if os.path.isfile(opts['token_file']):
# Make sure token is still valid
expire = opts.get('token_expire', 43200)
if os.stat(opts['token_file']).st_mtime + expire > time.mktime(time.localtime()):
with salt.utils.fopen(opts['token_file']) as fp_:
opts['token'] = fp_.read().strip()
# On some platforms, like OpenBSD, 0.0.0.0 won't catch a master running on localhost
if opts['interface'] == '0.0.0.0':
opts['interface'] = '127.0.0.1'
# Make sure the master_uri is set
if 'master_uri' not in opts:
opts['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=salt.utils.ip_bracket(opts['interface']),
port=opts['ret_port']
)
# Return the client options
_validate_opts(opts)
return opts
def api_config(path):
'''
Read in the salt master config file and add additional configs that
need to be stubbed out for salt-api
'''
# Let's grab a copy of salt's master default opts
defaults = DEFAULT_MASTER_OPTS
# Let's override them with salt-api's required defaults
defaults.update(DEFAULT_API_OPTS)
return client_config(path, defaults=defaults)
def spm_config(path):
'''
Read in the salt master config file and add additional configs that
need to be stubbed out for spm
.. versionadded:: Beryllium
'''
# Let's grab a copy of salt's master default opts
defaults = DEFAULT_MASTER_OPTS
# Let's override them with spm's required defaults
defaults.update(DEFAULT_SPM_OPTS)
return client_config(path, env_var='SPM_CONFIG', defaults=defaults)
| 36.211052
| 122
| 0.614677
|
e444a04b3862c7db65f789882c88609e9e06c1f7
| 239
|
py
|
Python
|
pacote-download/revisao/fibonacci.py
|
wiliampianco/aulas_python
|
9addd095e6312ad2864d0d76b26fc2a9b1337db6
|
[
"MIT"
] | null | null | null |
pacote-download/revisao/fibonacci.py
|
wiliampianco/aulas_python
|
9addd095e6312ad2864d0d76b26fc2a9b1337db6
|
[
"MIT"
] | null | null | null |
pacote-download/revisao/fibonacci.py
|
wiliampianco/aulas_python
|
9addd095e6312ad2864d0d76b26fc2a9b1337db6
|
[
"MIT"
] | null | null | null |
# FIBONACCI SEQUENCE
n = int(input('Quantos termos: '))
cont = 3
t1 = 0
t2 = 1
t3 = t1 + t2
print(f'{t1} - {t2}', end=' - ')
while cont <= n:
t3 = t1 + t2
print(t3, end=' - ')
t1 = t2
t2 = t3
cont += 1
print('FIM')
| 15.933333
| 34
| 0.497908
|
142fdf6dbdca75030666201bb1e39c8d40660fee
| 2,135
|
py
|
Python
|
Homework_8/G_Branch_output/G_Branch_output.py
|
dimk00z/summer_yandex_algorithmic_course
|
36006bd7eea031764369becac84458427e3f848b
|
[
"MIT"
] | 8
|
2021-06-11T08:27:29.000Z
|
2022-01-25T09:20:37.000Z
|
Homework_8/G_Branch_output/G_Branch_output.py
|
dimk00z/summer_yandex_algorithmic_course
|
36006bd7eea031764369becac84458427e3f848b
|
[
"MIT"
] | null | null | null |
Homework_8/G_Branch_output/G_Branch_output.py
|
dimk00z/summer_yandex_algorithmic_course
|
36006bd7eea031764369becac84458427e3f848b
|
[
"MIT"
] | 3
|
2021-06-18T15:44:27.000Z
|
2021-09-09T18:38:12.000Z
|
class Node():
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
if data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def get_branches(self, root):
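        '''Collect the values of all nodes that have exactly one child.'''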
result = []
if root:
result = self.get_branches(root.left)
result = result+self.get_branches(root.right)
if (root.left is not None and root.right is None) or \
(root.left is None and root.right is not None):
result.append(root.data)
return result
def print_tree(self):
if self.left:
self.left.print_tree()
print(self.data)
if self.right:
self.right.print_tree()
def get_node_from_tree(self, node, data, depth=0):
depth += 1
if not data:
return None
if data == node.data:
return node, depth
if data < node.data:
if node.left:
return self.get_node_from_tree(node.left, data, depth)
if data > node.data:
if node.right:
return self.get_node_from_tree(node.right, data, depth)
def __str__(self):
return str(self.data)
def max_depth(node):
if node is None:
return 0
left_depth = max_depth(node.left)
right_depth = max_depth(node.right)
if left_depth > right_depth:
return left_depth+1
else:
return right_depth+1
with open('input.txt') as file:
nodes = tuple(map(int, file.readlines()[0].split()))
root = Node(nodes[0])
result = []
for node in nodes[1:-1]:
root.insert(node)
with open('output.txt', 'w') as file:
file.write('\n'.join(tuple(map(str, sorted(root.get_branches(root))))))
| 26.6875
| 75
| 0.534895
|
e4a48cc3a7ca90f7a32e7950df2fe524edd364fc
| 5,657
|
py
|
Python
|
heron/tools/explorer/src/python/main.py
|
wromansky/incubator-heron
|
9cfc5c4785ba0c14d26dfbbf0fe1d6b477dd38a5
|
[
"Apache-2.0"
] | null | null | null |
heron/tools/explorer/src/python/main.py
|
wromansky/incubator-heron
|
9cfc5c4785ba0c14d26dfbbf0fe1d6b477dd38a5
|
[
"Apache-2.0"
] | null | null | null |
heron/tools/explorer/src/python/main.py
|
wromansky/incubator-heron
|
9cfc5c4785ba0c14d26dfbbf0fe1d6b477dd38a5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' main.py '''
import logging
import os
import sys
from heron.common.src.python.utils import log
from heron.tools.common.src.python.clients import tracker
from heron.tools.common.src.python.utils import config
from heron.tools.explorer.src.python import logicalplan
from heron.tools.explorer.src.python import physicalplan
from heron.tools.explorer.src.python import topologies
import click
import requests
Log = log.Log
DEFAULT_TRACKER_URL = "http://127.0.0.1:8888"
try:
click_extra = {"max_content_width": os.get_terminal_size().columns}
except Exception:
click_extra = {}
def config_path_option():
return click.option(
"--config-path",
default=config.get_heron_conf_dir(),
show_default=True,
help="Path to heron's config clusters config directory"
)
def tracker_url_option():
return click.option(
"--tracker-url",
default=DEFAULT_TRACKER_URL,
show_default=True,
help="URL to a heron-tracker instance"
)
def show_version(_, __, value):
if value:
config.print_build_info()
sys.exit(0)
@click.group(context_settings=click_extra)
@click.option(
"--version",
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_version,
)
@click.option("-v", "--verbose", count=True)
def cli(verbose: int):
levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG,
}
log.configure(levels.get(verbose, logging.DEBUG))
@cli.command("clusters")
@tracker_url_option()
def cli_clusters(tracker_url: str):
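    """List the clusters known to the given tracker instance."""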
tracker.tracker_url = tracker_url
try:
clusters = tracker.get_clusters()
except requests.ConnectionError as e:
Log.error(f"Fail to connect to tracker: {e}")
sys.exit(1)
print("Available clusters:")
for cluster in clusters:
print(f" {cluster}")
@cli.command("topologies")
@tracker_url_option()
@click.argument("cre", metavar="CLUSTER[/ROLE[/ENV]]")
def cli_topologies(tracker_url: str, cre: str):
"""Show the topologies under the given CLUSTER[/ROLE[/ENV]]."""
tracker.tracker_url = tracker_url
topologies.run(
cre=cre,
)
@cli.command()
@config_path_option()
@tracker_url_option()
@click.option(
"--component-type",
type=click.Choice(["all", "spouts", "bolts"]),
default="all",
show_default=True,
)
@click.argument("cre", metavar="CLUSTER[/ROLE[/ENV]]")
@click.argument("topology")
def logical_plan(
config_path: str,
cre: str,
topology: str,
component_type: str,
tracker_url: str,
) -> None:
"""Show logical plan information for the given topology."""
tracker.tracker_url = tracker_url
cluster = config.get_heron_cluster(cre)
cluster_config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
cluster, role, environment = config.parse_cluster_role_env(cre, cluster_config_path)
logicalplan.run(
component_type=component_type,
cluster=cluster,
role=role,
environment=environment,
topology=topology,
)
@cli.group()
def physical_plan():
pass
@physical_plan.command()
@config_path_option()
@tracker_url_option()
@click.option("--component", help="name of component to limit metrics to")
@click.argument("cre", metavar="CLUSTER[/ROLE[/ENV]]")
@click.argument("topology")
def metrics(
config_path: str,
cre: str,
tracker_url: str,
topology: str,
component: str,
) -> None:
tracker.tracker_url = tracker_url
cluster = config.get_heron_cluster(cre)
cluster_config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
cluster, role, environment = config.parse_cluster_role_env(cre, cluster_config_path)
physicalplan.run_metrics(
cluster=cluster,
role=role,
environment=environment,
component=component,
topology=topology,
)
def validate_container_id(_, __, value):
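    """Validate the --id option: reject values <= 0 and shift to a 0-based index."""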
if value is None:
return None
if value <= 0:
        raise click.BadParameter("container id must be greater than zero")
return value - 1
@physical_plan.command()
@config_path_option()
@tracker_url_option()
@click.option("--id", "container_id", type=int, help="container id", callback=validate_container_id)
@click.argument("cre", metavar="CLUSTER[/ROLE[/ENV]]")
@click.argument("topology")
def containers(
config_path: str,
cre: str,
tracker_url: str,
topology: str,
container_id: int,
) -> None:
tracker.tracker_url = tracker_url
cluster = config.get_heron_cluster(cre)
cluster_config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
cluster, role, environment = config.parse_cluster_role_env(cre, cluster_config_path)
physicalplan.run_containers(
cluster=cluster,
role=role,
environment=environment,
container_id=container_id,
topology=topology,
)
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
| 27.730392
| 100
| 0.718932
|
f38c2e33b89d23617e78a99c7fa9719213174e69
| 169
|
py
|
Python
|
snake_api/urls.py
|
merry-snakes-on-a-plane/api
|
0152114d10873de70dfc2d42ba8fa88a7f007a83
|
[
"MIT"
] | null | null | null |
snake_api/urls.py
|
merry-snakes-on-a-plane/api
|
0152114d10873de70dfc2d42ba8fa88a7f007a83
|
[
"MIT"
] | 6
|
2020-02-12T02:31:11.000Z
|
2022-02-10T09:03:39.000Z
|
snake_api/urls.py
|
merry-snakes-on-a-plane/api
|
0152114d10873de70dfc2d42ba8fa88a7f007a83
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('snake_api.urls')),
]
| 21.125
| 42
| 0.704142
|
f44b02aa11cef7df859ca2cafc8504abacf857bf
| 915
|
py
|
Python
|
projects/MyAssembler/symtbl.py
|
HideyukiFUKUHARA/nand2tetris
|
e6d1b314ca59c86d948038f96d0994aae284acbe
|
[
"MIT"
] | null | null | null |
projects/MyAssembler/symtbl.py
|
HideyukiFUKUHARA/nand2tetris
|
e6d1b314ca59c86d948038f96d0994aae284acbe
|
[
"MIT"
] | null | null | null |
projects/MyAssembler/symtbl.py
|
HideyukiFUKUHARA/nand2tetris
|
e6d1b314ca59c86d948038f96d0994aae284acbe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
class symtbl():
def __init__(self):
self.dict = {}
self.dict.update({
'R0':0, 'R1':1, 'R2':2, 'R3':3, 'R4':4, 'R5':5, 'R6':6, 'R7':7,
'R8':8, 'R9':9, 'R10':10, 'R11':11, 'R12':12, 'R13':13, 'R14':14, 'R15':15,
'SP':0, 'LCL':1, 'ARG':2, 'THIS':3, 'THAT':4, 'SCREEN':0x4000, 'KBD':0x6000})
def addEntry(self, symbol, address):
self.dict.update({symbol:address})
#print 'addEntry : ', symbol, address
def contains(self, symbol):
tmp = symbol in self.dict
#print 'contains : ', tmp
return tmp
def getAddress(self, symbol):
tmp = self.dict[symbol]
#print 'getAddress : ', tmp
return tmp
def test(self):
self.addEntry('hoge', 100)
self.contains('hoge')
self.getAddress('hoge')
self.getAddress('KBD')
#s = symtbl()
#s.test()
| 26.911765
| 89
| 0.514754
|
f18be0553f5b7223d7b090b3c804a70c65de834c
| 223
|
py
|
Python
|
python/ray/experimental/test/test_opencl.py
|
BnJam/ray
|
c32658ff7550f8672923ca6d824d85c5efd82a5b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/experimental/test/test_opencl.py
|
BnJam/ray
|
c32658ff7550f8672923ca6d824d85c5efd82a5b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/experimental/test/test_opencl.py
|
BnJam/ray
|
c32658ff7550f8672923ca6d824d85c5efd82a5b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.experimental.opencl import OpenCL
c = OpenCL()
# test if opencl drivers are available
c.get_gpu_ids()
| 20.272727
| 42
| 0.825112
|
26a340a47d17aa01178948266a1b6f0805db9b66
| 2,269
|
py
|
Python
|
setup.py
|
unmonoqueteclea/djangorestframework-simplejwt
|
5fe64beee8e46ccb408265d3dc8499f4b0608f00
|
[
"MIT"
] | null | null | null |
setup.py
|
unmonoqueteclea/djangorestframework-simplejwt
|
5fe64beee8e46ccb408265d3dc8499f4b0608f00
|
[
"MIT"
] | null | null | null |
setup.py
|
unmonoqueteclea/djangorestframework-simplejwt
|
5fe64beee8e46ccb408265d3dc8499f4b0608f00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from pathlib import Path
from setuptools import (
setup,
find_packages,
)
extras_require = {
'test': [
'cryptography',
'pytest-cov',
'pytest-django',
'pytest-xdist',
'pytest',
'tox',
],
'lint': [
'flake8',
'pep8',
'isort',
],
'doc': [
'Sphinx>=1.6.5,<2',
'sphinx_rtd_theme>=0.1.9',
],
'dev': [
'pytest-watch',
'wheel',
'twine',
'ipython',
],
'python-jose': [
'python-jose==3.0.0',
],
}
extras_require['dev'] = (
extras_require['dev'] + # noqa: W504
extras_require['test'] + # noqa: W504
extras_require['lint'] + # noqa: W504
extras_require['doc'] + # noqa: W504
extras_require['python-jose']
)
setup(
name='djangorestframework_simplejwt',
use_scm_version={"version_scheme": "post-release"},
setup_requires=["setuptools_scm"],
url='https://github.com/jazzband/djangorestframework-simplejwt',
license='MIT',
description='A minimal JSON Web Token authentication plugin for Django REST Framework',
long_description=Path('README.rst').read_text(encoding='utf-8'),
author='David Sanders',
author_email='davesque@gmail.com',
install_requires=[
'django',
'djangorestframework',
'pyjwt>=1.7,<3',
],
python_requires='>=3.7',
extras_require=extras_require,
packages=find_packages(exclude=['tests', 'tests.*', 'licenses', 'requirements']),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
]
)
| 27.011905
| 91
| 0.568532
|
482dcc6f11526bb49296761ac855f96e500d059b
| 1,078
|
py
|
Python
|
app/config.py
|
dbaelipro/coordination-dispo-lits
|
d252df23f570f6a83a5c6af36056ed5f2cf02fe0
|
[
"MIT"
] | 2
|
2020-05-10T16:06:17.000Z
|
2020-05-10T16:10:03.000Z
|
app/config.py
|
dbaelipro/coordination-dispo-lits
|
d252df23f570f6a83a5c6af36056ed5f2cf02fe0
|
[
"MIT"
] | 8
|
2020-04-07T16:36:51.000Z
|
2020-04-26T10:34:26.000Z
|
app/config.py
|
dbaelipro/coordination-dispo-lits
|
d252df23f570f6a83a5c6af36056ed5f2cf02fe0
|
[
"MIT"
] | 4
|
2020-04-18T22:53:33.000Z
|
2021-06-04T09:31:13.000Z
|
import os, logging
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
DEBUG = os.getenv("ENVIRONEMENT") == "DEV"
HOST = os.getenv('HOST', '0.0.0.0')
PORT = int(os.getenv('PORT', '5000'))
SQLALCHEMY_RECORD_QUERIES = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY', 't1NP63m4wnBg6nyHYKfmc2TpCOGI4nss')
BASE_SERVER_PATH = os.environ.get("BASE_SERVER_PATH", 'http://127.0.0.1:5000/')
FIXTURES_LIST = os.environ.get("FIXTURES_LIST", 'users.json,resources.json')
POSTGRES = {
'host': os.getenv('DB_HOST'),
'user': os.getenv('POSTGRES_USER'),
'db': os.getenv('POSTGRES_DB'),
'pw': os.getenv('POSTGRES_PASSWORD'),
'port': os.getenv('POSTGRES_PORT', 5432),
}
SQLALCHEMY_DATABASE_URI = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES
logging.basicConfig(
filename=os.getenv('SERVICE_LOG', 'server.log'),
level=logging.DEBUG,
format='%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s',
datefmt='%d/%m/%y %H:%M:%S',
)
| 31.705882
| 92
| 0.686456
|
5889b8c4ad334ed23b411641817c8b9d5f40bae4
| 534
|
py
|
Python
|
game/ghdialogue/ghoffers.py
|
fmunoz-geo/gearhead-caramel
|
315835481d543420826439245be01460fe6dd81b
|
[
"Apache-2.0"
] | 74
|
2015-03-09T00:33:09.000Z
|
2022-02-25T20:28:27.000Z
|
game/ghdialogue/ghoffers.py
|
fmunoz-geo/gearhead-caramel
|
315835481d543420826439245be01460fe6dd81b
|
[
"Apache-2.0"
] | 108
|
2017-12-30T20:26:12.000Z
|
2021-01-16T12:37:00.000Z
|
game/ghdialogue/ghoffers.py
|
CartoonFan/gearhead-caramel
|
61995f382923695176ab7a65253f42e849e0c4d7
|
[
"Apache-2.0"
] | 61
|
2018-03-03T09:55:31.000Z
|
2022-03-18T17:28:33.000Z
|
from pbge.dialogue import Offer,Reply,Cue,ContextTag
from . import context
HELLO = Offer('[HELLO_PLUS]',context=ContextTag([context.HELLO,]))
BADHELLO = Offer('[HELLO_UNFAV]',context=ContextTag([context.UNFAVORABLE_HELLO,]))
ATTACK = Offer('[ATTACK]',context=ContextTag([context.ATTACK,]))
CHALLENGE = Offer('[CHALLENGE]',context=ContextTag([context.CHALLENGE,]))
GOODBYE = Offer('[GOODBYE]',context=ContextTag([context.GOODBYE,]), is_generic=True)
CHAT = Offer('[CHAT]',context=ContextTag([context.CHAT,]), is_generic=True)
| 29.666667
| 84
| 0.745318
|
affc9ad01aae70da6f3a447826f380c1bd84ce38
| 12,642
|
py
|
Python
|
grid_search_loop/tr5000_rect_N100/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | 1
|
2020-11-29T12:42:30.000Z
|
2020-11-29T12:42:30.000Z
|
grid_search_loop/tr5000_rect_N100/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
grid_search_loop/tr5000_rect_N100/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
# biased_regress = True
# normal_equations = True
dataset = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = True
## ESN parameters
N_def = [100] # Neurons
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # scaling
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # memory
exponent_def = 1.0 # sigmoid exponent
# Script name struct for report
#script_name = 'ESNtrainCV'
#name_struct_meta = "_N_scale_mem"
#name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N_def, scale_def, mem_def)
## Imports
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
#import matplotlib.pyplot as plt
import ESNtools
import GSK
#Needed for reporting
import platform
import time
# Fix boundary nans (replicate head/tail vals)
def nan_bounds(feats):
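    '''
    Pad leading/trailing runs of NaN in a 1-D array in place by replicating
    the first/last finite value, e.g. [nan, 1, 2, nan] -> [1, 1, 2, 2].
    '''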
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats)-1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
# print("pointer_left:", pointer_left)
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
# print("pointer_right:", pointer_right)
else:
val_right = feats[pointer_right]
feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=np.float)
fix_right = False
# nan interpolation
def nan_interpolate(feats):
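    '''
    Fill interior runs of NaN in place with a value derived from the finite
    samples on either side of the gap (roughly their average).
    '''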
nanidx = np.where(np.isnan(feats))[0]
nan_remain = len(nanidx)
nanid = 0
while nan_remain > 0:
nanpos = nanidx[nanid]
nanval = feats[nanpos-1]
nan_remain -= 1
nandim = 1
initpos = nanpos
# Check whether it extends
while nanpos+1 in nanidx:
nanpos += 1
nanid += 1
nan_remain -= 1
nandim += 1
# Average sides
if np.isfinite(feats[nanpos+1]):
nanval = 0.5 * (nanval + feats[nanpos+1])
# Single value average
if nandim == 1:
nanval = 0.5 * (nanval + feats[nanpos+1])
feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
nanpos += 1
nanid += 1
## Get sepsis patients
def get_sepsis_patients(sepsis_label, patient):
patient_sep = np.zeros(len(sepsis_label),dtype=np.int)
for i in range(n):
i_pat = np.where(patient==i)[0]
patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat])>0)*np.ones(len(i_pat), dtype=np.int)
patient_sep_idx = np.where(patient_sep!=0)[0]
patient_healthy_idx = np.where(patient_sep==0)[0]
return patient_sep, patient_sep_idx, patient_healthy_idx
## Create the feature matrix
features = []
patient = []
sepsis_label = []
dataloaded = False
## Read data
if not numpy_load:
## Folder and files
fnames = os.listdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
## read data
for i in range(n):
input_file = os.path.join(path, fnames[i])
if i ==0:
data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = ESNtools.read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
pat = i * np.ones((sep_lab.shape), dtype=np.int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del(features)
sepsis_label = np.concatenate(sepsis_label)
patient = np.concatenate(patient)
dataloaded = True
else:
npyfilename = "../npy/" + dataset + "_patient.npy"
patient = np.load(npyfilename)
print(npyfilename, " loaded")
npyfilename = "../npy/" + dataset + "_Y.npy"
sepsis_label = np.load(npyfilename)
print(npyfilename, " loaded")
#ADD nanfill tag
if nanfill:
dataset = dataset + "_nanfill"
if mm:
npyfilename = "../npy/" + dataset + "_mm.npy"
mm = False
print(npyfilename, '(mm) to be loaded')
else:
npyfilename = "../npy/" + dataset + ".npy"
print(npyfilename, '(not mm) to be loaded')
n = len(np.unique(patient))
print(n, ' files present')
dataloaded = True
feature_matrix = np.load(npyfilename)
##Flatten patient
patient = patient.flatten()
## Separate pointers
feature_phys = feature_matrix[:,:-6] ## Physiology
feature_demog = feature_matrix[:,-6:] ## Demographics
## Normalize mm(all) or std (sepsis, phys) vals, feature-based
if mm:
scaler = MinMaxScaler()
for i in range(n):
i_pat = np.where(patient==i)[0]
scaler.fit(feature_matrix[i_pat,:])
feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
## (Get sepsis patients)
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
scaler = StandardScaler()
scaler.fit(feature_phys[patient_healthy_idx,:])
feature_phys[:,:] = scaler.transform(feature_phys[:,:])
## nan to zero
if nan_to_zero:
feature_matrix[np.isnan(feature_matrix)]=0
print("Changed nan to 0")
## Septic groups stratify
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
#healthy_patient_list = np.unique(patient[patient_healthy_idx])
#sep_patient_list = np.unique(patient[patient_sep_idx])
## Nonlinear mapping function
sigmoid_exponent = exponent_def
slope = exponent_def
func = ESNtools.rectifier
#SFK
#skf = StratifiedKFold(n_splits=kfold_split)
#skf.get_n_splits(X)
#GSKF
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
script_name = 'ESNtrainCV'
name_struct_meta = "_N_scale_mem"
name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N, scale, mem)
## ESN Generation parameters
## Perform ESN feed
pat_shift = np.append(np.where(np.diff(patient)!=0)[0] + 1, [len(patient)])
pat_ipos = 0
print("pat_shift: ",len(pat_shift))
allocateESN = True
print('ESN: ')
if allocateESN:
ESN = np.ones((len(feature_matrix),N+1), dtype = np.float)
for i in range(len(pat_shift)):
print("Feeding ESN patient:", i)
ESN[pat_ipos:pat_shift[i],:] = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, slope)
pat_ipos = pat_shift[i]
else:
for i in range(len(pat_shift)):
if i == 0:
ESN = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, slope)
else:
ESN = np.vstack((ESN, ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, slope)))
pat_ipos = pat_shift[i]
del feature_matrix
## Divide in sets
X = ESN
y = sepsis_label
## KFold
results = []
target = []
kk = 0
#for train_index, test_index in skf.split(X,y): #Stratified KFold
for j in range(len(train_index)): #GSKF
X_train, X_test = X[train_index[j]], X[test_index[j]] #GSKF
y_train, y_test = y[train_index[j]], y[test_index[j]] #GSKF
patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
w = ESNtools.get_weights_biasedNE(X_train, y_train)
print("Start testing...", flush=True)
Y_pred = (np.matmul(X_test,w))
print(kk, ' realisation ')
print("auc: ", roc_auc_score(y_test, Y_pred))
kk +=1
target.append(y_test)
results.append(Y_pred)
## Evaluate results
results = np.concatenate(results)
target = np.concatenate(target)
auc = roc_auc_score(target,results)
print('auc: ', auc)
## Threshold study
th_i = np.min(results)
th_f = np.max(results)
## AUC-based CV
AUC_CV = True
if AUC_CV:
th_max = 0
f1 = 0
ACC = 0
Pr = 0
Re = 0
else:
th_steps = 1000
th_step = (th_f-th_i)/th_steps
thsum = 0
th = np.zeros((1000, 1), dtype = np.double)
f1 =np.zeros((1000, 1), dtype = np.double)
print("Threshold: Loop between ", th_i, th_i+th_step*th_steps)
for i, j in enumerate(np.arange(th_i, th_f, th_step)):
if j < th_steps:
th[i] = j
f1[i] = f1_score(target, results > th[i])
thsum = thsum + th[i]
if i%100 == 0:
print(i, th[i], f1[i])
if f1[i] < 0.001 and np.abs(thsum) > 0:
th = th[:i]
f1 = f1[:i]
break
## Max Threshold
th_max = th[np.argmax(f1)]
## Metrics
Pr = precision_score(target, results > th_max)
Re = recall_score(target, results > th_max)
ACC = accuracy_score(target, results > th_max)
auc = roc_auc_score(target, results)
f1 = f1_score(target, results > th_max)
user = platform.uname()[1] + '@' + platform.platform()
dir_path = os.path.dirname(os.path.realpath(__file__))
# write to report file
output_file = 'report_' + script_name + name_struct + '.txt'
with open(output_file, 'w') as f:
f.write(user + '\n')
f.write(dir_path + '\n')
f.write(__file__ + '\n')
f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
# f.write('Dataset: ' + path + '\n')
f.write('{:03d} \t N \n'.format(N))
f.write('{:1.3f} \t scale \n'.format(scale))
f.write('{:1.3f} \t mem \n'.format(mem))
f.write('%1.3f \t exp\n' % sigmoid_exponent)
f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f-th_i))
f.write('%2.4f \t th\n' % th_max)
f.write('%2.4f \t Pr\n' % Pr)
f.write('%2.4f \t Re\n' % Re)
f.write('%2.4f \t F1\n' % f1)
f.write('%2.4f \t ACC\n' % ACC)
f.write('%2.4f \t AUC\n' % auc)
print(user)
print(dir_path)
print(__file__)
print(time.strftime("%Y-%m-%d %H:%M"))
print('Dataset: ' + path)
print('N: {:03d}'.format(N))
print('scale: {:1.3f}'.format(scale))
print('mem: {:1.3f}'.format(mem))
print('exp: %1.3f' % sigmoid_exponent)
print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f-th_i))
print('th: %2.4f' % th_max)
print('Pr: %2.4f' % Pr)
print('Re: %2.4f' % Re)
print('F1: %2.4f' % f1)
print('ACC: %2.4f' % ACC)
print('AUC: %2.4f' % auc)
## Grid_search for loop
for i_N in range(len(N_def)):
N = N_def[i_N] # Neurons
## Random seed
np.random.seed(seed=0)
## Mask parameters
M = 2*np.random.rand(np.shape(feature_matrix)[1],N)-1
Mb = 2*np.random.rand(1,N)-1
for i_scale in range(len(scale_def)):
scale = scale_def[i_scale] # scaling factor
for i_mem in range(len(mem_def)):
mem = mem_def[i_mem] # memory
get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
| 31.763819
| 154
| 0.596583
|
9a44590935dd97ec0820a713a0018bf495ce2e49
| 1,329
|
py
|
Python
|
pytorchtools/ptschedulers/SchedulerSaveNetModule.py
|
Criscraft/pytorch_classification
|
d5772963e55ce218ae4719fb7f85604263aab65f
|
[
"MIT"
] | null | null | null |
pytorchtools/ptschedulers/SchedulerSaveNetModule.py
|
Criscraft/pytorch_classification
|
d5772963e55ce218ae4719fb7f85604263aab65f
|
[
"MIT"
] | null | null | null |
pytorchtools/ptschedulers/SchedulerSaveNetModule.py
|
Criscraft/pytorch_classification
|
d5772963e55ce218ae4719fb7f85604263aab65f
|
[
"MIT"
] | null | null | null |
import os
from ptschedulers.SchedulerBaseModule import SchedulerBaseModule
class SchedulerSaveNetModule(SchedulerBaseModule):
def __init__(self,
active_epochs=set(),
active_at_end=True,
network='network_main',
filename_base='model'):
super().__init__()
self.active_epochs = active_epochs
self.active_at_end = active_at_end
self.network = network
self.filename_base = filename_base
if not isinstance(self.active_epochs, set):
self.active_epochs = set(self.active_epochs)
def step(self, config, shared_modules, scheduler_modules_ordered_dict, epoch, force_step=False):
if epoch not in self.active_epochs and not force_step:
return None
model = shared_modules[self.network]
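        # e.g. with the default filename_base this saves <config['log_path']>/model_0012.pt at epoch 12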
model.save(os.path.join(config['log_path'], "".join([self.filename_base, '_{:>04}'.format(epoch), '.pt'])))
print('successfully saved model to disk')
def finalize(self, config, shared_modules, scheduler_modules_ordered_dict):
if self.active_at_end:
if 'epochs' in config:
epoch = config['epochs']
else:
epoch = -1
self.step(config, shared_modules, scheduler_modules_ordered_dict, epoch, force_step=True)
| 34.076923
| 115
| 0.650865
|
152bc5c4a6dd53d90e5c40e10497d6b0c18b95c8
| 599
|
py
|
Python
|
code.py
|
IwanHoogland/Automated-Diplomacy
|
55e7310c2447a44f01b7220ce6e998bcf50d9de9
|
[
"MIT"
] | null | null | null |
code.py
|
IwanHoogland/Automated-Diplomacy
|
55e7310c2447a44f01b7220ce6e998bcf50d9de9
|
[
"MIT"
] | null | null | null |
code.py
|
IwanHoogland/Automated-Diplomacy
|
55e7310c2447a44f01b7220ce6e998bcf50d9de9
|
[
"MIT"
] | null | null | null |
import random
import math
class Board:
def __init__(self):
pass
#add the state of the board at the start
def update_board(self):
pass
# Get the orders
# Do all the stuff here to update the board
# print the new map and a list of order results
class Order:
    # with a subclass definitive order?
    pass
class Country:
    def __init__(self, neighbour_list, coast_list):
        self.neighbours = neighbour_list
        self.coasts = coast_list
    def get_neighbours(self):
        return self.neighbours
class Map:
    # with method update map?
    pass
| 17.114286
| 55
| 0.652755
|
e858d841da2f30dafcbb4832bf496b6d29d991fc
| 629
|
py
|
Python
|
clinlog/logging/helper.py
|
salpreh/clinlog
|
7a63632e4be47ee3c55a2fc602d0f29ad9156fd4
|
[
"MIT"
] | null | null | null |
clinlog/logging/helper.py
|
salpreh/clinlog
|
7a63632e4be47ee3c55a2fc602d0f29ad9156fd4
|
[
"MIT"
] | null | null | null |
clinlog/logging/helper.py
|
salpreh/clinlog
|
7a63632e4be47ee3c55a2fc602d0f29ad9156fd4
|
[
"MIT"
] | null | null | null |
import logging
from .clinlog_handler import ClinlogHandler
LOGGER_NAME = 'clinlog'
def get_logger(log_level=None):
create_logger(log_level)
return logging.getLogger(LOGGER_NAME)
def create_logger(log_level=None):
if not log_level:
log_level = logging.DEBUG
logger = logging.getLogger(LOGGER_NAME)
if _has_handler(logger):
return
cl_handler = ClinlogHandler()
logger.setLevel(log_level)
logger.addHandler(cl_handler)
def _has_handler(logger):
for handler in logger.handlers:
if isinstance(handler, ClinlogHandler):
return True
return False
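# Illustrative usage (import path assumed from this package layout):
#   from clinlog.logging.helper import get_logger
#   log = get_logger()
#   log.info("message routed through ClinlogHandler")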
| 17.971429
| 47
| 0.712242
|
dcc1865fd3304c24e7fc8d25465bbff0cee13f52
| 846
|
py
|
Python
|
setup.py
|
DtjiAppDev/LEVEL_UP_RPG
|
74cf8b098b4101003bfb0ad81b00e9d5984722fd
|
[
"MIT"
] | null | null | null |
setup.py
|
DtjiAppDev/LEVEL_UP_RPG
|
74cf8b098b4101003bfb0ad81b00e9d5984722fd
|
[
"MIT"
] | null | null | null |
setup.py
|
DtjiAppDev/LEVEL_UP_RPG
|
74cf8b098b4101003bfb0ad81b00e9d5984722fd
|
[
"MIT"
] | null | null | null |
from setuptools import setup
def readme():
with open("README.md", "r") as fh:
long_description = fh.read()
return long_description
setup(
name='Level_Up_RPG',
version='1',
packages=['Level_Up_RPG'],
url='https://github.com/DtjiAppDev/LEVEL_UP_RPG',
license='MIT',
author='Dtji AppDev',
author_email='dtjiappdev1999@gmail.com',
description='This package contains implementation of the game "Level_Up_RPG".',
long_description=readme(),
long_description_content_type="text/markdown",
include_package_data=True,
install_requires=[],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7"
],
entry_points={
"console_scripts": [
"Level_Up_RPG=Level_Up_RPG.level_up_rpg:main",
]
}
)
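# The console_scripts entry point above means that, once the package is installed
# (e.g. `pip install .`), a `Level_Up_RPG` command should invoke Level_Up_RPG.level_up_rpg.main().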
| 25.636364
| 83
| 0.64539
|
660e905df6d069bb6276abb419d10589e51322af
| 2,795
|
py
|
Python
|
1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/decorators/Transmission Capa (Power).py
|
tamas-borbath/METIS
|
cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036
|
[
"CC-BY-4.0"
] | 1
|
2021-12-07T09:05:48.000Z
|
2021-12-07T09:05:48.000Z
|
1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/decorators/Transmission Capa (Power).py
|
tamas-borbath/METIS_data_and_scripts
|
cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036
|
[
"CC-BY-4.0"
] | null | null | null |
1._metis_models_and_data_2019-12-16b/METIS source code/Indicators/decorators/Transmission Capa (Power).py
|
tamas-borbath/METIS_data_and_scripts
|
cf3dc0f0f96ccb6ad72ddf15ba7e31ddc6339036
|
[
"CC-BY-4.0"
] | null | null | null |
########################################################
# Copyright (c) 2015-2017 by European Commission. #
# All Rights Reserved. #
########################################################
"""
This view shows, for each power interconnection and each direction, the corresponding transmission capacity.
There is one arrow per direction whose size is related to the transmission capacity.
"""
from com.artelys.platform.config import Constantes
execfile(Constantes.REP_SCRIPTS+'/decoratorUtils.py')
#Need to be redefined for each decorator
transmissionUnit = "W"
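# computeValue below returns the installed capacity of a power interconnection converted
# from MW to W (via MW_TO_W_CONVERSION), either averaged over the horizon
# (AGGREGATE_MODE_ALL) or taken at the current time cursor.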
def computeValue(context, testCase, energy, results, asset, timeCursor, aggregateMode):
#Need to be redefined for each decorator
scopeId = context.getScope().getId()
capacity = getInstalledCapacity(context, scopeId, asset, testCase, results)
if capacity == None or asset.getEnergyForParameter(ENERGY_DELIVERY, context) != energy:
return None
else:
if aggregateMode == AGGREGATE_MODE_ALL:
if not (isinstance(capacity, float) or isinstance(capacity, int)):
capacity = capacity.getMeanValue()
else:
if not (isinstance(capacity, float) or isinstance(capacity, int)):
capacity = capacity.getValueAt(timeCursor)
value = capacity * MW_TO_W_CONVERSION
return value
def configurePhysicalAssetRenderable(renderable, physicalAsset):
context = CONTEXTS.getObjectContext(physicalAsset)
assetName = physicalAsset.getName()
if physicalAsset.getType() in TRANSMISSION_TYPES:
if (not renderable.hasVariable(assetName)):
renderable.setVariable(assetName, "ok")
# setAssetBasicRenderable(context, renderable, asset, displayInactiveParameters=False, displayResults=False)
LAYER.setVariable(assetName + "p", ARROW_INITIAL_POSITION)
updatePhysicalAssetRenderable(renderable, physicalAsset, timeCursor=context.getIndex(0), aggregateMode=AGGREGATE_MODE_ALL)
def updatePhysicalAssetRenderable(renderable, physicalAsset, timeCursor, aggregateMode):
if physicalAsset.getType() in TRANSMISSION_TYPES:
context = CONTEXTS.getObjectContext(physicalAsset)
energy = Crystal.getEnergy(context, ELECTRICITY)
if energy == None:
return
scope = context.getScope().getId()
testCase = TEST_CASE
results = Crystal.getComputationResults(context, scope, testCase)
updateTransmissionAssetRenderable(renderable, context, testCase, energy, results, physicalAsset, timeCursor, aggregateMode)
def configureFinancialAssetRenderable(renderable, financialAsset):
pass
def configureDeliveryPointRenderable(renderable, deliveryPoint):
setDeliveryPointShape(renderable, deliveryPoint)
# setDeliveryPointLabel(renderable, deliveryPoint)
pass
def configureZoneRenderable(renderable, zone):
setZoneShape(renderable,zone)
setZoneLabel(renderable,zone)
pass
| 39.366197
| 125
| 0.753131
|
805a6a7f36fc3fe1121e7ea9c2bdab93931137e1
| 1,819
|
py
|
Python
|
rovers/fastdownward/experiments/issue627/v2.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | 1
|
2021-09-09T13:03:02.000Z
|
2021-09-09T13:03:02.000Z
|
rovers/fastdownward/experiments/issue627/v2.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | null | null | null |
rovers/fastdownward/experiments/issue627/v2.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
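# Sets up comparison and relative-scatter reports contrasting the issue627-base and
# issue627-v2 revisions on memory and total_time for several A* configurations.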
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()]))']),
IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']),
}
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='florian.pommerening@unibas.ch',
)
exp.add_comparison_table_step()
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-base-%s" % config.nick,
"issue627-v2-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v2_memory_%s.png' % config.nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-base-%s" % config.nick,
"issue627-v2-%s" % config.nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_base_v2_total_time_%s.png' % config.nick
)
exp()
main(revisions=['issue627-base', 'issue627-v2'])
| 33.072727
| 106
| 0.57669
|
f4f48e1f05e2c0fe5536e64b7a1ec29f1395e8ce
| 4,312
|
py
|
Python
|
benchmark/startQiskit_QC2561.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2561.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2561.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
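# Note: the XOR string is returned bit-reversed, e.g. bitwise_xor("110", "000") == "011".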
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
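# e.g. bitwise_dot("111", "101") == "0", since 1*1 + 1*0 + 1*1 = 2 and 2 % 2 == 0.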
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.rx(-3.1101767270538954,input_qubit[1]) # number=27
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=26
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[3]) # number=29
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.cx(input_qubit[3],input_qubit[0]) # number=23
prog.z(input_qubit[3]) # number=24
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[3],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[3]) # number=28
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=30
prog.x(input_qubit[1]) # number=31
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2561.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.344262
| 165
| 0.65654
|
d0743689458373d774e94eaf64f69605b3a02652
| 6,764
|
py
|
Python
|
mitty/lib/reads.py
|
latticelabs/Mitty-deprecated-
|
bf192600233daea8a42a1f995c60b1e883cbaaba
|
[
"Apache-2.0"
] | 1
|
2015-10-21T23:43:34.000Z
|
2015-10-21T23:43:34.000Z
|
mitty/lib/reads.py
|
latticelabs/Mitty
|
bf192600233daea8a42a1f995c60b1e883cbaaba
|
[
"Apache-2.0"
] | null | null | null |
mitty/lib/reads.py
|
latticelabs/Mitty
|
bf192600233daea8a42a1f995c60b1e883cbaaba
|
[
"Apache-2.0"
] | null | null | null |
"""Infrastructure to handle reads"""
import re
import numpy as np
def expand_sequence(ref_seq, ml, chrom, copy):
"""Apply the variants in the list and return the consensus sequence
:param ref_seq: reference sequence
:param ml: master list of variants
:param chrom: [(no, het) ...] list of variants pointing to master list
no -> index on ml,
het -> 0 = copy 0, 1 = copy 1, 2 = homozygous
:param copy: 0/1 which copy of the chromosome
  :return alt_seq, variant_waypoint, var_loc_alt_coordinates
          alt_seq -> consensus sequence
          variant_waypoint -> recarray used by roll_cigars to determine POS and CIGAR strings for reads,
                              with the fields
                                pos_ref: position on ref seq
                                pos_alt: position on alt seq
                                delta: +k for insertions of length k, -k for deletions of length k, 0 for SNPs
          var_loc_alt_coordinates -> array of variant locations in the expanded sequence coordinates
"""
pos_ref, pos_alt = 0, 0 # Current position in ref and alt coordinates
alt_fragments = []
variant_waypoint = [(-1, -1, 0)] # The start waypoint, guaranteed to be to the left and out of range of any base and not an insertion or deletion
var_loc_alt_coordinates = []
pos, stop, ref, alt = ml.variants['pos'], ml.variants['stop'], ml.variants['ref'], ml.variants['alt']
c_iter = chrom.__iter__()
variant = next(c_iter, None)
while variant is not None:
if pos_ref < pos[variant[0]]:
alt_fragments += [ref_seq[pos_ref:pos[variant[0]]]]
pos_alt += pos[variant[0]] - pos_ref
pos_ref = pos[variant[0]]
else:
if pos_ref == pos[variant[0]]:
var_loc_alt_coordinates += [pos_alt]
if variant[1] == 2 or variant[1] == copy: # The variant applies to this chromosome copy
alt_fragments += [alt[variant[0]]]
dl = len(alt[variant[0]]) - len(ref[variant[0]])
if dl == 0:
variant_waypoint += [(pos_ref, pos_alt, dl)] # For SNPs the waypoints don't move, so ref/alt stay same
else:
variant_waypoint += [(pos_ref + len(ref[variant[0]]), pos_alt + 1, dl)]
# We shift the waypoint position to be the first non-match base
pos_alt += len(alt[variant[0]])
else: # Skip this variant
alt_fragments += [ref[variant[0]]]
pos_alt += len(ref[variant[0]])
pos_ref = stop[variant[0]]
#pos_ref += len(ref[variant[0]])
variant = next(c_iter, None)
alt_fragments += [ref_seq[pos_ref:]]
final_delta = variant_waypoint[-1][0] - variant_waypoint[-1][1]
if final_delta > 0:
final_ref, final_alt = 2 ** 31 - 1, 2 ** 31 - 1 - final_delta
else:
final_ref, final_alt = 2 ** 31 - 1 - final_delta, 2 ** 31 - 1
variant_waypoint += [(final_ref, final_alt, -1)]
# The end waypoint, guaranteed to be to the right of any base and not a SNP, and maintaining the delta
dtype = [('ref_pos', 'i4'), ('alt_pos', 'i4'), ('delta', 'i4')]
return ''.join(alt_fragments), np.rec.fromrecords(variant_waypoint, dtype=dtype), var_loc_alt_coordinates
# TODO: make this code more elegant
# TODO: write up algorithm. See if we can refactor it
# TODO: revise algorithm to handle reads in the middle of insertions properly
# TODO: Add appropriate tests for longer insertions POS and CIGAR
def roll_cigars(variant_waypoints, reads):
"""Use beacons to generate POS and CIGAR strings for reads
:param variant_waypoints: recarray, as returned by expand_sequence (pos_ref, pos_alt, delta)
:param reads: numpy recarray with fields 'start_a' and 'read_len'
:return: pos, cigars
- list of POS values
- list of CIGAR strings same length as reads array
"""
v_r, v_a, dl = variant_waypoints['ref_pos'], variant_waypoints['alt_pos'], variant_waypoints['delta']
rd_st, rd_len = reads['start_a'], reads['read_len']
waypoint_right = np.searchsorted(v_a, rd_st)
cigars = []
pos = []
for rd_no in range(reads.shape[0]):
r_start = rd_st[rd_no]
r_stop = rd_st[rd_no] + rd_len[rd_no] - 1
n = waypoint_right[rd_no]
m = min(v_a[n], r_stop + 1) - r_start
cigar = str(m) + '=' if m > 0 else ''
this_pos = v_r[n - 1] + r_start - v_a[n - 1] # In our system the previous waypoint has the delta between ref and alt
if dl[n - 1] > 0: # The previous variant was an insertion, possibility for soft-clipping
this_pos = v_r[n - 1] + max(r_start - v_a[n - 1] - dl[n - 1], 0) # POS is nearest match base (and INS has been shifted one base to the right for waypoint)
sc = v_a[n - 1] + dl[n - 1] - r_start
if sc > 0: # Yes, a soft-clip
cigar = str(sc) + 'S' + (str(m - sc) + '=' if m - sc > 0 else '')
if r_start == v_a[n] and dl[n] < 0: # Corner case: we are starting at a deletion
this_pos = v_r[n] + r_start - v_a[n]
pos += [this_pos] # POS is 0 indexed as per BAM spec
while r_stop >= v_a[n]:
if dl[n] == 0: # SNP
m = min(v_a[n+1], r_stop + 1) - v_a[n] - 1
cigar += '1X' + (str(m) + '=' if m > 0 else '')
elif dl[n] > 0: # INS
if r_start == v_a[n]: # Corner case: we are starting right at an insertion
if v_a[n] + dl[n] - 1 >= r_stop: # Completely inside insertion
cigar = str(rd_len[rd_no]) + 'S'
else: # Soft-clipped, then with Ms
m = min(v_a[n + 1], r_stop + 1) - r_start
sc = min(dl[n], r_stop + 1 - r_start)
cigar = str(sc) + 'S' + str(m - sc) + '='
elif v_a[n] + dl[n] - 1 < r_stop: # Insert has anchor on other side
m = min(v_a[n + 1], r_stop + 1) - v_a[n] - dl[n]
cigar += str(dl[n]) + 'I' + (str(m) + '=' if m > 0 else '')
else: # Handle soft-clip at end
cigar += str(r_stop - v_a[n] + 1) + 'S'
else: # DEL
m = min(v_a[n + 1], r_stop + 1) - v_a[n]
if r_start != v_a[n]:
cigar += str(-dl[n]) + 'D' + str(m) + '='
else:
cigar += str(m) + '=' # Corner case: if we start right at a deletion
n += 1
cigars += [cigar]
return pos, cigars
cig_re = re.compile(r'(\d+?)M(\d+?)M')
def old_style_cigar(cigar):
"""Given an extended cigar ('X's for mismatch and '=' for match) convert it into an old style cigar with 'M's and no
'X's
:param cigar:
:return: old style cigar
"""
cigar = cigar.replace('=', 'M') # First replace all '='s with 'M's
if 'X' in cigar: # More complicated - need to merge Xs and =s into Ms as needed
cigar = cigar.replace('X', 'M')
# Now we need to collapse the 'M's as needed
cigar, n = cig_re.subn(lambda m: str(int(m.group(1)) + int(m.group(2))) + 'M', cigar)
while n:
cigar, n = cig_re.subn(lambda m: str(int(m.group(1)) + int(m.group(2))) + 'M', cigar)
return cigar
| 45.395973
| 161
| 0.610585
|
d520eabe3c1182f87ebdc5e2b6b4c9ec355d5061
| 6,806
|
py
|
Python
|
frbhostdm.py
|
NihanPol/DM_IGM
|
7b44aa0281030081b4340e5146e8c31503692846
|
[
"BSD-3-Clause"
] | 1
|
2019-03-21T05:54:14.000Z
|
2019-03-21T05:54:14.000Z
|
frbhostdm.py
|
NihanPol/DM_IGM
|
7b44aa0281030081b4340e5146e8c31503692846
|
[
"BSD-3-Clause"
] | null | null | null |
frbhostdm.py
|
NihanPol/DM_IGM
|
7b44aa0281030081b4340e5146e8c31503692846
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import numpy as np
import glob
import pdf
PDF = pdf.PDF
import scipy.interpolate as interpolate
import NE2001
import argparse
import sys
import pyymw16
# ==================================================
# From a pdf, get the mode and innter 95% confidence intervals
# ==================================================
def getbounds(p):
p.resample(1000)# enforce linear bins for proper cdf-ing
mode,b,c,d = p.likelihood_evaluator(median=False,pm=False)
xm,xp = p.likelihood_evaluator(values=[0.025,0.975])
return mode,mode-xm,xp-mode
# ==================================================
# Other utility functions
# ==================================================
def gaussian(x,b,c):
a = 1.0/np.sqrt(2*np.pi*c**2) #normalized, though pdf will renormalize anyway
return a*np.exp(-0.5*(x-b)**2 / c**2)
def normalize_area(array,x=None,full=False):
if x is None:
x=np.arange(len(array))
area=np.trapz(array,x=x)
if full:
return array/area,area
return array/area
# ==================================================
# Primary function
# ==================================================
def calchostDM(z,dm,dmerr,mwarg,weighted=True,evaluate=True,NEDIR="NE2001/bin.NE2001", ymw = False):
"""
Calculates the PDF for the host DM of the FRB
z : Redshift
dm : DM value [pc cm^-3]
dmerr : DM error [pc cm^-3]
mwarg : Either:
: tuple of (Galactic longitude, Galactic latitude) in [deg], or
: Milky Way DM [pc cm^-3]
weighted : use matter weighted distribution if true
evaluate : if true, returns the DM value with minus and plus errors
ymw : Set flag to true to use YMW16 model instead of NE2001
-----
Returns
* A PDF instance of the redshift if evaluate = False
    * The host DM value, the minus uncertainty, the plus uncertainty
"""
# First copy "z" into "redshift" variable to avoid clashes below
redshift = z
if weighted:
filenames = sorted(glob.glob("slices_weighted/*npz"))
else:
filenames = sorted(glob.glob("slices_unweighted/*npz"))
'''
--------------------------------------------------
Read in integrated redshift slices
--------------------------------------------------
'''
zs = np.zeros(len(filenames))
pdfs = []
for i,filename in enumerate(filenames):
npz = np.load(filename)
z = filename.split("/")[-1][2:-4]
zs[i] = float(z)
if weighted:
bins = 10**npz['arr_0'] #lengths the same
else:
bins = npz['arr_0'] #length 201
bins = (bins[1:] + bins[:-1])/2.0
counts = npz['arr_1']
p = interpolate.interp1d(bins,counts,fill_value=0,kind='linear',bounds_error=False)
pdfs.append(p)
'''
--------------------------------------------------
Make 2D DM,z histogram and relevant internal functions, then grab the 1D PDF
--------------------------------------------------
'''
dms = np.arange(0,15000,1.0)
histogram2d = np.zeros((len(zs),len(dms))) #f(DM|z)
for i,z in enumerate(zs):
p = pdfs[i]
histogram2d[i] = p(dms)
histogram2d /= np.max(histogram2d)
spline = interpolate.RectBivariateSpline(zs,dms,histogram2d) # use RectBivariateSpline instead of interp2d for speed over grid
def get_fDMz(z):
retval = spline(z,dms)[0]
return normalize_area(retval)
igm_pdf = PDF(dms,get_fDMz(redshift),truncate=False)
'''
--------------------------------------------------
Determine Milky Way contribution to the DM
--------------------------------------------------
'''
if isinstance(mwarg,(tuple,list)):
gl, gb = mwarg
if ymw:
mwdm = pyymw16.dist_to_dm(gl, gb, 50e3)[0].value
else:
mwdm = NE2001.NE2001(gl,gb,50.0,ndir=-1,DIR=NEDIR)['output']['DM']
else:
mwdm = mwarg
xs = np.arange(0,200,0.01)
mw_pdf = PDF(xs,gaussian(xs,mwdm,0.2*mwdm)) #20% error
'''
--------------------------------------------------
Determine CGM contribution to the DM
--------------------------------------------------
'''
xs = np.arange(0,200,0.01) #same as above
cgm_pdf = PDF(xs,gaussian(xs,65.0,15.0)) #50-80 pc cm^-3 range
'''
--------------------------------------------------
Determine observed DM PDF
--------------------------------------------------
'''
xs = np.arange(dm-6*dmerr,dm+6*dmerr,0.01)
obs_pdf = PDF(xs,gaussian(xs,dm,dmerr))
host_pdf = obs_pdf - mw_pdf - cgm_pdf - igm_pdf
# Clean up the PDF
x = host_pdf.x
inds = np.where(x>=0)
host_pdf.x = host_pdf.x[inds]
host_pdf.y = host_pdf.y[inds]
host_pdf.run()
if evaluate:
y,ym,yp = getbounds(host_pdf)
return y,ym,yp
else:
return host_pdf
if __name__ == "__main__":
# FRB 121102 test, note that this is not the same asymmetric distribution as used in the paper
#print(calchostDM(0.192,557.0,5.0,(174.95,-0.225138),weighted=True,NEDIR='/home/michael/Research/Programs/NE2001/bin.NE2001/'))
parser = argparse.ArgumentParser(description="FRB Host DM Estimator")
parser.add_argument('--NE2001',dest='NEDIR',default='NE2001/bin.NE2001/',help="Path pointing to the NE2001 bin.NE2001/ directory location")
    parser.add_argument('--unweighted',dest="unweighted",action="store_true",default=False,help="Use uniform weighted distribution (versus matter weighted distribution)")
#Additional flag for YMW16 model
parser.add_argument('--ymw', dest='ymw',action='store_true',default=False,help="Use YMW model instead of NE2001")
parser.add_argument('--mwdm',type=float,default=None,help="Milky Way DM [pc cm^-3]")
parser.add_argument('z', action="store",type=float,help="Redshift")
parser.add_argument('dm', action="store",type=float,help="Observed DM [pc cm^-3]")
parser.add_argument('dmerr', action="store",type=float,help="Error on observed DM [pc cm^-3]")
parser.add_argument('galcoord',action="store",type=float,nargs=argparse.REMAINDER,help="If --mwdm is not provided, two values separated by a space: Galactic latitude and Galactic longitude [deg]")
results = parser.parse_args()
weighted = not results.unweighted
ymw = results.ymw
if results.mwdm is not None: # Do not use NE2001
print("DM=%0.3f-%0.3f+%0.3f pc cm^-3"%(calchostDM(results.z,results.dm,results.dmerr,results.mwdm,weighted=weighted,NEDIR=results.NEDIR, ymw = ymw)))
else:
gb, gl = results.galcoord
print("DM=%0.3f-%0.3f+%0.3f pc cm^-3"%(calchostDM(results.z,results.dm,results.dmerr,(gb,gl),weighted=weighted,NEDIR=results.NEDIR, ymw = ymw)))
| 35.821053
| 200
| 0.563767
|
8c512511b3c247d66d35a4641789d2cd07ba4a00
| 83,892
|
py
|
Python
|
tests/block_tools.py
|
MinerGreggy/taco-blockchain
|
4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d
|
[
"Apache-2.0"
] | 18
|
2021-07-14T09:56:37.000Z
|
2022-02-09T04:32:58.000Z
|
tests/block_tools.py
|
MinerGreggy/taco-blockchain
|
4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d
|
[
"Apache-2.0"
] | 9
|
2021-07-14T15:48:28.000Z
|
2021-10-10T02:32:59.000Z
|
tests/block_tools.py
|
MinerGreggy/taco-blockchain
|
4f8e9c9d7df2181c81b247e35bdb5ad4ff99b19d
|
[
"Apache-2.0"
] | 10
|
2021-07-18T03:22:43.000Z
|
2022-03-15T08:40:06.000Z
|
import asyncio
import copy
import logging
import os
import random
import shutil
import ssl
import sys
import tempfile
import time
from argparse import Namespace
from dataclasses import replace
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Any
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from chiabip158 import PyBIP158
from taco.cmds.init_funcs import create_all_ssl, create_default_taco_config
from taco.daemon.keychain_proxy import connect_to_keychain_and_validate, wrap_local_keychain
from taco.full_node.bundle_tools import (
best_solution_generator_from_template,
detect_potential_template_generator,
simple_solution_generator,
)
from taco.util.errors import Err
from taco.full_node.generator import setup_generator_args
from taco.full_node.mempool_check_conditions import GENERATOR_MOD
from taco.plotting.create_plots import create_plots, PlotKeys
from taco.consensus.block_creation import unfinished_block_to_full_block
from taco.consensus.block_record import BlockRecord
from taco.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from taco.consensus.blockchain_interface import BlockchainInterface
from taco.consensus.coinbase import create_puzzlehash_for_pk, create_farmer_coin, create_pool_coin
from taco.consensus.condition_costs import ConditionCost
from taco.consensus.constants import ConsensusConstants
from taco.consensus.default_constants import DEFAULT_CONSTANTS
from taco.consensus.deficit import calculate_deficit
from taco.consensus.full_block_to_block_record import block_to_block_record
from taco.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from taco.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from taco.consensus.vdf_info_computation import get_signage_point_vdf_info
from taco.full_node.signage_point import SignagePoint
from taco.plotting.util import PlotsRefreshParameter, PlotRefreshResult, PlotRefreshEvents, parse_plot_info
from taco.plotting.manager import PlotManager
from taco.server.server import ssl_context_for_server
from taco.types.blockchain_format.classgroup import ClassgroupElement
from taco.types.blockchain_format.coin import Coin, hash_coin_list
from taco.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from taco.types.blockchain_format.pool_target import PoolTarget
from taco.types.blockchain_format.program import INFINITE_COST
from taco.types.blockchain_format.proof_of_space import ProofOfSpace
from taco.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from taco.types.blockchain_format.sized_bytes import bytes32
from taco.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from taco.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from taco.types.blockchain_format.vdf import VDFInfo, VDFProof
from taco.types.end_of_slot_bundle import EndOfSubSlotBundle
from taco.types.full_block import FullBlock
from taco.types.generator_types import BlockGenerator, CompressorArg
from taco.types.spend_bundle import SpendBundle
from taco.types.unfinished_block import UnfinishedBlock
from taco.util.bech32m import encode_puzzle_hash
from taco.util.block_cache import BlockCache
from taco.util.condition_tools import ConditionOpcode
from taco.util.config import load_config, save_config
from taco.util.hash import std_hash
from taco.util.ints import uint8, uint16, uint32, uint64, uint128
from taco.util.keychain import Keychain, bytes_to_mnemonic
from taco.util.merkle_set import MerkleSet
from taco.util.prev_transaction_block import get_prev_transaction_block
from taco.util.path import mkdir
from taco.util.vdf_prover import get_vdf_info_and_proof
from tests.time_out_assert import time_out_assert
from tests.wallet_tools import WalletTool
from taco.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_local_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
)
test_constants = DEFAULT_CONSTANTS.replace(
**{
"MIN_PLOT_SIZE": 18,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 12,
"DIFFICULTY_STARTING": 2 ** 12,
"DISCRIMINANT_SIZE_BITS": 16,
"SUB_EPOCH_BLOCKS": 170,
"WEIGHT_PROOF_THRESHOLD": 2,
"WEIGHT_PROOF_RECENT_BLOCKS": 380,
"DIFFICULTY_CONSTANT_FACTOR": 33554432,
"NUM_SPS_SUB_SLOT": 16, # Must be a power of 2
"MAX_SUB_SLOT_BLOCKS": 50,
"EPOCH_BLOCKS": 340,
"BLOCKS_CACHE_SIZE": 340 + 3 * 50, # Coordinate with the above values
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"SUB_SLOT_ITERS_STARTING": 2 ** 10, # Must be a multiple of 64
"NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
"MAX_FUTURE_TIME": 3600
* 24
* 10, # Allows creating blockchains with timestamps up to 10 days in the future, for testing
"COST_PER_BYTE": 1337,
"MEMPOOL_BLOCK_BUFFER": 6,
"NETWORK_TYPE": 1,
}
)
log = logging.getLogger(__name__)
class BlockTools:
"""
Tools to generate blocks for testing.
"""
def __init__(
self,
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
):
self._tempdir = None
if root_path is None:
self._tempdir = tempfile.TemporaryDirectory()
root_path = Path(self._tempdir.name)
self.root_path = root_path
self.local_keychain = keychain
create_default_taco_config(root_path)
create_all_ssl(root_path)
self.local_sk_cache: Dict[bytes32, Tuple[PrivateKey, Any]] = {}
self._config = load_config(self.root_path, "config.yaml")
self._config["logging"]["log_stdout"] = True
self._config["selected_network"] = "testnet0"
for service in ["harvester", "farmer", "full_node", "wallet", "introducer", "timelord", "pool"]:
self._config[service]["selected_network"] = "testnet0"
save_config(self.root_path, "config.yaml", self._config)
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = constants.replace_str_to_bytes(**overrides)
if const_dict is not None:
updated_constants = updated_constants.replace(**const_dict)
self.constants = updated_constants
self.refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(batch_size=2)
self.plot_dir: Path = get_plot_dir()
self.temp_dir: Path = get_plot_tmp_dir()
mkdir(self.plot_dir)
mkdir(self.temp_dir)
self.expected_plots: Dict[bytes32, Path] = {}
self.total_result = PlotRefreshResult()
def test_callback(event: PlotRefreshEvents, update_result: PlotRefreshResult):
assert update_result.duration < 5
if event == PlotRefreshEvents.started:
self.total_result = PlotRefreshResult()
if event == PlotRefreshEvents.batch_processed:
self.total_result.loaded += update_result.loaded
self.total_result.processed += update_result.processed
self.total_result.duration += update_result.duration
assert update_result.remaining == len(self.expected_plots) - self.total_result.processed
assert update_result.loaded <= self.refresh_parameter.batch_size
if event == PlotRefreshEvents.done:
assert self.total_result.loaded == update_result.loaded
assert self.total_result.processed == update_result.processed
assert self.total_result.duration == update_result.duration
assert update_result.remaining == 0
assert len(self.plot_manager.plots) == len(self.expected_plots)
self.plot_manager: PlotManager = PlotManager(
self.root_path, refresh_parameter=self.refresh_parameter, refresh_callback=test_callback
)
async def setup_keys(self):
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(
self.root_path, log, user="testing-1.8.0", service="taco-testing-1.8.0"
)
await self.keychain_proxy.delete_all_keys()
self.farmer_master_sk_entropy = std_hash(b"block_tools farmer key")
self.pool_master_sk_entropy = std_hash(b"block_tools pool key")
self.farmer_master_sk = await self.keychain_proxy.add_private_key(
bytes_to_mnemonic(self.farmer_master_sk_entropy), ""
)
self.pool_master_sk = await self.keychain_proxy.add_private_key(
bytes_to_mnemonic(self.pool_master_sk_entropy), ""
)
self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()
self.farmer_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
)
self.pool_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
)
self.all_sks: List[PrivateKey] = [sk for sk, _ in await self.keychain_proxy.get_all_private_keys()]
self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks]
self.farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks]
if len(self.pool_pubkeys) == 0 or len(self.farmer_pubkeys) == 0:
raise RuntimeError("Keys not generated. Run `taco generate keys`")
self.plot_manager.set_public_keys(self.farmer_pubkeys, self.pool_pubkeys)
def change_config(self, new_config: Dict):
self._config = new_config
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = self.constants.replace_str_to_bytes(**overrides)
self.constants = updated_constants
save_config(self.root_path, "config.yaml", self._config)
async def setup_plots(self):
assert len(self.expected_plots) == 0
# OG Plots
for i in range(15):
await self.new_plot()
# Pool Plots
for i in range(5):
await self.new_plot(self.pool_ph)
await self.refresh_plots()
async def new_plot(
self, pool_contract_puzzle_hash: Optional[bytes32] = None, path: Path = None
) -> Optional[bytes32]:
final_dir = self.plot_dir
if path is not None:
final_dir = path
mkdir(final_dir)
args = Namespace()
# Can't go much lower than 20, since plots start having no solutions and more buggy
args.size = 22
# Uses many plots for testing, in order to guarantee proofs of space at every height
args.num = 1
args.buffer = 100
args.tmp_dir = self.temp_dir
args.tmp2_dir = final_dir
args.final_dir = final_dir
args.plotid = None
args.memo = None
args.buckets = 0
args.stripe_size = 2000
args.num_threads = 0
args.nobitfield = False
args.exclude_final_dir = False
args.list_duplicates = False
try:
pool_pk: Optional[G1Element] = None
pool_address: Optional[str] = None
if pool_contract_puzzle_hash is None:
pool_pk = self.pool_pk
else:
pool_address = encode_puzzle_hash(pool_contract_puzzle_hash, "xtx")
keys = PlotKeys(self.farmer_pk, pool_pk, pool_address)
# No datetime in the filename, to get deterministic filenames and not re-plot
created, existed = await create_plots(
args,
keys,
self.root_path,
use_datetime=False,
test_private_keys=[AugSchemeMPL.key_gen(std_hash(len(self.expected_plots).to_bytes(2, "big")))],
)
plot_id_new: Optional[bytes32] = None
path_new: Path = Path()
if len(created):
assert len(existed) == 0
plot_id_new, path_new = list(created.items())[0]
if len(existed):
assert len(created) == 0
plot_id_new, path_new = list(existed.items())[0]
self.expected_plots[plot_id_new] = path_new
# create_plots() updates plot_directories. Ensure we refresh our config to reflect the updated value
self._config["harvester"]["plot_directories"] = load_config(self.root_path, "config.yaml", "harvester")[
"plot_directories"
]
return plot_id_new
except KeyboardInterrupt:
shutil.rmtree(self.plot_dir, ignore_errors=True)
sys.exit(1)
async def refresh_plots(self):
self.plot_manager.refresh_parameter.batch_size = (
4 if len(self.expected_plots) % 3 == 0 else 3
) # Make sure we have at least some batches + a remainder
self.plot_manager.trigger_refresh()
assert self.plot_manager.needs_refresh()
self.plot_manager.start_refreshing()
await time_out_assert(10, self.plot_manager.needs_refresh, value=False)
self.plot_manager.stop_refreshing()
assert not self.plot_manager.needs_refresh()
async def delete_plot(self, plot_id: bytes32):
assert plot_id in self.expected_plots
self.expected_plots[plot_id].unlink()
del self.expected_plots[plot_id]
await self.refresh_plots()
@property
def config(self) -> Dict:
return copy.deepcopy(self._config)
def get_daemon_ssl_context(self) -> Optional[ssl.SSLContext]:
crt_path = self.root_path / self.config["daemon_ssl"]["private_crt"]
key_path = self.root_path / self.config["daemon_ssl"]["private_key"]
ca_cert_path = self.root_path / self.config["private_ssl_ca"]["crt"]
ca_key_path = self.root_path / self.config["private_ssl_ca"]["key"]
return ssl_context_for_server(ca_cert_path, ca_key_path, crt_path, key_path)
def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element:
"""
Returns the plot signature of the header data.
"""
farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
for plot_info in self.plot_manager.plots.values():
if plot_pk == plot_info.plot_public_key:
# Look up local_sk from plot to save locked memory
if plot_info.prover.get_id() in self.local_sk_cache:
local_master_sk, pool_pk_or_ph = self.local_sk_cache[plot_info.prover.get_id()]
else:
pool_pk_or_ph, _, local_master_sk = parse_plot_info(plot_info.prover.get_memo())
self.local_sk_cache[plot_info.prover.get_id()] = (local_master_sk, pool_pk_or_ph)
if isinstance(pool_pk_or_ph, G1Element):
include_taproot = False
else:
assert isinstance(pool_pk_or_ph, bytes32)
include_taproot = True
local_sk = master_sk_to_local_sk(local_master_sk)
agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_sk.get_g1(), include_taproot)
assert agg_pk == plot_pk
harv_share = AugSchemeMPL.sign(local_sk, m, agg_pk)
farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_sk.get_g1(), farmer_sk.get_g1())
taproot_share: G2Element = AugSchemeMPL.sign(taproot_sk, m, agg_pk)
else:
taproot_share = G2Element()
return AugSchemeMPL.aggregate([harv_share, farm_share, taproot_share])
raise ValueError(f"Do not have key {plot_pk}")
def get_pool_key_signature(self, pool_target: PoolTarget, pool_pk: Optional[G1Element]) -> Optional[G2Element]:
# Returns the pool signature for the corresponding pk. If no pk is provided, returns None.
if pool_pk is None:
return None
for sk in self.all_sks:
sk_child = master_sk_to_pool_sk(sk)
if sk_child.get_g1() == pool_pk:
return AugSchemeMPL.sign(sk_child, bytes(pool_target))
raise ValueError(f"Do not have key {pool_pk}")
def get_farmer_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.farmer_master_sk)
def get_pool_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.pool_master_sk)
def get_consecutive_blocks(
self,
num_blocks: int,
block_list_input: List[FullBlock] = None,
farmer_reward_puzzle_hash: Optional[bytes32] = None,
pool_reward_puzzle_hash: Optional[bytes32] = None,
transaction_data: Optional[SpendBundle] = None,
seed: bytes = b"",
time_per_block: Optional[float] = None,
force_overflow: bool = False,
skip_slots: int = 0, # Force at least this number of empty slots before the first SB
guarantee_transaction_block: bool = False, # Force that this block must be a tx block
normalized_to_identity_cc_eos: bool = False,
normalized_to_identity_icc_eos: bool = False,
normalized_to_identity_cc_sp: bool = False,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
previous_generator: CompressorArg = None,
genesis_timestamp: Optional[uint64] = None,
force_plot_id: Optional[bytes32] = None,
) -> List[FullBlock]:
assert num_blocks > 0
if block_list_input is not None:
block_list = block_list_input.copy()
else:
block_list = []
constants = self.constants
transaction_data_included = False
if time_per_block is None:
time_per_block = float(constants.SUB_SLOT_TIME_TARGET) / float(constants.SLOT_BLOCKS_TARGET)
if farmer_reward_puzzle_hash is None:
farmer_reward_puzzle_hash = self.farmer_ph
if len(block_list) == 0:
if force_plot_id is not None:
raise ValueError("Cannot specify plot_id for genesis block")
initial_block_list_len = 0
genesis = self.create_genesis_block(
constants,
seed,
force_overflow=force_overflow,
skip_slots=skip_slots,
timestamp=(uint64(int(time.time())) if genesis_timestamp is None else genesis_timestamp),
)
log.info(f"Created block 0 iters: {genesis.total_iters}")
num_empty_slots_added = skip_slots
block_list = [genesis]
num_blocks -= 1
else:
initial_block_list_len = len(block_list)
num_empty_slots_added = uint32(0) # Allows forcing empty slots in the beginning, for testing purposes
if num_blocks == 0:
return block_list
height_to_hash, difficulty, blocks = load_block_list(block_list, constants)
latest_block: BlockRecord = blocks[block_list[-1].header_hash]
curr = latest_block
while not curr.is_transaction_block:
curr = blocks[curr.prev_hash]
start_timestamp = curr.timestamp
start_height = curr.height
curr = latest_block
blocks_added_this_sub_slot = 1
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
blocks_added_this_sub_slot += 1
finished_sub_slots_at_sp: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to signage point
finished_sub_slots_at_ip: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to infusion point
sub_slot_iters: uint64 = latest_block.sub_slot_iters # The number of iterations in one sub-slot
same_slot_as_last = True # Only applies to first slot, to prevent old blocks from being added
sub_slot_start_total_iters: uint128 = latest_block.ip_sub_slot_total_iters(constants)
sub_slots_finished = 0
pending_ses: bool = False
# Start at the last block in block list
# Get the challenge for that slot
while True:
slot_cc_challenge, slot_rc_challenge = get_challenges(
constants,
blocks,
finished_sub_slots_at_sp,
latest_block.header_hash,
)
prev_num_of_blocks = num_blocks
if num_empty_slots_added < skip_slots:
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
num_empty_slots_added += 1
else:
# Loop over every signage point (Except for the last ones, which are used for overflows)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA):
curr = latest_block
while curr.total_iters > sub_slot_start_total_iters + calculate_sp_iters(
constants, sub_slot_iters, uint8(signage_point_index)
):
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
if curr.total_iters > sub_slot_start_total_iters:
finished_sub_slots_at_sp = []
if same_slot_as_last:
if signage_point_index < latest_block.signage_point_index:
# Ignore this signage_point because it's in the past
continue
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache(blocks),
latest_block,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_at_sp,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = slot_cc_challenge
else:
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS or force_overflow:
break
if same_slot_as_last:
if signage_point_index == latest_block.signage_point_index:
# Ignore this block because it's in the past
if required_iters <= latest_block.required_iters:
continue
assert latest_block.header_hash in blocks
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants to be paid to a specific address, but this PoSpace is tied to an
# address, so continue until a proof of space tied to a pk is found
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator: Optional[BlockGenerator] = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
else:
if guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
blocks[full_block.header_hash] = block_record
log.info(f"Created block {block_record.height} ove=False, iters " f"{block_record.total_iters}")
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
num_blocks -= 1
if num_blocks == 0:
return block_list
# Finish the end of sub-slot and try again next sub-slot
# End of sub-slot logic
if len(finished_sub_slots_at_ip) == 0:
# Block has been created within this sub-slot
eos_iters: uint64 = uint64(sub_slot_iters - (latest_block.total_iters - sub_slot_start_total_iters))
cc_input: ClassgroupElement = latest_block.challenge_vdf_output
rc_challenge: bytes32 = latest_block.reward_infusion_new_challenge
else:
# No blocks were successfully created within this sub-slot
eos_iters = sub_slot_iters
cc_input = ClassgroupElement.get_default_element()
rc_challenge = slot_rc_challenge
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
cc_input,
slot_cc_challenge,
eos_iters,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
eos_iters,
)
eos_deficit: uint8 = (
latest_block.deficit if latest_block.deficit > 0 else constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
icc_eos_vdf, icc_ip_proof = get_icc(
constants,
uint128(sub_slot_start_total_iters + sub_slot_iters),
finished_sub_slots_at_ip,
latest_block,
blocks,
sub_slot_start_total_iters,
eos_deficit,
)
# End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
# in order for light clients to validate.
cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)
if normalized_to_identity_cc_eos:
_, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_vdf.challenge,
sub_slot_iters,
True,
)
if pending_ses:
sub_epoch_summary: Optional[SubEpochSummary] = None
else:
sub_epoch_summary = next_sub_epoch_summary(
constants,
BlockCache(blocks, height_to_hash),
latest_block.required_iters,
block_list[-1],
False,
)
pending_ses = True
if sub_epoch_summary is not None:
ses_hash = sub_epoch_summary.get_hash()
new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
log.info(f"Sub epoch summary: {sub_epoch_summary}")
else:
ses_hash = None
new_sub_slot_iters = None
new_difficulty = None
if icc_eos_vdf is not None:
# Icc vdf (Deficit of latest block is <= 4)
if len(finished_sub_slots_at_ip) == 0:
# This means there are blocks in this sub-slot
curr = latest_block
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
if curr.is_challenge_block(constants):
icc_eos_iters = uint64(sub_slot_start_total_iters + sub_slot_iters - curr.total_iters)
else:
icc_eos_iters = sub_slot_iters
else:
# This means there are no blocks in this sub-slot
icc_eos_iters = sub_slot_iters
icc_eos_vdf = VDFInfo(
icc_eos_vdf.challenge,
icc_eos_iters,
icc_eos_vdf.output,
)
if normalized_to_identity_icc_eos:
_, icc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
icc_eos_vdf.challenge,
icc_eos_iters,
True,
)
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_eos_vdf)
assert icc_sub_slot is not None
icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf,
icc_sub_slot_hash,
ses_hash,
new_sub_slot_iters,
new_difficulty,
)
else:
# No icc
icc_sub_slot = None
cc_sub_slot = ChallengeChainSubSlot(cc_vdf, None, ses_hash, new_sub_slot_iters, new_difficulty)
finished_sub_slots_at_ip.append(
EndOfSubSlotBundle(
cc_sub_slot,
icc_sub_slot,
RewardChainSubSlot(
rc_vdf,
cc_sub_slot.get_hash(),
icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
eos_deficit,
),
SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
)
)
finished_sub_slots_eos = finished_sub_slots_at_ip.copy()
latest_block_eos = latest_block
overflow_cc_challenge = finished_sub_slots_at_ip[-1].challenge_chain.get_hash()
overflow_rc_challenge = finished_sub_slots_at_ip[-1].reward_chain.get_hash()
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
sub_slots_finished += 1
log.info(
f"Sub slot finished. blocks included: {blocks_added_this_sub_slot} blocks_per_slot: "
f"{(len(block_list) - initial_block_list_len)/sub_slots_finished}"
)
blocks_added_this_sub_slot = 0 # Sub slot ended, overflows are in next sub slot
# Handle overflows: No overflows on new epoch
if new_sub_slot_iters is None and num_empty_slots_added >= skip_slots and new_difficulty is None:
for signage_point_index in range(
constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA,
constants.NUM_SPS_SUB_SLOT,
):
# note that we are passing in the finished slots which include the last slot
signage_point = get_signage_point(
constants,
BlockCache(blocks),
latest_block_eos,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_eos,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash = slot_cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If we have not yet reached the target number of slots to skip, don't make any proofs for this sub-slot
qualified_proofs = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS:
break
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants the pool reward paid to a specific address, but this proof of space is
# tied to a pool contract address, so keep looking for a proof of space tied to a pool pk
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
overflow_cc_challenge=overflow_cc_challenge,
overflow_rc_challenge=overflow_rc_challenge,
normalized_to_identity_cc_ip=normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
elif guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
log.info(f"Created block {block_record.height } ov=True, iters " f"{block_record.total_iters}")
num_blocks -= 1
if num_blocks == 0:
return block_list
blocks[full_block.header_hash] = block_record
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
finished_sub_slots_at_sp = finished_sub_slots_eos.copy()
same_slot_as_last = False
sub_slot_start_total_iters = uint128(sub_slot_start_total_iters + sub_slot_iters)
if num_blocks < prev_num_of_blocks:
num_empty_slots_added += 1
if new_sub_slot_iters is not None:
assert new_difficulty is not None
sub_slot_iters = new_sub_slot_iters
difficulty = new_difficulty
def create_genesis_block(
self,
constants: ConsensusConstants,
seed: bytes32 = b"",
timestamp: Optional[uint64] = None,
force_overflow: bool = False,
skip_slots: int = 0,
) -> FullBlock:
if timestamp is None:
timestamp = uint64(int(time.time()))
finished_sub_slots: List[EndOfSubSlotBundle] = []
unfinished_block: Optional[UnfinishedBlock] = None
ip_iters: uint64 = uint64(0)
sub_slot_total_iters: uint128 = uint128(0)
# Keep trying until we get a good proof of space that also passes sp filter
while True:
cc_challenge, rc_challenge = get_challenges(constants, {}, finished_sub_slots, None)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT):
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache({}, {}),
None,
sub_slot_total_iters,
uint8(signage_point_index),
finished_sub_slots,
constants.SUB_SLOT_ITERS_STARTING,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If we have not yet reached the target number of slots to skip, don't make any proofs for this sub-slot
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
cc_challenge,
cc_sp_output_hash,
seed,
constants.DIFFICULTY_STARTING,
constants.SUB_SLOT_ITERS_STARTING,
)
# Try each of the proofs of space
for required_iters, proof_of_space in qualified_proofs:
sp_iters: uint64 = calculate_sp_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
)
ip_iters = calculate_ip_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
required_iters,
)
is_overflow = is_overflow_block(constants, uint8(signage_point_index))
if force_overflow and not is_overflow:
continue
if len(finished_sub_slots) < skip_slots:
continue
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_total_iters,
constants.SUB_SLOT_ITERS_STARTING,
uint8(signage_point_index),
sp_iters,
ip_iters,
proof_of_space,
cc_challenge,
constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH,
PoolTarget(constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)),
self.get_plot_signature,
self.get_pool_key_signature,
signage_point,
timestamp,
BlockCache({}),
seed=seed,
finished_sub_slots_input=finished_sub_slots,
)
assert unfinished_block is not None
if not is_overflow:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
ip_iters,
)
assert unfinished_block is not None
total_iters_sp = uint128(sub_slot_total_iters + sp_iters)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
if signage_point_index == constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA - 1:
# Finish the end of sub-slot and try again next sub-slot
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
cc_slot = ChallengeChainSubSlot(cc_vdf, None, None, None, None)
finished_sub_slots.append(
EndOfSubSlotBundle(
cc_slot,
None,
RewardChainSubSlot(
rc_vdf,
cc_slot.get_hash(),
None,
uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK),
),
SubSlotProofs(cc_proof, None, rc_proof),
)
)
if unfinished_block is not None:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].challenge_chain.get_hash(),
ip_iters,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].reward_chain.get_hash(),
ip_iters,
)
total_iters_sp = uint128(
sub_slot_total_iters
+ calculate_sp_iters(
self.constants,
self.constants.SUB_SLOT_ITERS_STARTING,
unfinished_block.reward_chain_block.signage_point_index,
)
)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
sub_slot_total_iters = uint128(sub_slot_total_iters + constants.SUB_SLOT_ITERS_STARTING)
def get_pospaces_for_challenge(
self,
constants: ConsensusConstants,
challenge_hash: bytes32,
signage_point: bytes32,
seed: bytes,
difficulty: uint64,
sub_slot_iters: uint64,
force_plot_id: Optional[bytes32] = None,
) -> List[Tuple[uint64, ProofOfSpace]]:
found_proofs: List[Tuple[uint64, ProofOfSpace]] = []
random.seed(seed)
for plot_info in self.plot_manager.plots.values():
plot_id: bytes32 = plot_info.prover.get_id()
if force_plot_id is not None and plot_id != force_plot_id:
continue
if ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, signage_point):
new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, challenge_hash, signage_point)
qualities = plot_info.prover.get_qualities_for_challenge(new_challenge)
for proof_index, quality_str in enumerate(qualities):
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
plot_info.prover.get_size(),
difficulty,
signage_point,
)
if required_iters < calculate_sp_interval_iters(constants, sub_slot_iters):
proof_xs: bytes = plot_info.prover.get_full_proof(new_challenge, proof_index)
# Look up local_sk from plot to save locked memory
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(plot_info.prover.get_memo())
local_sk = master_sk_to_local_sk(local_master_sk)
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
include_taproot = False
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
include_taproot = True
plot_pk = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, include_taproot
)
proof_of_space: ProofOfSpace = ProofOfSpace(
new_challenge,
plot_info.pool_public_key,
plot_info.pool_contract_puzzle_hash,
plot_pk,
plot_info.prover.get_size(),
proof_xs,
)
found_proofs.append((required_iters, proof_of_space))
random_sample = found_proofs
if len(found_proofs) >= 1:
if random.random() < 0.1:
# Removes some proofs of space to create "random" chains, based on the seed
random_sample = random.sample(found_proofs, len(found_proofs) - 1)
return random_sample
def get_signage_point(
constants: ConsensusConstants,
blocks: BlockchainInterface,
latest_block: Optional[BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_iters: uint64,
normalized_to_identity_cc_sp: bool = False,
) -> SignagePoint:
if signage_point_index == 0:
return SignagePoint(None, None, None, None)
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
overflow = is_overflow_block(constants, signage_point_index)
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
finished_sub_slots,
overflow,
latest_block,
blocks,
sp_total_iters,
sp_iters,
)
cc_sp_vdf, cc_sp_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
cc_vdf_iters,
)
rc_sp_vdf, rc_sp_proof = get_vdf_info_and_proof(
constants,
rc_vdf_input,
rc_vdf_challenge,
rc_vdf_iters,
)
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
if normalized_to_identity_cc_sp:
_, cc_sp_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_sp_vdf.challenge,
sp_iters,
True,
)
return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof)
def finish_block(
constants: ConsensusConstants,
blocks: Dict[bytes32, BlockRecord],
height_to_hash: Dict[uint32, bytes32],
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
unfinished_block: UnfinishedBlock,
required_iters: uint64,
ip_iters: uint64,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
latest_block: BlockRecord,
sub_slot_iters: uint64,
difficulty: uint64,
normalized_to_identity_cc_ip: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
is_overflow = is_overflow_block(constants, signage_point_index)
cc_vdf_challenge = slot_cc_challenge
if len(finished_sub_slots) == 0:
new_ip_iters = unfinished_block.total_iters - latest_block.total_iters
cc_vdf_input = latest_block.challenge_vdf_output
rc_vdf_challenge = latest_block.reward_infusion_new_challenge
else:
new_ip_iters = ip_iters
cc_vdf_input = ClassgroupElement.get_default_element()
rc_vdf_challenge = slot_rc_challenge
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
new_ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
if normalized_to_identity_cc_ip:
_, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_ip_vdf.challenge,
ip_iters,
True,
)
deficit = calculate_deficit(
constants,
uint32(latest_block.height + 1),
latest_block,
is_overflow,
len(finished_sub_slots),
)
icc_ip_vdf, icc_ip_proof = get_icc(
constants,
unfinished_block.total_iters,
finished_sub_slots,
latest_block,
blocks,
uint128(sub_slot_start_total_iters + sub_slot_iters) if is_overflow else sub_slot_start_total_iters,
deficit,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_vdf_challenge,
new_ip_iters,
)
assert unfinished_block is not None
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
full_block: FullBlock = unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
icc_ip_vdf,
icc_ip_proof,
finished_sub_slots,
latest_block,
BlockCache(blocks),
sp_total_iters,
difficulty,
)
block_record = block_to_block_record(constants, BlockCache(blocks), required_iters, full_block, None)
return full_block, block_record
def get_challenges(
constants: ConsensusConstants,
blocks: Dict[uint32, BlockRecord],
finished_sub_slots: List[EndOfSubSlotBundle],
prev_header_hash: Optional[bytes32],
) -> Tuple[bytes32, bytes32]:
if len(finished_sub_slots) == 0:
if prev_header_hash is None:
return constants.GENESIS_CHALLENGE, constants.GENESIS_CHALLENGE
curr: BlockRecord = blocks[prev_header_hash]
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
assert curr.finished_challenge_slot_hashes is not None
assert curr.finished_reward_slot_hashes is not None
cc_challenge = curr.finished_challenge_slot_hashes[-1]
rc_challenge = curr.finished_reward_slot_hashes[-1]
else:
cc_challenge = finished_sub_slots[-1].challenge_chain.get_hash()
rc_challenge = finished_sub_slots[-1].reward_chain.get_hash()
return cc_challenge, rc_challenge
def get_plot_dir() -> Path:
cache_path = Path(os.path.expanduser(os.getenv("TACO_ROOT", "~/.taco/"))) / "test-plots"
mkdir(cache_path)
return cache_path
def get_plot_tmp_dir():
return get_plot_dir() / "tmp"
def load_block_list(
block_list: List[FullBlock], constants: ConsensusConstants
) -> Tuple[Dict[uint32, bytes32], uint64, Dict[uint32, BlockRecord]]:
difficulty = 0
height_to_hash: Dict[uint32, bytes32] = {}
blocks: Dict[uint32, BlockRecord] = {}
for full_block in block_list:
if full_block.height == 0:
difficulty = uint64(constants.DIFFICULTY_STARTING)
else:
difficulty = full_block.weight - block_list[full_block.height - 1].weight
if full_block.reward_chain_block.signage_point_index == 0:
challenge = full_block.reward_chain_block.pos_ss_cc_challenge_hash
sp_hash = challenge
else:
assert full_block.reward_chain_block.challenge_chain_sp_vdf is not None
challenge = full_block.reward_chain_block.challenge_chain_sp_vdf.challenge
sp_hash = full_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_str = full_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, sp_hash
)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
full_block.reward_chain_block.proof_of_space.size,
uint64(difficulty),
sp_hash,
)
blocks[full_block.header_hash] = block_to_block_record(
constants,
BlockCache(blocks),
required_iters,
full_block,
None,
)
height_to_hash[uint32(full_block.height)] = full_block.header_hash
return height_to_hash, uint64(difficulty), blocks
def get_icc(
constants: ConsensusConstants,
vdf_end_total_iters: uint128,
finished_sub_slots: List[EndOfSubSlotBundle],
latest_block: BlockRecord,
blocks: Dict[bytes32, BlockRecord],
sub_slot_start_total_iters: uint128,
deficit: uint8,
) -> Tuple[Optional[VDFInfo], Optional[VDFProof]]:
if len(finished_sub_slots) == 0:
prev_deficit = latest_block.deficit
else:
prev_deficit = finished_sub_slots[-1].reward_chain.deficit
if deficit == prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# new slot / overflow sb to new slot / overflow sb
return None, None
if deficit == (prev_deficit - 1) == (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1):
# new slot / overflow sb to challenge sb
return None, None
if len(finished_sub_slots) != 0:
last_ss = finished_sub_slots[-1]
assert last_ss.infused_challenge_chain is not None
assert finished_sub_slots[-1].reward_chain.deficit <= (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
return get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
last_ss.infused_challenge_chain.get_hash(),
uint64(vdf_end_total_iters - sub_slot_start_total_iters),
)
curr = latest_block # curr deficit is 0, 1, 2, 3, or 4
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
icc_iters = uint64(vdf_end_total_iters - latest_block.total_iters)
if latest_block.is_challenge_block(constants):
icc_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
else:
icc_input = latest_block.infused_challenge_vdf_output
assert icc_input is not None
if curr.is_challenge_block(constants): # Deficit 4
icc_challenge_hash = curr.challenge_block_info_hash
else:
assert curr.finished_infused_challenge_slot_hashes is not None
# First block in sub slot has deficit 0,1,2 or 3
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
return get_vdf_info_and_proof(
constants,
icc_input,
icc_challenge_hash,
icc_iters,
)
def get_full_block_and_block_record(
constants: ConsensusConstants,
blocks: Dict[uint32, BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
start_timestamp: uint64,
start_height: uint32,
time_per_block: float,
block_generator: Optional[BlockGenerator],
aggregate_signature: G2Element,
additions: Optional[List[Coin]],
removals: Optional[List[Coin]],
height_to_hash: Dict[uint32, bytes32],
difficulty: uint64,
required_iters: uint64,
sub_slot_iters: uint64,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
finished_sub_slots: List[EndOfSubSlotBundle],
signage_point: SignagePoint,
prev_block: BlockRecord,
seed: bytes = b"",
overflow_cc_challenge: Optional[bytes32] = None,
overflow_rc_challenge: Optional[bytes32] = None,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
if current_time is True:
if prev_block.timestamp is not None:
timestamp = uint64(max(int(time.time()), prev_block.timestamp + int(time_per_block)))
else:
timestamp = uint64(int(time.time()))
else:
timestamp = uint64(start_timestamp + int((prev_block.height + 1 - start_height) * time_per_block))
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_start_total_iters,
sub_slot_iters,
signage_point_index,
sp_iters,
ip_iters,
proof_of_space,
slot_cc_challenge,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
signage_point,
timestamp,
BlockCache(blocks),
seed,
block_generator,
aggregate_signature,
additions,
removals,
prev_block,
finished_sub_slots,
)
if (overflow_cc_challenge is not None) and (overflow_rc_challenge is not None):
slot_cc_challenge = overflow_cc_challenge
slot_rc_challenge = overflow_rc_challenge
full_block, block_record = finish_block(
constants,
blocks,
height_to_hash,
finished_sub_slots,
sub_slot_start_total_iters,
signage_point_index,
unfinished_block,
required_iters,
ip_iters,
slot_cc_challenge,
slot_rc_challenge,
prev_block,
sub_slot_iters,
difficulty,
normalized_to_identity_cc_ip,
)
return full_block, block_record
def compute_cost_test(generator: BlockGenerator, cost_per_byte: int) -> Tuple[Optional[uint16], uint64]:
try:
block_program, block_program_args = setup_generator_args(generator)
clvm_cost, result = GENERATOR_MOD.run_safe_with_cost(INFINITE_COST, block_program, block_program_args)
size_cost = len(bytes(generator.program)) * cost_per_byte
condition_cost = 0
for res in result.first().as_iter():
res = res.rest() # skip parent coin id
res = res.rest() # skip puzzle hash
res = res.rest() # skip amount
for cond in res.first().as_iter():
condition = cond.first().as_atom()
if condition in [ConditionOpcode.AGG_SIG_UNSAFE, ConditionOpcode.AGG_SIG_ME]:
condition_cost += ConditionCost.AGG_SIG.value
elif condition == ConditionOpcode.CREATE_COIN:
condition_cost += ConditionCost.CREATE_COIN.value
return None, uint64(clvm_cost + size_cost + condition_cost)
except Exception:
return uint16(Err.GENERATOR_RUNTIME_ERROR.value), uint64(0)
def create_test_foliage(
constants: ConsensusConstants,
reward_block_unfinished: RewardChainBlockUnfinished,
block_generator: Optional[BlockGenerator],
aggregate_sig: G2Element,
additions: List[Coin],
removals: List[Coin],
prev_block: Optional[BlockRecord],
blocks: BlockchainInterface,
total_iters_sp: uint128,
timestamp: uint64,
farmer_reward_puzzlehash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
seed: bytes32 = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
"""
Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
the return values are not None. This is called at the signage point, so some of this information may be
tweaked at the infusion point.
Args:
constants: consensus constants being used for this chain
reward_block_unfinished: the reward block to look at, potentially at the signage point
block_generator: transactions to add to the foliage block, if created
aggregate_sig: aggregate signature of all transactions (or the infinity element if there are none)
prev_block: the previous block at the signage point
blocks: dict from header hash to BlockRecord, for all ancestor blocks
total_iters_sp: total iters at the signage point
timestamp: timestamp to put into the foliage block
farmer_reward_puzzlehash: where to pay out farming reward
pool_target: where to pay out pool reward
get_plot_signature: retrieve the signature corresponding to the plot public key
get_pool_signature: retrieve the signature corresponding to the pool public key
seed: seed to randomize block
"""
if prev_block is not None:
res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
is_transaction_block: bool = res[0]
prev_transaction_block: Optional[BlockRecord] = res[1]
else:
# Genesis is a transaction block
prev_transaction_block = None
is_transaction_block = True
random.seed(seed)
# Use the extension data to create different blocks based on header hash
extension_data: bytes32 = random.randint(0, 100000000).to_bytes(32, "big")
if prev_block is None:
height: uint32 = uint32(0)
else:
height = uint32(prev_block.height + 1)
# Create filter
byte_array_tx: List[bytes32] = []
tx_additions: List[Coin] = []
tx_removals: List[bytes32] = []
pool_target_signature: Optional[G2Element] = get_pool_signature(
pool_target, reward_block_unfinished.proof_of_space.pool_public_key
)
foliage_data = FoliageBlockData(
reward_block_unfinished.get_hash(),
pool_target,
pool_target_signature,
farmer_reward_puzzlehash,
extension_data,
)
foliage_block_data_signature: G2Element = get_plot_signature(
foliage_data.get_hash(),
reward_block_unfinished.proof_of_space.plot_public_key,
)
prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
if height != 0:
assert prev_block is not None
prev_block_hash = prev_block.header_hash
generator_block_heights_list: List[uint32] = []
if is_transaction_block:
cost = uint64(0)
# Calculate the cost of transactions
if block_generator is not None:
generator_block_heights_list = block_generator.block_height_list()
err, cost = compute_cost_test(block_generator, constants.COST_PER_BYTE)
assert err is None
removal_amount = 0
addition_amount = 0
for coin in removals:
removal_amount += coin.amount
for coin in additions:
addition_amount += coin.amount
spend_bundle_fees = removal_amount - addition_amount
# in order to allow creating blocks that mint coins, clamp the fee
# to 0, if it ends up being negative
if spend_bundle_fees < 0:
spend_bundle_fees = 0
else:
spend_bundle_fees = 0
reward_claims_incorporated = []
if height > 0:
assert prev_transaction_block is not None
assert prev_block is not None
curr: BlockRecord = prev_block
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
assert curr.fees is not None
pool_coin = create_pool_coin(
curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
constants.GENESIS_CHALLENGE,
)
assert curr.header_hash == prev_transaction_block.header_hash
reward_claims_incorporated += [pool_coin, farmer_coin]
if curr.height > 0:
curr = blocks.block_record(curr.prev_hash)
# Prev block is not genesis
while not curr.is_transaction_block:
pool_coin = create_pool_coin(
curr.height,
curr.pool_puzzle_hash,
calculate_pool_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
calculate_base_farmer_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
reward_claims_incorporated += [pool_coin, farmer_coin]
curr = blocks.block_record(curr.prev_hash)
additions.extend(reward_claims_incorporated.copy())
for coin in additions:
tx_additions.append(coin)
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin in removals:
tx_removals.append(coin.name())
byte_array_tx.append(bytearray(coin.name()))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded = bytes(bip158.GetEncoded())
removal_merkle_set = MerkleSet()
addition_merkle_set = MerkleSet()
# Create removal Merkle set
for coin_name in tx_removals:
removal_merkle_set.add_already_hashed(coin_name)
# Create addition Merkle set
puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
for coin in tx_additions:
if coin.puzzle_hash in puzzlehash_coin_map:
puzzlehash_coin_map[coin.puzzle_hash].append(coin)
else:
puzzlehash_coin_map[coin.puzzle_hash] = [coin]
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coin_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
additions_root = addition_merkle_set.get_root()
removals_root = removal_merkle_set.get_root()
generator_hash = bytes32([0] * 32)
if block_generator is not None:
generator_hash = std_hash(block_generator.program)
generator_refs_hash = bytes32([1] * 32)
if generator_block_heights_list not in (None, []):
generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
generator_refs_hash = std_hash(generator_ref_list_bytes)
filter_hash: bytes32 = std_hash(encoded)
transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
generator_hash,
generator_refs_hash,
aggregate_sig,
uint64(spend_bundle_fees),
cost,
reward_claims_incorporated,
)
if prev_transaction_block is None:
prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
else:
prev_transaction_block_hash = prev_transaction_block.header_hash
assert transactions_info is not None
foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
prev_transaction_block_hash,
timestamp,
filter_hash,
additions_root,
removals_root,
transactions_info.get_hash(),
)
assert foliage_transaction_block is not None
foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash()
foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
)
assert foliage_transaction_block_signature is not None
else:
foliage_transaction_block_hash = None
foliage_transaction_block_signature = None
foliage_transaction_block = None
transactions_info = None
assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)
foliage = Foliage(
prev_block_hash,
reward_block_unfinished.get_hash(),
foliage_data,
foliage_block_data_signature,
foliage_transaction_block_hash,
foliage_transaction_block_signature,
)
return foliage, foliage_transaction_block, transactions_info
def create_test_unfinished_block(
constants: ConsensusConstants,
sub_slot_start_total_iters: uint128,
sub_slot_iters: uint64,
signage_point_index: uint8,
sp_iters: uint64,
ip_iters: uint64,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
signage_point: SignagePoint,
timestamp: uint64,
blocks: BlockchainInterface,
seed: bytes32 = b"",
block_generator: Optional[BlockGenerator] = None,
aggregate_sig: G2Element = G2Element(),
additions: Optional[List[Coin]] = None,
removals: Optional[List[Coin]] = None,
prev_block: Optional[BlockRecord] = None,
finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
"""
Creates a new unfinished block using all the information available at the signage point. This will have to be
modified using information from the infusion point.
Args:
constants: consensus constants being used for this chain
sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
sub_slot_iters: sub-slot-iters at the infusion point epoch
signage_point_index: signage point index of the block to create
sp_iters: sp_iters of the block to create
ip_iters: ip_iters of the block to create
proof_of_space: proof of space of the block to create
slot_cc_challenge: challenge hash at the sp sub-slot
farmer_reward_puzzle_hash: where to pay out farmer rewards
pool_target: where to pay out pool rewards
get_plot_signature: function that returns signature corresponding to plot public key
get_pool_signature: function that returns signature corresponding to pool public key
signage_point: signage point information (VDFs)
timestamp: timestamp to add to the foliage block, if created
seed: seed to randomize chain
block_generator: transactions to add to the foliage block, if created
aggregate_sig: aggregate signature of all transactions (or the infinity element if there are none)
additions: Coins added in spend_bundle
removals: Coins removed in spend_bundle
prev_block: previous block (already in chain) from the signage point
blocks: dictionary from header hash to BlockRecord, for all blocks included so far
finished_sub_slots_input: finished_sub_slots at the signage point
Returns:
the new UnfinishedBlock, built from the information available at the signage point
"""
if finished_sub_slots_input is None:
finished_sub_slots: List[EndOfSubSlotBundle] = []
else:
finished_sub_slots = finished_sub_slots_input.copy()
overflow: bool = sp_iters > ip_iters
total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
is_genesis: bool = prev_block is None
new_sub_slot: bool = len(finished_sub_slots) > 0
cc_sp_hash: Optional[bytes32] = slot_cc_challenge
# Only enters this if statement if we are in testing mode (making VDF proofs here)
if signage_point.cc_vdf is not None:
assert signage_point.rc_vdf is not None
cc_sp_hash = signage_point.cc_vdf.output.get_hash()
rc_sp_hash = signage_point.rc_vdf.output.get_hash()
else:
if new_sub_slot:
rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
else:
if is_genesis:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_block is not None
assert blocks is not None
curr = prev_block
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
signage_point = SignagePoint(None, None, None, None)
cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
assert cc_sp_signature is not None
assert rc_sp_signature is not None
assert AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)
total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))
rc_block = RewardChainBlockUnfinished(
total_iters,
signage_point_index,
slot_cc_challenge,
proof_of_space,
signage_point.cc_vdf,
cc_sp_signature,
signage_point.rc_vdf,
rc_sp_signature,
)
if additions is None:
additions = []
if removals is None:
removals = []
(foliage, foliage_transaction_block, transactions_info,) = create_test_foliage(
constants,
rc_block,
block_generator,
aggregate_sig,
additions,
removals,
prev_block,
blocks,
total_iters_sp,
timestamp,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
seed,
)
return UnfinishedBlock(
finished_sub_slots,
rc_block,
signage_point.cc_proof,
signage_point.rc_proof,
foliage,
foliage_transaction_block,
transactions_info,
block_generator.program if block_generator else None,
block_generator.block_height_list() if block_generator else [],
)
async def create_block_tools_async(
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
) -> BlockTools:
bt = BlockTools(constants, root_path, const_dict, keychain)
await bt.setup_keys()
await bt.setup_plots()
return bt
def create_block_tools(
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
) -> BlockTools:
bt = BlockTools(constants, root_path, const_dict, keychain)
asyncio.get_event_loop().run_until_complete(bt.setup_keys())
asyncio.get_event_loop().run_until_complete(bt.setup_plots())
return bt
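# Hypothetical usage sketch for the helpers above: build the BlockTools fixture
# asynchronously and create a genesis block. Plot and key setup is slow, so this is
# illustrative rather than something to run on import; the seed value is arbitrary.
async def _demo_block_tools() -> None:
    bt = await create_block_tools_async()
    genesis = bt.create_genesis_block(bt.constants, seed=b"demo-seed")
    print(genesis.height, genesis.header_hash)

# To try it manually:
#   asyncio.run(_demo_block_tools())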
| 43.243299
| 120
| 0.606792
|
6ef34b184eee84b9f9632a02de602c87020d103c
| 2,764
|
py
|
Python
|
index.py
|
liujiage/DevOps
|
87e7343f1a574bb05fe7915ba05d8757e6913779
|
[
"Apache-2.0"
] | 3
|
2022-01-11T14:03:19.000Z
|
2022-01-11T14:25:24.000Z
|
index.py
|
liujiage/DevOps
|
87e7343f1a574bb05fe7915ba05d8757e6913779
|
[
"Apache-2.0"
] | null | null | null |
index.py
|
liujiage/DevOps
|
87e7343f1a574bb05fe7915ba05d8757e6913779
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, request, session
from flask_socketio import SocketIO, emit
import logging
from view.DeployView import DeployView as dv
from view.HistoryView import HistoryView as hv
from view.AuthenticationView import AuthenticationView as auth
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, logger=True, engineio_logger=True)
'''
@Function login system
@Author Jiage
@Date 2021-09-03
'''
@app.route('/login', methods=['GET', 'POST'])
def login():
if session.get('userid'):
session.pop('userid')
return render_template('login.html')
'''
@Function the first page
@Author Jiage
@Date 2021-08-07
'''
@app.route("/")
def index():
return render_template('index.html', user = session['userid'])
'''
@Function deploy first page
@Author Jiage
@Date 2021-08-07
'''
@app.route("/deploy")
def deploy():
return render_template('deploy.html', services=dv.loadView())
'''
@Function deploy service selected
@Author Jiage
@Date 2021-08-07
'''
@app.route("/deploy/service/<service>", methods=['GET','POST'])
def deployService(service=None):
# app.logger.info('===========deploy/service')
dv.deployService(service)
return "got it, deploy services info is " + service
'''
@Function display deploys history detail by id
@Author Jiage
@Date 2021-09-10
'''
@app.route("/history/detail/<id>", methods=['GET','POST'])
def historyService(id):
return hv.loadDetail(id)
'''
@Function trace the log real-time by id
@Author Jiage
@Date 2021-09-13
'''
@app.route("/history/web/<id>", methods=['GET','POST'])
def historyWebService(id):
return hv.loadDetail(id)
'''
@Function search history deploys records
@Author Jiage
@Date 2021-09-09
'''
@app.route("/history")
def history():
return render_template('history.html', records=hv.loadView())
'''
@Function watch currently hosts memory usage.
@Author Jiage
@Date 2021-08-23
'''
@app.route("/memory")
def memory():
return render_template('memory.html')
'''
@Function received requests from the client
@Author Jiage
@Date 2021-08-18
@Data
Json {event:{connect|start_deploy},data: {type of string}}
@Param event
connect, the client connect to service at the first time.
start_deploy, the user submit deploy service.
@Param data
connect, string
start_deploy, string
'''
@socketio.on('request')
def socketRequest(msg):
dv.socketRequest(app, msg)
@socketio.on('connect')
def socketConnect(msg):
emit('response', {'event': 'connect', 'data': 'Connected'})
'''
@Function Intercept request, authentication.
@Author Jiage
@Date 2021-09-03
'''
@app.before_request
def authentication():
return auth.auth(app)
if __name__ == "__main__":
socketio.run(app)
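# Hypothetical client-side sketch for the websocket protocol documented above
# (emit 'request' with an {event, data} payload, listen on 'response'). It uses the
# python-socketio client package; the URL and payload are illustrative, and it assumes
# the before_request authentication does not block the socket handshake.
def demo_client(url='http://localhost:5000'):
    # Import locally under an alias so it does not shadow the Flask-SocketIO
    # server instance named 'socketio' above.
    import socketio as socketio_client
    client = socketio_client.Client()

    @client.on('response')
    def on_response(msg):
        print('server response:', msg)

    client.connect(url)
    client.emit('request', {'event': 'start_deploy', 'data': 'example-service'})
    client.wait()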
| 22.842975
| 66
| 0.708755
|
9df62591ae02e2a1ed36bad46726e1cf8c2c0c3b
| 809
|
py
|
Python
|
chapter0/find-perm.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | 1
|
2021-12-01T13:26:10.000Z
|
2021-12-01T13:26:10.000Z
|
chapter0/find-perm.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | null | null | null |
chapter0/find-perm.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | null | null | null |
"""
Example: Given a smaller string a and a bigger string b, design an algorithm to find all permutations
of the shorter string within the longer one. Print the location of each permutation.
"""
from collections import Counter
sstring = 'abbc'
lstring = "cbabadcbbabbcbabaabccbabc"
ssCounter = Counter(sstring)
for i in range(len(lstring)):
tmpCounter = Counter()
sub_lstring_of_size_sstring = lstring[i:i+len(sstring)]
if len(sub_lstring_of_size_sstring) != len(sstring):
#print("reached the end with", sub_lstring_of_size_sstring)
break
for ll in sub_lstring_of_size_sstring:
if ll in sstring:
tmpCounter[ll] += 1
else:
break
if not ssCounter - tmpCounter:
print(sub_lstring_of_size_sstring, "location", i)
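# Alternative sketch: a sliding-window Counter reports the same locations while doing
# only one add and one drop per step, instead of rebuilding the window Counter for
# every starting index.
def find_permutation_locations(short, long):
    k = len(short)
    target = Counter(short)
    window = Counter(long[:k])
    locations = []
    for i in range(len(long) - k + 1):
        if window == target:
            locations.append(i)
        if i + k < len(long):
            window[long[i + k]] += 1   # character entering the window
            window[long[i]] -= 1       # character leaving the window
            if window[long[i]] == 0:
                del window[long[i]]    # drop zero counts so == comparison works
    return locations

# e.g. find_permutation_locations(sstring, lstring) reports the same locations
# that the loop above prints.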
| 27.896552
| 102
| 0.690977
|
9bfcb7d83d5a26eaef421e92fc9ed48c9f8be4fb
| 1,832
|
py
|
Python
|
setup.py
|
fish2000/pilkit
|
cf60f3cbc28a15db46acfda1e61a704efad31c23
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
fish2000/pilkit
|
cf60f3cbc28a15db46acfda1e61a704efad31c23
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
fish2000/pilkit
|
cf60f3cbc28a15db46acfda1e61a704efad31c23
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
# Workaround for multiprocessing/nose issue. See http://bugs.python.org/msg170215
try:
import multiprocessing
except ImportError:
pass
read = lambda filepath: codecs.open(filepath, 'r', 'utf-8').read()
# Load package meta from the pkgmeta module without loading the package.
pkgmeta = {}
pkgmeta_file = os.path.join(os.path.dirname(__file__), 'pilkit', 'pkgmeta.py')
with open(pkgmeta_file) as f:
code = compile(f.read(), 'pkgmeta.py', 'exec')
exec(code, pkgmeta)
setup(
name='pilkit',
version=pkgmeta['__version__'],
description='A collection of utilities and processors for the Python Imaging Library.',
long_description=read(os.path.join(os.path.dirname(__file__), 'README.rst')),
author='Matthew Tretter',
author_email='m@tthewwithanm.com',
license='BSD',
url='http://github.com/matthewwithanm/pilkit/',
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
tests_require=[
'mock>=1.0.1',
'nose>=1.3.6',
'nose-progressive>=1.5.1',
'Pillow',
],
test_suite='nose.collector',
install_requires=[],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities'
],
)
| 32.140351
| 91
| 0.643559
|
55fcb967df1dc0ba5cba0eefec39c83358e12c26
| 383
|
py
|
Python
|
auth-api/migrations/versions/a930f64458f6_.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 11
|
2019-09-26T06:58:25.000Z
|
2022-01-26T06:19:39.000Z
|
auth-api/migrations/versions/a930f64458f6_.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 1,622
|
2019-05-07T21:08:38.000Z
|
2022-03-28T17:07:15.000Z
|
auth-api/migrations/versions/a930f64458f6_.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 98
|
2019-03-01T21:36:15.000Z
|
2021-12-01T22:11:25.000Z
|
"""empty message
Revision ID: a930f64458f6
Revises: 8111f90828de, 3939342073ea
Create Date: 2020-03-09 19:01:44.023061
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a930f64458f6'
down_revision = ('8111f90828de', '3939342073ea')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| 15.32
| 48
| 0.741514
|
06e8a6c082ae7bb075b7482bd5044ec7d2d480ec
| 604
|
py
|
Python
|
py/py_0381_(prime-k)_factorial.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0381_(prime-k)_factorial.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0381_(prime-k)_factorial.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
# Solution of;
# Project Euler Problem 381: (prime-k) factorial
# https://projecteuler.net/problem=381
#
# For a prime p let S(p) = (∑ (p-k)!) mod(p) for 1 ≤ k ≤ 5. For example, if
# p=7,(7-1)! + (7-2)! + (7-3)! + (7-4)! + (7-5)! = 6! + 5! + 4! + 3! + 2! =
# 720+120+24+6+2 = 872. As 872 mod(7) = 4, S(7) = 4. It can be verified that ∑
# S(p) = 480 for 5 ≤ p < 100. Find ∑ S(p) for 5 ≤ p < 10^8.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
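# Brute-force sketch of S(p), fast enough only to reproduce the checks given in the
# statement (S(7) = 4 and a total of 480 for 5 <= p < 100); the real bound of 10^8
# needs a Wilson's-theorem style shortcut rather than direct factorials.
from math import factorial

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def S(p):
    return sum(factorial(p - k) for k in range(1, 6)) % p

assert S(7) == 4
assert sum(S(p) for p in range(5, 100) if is_prime(p)) == 480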
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 381
timed.caller(dummy, n, i, prob_id)
| 26.26087
| 79
| 0.549669
|
9e6e24acb8ed3d5635febb77f316868d1d312970
| 20,373
|
py
|
Python
|
InvenTree/InvenTree/helpers.py
|
rkalman/InvenTree
|
8ceff063f86394cd1be9f5c57e6302b79782a526
|
[
"MIT"
] | null | null | null |
InvenTree/InvenTree/helpers.py
|
rkalman/InvenTree
|
8ceff063f86394cd1be9f5c57e6302b79782a526
|
[
"MIT"
] | null | null | null |
InvenTree/InvenTree/helpers.py
|
rkalman/InvenTree
|
8ceff063f86394cd1be9f5c57e6302b79782a526
|
[
"MIT"
] | null | null | null |
"""
Provides helper functions used throughout the InvenTree project
"""
import io
import re
import json
import os.path
from PIL import Image
from decimal import Decimal, InvalidOperation
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse
from django.core.exceptions import ValidationError, FieldError
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import Permission
import InvenTree.version
from common.models import InvenTreeSetting
from .settings import MEDIA_URL, STATIC_URL
from common.settings import currency_code_default
from djmoney.money import Money
def getSetting(key, backup_value=None):
"""
Shortcut for reading a setting value from the database
"""
return InvenTreeSetting.get_setting(key, backup_value=backup_value)
def generateTestKey(test_name):
"""
Generate a test 'key' for a given test name.
This must not have illegal chars as it will be used for dict lookup in a template.
Tests must be named such that they will have unique keys.
"""
key = test_name.strip().lower()
key = key.replace(" ", "")
# Remove any characters that cannot be used to represent a variable
key = re.sub(r'[^a-zA-Z0-9]', '', key)
return key
def getMediaUrl(filename):
"""
Return the qualified access path for the given file,
under the media directory.
"""
return os.path.join(MEDIA_URL, str(filename))
def getStaticUrl(filename):
"""
Return the qualified access path for the given file,
under the static media directory.
"""
return os.path.join(STATIC_URL, str(filename))
def construct_absolute_url(*arg):
"""
Construct (or attempt to construct) an absolute URL from a relative URL.
This is useful when (for example) sending an email to a user with a link
to something in the InvenTree web framework.
This requires the BASE_URL configuration option to be set!
"""
base = str(InvenTreeSetting.get_setting('INVENTREE_BASE_URL'))
url = '/'.join(arg)
if not base:
return url
# Strip trailing slash from base url
if base.endswith('/'):
base = base[:-1]
if url.startswith('/'):
url = url[1:]
url = f"{base}/{url}"
return url
def getBlankImage():
"""
Return the qualified path for the 'blank image' placeholder.
"""
return getStaticUrl("img/blank_image.png")
def getBlankThumbnail():
"""
Return the qualified path for the 'blank image' thumbnail placeholder.
"""
return getStaticUrl("img/blank_image.thumbnail.png")
def TestIfImage(img):
""" Test if an image file is indeed an image """
try:
Image.open(img).verify()
return True
except:
return False
def TestIfImageURL(url):
""" Test if an image URL (or filename) looks like a valid image format.
Simply tests the extension against a set of allowed values
"""
return os.path.splitext(os.path.basename(url))[-1].lower() in [
'.jpg', '.jpeg',
'.png', '.bmp',
'.tif', '.tiff',
'.webp', '.gif',
]
def str2bool(text, test=True):
""" Test if a string 'looks' like a boolean value.
Args:
text: Input text
test (default = True): Set which boolean value to look for
Returns:
True if the text looks like the selected boolean value
"""
if test:
return str(text).lower() in ['1', 'y', 'yes', 't', 'true', 'ok', 'on', ]
else:
return str(text).lower() in ['0', 'n', 'no', 'none', 'f', 'false', 'off', ]
def is_bool(text):
"""
Determine if a string value 'looks' like a boolean.
"""
if str2bool(text, True):
return True
elif str2bool(text, False):
return True
else:
return False
def isNull(text):
"""
Test if a string 'looks' like a null value.
This is useful for querying the API against a null key.
Args:
text: Input text
Returns:
True if the text looks like a null value
"""
return str(text).strip().lower() in ['top', 'null', 'none', 'empty', 'false', '-1', '']
def normalize(d):
"""
Normalize a decimal number, and remove exponential formatting.
"""
if type(d) is not Decimal:
d = Decimal(d)
d = d.normalize()
# Ref: https://docs.python.org/3/library/decimal.html
return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()
def increment(n):
"""
Attempt to increment an integer (or a string that looks like an integer!)
e.g.
001 -> 002
2 -> 3
AB01 -> AB02
QQQ -> QQQ
"""
value = str(n).strip()
# Ignore empty strings
if not value:
return value
pattern = r"(.*?)(\d+)?$"
result = re.search(pattern, value)
# No match!
if result is None:
return value
groups = result.groups()
# If we cannot match the regex, then simply return the provided value
if len(groups) != 2:
return value
prefix, number = groups
# No number extracted? Simply return the prefix (without incrementing!)
if not number:
return prefix
# Record the width of the number
width = len(number)
try:
number = int(number) + 1
number = str(number)
except ValueError:
pass
number = number.zfill(width)
return prefix + number
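# Small usage sketch of the behaviour documented above (not called on import):
def _increment_examples():
    assert increment("001") == "002"   # zero-padding width is preserved
    assert increment("2") == "3"
    assert increment("AB01") == "AB02"
    assert increment("QQQ") == "QQQ"   # no trailing number, returned unchanged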
def decimal2string(d):
"""
Format a Decimal number as a string,
stripping out any trailing zeroes or decimal points.
Essentially make it look like a whole number if it is one.
Args:
d: A python Decimal object
Returns:
A string representation of the input number
"""
if type(d) is Decimal:
d = normalize(d)
try:
# Ensure that the provided string can actually be converted to a float
float(d)
except ValueError:
# Not a number
return str(d)
s = str(d)
# Return entire number if there is no decimal place
if '.' not in s:
return s
return s.rstrip("0").rstrip(".")
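# Small usage sketch of the behaviour documented above (not called on import):
def _decimal2string_examples():
    assert decimal2string(Decimal("3.000")) == "3"      # integral value loses the decimal point
    assert decimal2string(Decimal("1.2500")) == "1.25"  # trailing zeroes stripped
    assert decimal2string("not-a-number") == "not-a-number"  # non-numeric input returned as-is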
def decimal2money(d, currency=None):
"""
Format a Decimal number as Money
Args:
d: A python Decimal object
currency: Currency of the input amount, defaults to default currency in settings
Returns:
A Money object from the input(s)
"""
if not currency:
currency = currency_code_default()
return Money(d, currency)
def WrapWithQuotes(text, quote='"'):
""" Wrap the supplied text with quotes
Args:
text: Input text to wrap
quote: Quote character to use for wrapping (default = "")
Returns:
Supplied text wrapped in quote char
"""
if not text.startswith(quote):
text = quote + text
if not text.endswith(quote):
text = text + quote
return text
def MakeBarcode(object_name, object_pk, object_data=None, **kwargs):
""" Generate a string for a barcode. Adds some global InvenTree parameters.
Args:
object_name: string describing the object type e.g. 'StockItem'
object_pk: ID (Primary Key) of the object in the database
object_data: Python dict containing extra data which will be rendered to string (must only contain stringable values)
url (kwarg): if True, return a URL for the object (from object_data) instead of JSON
Returns:
json string of the supplied data plus some other data
"""
if object_data is None:
object_data = {}
url = kwargs.get('url', False)
brief = kwargs.get('brief', True)
data = {}
if url:
request = object_data.get('request', None)
item_url = object_data.get('item_url', None)
absolute_url = None
if request and item_url:
absolute_url = request.build_absolute_uri(item_url)
# Return URL (No JSON)
return absolute_url
if item_url:
# Return URL (No JSON)
return item_url
elif brief:
data[object_name] = object_pk
else:
data['tool'] = 'InvenTree'
data['version'] = InvenTree.version.inventreeVersion()
data['instance'] = InvenTree.version.inventreeInstanceName()
# Ensure PK is included
object_data['id'] = object_pk
data[object_name] = object_data
return json.dumps(data, sort_keys=True)
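# Small usage sketch (illustrative object name and primary key only):
def _make_barcode_example():
    # Brief mode (the default) embeds just the primary key; non-brief mode also
    # embeds 'tool', 'version' and 'instance' metadata plus the supplied data.
    assert MakeBarcode("stockitem", 123) == '{"stockitem": 123}'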
def GetExportFormats():
""" Return a list of allowable file formats for exporting data """
return [
'csv',
'tsv',
'xls',
'xlsx',
'json',
'yaml',
]
def DownloadFile(data, filename, content_type='application/text', inline=False):
"""
Create a dynamic file for the user to download.
Args:
data: Raw file data (string or bytes)
filename: Filename for the file download
content_type: Content type for the download
inline: Download "inline" or as attachment? (Default = attachment)
Return:
A StreamingHttpResponse object wrapping the supplied data
"""
filename = WrapWithQuotes(filename)
if type(data) == str:
wrapper = FileWrapper(io.StringIO(data))
else:
wrapper = FileWrapper(io.BytesIO(data))
response = StreamingHttpResponse(wrapper, content_type=content_type)
response['Content-Length'] = len(data)
disposition = "inline" if inline else "attachment"
response['Content-Disposition'] = f'{disposition}; filename={filename}'
return response
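# Hypothetical view-level usage sketch (filename and payload are illustrative):
def _example_csv_download():
    # String data is wrapped in StringIO, bytes in BytesIO, and the response is
    # served as an attachment unless inline=True.
    csv_data = "id,name\n1,Widget\n"
    return DownloadFile(csv_data, "example.csv", content_type="text/csv")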
def extract_serial_numbers(serials, expected_quantity, next_number: int):
"""
Attempt to extract serial numbers from an input string:
Requirements:
- Serial numbers can be either strings, or integers
- Serial numbers can be split by whitespace / newline / comma chars
- Serial numbers can be supplied as an inclusive range using hyphen char e.g. 10-20
- Serial numbers can be defined as ~ for getting the next available serial number
- Serial numbers can be supplied as <start>+ for getting all expected numbers starting from <start>
- Serial numbers can be supplied as <start>+<length> for getting <length> numbers starting from <start>
Args:
serials: input string with patterns
expected_quantity: The number of (unique) serial numbers we expect
next_number(int): the next possible serial number
"""
serials = serials.strip()
# fill in the next serial number into the serial
while '~' in serials:
serials = serials.replace('~', str(next_number), 1)
next_number += 1
# Split input string by whitespace or comma (,) characters
groups = re.split(r"[\s,]+", serials)
numbers = []
errors = []
# Helper function to check for duplicated numbers
def add_sn(sn):
# Attempt integer conversion first, so numerical strings are never stored
try:
sn = int(sn)
except ValueError:
pass
if sn in numbers:
errors.append(_('Duplicate serial: {sn}').format(sn=sn))
else:
numbers.append(sn)
try:
expected_quantity = int(expected_quantity)
except ValueError:
raise ValidationError([_("Invalid quantity provided")])
if len(serials) == 0:
raise ValidationError([_("Empty serial number string")])
# If the user has supplied the correct number of serials, don't process them for groups
# just add them so any duplicates (or future validations) are checked
if len(groups) == expected_quantity:
for group in groups:
add_sn(group)
if len(errors) > 0:
raise ValidationError(errors)
return numbers
for group in groups:
group = group.strip()
# Hyphen indicates a range of numbers
if '-' in group:
items = group.split('-')
if len(items) == 2 and all([i.isnumeric() for i in items]):
a = items[0].strip()
b = items[1].strip()
try:
a = int(a)
b = int(b)
if a < b:
for n in range(a, b + 1):
add_sn(n)
else:
errors.append(_("Invalid group range: {g}").format(g=group))
except ValueError:
errors.append(_("Invalid group: {g}").format(g=group))
continue
else:
# More than 2 hyphens or non-numeric group so add without interpolating
add_sn(group)
# plus signals either
# 1: 'start+': expected number of serials, starting at start
# 2: 'start+number': number of serials, starting at start
elif '+' in group:
items = group.split('+')
# case 1, 2
if len(items) == 2:
start = int(items[0])
# case 2
if bool(items[1]):
end = start + int(items[1]) + 1
# case 1
else:
end = start + (expected_quantity - len(numbers))
for n in range(start, end):
add_sn(n)
# no case
else:
errors.append(_("Invalid group sequence: {g}").format(g=group))
# At this point, we assume that the "group" is just a single serial value
elif group:
add_sn(group)
# No valid input group detected
else:
raise ValidationError(_(f"Invalid/no group {group}"))
if len(errors) > 0:
raise ValidationError(errors)
if len(numbers) == 0:
raise ValidationError([_("No serial numbers found")])
# The number of extracted serial numbers must match the expected quantity
if expected_quantity != len(numbers):
raise ValidationError([_("Number of unique serial numbers ({s}) must match quantity ({q})").format(s=len(numbers), q=expected_quantity)])
return numbers
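# Editor's note: illustrative examples only (not part of the original module).
# Assuming the Django/InvenTree context this helper normally runs in, calls of
# the following shape are expected to expand the documented patterns:
#   extract_serial_numbers("1-3, 7", expected_quantity=4, next_number=10)
#       -> [1, 2, 3, 7]
#   extract_serial_numbers("~, ~", expected_quantity=2, next_number=100)
#       -> [100, 101]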
def validateFilterString(value, model=None):
"""
Validate that a provided filter string looks like a list of comma-separated key=value pairs
These should nominally match to a valid database filter based on the model being filtered.
e.g. "category=6, IPN=12"
e.g. "part__name=widget"
The ReportTemplate class uses the filter string to work out which items a given report applies to.
For example, an acceptance test report template might only apply to stock items with a given IPN,
so the string could be set to:
filters = "IPN = ACME0001"
Returns a map of key:value pairs
"""
# Empty results map
results = {}
value = str(value).strip()
if not value or len(value) == 0:
return results
groups = value.split(',')
for group in groups:
group = group.strip()
pair = group.split('=')
if len(pair) != 2:
raise ValidationError(
"Invalid group: {g}".format(g=group)
)
k, v = pair
k = k.strip()
v = v.strip()
if not k or not v:
raise ValidationError(
"Invalid group: {g}".format(g=group)
)
results[k] = v
# If a model is provided, verify that the provided filters can be used against it
if model is not None:
try:
model.objects.filter(**results)
except FieldError as e:
raise ValidationError(
str(e),
)
return results
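# Editor's note: illustrative examples only (not part of the original module).
# With no model supplied, the database check is skipped and the parsed pairs
# are returned directly:
#   validateFilterString("category=6, IPN=12")  ->  {'category': '6', 'IPN': '12'}
#   validateFilterString("part__name=widget")   ->  {'part__name': 'widget'}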
def addUserPermission(user, permission):
"""
Shortcut function for adding a certain permission to a user.
"""
perm = Permission.objects.get(codename=permission)
user.user_permissions.add(perm)
def addUserPermissions(user, permissions):
"""
Shortcut function for adding multiple permissions to a user.
"""
for permission in permissions:
addUserPermission(user, permission)
def getMigrationFileNames(app):
"""
Return a list of all migration filenames for provided app
"""
local_dir = os.path.dirname(os.path.abspath(__file__))
migration_dir = os.path.join(local_dir, '..', app, 'migrations')
files = os.listdir(migration_dir)
# Regex pattern for migration files
pattern = r"^[\d]+_.*\.py$"
migration_files = []
for f in files:
if re.match(pattern, f):
migration_files.append(f)
return migration_files
def getOldestMigrationFile(app, exclude_extension=True, ignore_initial=True):
"""
Return the filename associated with the oldest migration
"""
oldest_num = -1
oldest_file = None
for f in getMigrationFileNames(app):
if ignore_initial and f.startswith('0001_initial'):
continue
num = int(f.split('_')[0])
if oldest_file is None or num < oldest_num:
oldest_num = num
oldest_file = f
if exclude_extension:
oldest_file = oldest_file.replace('.py', '')
return oldest_file
def getNewestMigrationFile(app, exclude_extension=True):
"""
Return the filename associated with the newest migration
"""
newest_file = None
newest_num = -1
for f in getMigrationFileNames(app):
num = int(f.split('_')[0])
if newest_file is None or num > newest_num:
newest_num = num
newest_file = f
if exclude_extension:
newest_file = newest_file.replace('.py', '')
return newest_file
def clean_decimal(number):
""" Clean-up decimal value """
# Check if empty
if number is None or number == '' or number == 0:
return Decimal(0)
# Convert to string and remove spaces
number = str(number).replace(' ', '')
# Guess what type of decimal and thousands separators are used
count_comma = number.count(',')
count_point = number.count('.')
if count_comma == 1:
# Comma is used as decimal separator
if count_point > 0:
# Points are used as thousands separators: remove them
number = number.replace('.', '')
# Replace decimal separator with point
number = number.replace(',', '.')
elif count_point == 1:
# Point is used as decimal separator
if count_comma > 0:
# Commas are used as thousands separators: remove them
number = number.replace(',', '')
# Convert to Decimal type
try:
clean_number = Decimal(number)
except InvalidOperation:
# Number cannot be converted to Decimal (eg. a string containing letters)
return Decimal(0)
return clean_number.quantize(Decimal(1)) if clean_number == clean_number.to_integral() else clean_number.normalize()
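# Editor's note: illustrative examples only (not part of the original module),
# showing how the separator heuristic above normalises common inputs:
#   clean_decimal("1.234,56")  -> Decimal('1234.56')   (point as thousands separator)
#   clean_decimal("1234,56")   -> Decimal('1234.56')   (comma as decimal separator)
#   clean_decimal("100")       -> Decimal('100')
#   clean_decimal(None)        -> Decimal('0')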
def get_objectreference(obj, type_ref: str = 'content_type', object_ref: str = 'object_id'):
"""lookup method for the GenericForeignKey fields
Attributes:
- obj: object that will be resolved
- type_ref: field name for the contenttype field in the model
- object_ref: field name for the object id in the model
Example implementation in the serializer:
```
target = serializers.SerializerMethodField()
def get_target(self, obj):
return get_objectreference(obj, 'target_content_type', 'target_object_id')
```
The method name must always be the name of the field prefixed by 'get_'
"""
model_cls = getattr(obj, type_ref)
obj_id = getattr(obj, object_ref)
# check if references are set -> return nothing if not
if model_cls is None or obj_id is None:
return None
# resolve referenced data into objects
model_cls = model_cls.model_class()
item = model_cls.objects.get(id=obj_id)
url_fnc = getattr(item, 'get_absolute_url', None)
# create output
ret = {}
if url_fnc:
ret['link'] = url_fnc()
return {
'name': str(item),
'model': str(model_cls._meta.verbose_name),
**ret
}
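# Editor's note: a hypothetical result shape for the lookup above (the field
# and model names below are invented purely for illustration):
#   get_objectreference(obj, 'target_content_type', 'target_object_id')
#       -> {'name': 'Widget 123', 'model': 'part', 'link': '/part/123/'}
#   # 'link' is only included when the resolved item defines get_absolute_url()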
def inheritors(cls):
"""
Return all classes that are subclasses from the supplied cls
"""
subcls = set()
work = [cls]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subcls:
subcls.add(child)
work.append(child)
return subcls
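# Editor's note: a minimal, self-contained sketch of inheritors(); the demo
# class names below are hypothetical and exist only for illustration:
#   class Base: ...
#   class Child(Base): ...
#   class GrandChild(Child): ...
#   inheritors(Base)  ->  {Child, GrandChild}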
| 25.985969
| 145
| 0.614048
|
8a19e4ca4c2dccf606eee42b827bac1c41aa214e
| 6,111
|
py
|
Python
|
logtools/_sumstat.py
|
AlainLich/logtools
|
584e575d25f0ebcd7a51cc6d5aefb530f80f6d22
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:48:18.000Z
|
2022-03-09T05:50:13.000Z
|
logtools/_sumstat.py
|
AlainLich/logtools
|
584e575d25f0ebcd7a51cc6d5aefb530f80f6d22
|
[
"Apache-2.0"
] | null | null | null |
logtools/_sumstat.py
|
AlainLich/logtools
|
584e575d25f0ebcd7a51cc6d5aefb530f80f6d22
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ........................................ NOTICE
#
# This file has been derived and modified from a source licensed under Apache Version 2.0.
# See files NOTICE and README.md for more details.
#
# ........................................ ******
"""
logtools._sumstat
Generates summary statistics
for a given logfile of the form:
<count> <value>
logfile is expected to be pre-sorted by count.
"""
import sys
import locale
import logging
from textwrap import dedent
from optparse import OptionParser
from prettytable import PrettyTable
from ._config import interpolate_config, AttrDict
__all__ = ['sumstat_parse_args', 'sumstat', 'sumstat_main']
# problematic in my environment
if False:
locale.setlocale(locale.LC_ALL, "")
def arith_mean(values):
"""Computes the arithmetic mean of a list of numbers"""
return sum(values, 0.0) / len(values)
def sumstat_parse_args():
usage = dedent("""
%prog -d <delimiter> [--reverse]
Generates summary statistics
for a given logfile of the form:
<count> <value>
logfile is expected to be pre-sorted by count.
""")
parser = OptionParser(usage=usage)
parser.add_option("-r", "--reverse", dest="reverse", action="store_true",
help="Reverse ordering of entries (toggle between increasing/decreasing sort order") # noqa
parser.add_option("-d", "--delimiter", dest="delimiter",
help="Delimiter character for field-separation")
parser.add_option("-P", "--profile", dest="profile", default='qps',
help="Configuration profile (section in configuration file)") # noqa
options, args = parser.parse_args()
options.delimiter = interpolate_config(options.delimiter, options.profile, 'delimiter', default=' ') # noqa
options.reverse = interpolate_config(options.reverse, options.profile, 'reverse', type=bool, default=False) # noqa
return AttrDict(options.__dict__), args
def sumstat(fh, delimiter, reverse=False, **kwargs):
counts = []
N, M = 0, 0
for line in map(lambda x: x.strip(), fh):
        try:
            row = line.split(delimiter, 1)
            # perform the int() conversion inside the try block, since that is
            # the call that can actually raise ValueError for a malformed line
            count = int(row[0])
        except ValueError:
            logging.exception("Exception while trying to parse log line: '%s', skipping", line)  # noqa
        else:
            counts.append(count)
            M += 1
            N += count
if reverse is True:
logging.info("Reversing row ordering")
counts.reverse()
avg = arith_mean(counts)
minv, maxv = min(counts), max(counts)
# Percentiles
    # Percentile indices must be integers (Python 3 '/' yields floats)
    percentiles_idx = [M // 10, M // 4, M // 2, 3 * M // 4, 9 * M // 10, 95 * M // 100, 99 * M // 100, 999 * M // 1000]  # noqa
    percentiles = ["%d (Idx: %s)" % (counts[x], locale.format('%d', x, True))
                   for x in percentiles_idx]
S10th, S25th, S40th, S50th, S75th, S90th = None, None, None, None, None, None # noqa
accum = 0.
for idx, c in enumerate(reversed(counts)):
accum += c
if not S10th and accum/N >= 0.1:
S10th = idx+1
elif not S25th and accum/N >= 0.25:
S25th = idx+1
elif not S40th and accum/N >= 0.4:
S40th = idx+1
elif not S50th and accum/N >= 0.5:
S50th = idx+1
elif not S75th and accum/N >= 0.75:
S75th = idx+1
elif not S90th and accum/N >= 0.9:
S90th = idx+1
return {
"M": M,
"N": N,
"avg": avg,
"min": minv,
"max": maxv,
"percentiles": percentiles,
"cover": [S10th, S25th, S40th, S50th, S75th, S90th]
}
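# Editor's note: an illustrative sketch (not part of the original module).
# Given pre-sorted "<count> <value>" input lines such as:
#   120 /index.html
#   45 /about.html
#   3 /contact.html
# sumstat(fh, delimiter=' ') would report N=168 (total volume), M=3 (rows),
# min=3, max=120, avg=56.0, plus the percentile and coverage figures.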
def sumstat_main():
"""Console entry-point"""
options, args = sumstat_parse_args()
stat_dict = sumstat(fh=sys.stdin, *args, **options)
table = PrettyTable([
"Num. Samples / Cumulative Value (N)",
"Num. Values (M)",
"Min. Value",
"Max. Value",
"Average Value",
"10th Percentile",
"25th Percentile",
"50th Percentile",
"75th Percentile",
"90th Percentile",
"95th Percentile",
"99th Percentile",
"99.9th Percentile"
])
    table.add_row(
        [locale.format('%d', x, True) for x in (stat_dict['N'], stat_dict['M'])] +  # noqa
        [stat_dict['min'], stat_dict['max'], stat_dict['avg']] +
        list(stat_dict['percentiles'])
    )
print(table)
S10th, S25th, S40th, S50th, S75th, S90th = stat_dict['cover']
M = stat_dict['M']
print("10%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S10th, True), 100.*S10th/M))
print("25%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S25th, True), 100.*S25th/M))
print("40%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S40th, True), 100.*S40th/M))
print("50%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S50th, True), 100.*S50th/M))
print("75%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S75th, True), 100.*S75th/M))
print("90%% of Sample Volume is encompassed within the top %s (%.4f%%) sample values" % # noqa
(locale.format("%d", S90th, True), 100.*S90th/M))
return 0
| 33.032432
| 119
| 0.595811
|
01c268b39d087816d8b38c7538765a19cd9dc0e4
| 466
|
py
|
Python
|
applications/popart/conformer_asr/logging_util.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 260
|
2019-11-18T01:50:00.000Z
|
2022-03-28T23:08:53.000Z
|
applications/popart/conformer_asr/logging_util.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 27
|
2020-01-28T23:07:50.000Z
|
2022-02-14T15:37:06.000Z
|
applications/popart/conformer_asr/logging_util.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 56
|
2019-11-18T02:13:12.000Z
|
2022-02-28T14:36:09.000Z
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import logging
import sys
def get_basic_logger(name):
lh = logging.StreamHandler(sys.stdout)
lh.setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[lh])
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
return logger
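# Editor's note: a minimal usage sketch (not part of the original file).
if __name__ == "__main__":
    # The demo logger name is arbitrary; INFO messages are written to stdout
    demo_logger = get_basic_logger("logging_util_demo")
    demo_logger.info("basic logger configured")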
| 31.066667
| 85
| 0.60515
|
4c2850d4a979f65195a195bf2ddc134dbfa94050
| 819
|
py
|
Python
|
drf_generators/templates/modelviewset.py
|
quangvinh1225/drf-generators
|
4f096c89fe768939c8fe838ac0c29b24c6d11f01
|
[
"MIT"
] | null | null | null |
drf_generators/templates/modelviewset.py
|
quangvinh1225/drf-generators
|
4f096c89fe768939c8fe838ac0c29b24c6d11f01
|
[
"MIT"
] | null | null | null |
drf_generators/templates/modelviewset.py
|
quangvinh1225/drf-generators
|
4f096c89fe768939c8fe838ac0c29b24c6d11f01
|
[
"MIT"
] | null | null | null |
__all__ = ['MODEL_URL', 'MODEL_VIEW']
MODEL_URL = """from rest_framework.routers import SimpleRouter
from {{ app }} import views
router = SimpleRouter()
{% for model in models %}
router.register(r'{{ model | lower }}', views.{{ model }}ViewSet){% endfor %}
urlpatterns = router.urls
"""
MODEL_VIEW = """from rest_framework.viewsets import ModelViewSet
from django_filters.rest_framework import DjangoFilterBackend
from {{ app }}.serializers import {{ serializers|join:', ' }}
from {{ app }}.models import {{ models|join:', ' }}
{% for model in models %}
class {{ model }}ViewSet(ModelViewSet):
__doc__ = ModelViewSet.__doc__
queryset = {{ model }}.objects.all()
serializer_class = {{ model }}Serializer
filter_backends = [DjangoFilterBackend]
filter_fields = '__all__'
{% endfor %}"""
| 26.419355
| 77
| 0.687424
|
c56f7fddc4a11b4cf73a3f073901c218e59f11de
| 1,073
|
py
|
Python
|
mypoll_site/polls/migrations/0001_initial.py
|
theekeen/pollsite_project
|
82a058202372c56cbef780a94954ef5f87695378
|
[
"MIT"
] | null | null | null |
mypoll_site/polls/migrations/0001_initial.py
|
theekeen/pollsite_project
|
82a058202372c56cbef780a94954ef5f87695378
|
[
"MIT"
] | null | null | null |
mypoll_site/polls/migrations/0001_initial.py
|
theekeen/pollsite_project
|
82a058202372c56cbef780a94954ef5f87695378
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-07-12 10:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
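# Editor's note: the models implied by this initial migration, sketched for
# illustration only (the actual polls/models.py is not part of this file):
#   class Question(models.Model):
#       question_text = models.CharField(max_length=200)
#       pub_date = models.DateTimeField('date published')
#   class Choice(models.Model):
#       question = models.ForeignKey(Question, on_delete=models.CASCADE)
#       choice_text = models.CharField(max_length=200)
#       votes = models.IntegerField(default=0)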
| 32.515152
| 114
| 0.586207
|
85711a6058dc7fbeeefc9362cef22ccd92a48905
| 3,121
|
py
|
Python
|
latex-knapsack/code/knapsack_test.py
|
shevkunov/workout
|
d36b84f4341d36a6c45553a1c7fa7d147370fba8
|
[
"BSD-3-Clause"
] | null | null | null |
latex-knapsack/code/knapsack_test.py
|
shevkunov/workout
|
d36b84f4341d36a6c45553a1c7fa7d147370fba8
|
[
"BSD-3-Clause"
] | null | null | null |
latex-knapsack/code/knapsack_test.py
|
shevkunov/workout
|
d36b84f4341d36a6c45553a1c7fa7d147370fba8
|
[
"BSD-3-Clause"
] | null | null | null |
from knapsack import *
def knapsack_stress_test_nw(n, w, eps=None, maxcost=10, mincost=1,
algo=knapsack_pseudopolynomial_nc,
checker=knapsack_brutal,
cluster_test=False,
float_numbers=False,
log=None,
print_log=True):
assert not (cluster_test and float_numbers)
if cluster_test:
costs = []
length = len(mincost)
for i in range(length):
step_length = (
n // length if i + 1 != length else n - len(costs)
)
costs += list(np.random.randint(low=mincost[i],
high=maxcost[i],
size=step_length))
costs = np.array(costs)
weights = np.random.randint(low=1, high=w + 2, size=n)
elif not float_numbers:
costs = np.random.randint(low=mincost, high=maxcost, size=n)
weights = np.random.randint(low=1, high=w + 2, size=n)
else:
costs = np.random.uniform(low=0, high=maxcost, size=n) + 1e-9
weights = np.random.uniform(low=0, high=w + 2, size=n) + 1e-9
if eps is None:
c1, ans1 = algo(w, weights, costs)
else:
c1, ans1 = algo(w, weights, costs, eps=eps)
c2, ans2 = checker(w, weights, costs)
if print_log:
print("===TEST===")
print("W = ", w, ", eps = ", eps)
print("weights = ", weights)
print("costs = ", costs)
print(c1, weights[ans1])
print(c2, weights[ans2])
assert costs[ans2].sum() == c2
assert costs[ans1].sum() == c1
if eps is None:
assert c1 == c2
else:
        # assert c1 <= c2  TODO: add tolerance to comparisons
        assert (c2 == c1) or (c1 / c2 >= eps)
    if log is not None:
log.append([w, eps, weights, costs, c1, ans1, c2, ans2])
def knapsack_stress_test(n_range=range(0, 10),
w_range=range(0, 10),
i_count = 10,
maxcost=10,
mincost=1,
eps=None,
algo=knapsack_pseudopolynomial_nc,
checker=knapsack_brutal,
cluster_test=False,
float_numbers=False,
log=None,
print_log=True):
for n in n_range:
for w in w_range:
for i in range(i_count):
knapsack_stress_test_nw(n, w,
maxcost=maxcost,
mincost=mincost,
eps=eps,
algo=algo,
checker=checker,
cluster_test=cluster_test,
float_numbers=float_numbers,
log=log,
print_log=print_log)
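# Editor's note: an illustrative invocation (not part of the original file),
# assuming the `knapsack` module imported above provides the default algo and
# checker named in the signatures:
#   log = []
#   knapsack_stress_test(n_range=range(1, 6), w_range=range(1, 6), i_count=3,
#                        print_log=False, log=log)
#   # each log entry holds [w, eps, weights, costs, c1, ans1, c2, ans2]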
| 37.154762
| 69
| 0.441846
|
f71db52a6273627b9fdb578a9b437983757a0692
| 8,369
|
py
|
Python
|
src/sagemaker_tensorflow_container/training.py
|
Freakawho/sagemaker-tensorflow-training-toolkit-master
|
f37c7d85600beb5461788db8c471b66c25beff8f
|
[
"Apache-2.0"
] | 156
|
2018-07-10T13:37:16.000Z
|
2020-06-04T13:40:17.000Z
|
src/sagemaker_tensorflow_container/training.py
|
Freakawho/sagemaker-tensorflow-training-toolkit-master
|
f37c7d85600beb5461788db8c471b66c25beff8f
|
[
"Apache-2.0"
] | 166
|
2018-07-09T09:03:26.000Z
|
2020-06-10T23:27:52.000Z
|
src/sagemaker_tensorflow_container/training.py
|
Freakawho/sagemaker-tensorflow-training-toolkit-master
|
f37c7d85600beb5461788db8c471b66c25beff8f
|
[
"Apache-2.0"
] | 129
|
2018-07-04T20:00:29.000Z
|
2020-06-10T02:47:54.000Z
|
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import logging
import multiprocessing
import os
import subprocess
import time
from sagemaker_training import entry_point, environment, mapping, runner
import tensorflow as tf
from sagemaker_tensorflow_container import s3_utils
logger = logging.getLogger(__name__)
SAGEMAKER_PARAMETER_SERVER_ENABLED = "sagemaker_parameter_server_enabled"
MODEL_DIR = "/opt/ml/model"
def _is_host_master(hosts, current_host):
return current_host == hosts[0]
def _build_tf_config(hosts, current_host, ps_task=False):
"""Builds a dictionary containing cluster information based on number of hosts and number of
parameter servers.
Args:
hosts (list[str]): List of host names in the cluster
current_host (str): Current host name
ps_task (bool): Set to True if this config is built for a parameter server process
(default: False)
Returns:
dict[str: dict]: A dictionary describing the cluster setup for distributed training.
For more information regarding TF_CONFIG:
https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details
"""
# Assign the first host as the master. Rest of the hosts if any will be worker hosts.
# The first ps_num hosts will also have a parameter task assign to them.
masters = hosts[:1]
workers = hosts[1:]
ps = hosts if len(hosts) > 1 else None
def host_addresses(hosts, port=2222):
return ["{}:{}".format(host, port) for host in hosts]
tf_config = {"cluster": {"master": host_addresses(masters)}, "environment": "cloud"}
if ps:
tf_config["cluster"]["ps"] = host_addresses(ps, port="2223")
if workers:
tf_config["cluster"]["worker"] = host_addresses(workers)
if ps_task:
if ps is None:
raise ValueError(
"Cannot have a ps task if there are no parameter servers in the cluster"
)
task_type = "ps"
task_index = ps.index(current_host)
elif _is_host_master(hosts, current_host):
task_type = "master"
task_index = 0
else:
task_type = "worker"
task_index = workers.index(current_host)
tf_config["task"] = {"index": task_index, "type": task_type}
return tf_config
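# Editor's note: an illustrative result (not part of the original file). For
# hosts=["algo-1", "algo-2"] and current_host="algo-1", _build_tf_config is
# expected to return:
#   {"cluster": {"master": ["algo-1:2222"],
#                "ps": ["algo-1:2223", "algo-2:2223"],
#                "worker": ["algo-2:2222"]},
#    "environment": "cloud",
#    "task": {"index": 0, "type": "master"}}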
def _run_ps(env, cluster):
logger.info("Running distributed training job with parameter servers")
cluster_spec = tf.train.ClusterSpec(cluster)
task_index = env.hosts.index(env.current_host)
# Force parameter server to run on cpu. Running multiple TensorFlow processes on the same
# GPU is not safe:
# https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu
no_gpu_config = tf.ConfigProto(device_count={"GPU": 0})
server = tf.train.Server(
cluster_spec, job_name="ps", task_index=task_index, config=no_gpu_config
)
multiprocessing.Process(target=lambda: server.join()).start()
def _run_worker(env, cmd_args, tf_config):
env_vars = env.to_env_vars()
env_vars["TF_CONFIG"] = json.dumps(tf_config)
entry_point.run(
uri=env.module_dir,
user_entry_point=env.user_entry_point,
args=cmd_args,
env_vars=env_vars,
capture_error=True,
)
def _wait_until_master_is_down(master):
while True:
try:
subprocess.check_call(
["curl", "{}:2222".format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
logger.info("master {} is still up, waiting for it to exit".format(master))
time.sleep(10)
except subprocess.CalledProcessError:
logger.info("master {} is down, stopping parameter server".format(master))
return
def train(env, cmd_args):
"""Get training job environment from env and run the training job.
Args:
env (sagemaker_training.env.TrainingEnv): Instance of TrainingEnv class
"""
parameter_server_enabled = env.additional_framework_parameters.get(
SAGEMAKER_PARAMETER_SERVER_ENABLED, False
)
if len(env.hosts) > 1 and parameter_server_enabled:
tf_config = _build_tf_config(hosts=env.hosts, current_host=env.current_host)
logger.info("Running distributed training job with parameter servers")
logger.info("Launching parameter server process")
_run_ps(env, tf_config["cluster"])
logger.info("Launching worker process")
_run_worker(env, cmd_args, tf_config)
if not _is_host_master(env.hosts, env.current_host):
_wait_until_master_is_down(env.hosts[0])
else:
mpi_enabled = env.additional_framework_parameters.get("sagemaker_mpi_enabled")
if mpi_enabled:
runner_type = runner.MPIRunnerType
else:
runner_type = runner.ProcessRunnerType
entry_point.run(
uri=env.module_dir,
user_entry_point=env.user_entry_point,
args=cmd_args,
env_vars=env.to_env_vars(),
capture_error=True,
runner_type=runner_type,
)
def _log_model_missing_warning(model_dir):
pb_file_exists = False
file_exists = False
for dirpath, dirnames, filenames in os.walk(model_dir):
if filenames:
file_exists = True
for f in filenames:
if "saved_model.pb" in f or "saved_model.pbtxt" in f:
pb_file_exists = True
path, direct_parent_dir = os.path.split(dirpath)
if not str.isdigit(direct_parent_dir):
logger.warn(
"Your model will NOT be servable with SageMaker TensorFlow Serving containers. "
'The SavedModel bundle is under directory "{}", not a numeric name.'.format(
direct_parent_dir
)
)
if not file_exists:
logger.warn(
"No model artifact is saved under path {}."
" Your training job will not save any model files to S3.\n"
"For details of how to construct your training script see:\n"
"https://sagemaker.readthedocs.io/en/stable/using_tf.html#adapting-your-local-tensorflow-script".format(
model_dir
)
)
elif not pb_file_exists:
logger.warn(
"Your model will NOT be servable with SageMaker TensorFlow Serving container. "
"The model artifact was not saved in the TensorFlow SavedModel directory structure:\n"
"https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory"
)
def _model_dir_with_training_job(model_dir, job_name):
if model_dir and model_dir.startswith("/opt/ml"):
return model_dir
else:
return "{}/{}/model".format(model_dir, job_name)
def main():
"""Training entry point
"""
hyperparameters = environment.read_hyperparameters()
env = environment.Environment(hyperparameters=hyperparameters)
user_hyperparameters = env.hyperparameters
# If the training job is part of the multiple training jobs for tuning, we need to append the training job name to
# model_dir in case they read from/write to the same object
if "_tuning_objective_metric" in hyperparameters:
model_dir = _model_dir_with_training_job(hyperparameters.get("model_dir"), env.job_name)
logger.info("Appending the training job name to model_dir: {}".format(model_dir))
user_hyperparameters["model_dir"] = model_dir
s3_utils.configure(user_hyperparameters.get("model_dir"), os.environ.get("SAGEMAKER_REGION"))
train(env, mapping.to_cmd_args(user_hyperparameters))
_log_model_missing_warning(MODEL_DIR)
| 36.229437
| 118
| 0.674394
|