| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
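
The rows below follow this schema, one record per source file, with the file text in the content column and simple per-file statistics in the last three columns. As a rough illustration of how those statistics can be used to filter records, here is a minimal sketch that assumes the rows are available as plain dicts keyed by the column names above; the loading step and the threshold values are assumptions, not part of the dataset.

```python
from typing import Iterable, Iterator


def keep_likely_handwritten(rows: Iterable[dict]) -> Iterator[dict]:
    """Yield Python rows that look like ordinary hand-written source files."""
    for row in rows:
        if row["lang"] != "Python":
            continue
        # Very long lines or extreme alphanumeric fractions often indicate
        # minified or generated content; the cut-offs here are illustrative.
        if row["max_line_length"] > 1000:
            continue
        if not (0.25 <= row["alphanum_fraction"] <= 0.90):
            continue
        # max_stars_count is nullable; treat a missing value as zero stars.
        if (row["max_stars_count"] or 0) >= 1:
            yield row
```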

hexsha a1d0a9e739d5a5dc64a86da53c971b24b0c6fcbb | size 690 | ext py | lang Python
max_stars: 2020/Day-25/Combo_Breaker/solve_1.py | sreekesari-vangeepuram/aoc-2020 | 645531be0208affe042ac0328105b9ef3cfc9dbf | ["MIT"] | count 1 | 2021-07-09T07:56:14.000Z .. 2021-07-09T07:56:14.000Z
max_issues: 2020/Day-25/Combo_Breaker/solve_1.py | sreekesari-vangeepuram/adventofcode | 645531be0208affe042ac0328105b9ef3cfc9dbf | ["MIT"] | count null | dates null
max_forks: 2020/Day-25/Combo_Breaker/solve_1.py | sreekesari-vangeepuram/adventofcode | 645531be0208affe042ac0328105b9ef3cfc9dbf | ["MIT"] | count null | dates null

#!/usr/bin/env python
# card_pubkey, door_pubkey = map(int, open("input.txt").read().strip().split("\n"))
# card_pubkey, door_pubkey = 13316116, 13651422 # Obtained from `sample.txt`
# prime = 20201227
# ****************************************************** #
# Hard-coding numbers instead of assigning them to variables #
# shaves a little runtime by avoiding repeated name lookups  #
# ****************************************************** #
subnum, enckey = 7, 13651422 # door_pubkey
while subnum != 13316116:  # card_pubkey
enckey = (enckey * 13651422) % 20201227 # Encryption key
subnum = (subnum * 7) % 20201227 # Subject number
print(f"Encryption key: {enckey}")
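
The loop above steps the subject number and the door key together until the subject number matches the card's public key, at which point the running product is the shared encryption key. A minimal equivalent sketch that first finds the loop size and then uses Python's three-argument pow; the numeric values are the sample keys quoted in the comments above, so treat them as placeholders.

```python
card_pubkey, door_pubkey, prime = 13316116, 13651422, 20201227

# Brute-force the card's secret loop size: card_pubkey == 7**loop_size % prime.
loop_size, subject = 0, 1
while subject != card_pubkey:
    subject = (subject * 7) % prime
    loop_size += 1

# Modular exponentiation then yields the shared encryption key directly.
encryption_key = pow(door_pubkey, loop_size, prime)
print(f"Encryption key: {encryption_key}")
```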

avg_line_length 38.333333 | max_line_length 83 | alphanum_fraction 0.572464

hexsha 79db868f495fc646639a46aa8462b89b08957630 | size 1,071 | ext py | lang Python
max_stars: ckanext-hdx_service_checker/ckanext/hdx_service_checker/plugin.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | ["Apache-2.0"] | count 58 | 2015-01-11T09:05:15.000Z .. 2022-03-17T23:44:07.000Z
max_issues: ckanext-hdx_service_checker/ckanext/hdx_service_checker/plugin.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | ["Apache-2.0"] | count 1,467 | 2015-01-01T16:47:44.000Z .. 2022-02-28T16:51:20.000Z
max_forks: ckanext-hdx_service_checker/ckanext/hdx_service_checker/plugin.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | ["Apache-2.0"] | count 17 | 2015-05-06T14:04:21.000Z .. 2021-11-11T19:58:16.000Z

import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.hdx_service_checker.actions.get as actions
import ckanext.hdx_service_checker.actions.authorize as authorize
import ckanext.hdx_service_checker.views.run_checks as run_checks
class HdxServiceCheckerPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.IActions)
plugins.implements(plugins.IAuthFunctions)
plugins.implements(plugins.IBlueprint)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
# toolkit.add_public_directory(config_, 'public')
# toolkit.add_resource('fanstatic', 'hdx_service_checker')
#IActions
def get_actions(self):
return {
'run_checks': actions.run_checks
}
# IAuthFunctions
def get_auth_functions(self):
return {
'run_checks': authorize.run_checks,
}
# IBlueprint
def get_blueprint(self):
return run_checks.hdx_run_checks
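
Because the plugin registers run_checks through IActions and IAuthFunctions, other extension code can reach it via CKAN's action layer rather than importing this module directly. A rough sketch of such a call, assuming a standard CKAN context; the user name and the empty data dict are placeholders.

```python
import ckan.plugins.toolkit as toolkit


def trigger_service_checks(user_name):
    context = {"user": user_name}
    # check_access consults the auth function registered above and raises
    # toolkit.NotAuthorized if the user may not run the checks.
    toolkit.check_access("run_checks", context, {})
    # get_action resolves the action function registered in get_actions().
    return toolkit.get_action("run_checks")(context, {})
```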

avg_line_length 28.945946 | max_line_length 66 | alphanum_fraction 0.728291

hexsha 433355584f4c2c8f126dd107c5f7775ae9ee3e1b | size 513 | ext py | lang Python
max_stars: workshops/migrations/0123_auto_20161218_0538.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | ["MIT"] | count 1 | 2015-04-03T20:26:56.000Z .. 2015-04-03T20:26:56.000Z
max_issues: workshops/migrations/0123_auto_20161218_0538.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | ["MIT"] | count 1 | 2019-12-13T11:22:47.000Z .. 2019-12-13T11:22:47.000Z
max_forks: workshops/migrations/0123_auto_20161218_0538.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | ["MIT"] | count null | dates null

# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2016-12-18 10:38
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workshops', '0122_auto_20161101_1441'),
]
operations = [
migrations.AlterField(
model_name='award',
name='awarded',
field=models.DateField(default=datetime.date.today),
),
]

avg_line_length 22.304348 | max_line_length 64 | alphanum_fraction 0.647173

hexsha 35ef3bff44aed1459841d876209c7f5e16a551ed | size 1,694 | ext py | lang Python
max_stars: doc/integrations/pytorch/setup.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | ["Apache-2.0"] | count 1 | 2020-09-27T05:00:06.000Z .. 2020-09-27T05:00:06.000Z
max_issues: doc/integrations/pytorch/setup.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | ["Apache-2.0"] | count 1 | 2021-08-04T11:17:39.000Z .. 2021-08-04T11:17:39.000Z
max_forks: doc/integrations/pytorch/setup.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | ["Apache-2.0"] | count 1 | 2021-05-03T13:27:14.000Z .. 2021-05-03T13:27:14.000Z

#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from setuptools import setup, find_packages
VERSION = '1.2.0' # if you update, update parlai/__init__.py too!
if sys.version_info < (3, 7):
sys.exit('Sorry, Python >=3.7 is required for ParlAI.')
with open('README.md', encoding="utf8") as f:
# strip the header and badges etc
readme = f.read().split('--------------------')[-1]
with open('requirements.txt') as f:
reqs = []
for line in f:
line = line.strip()
reqs.append(line.split('==')[0])
if __name__ == '__main__':
setup(
name='parlai',
version=VERSION,
description='Unified platform for dialogue research.',
long_description=readme,
long_description_content_type='text/markdown',
url='http://parl.ai/',
python_requires='>=3.7',
packages=find_packages(exclude=('data', 'docs', 'tests', 'parlai_internal*')),
install_requires=reqs,
include_package_data=True,
package_data={'': ['*.txt', '*.md', '*.opt']},
entry_points={
"flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"],
"console_scripts": ["parlai=parlai.__main__:main"],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Natural Language :: English",
],
)
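
The requirements loop above keeps only the bare package name from each pinned line before handing the list to install_requires. A tiny illustration of that transformation with made-up pins:

```python
pinned = ["torch==1.4.0", "requests==2.22.0", "tqdm"]
names = [line.strip().split("==")[0] for line in pinned]
assert names == ["torch", "requests", "tqdm"]
```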

avg_line_length 32.576923 | max_line_length 87 | alphanum_fraction 0.582645

hexsha c8685702a89531b79bdbe5c3cbd4c7341001b001 | size 2,807 | ext py | lang Python
max_stars: src/pygerber/renderer/arc_util_mixin.py | Argmaster/pygerber | 4761a5aa60ff1d11512fb44aabd103246d9a3019 | ["MIT"] | count 3 | 2021-08-30T07:07:59.000Z .. 2021-09-29T22:14:43.000Z
max_issues: src/pygerber/renderer/arc_util_mixin.py | Argmaster/pygerber | 4761a5aa60ff1d11512fb44aabd103246d9a3019 | ["MIT"] | count 1 | 2021-09-26T13:28:49.000Z .. 2021-09-26T13:28:49.000Z
max_forks: src/pygerber/renderer/arc_util_mixin.py | Argmaster/pygerber | 4761a5aa60ff1d11512fb44aabd103246d9a3019 | ["MIT"] | count null | dates null

# -*- coding: utf-8 -*-
from __future__ import annotations
from math import cos, degrees, radians, sin, tau
from pygerber.mathclasses import Vector2D, angle_from_zero
from pygerber.renderer.spec import ArcSpec
class ArcUtilMixin:
@property
def isCCW(self):
return self.renderer.isCCW()
def get_begin_end_angles(self, spec: ArcSpec):
begin_relative = spec.begin - spec.center
end_relative = spec.end - spec.center
begin_angle = degrees(angle_from_zero(begin_relative))
end_angle = degrees(angle_from_zero(end_relative))
if begin_angle >= end_angle:
end_angle += 360
return begin_angle, end_angle
def get_arc_points(self, spec: ArcSpec, is_ccw: bool) -> Vector2D:
begin_angle, end_angle = self.get_begin_end_angles(spec)
radius = spec.get_radius()
x, y = self.get_arc_co_functions(radius)
delta = self.get_arc_traverse_step_angle(begin_angle, end_angle, radius)
if is_ccw:
return self.__get_arc_points_ccw(end_angle, begin_angle, x, spec, y, delta)
else:
return self.__get_arc_points_cw(end_angle, begin_angle, x, spec, y, delta)
def get_arc_traverse_step_angle(self, begin_angle, end_angle, radius):
        raise NotImplementedError(
            "get_arc_traverse_step_angle() has to be implemented in a subclass."
        )
def __get_arc_points_ccw(self, end_angle, begin_angle, x, spec, y, delta):
end_relative_angle = end_angle - begin_angle
angle_offset = begin_angle
current_angle = 0
while current_angle <= end_relative_angle:
yield Vector2D(
x(current_angle + angle_offset) + spec.center.x,
y(current_angle + angle_offset) + spec.center.y,
)
current_angle += delta
def __get_arc_points_cw(self, end_angle, begin_angle, x, spec, y, delta):
end_relative_angle = end_angle - begin_angle
angle_offset = begin_angle
current_angle = 360
while current_angle >= end_relative_angle:
yield Vector2D(
x(current_angle + angle_offset) + spec.center.x,
y(current_angle + angle_offset) + spec.center.y,
)
current_angle -= delta
@staticmethod
def get_arc_co_functions(radius):
def x(alpha):
return radius * cos(radians(alpha))
def y(alpha):
return radius * sin(radians(alpha))
return x, y
@staticmethod
def get_arc_length(radius) -> float:
return tau * radius
@staticmethod
def get_arc_ratio(relative_angle):
return relative_angle / 360
@staticmethod
def get_relative_angle(begin_angle, end_angle):
return end_angle - begin_angle
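
Both point generators above sample the parametric circle returned by get_arc_co_functions. A small self-contained check of that parametrisation, restating the math from the static method without importing pygerber:

```python
from math import cos, isclose, radians, sin

radius = 10.0


def x(alpha):  # mirrors the x co-function from get_arc_co_functions
    return radius * cos(radians(alpha))


def y(alpha):  # mirrors the y co-function
    return radius * sin(radians(alpha))


# 0 degrees lies on the +X axis, 90 degrees on the +Y axis.
assert isclose(x(0.0), radius) and isclose(y(0.0), 0.0, abs_tol=1e-9)
assert isclose(x(90.0), 0.0, abs_tol=1e-9) and isclose(y(90.0), radius)
```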

avg_line_length 34.231707 | max_line_length 87 | alphanum_fraction 0.652298

hexsha 23db8ac90743a7d88fedbec6801eb22b36b1a9d7 | size 16,379 | ext py | lang Python
max_stars: tensorflow/python/ops/gradients_impl.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | ["Apache-2.0"] | count null | dates null
max_issues: tensorflow/python/ops/gradients_impl.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | ["Apache-2.0"] | count null | dates null
max_forks: tensorflow/python/ops/gradients_impl.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | ["Apache-2.0"] | count null | dates null

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import manip_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["gradients"])
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
```python
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
```
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
```python
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
```
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
All integer tensors are considered constant with respect to all `xs`, as if
they were included in `stop_gradients`.
`unconnected_gradients` determines the value returned for each x in xs if it
is unconnected in the graph to ys. By default this is None to safeguard
  against errors. Mathematically these gradients are zero which can be requested
using the `'zero'` option. `tf.UnconnectedGradients` provides the
following options and behaviors:
```python
a = tf.ones([1, 2])
b = tf.ones([3, 1])
  g1 = tf.gradients([b], [a], unconnected_gradients='none')
sess.run(g1) # [None]
g2 = tf.gradients([b], [a], unconnected_gradients='zero')
sess.run(g2) # [array([[0., 0.]], dtype=float32)]
```
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
unconnected_gradients: Optional. Specifies the gradient value returned when
the given input tensors are unconnected. Accepted values are constants
defined in the class `tf.UnconnectedGradients` and the default value is
`none`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
# Creating the gradient graph for control flow mutates Operations.
# _mutation_lock ensures a Session.run call cannot occur between creating and
# mutating new ops.
# pylint: disable=protected-access
with ops.get_default_graph()._mutation_lock():
return gradients_util._GradientsHelper(
ys, xs, grad_ys, name, colocate_gradients_with_ops,
gate_gradients, aggregation_method, stop_gradients,
unconnected_gradients)
# pylint: enable=protected-access
@tf_export("gradients", v1=[])
def gradients_v2(ys, # pylint: disable=invalid-name
xs,
grad_ys=None,
name="gradients",
gate_gradients=False,
aggregation_method=None,
stop_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
```python
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
```
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
```python
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
```
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
All integer tensors are considered constant with respect to all `xs`, as if
they were included in `stop_gradients`.
`unconnected_gradients` determines the value returned for each x in xs if it
is unconnected in the graph to ys. By default this is None to safeguard
against errors. Mathematically these gradients are zero which can be requested
using the `'zero'` option. `tf.UnconnectedGradients` provides the
following options and behaviors:
```python
a = tf.ones([1, 2])
b = tf.ones([3, 1])
  g1 = tf.gradients([b], [a], unconnected_gradients='none')
sess.run(g1) # [None]
g2 = tf.gradients([b], [a], unconnected_gradients='zero')
sess.run(g2) # [array([[0., 0.]], dtype=float32)]
```
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
unconnected_gradients: Optional. Specifies the gradient value returned when
the given input tensors are unconnected. Accepted values are constants
defined in the class `tf.UnconnectedGradients` and the default value is
`none`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
# Creating the gradient graph for control flow mutates Operations.
# _mutation_lock ensures a Session.run call cannot occur between creating and
# mutating new ops.
# pylint: disable=protected-access
with ops.get_default_graph()._mutation_lock():
return gradients_util._GradientsHelper(
ys, xs, grad_ys, name, True, gate_gradients,
aggregation_method, stop_gradients,
unconnected_gradients)
# pylint: enable=protected-access
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
@tf_export("hessians")
def hessians(ys,
xs,
name="hessians",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
"""
xs = gradients_util._AsList(xs) # pylint: disable=protected-access
kwargs = {
"colocate_gradients_with_ops": colocate_gradients_with_ops,
"gate_gradients": gate_gradients,
"aggregation_method": aggregation_method
}
# Compute first-order derivatives and iterate for each x in xs.
hessians = []
_gradients = gradients(ys, xs, **kwargs)
for gradient, x in zip(_gradients, xs):
# change shape to one-dimension without graph branching
gradient = array_ops.reshape(gradient, [-1])
# Declare an iterator and tensor array loop variables for the gradients.
n = array_ops.size(x)
loop_vars = [
array_ops.constant(0, dtypes.int32),
tensor_array_ops.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, hessian = control_flow_ops.while_loop(
lambda j, _: j < n,
lambda j, result: (j + 1,
result.write(j, gradients(gradient[j], x)[0])),
loop_vars
)
_shape = array_ops.shape(x)
_reshaped_hessian = array_ops.reshape(hessian.stack(),
array_ops.concat((_shape, _shape), 0))
hessians.append(_reshaped_hessian)
return hessians
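
The _hessian_vector_product docstring above motivates Hessian-vector products with a quadratic form. As a purely numeric sanity check of that kind of identity, independent of TensorFlow: for y = x^T A x the Hessian is A + A^T, so multiplying it by v must agree with a central finite difference of the gradient. A NumPy-only sketch with arbitrary shapes and values:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4))
x = rng.normal(size=4)
v = rng.normal(size=4)


def grad(z):
    # Gradient of y = z^T A z.
    return (A + A.T) @ z


hvp = (A + A.T) @ v  # exact Hessian-vector product for this quadratic
eps = 1e-6
finite_diff = (grad(x + eps * v) - grad(x - eps * v)) / (2 * eps)
assert np.allclose(hvp, finite_diff, atol=1e-5)
```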

avg_line_length 41.361111 | max_line_length 84 | alphanum_fraction 0.707674

hexsha dddb2768657f9c78531a9ac7a7303bc1043bf7fd | size 23,280 | ext py | lang Python
max_stars: Tea/vendored/aiohttp/web_protocol.py | yndu13/tea-python | 6a23fbf6c1dd3b60103251b4740cbfac4de2de7e | ["Apache-2.0"] | count null | dates null
max_issues: Tea/vendored/aiohttp/web_protocol.py | yndu13/tea-python | 6a23fbf6c1dd3b60103251b4740cbfac4de2de7e | ["Apache-2.0"] | count null | dates null
max_forks: Tea/vendored/aiohttp/web_protocol.py | yndu13/tea-python | 6a23fbf6c1dd3b60103251b4740cbfac4de2de7e | ["Apache-2.0"] | count null | dates null

import asyncio
import asyncio.streams
import traceback
import warnings
from collections import deque
from contextlib import suppress
from html import escape as html_escape
from http import HTTPStatus
from logging import Logger
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, Type, cast
import Tea.vendored.aiohttp.yarl as yarl
from .abc import AbstractAccessLogger, AbstractStreamWriter
from .base_protocol import BaseProtocol
from .helpers import CeilTimeout, current_task
from .http import (
HttpProcessingError,
HttpRequestParser,
HttpVersion10,
RawRequestMessage,
StreamWriter,
)
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .tcp_helpers import tcp_keepalive
from .web_exceptions import HTTPException
from .web_log import AccessLogger
from .web_request import BaseRequest
from .web_response import Response, StreamResponse
__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")
if TYPE_CHECKING: # pragma: no cover
from .web_server import Server
_RequestFactory = Callable[
[
RawRequestMessage,
StreamReader,
"RequestHandler",
AbstractStreamWriter,
"asyncio.Task[None]",
],
BaseRequest,
]
_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
ERROR = RawRequestMessage(
"UNKNOWN", "/", HttpVersion10, {}, {}, True, False, False, False, yarl.URL("/")
)
class RequestPayloadError(Exception):
"""Payload parsing error."""
class PayloadAccessError(Exception):
"""Payload was accessed after response was sent."""
class RequestHandler(BaseProtocol):
"""HTTP protocol implementation.
    RequestHandler handles an incoming HTTP request. It reads the request line,
    request headers and request payload, then calls the handle_request() method.
    By default it always returns a 404 response.
    RequestHandler also handles errors in the incoming request, such as a bad
    status line, bad headers or an incomplete payload. If any error occurs, the
    connection gets closed.
:param keepalive_timeout: number of seconds before closing
keep-alive connection
:type keepalive_timeout: int or None
:param bool tcp_keepalive: TCP keep-alive is on, default is on
:param bool debug: enable debug mode
:param logger: custom logger object
:type logger: aiohttp.log.server_logger
:param access_log_class: custom class for access_logger
:type access_log_class: aiohttp.abc.AbstractAccessLogger
:param access_log: custom logging object
:type access_log: aiohttp.log.server_logger
:param str access_log_format: access log format string
:param loop: Optional event loop
:param int max_line_size: Optional maximum header line size
:param int max_field_size: Optional maximum header field size
:param int max_headers: Optional maximum header size
"""
KEEPALIVE_RESCHEDULE_DELAY = 1
__slots__ = (
"_request_count",
"_keepalive",
"_manager",
"_request_handler",
"_request_factory",
"_tcp_keepalive",
"_keepalive_time",
"_keepalive_handle",
"_keepalive_timeout",
"_lingering_time",
"_messages",
"_message_tail",
"_waiter",
"_error_handler",
"_task_handler",
"_upgrade",
"_payload_parser",
"_request_parser",
"_reading_paused",
"logger",
"debug",
"access_log",
"access_logger",
"_close",
"_force_close",
"_current_request",
)
def __init__(
self,
manager: "Server",
*,
loop: asyncio.AbstractEventLoop,
keepalive_timeout: float = 75.0, # NGINX default is 75 secs
tcp_keepalive: bool = True,
logger: Logger = server_logger,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
access_log: Logger = access_logger,
access_log_format: str = AccessLogger.LOG_FORMAT,
debug: bool = False,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
lingering_time: float = 10.0,
read_bufsize: int = 2 ** 16,
):
super().__init__(loop)
self._request_count = 0
self._keepalive = False
self._current_request = None # type: Optional[BaseRequest]
self._manager = manager # type: Optional[Server]
self._request_handler = (
manager.request_handler
) # type: Optional[_RequestHandler]
self._request_factory = (
manager.request_factory
) # type: Optional[_RequestFactory]
self._tcp_keepalive = tcp_keepalive
# placeholder to be replaced on keepalive timeout setup
self._keepalive_time = 0.0
self._keepalive_handle = None # type: Optional[asyncio.Handle]
self._keepalive_timeout = keepalive_timeout
self._lingering_time = float(lingering_time)
self._messages = deque() # type: Any # Python 3.5 has no typing.Deque
self._message_tail = b""
self._waiter = None # type: Optional[asyncio.Future[None]]
self._error_handler = None # type: Optional[asyncio.Task[None]]
self._task_handler = None # type: Optional[asyncio.Task[None]]
self._upgrade = False
self._payload_parser = None # type: Any
self._request_parser = HttpRequestParser(
self,
loop,
read_bufsize,
max_line_size=max_line_size,
max_field_size=max_field_size,
max_headers=max_headers,
payload_exception=RequestPayloadError,
) # type: Optional[HttpRequestParser]
self.logger = logger
self.debug = debug
self.access_log = access_log
if access_log:
self.access_logger = access_log_class(
access_log, access_log_format
) # type: Optional[AbstractAccessLogger]
else:
self.access_logger = None
self._close = False
self._force_close = False
def __repr__(self) -> str:
return "<{} {}>".format(
self.__class__.__name__,
"connected" if self.transport is not None else "disconnected",
)
@property
def keepalive_timeout(self) -> float:
return self._keepalive_timeout
    async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
        """Worker process is about to exit; we need to clean up everything and
        stop accepting requests. This is especially important for keep-alive
        connections."""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._waiter:
self._waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with CeilTimeout(timeout, loop=self._loop):
if self._error_handler is not None and not self._error_handler.done():
await self._error_handler
if self._current_request is not None:
self._current_request._cancel(asyncio.CancelledError())
if self._task_handler is not None and not self._task_handler.done():
await self._task_handler
# force-close non-idle handler
if self._task_handler is not None:
self._task_handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
super().connection_made(transport)
real_transport = cast(asyncio.Transport, transport)
if self._tcp_keepalive:
tcp_keepalive(real_transport)
self._task_handler = self._loop.create_task(self.start())
assert self._manager is not None
self._manager.connection_made(self, real_transport)
def connection_lost(self, exc: Optional[BaseException]) -> None:
if self._manager is None:
return
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
self._manager = None
self._force_close = True
self._request_factory = None
self._request_handler = None
self._request_parser = None
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._current_request is not None:
if exc is None:
exc = ConnectionResetError("Connection lost")
self._current_request._cancel(exc)
if self._error_handler is not None:
self._error_handler.cancel()
if self._task_handler is not None:
self._task_handler.cancel()
if self._waiter is not None:
self._waiter.cancel()
self._task_handler = None
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
def set_parser(self, parser: Any) -> None:
# Actual type is WebReader
assert self._payload_parser is None
self._payload_parser = parser
if self._message_tail:
self._payload_parser.feed_data(self._message_tail)
self._message_tail = b""
def eof_received(self) -> None:
pass
def data_received(self, data: bytes) -> None:
if self._force_close or self._close:
return
# parse http messages
if self._payload_parser is None and not self._upgrade:
assert self._request_parser is not None
try:
messages, upgraded, tail = self._request_parser.feed_data(data)
except HttpProcessingError as exc:
# something happened during parsing
self._error_handler = self._loop.create_task(
self.handle_parse_error(
StreamWriter(self, self._loop), 400, exc, exc.message
)
)
self.close()
except Exception as exc:
# 500: internal error
self._error_handler = self._loop.create_task(
self.handle_parse_error(StreamWriter(self, self._loop), 500, exc)
)
self.close()
else:
if messages:
# sometimes the parser returns no messages
for (msg, payload) in messages:
self._request_count += 1
self._messages.append((msg, payload))
waiter = self._waiter
if waiter is not None:
if not waiter.done():
# don't set result twice
waiter.set_result(None)
self._upgrade = upgraded
if upgraded and tail:
self._message_tail = tail
# no parser, just store
elif self._payload_parser is None and self._upgrade and data:
self._message_tail += data
# feed payload
elif data:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self.close()
def keep_alive(self, val: bool) -> None:
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keepalive = val
if self._keepalive_handle:
self._keepalive_handle.cancel()
self._keepalive_handle = None
    def close(self) -> None:
        """Stop accepting new pipelining messages and close the
        connection when handlers are done processing messages."""
self._close = True
if self._waiter:
self._waiter.cancel()
def force_close(self) -> None:
"""Force close connection"""
self._force_close = True
if self._waiter:
self._waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def log_access(
self, request: BaseRequest, response: StreamResponse, time: float
) -> None:
if self.access_logger is not None:
self.access_logger.log(request, response, self._loop.time() - time)
def log_debug(self, *args: Any, **kw: Any) -> None:
if self.debug:
self.logger.debug(*args, **kw)
def log_exception(self, *args: Any, **kw: Any) -> None:
self.logger.exception(*args, **kw)
def _process_keepalive(self) -> None:
if self._force_close or not self._keepalive:
return
next = self._keepalive_time + self._keepalive_timeout
# handler in idle state
if self._waiter:
if self._loop.time() > next:
self.force_close()
return
# not all request handlers are done,
# reschedule itself to next second
self._keepalive_handle = self._loop.call_later(
self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive
)
async def _handle_request(
self,
request: BaseRequest,
start_time: float,
) -> Tuple[StreamResponse, bool]:
assert self._request_handler is not None
try:
try:
self._current_request = request
resp = await self._request_handler(request)
finally:
self._current_request = None
except HTTPException as exc:
resp = Response(
status=exc.status, reason=exc.reason, text=exc.text, headers=exc.headers
)
reset = await self.finish_response(request, resp, start_time)
except asyncio.CancelledError:
raise
except asyncio.TimeoutError as exc:
self.log_debug("Request handler timed out.", exc_info=exc)
resp = self.handle_error(request, 504)
reset = await self.finish_response(request, resp, start_time)
except Exception as exc:
resp = self.handle_error(request, 500, exc)
reset = await self.finish_response(request, resp, start_time)
else:
reset = await self.finish_response(request, resp, start_time)
return resp, reset
async def start(self) -> None:
"""Process incoming request.
It reads request line, request headers and request payload, then
calls handle_request() method. Subclass has to override
handle_request(). start() handles various exceptions in request
or response handling. Connection is being closed always unless
keep_alive(True) specified.
"""
loop = self._loop
handler = self._task_handler
assert handler is not None
manager = self._manager
assert manager is not None
keepalive_timeout = self._keepalive_timeout
resp = None
assert self._request_factory is not None
assert self._request_handler is not None
while not self._force_close:
if not self._messages:
try:
# wait for next request
self._waiter = loop.create_future()
await self._waiter
except asyncio.CancelledError:
break
finally:
self._waiter = None
message, payload = self._messages.popleft()
start = loop.time()
manager.requests_count += 1
writer = StreamWriter(self, loop)
request = self._request_factory(message, payload, self, writer, handler)
try:
# a new task is used for copy context vars (#3406)
task = self._loop.create_task(self._handle_request(request, start))
try:
resp, reset = await task
except (asyncio.CancelledError, ConnectionError):
self.log_debug("Ignored premature client disconnection")
break
# Deprecation warning (See #2415)
if getattr(resp, "__http_exception__", False):
warnings.warn(
"returning HTTPException object is deprecated "
"(#2415) and will be removed, "
"please raise the exception instead",
DeprecationWarning,
)
# Drop the processed task from asyncio.Task.all_tasks() early
del task
if reset:
self.log_debug("Ignored premature client disconnection 2")
break
# notify server about keep-alive
self._keepalive = bool(resp.keep_alive)
# check payload
if not payload.is_eof():
lingering_time = self._lingering_time
if not self._force_close and lingering_time:
self.log_debug(
"Start lingering close timer for %s sec.", lingering_time
)
now = loop.time()
end_t = now + lingering_time
with suppress(asyncio.TimeoutError, asyncio.CancelledError):
while not payload.is_eof() and now < end_t:
with CeilTimeout(end_t - now, loop=loop):
# read and ignore
await payload.readany()
now = loop.time()
# if payload still uncompleted
if not payload.is_eof() and not self._force_close:
self.log_debug("Uncompleted request.")
self.close()
payload.set_exception(PayloadAccessError())
except asyncio.CancelledError:
self.log_debug("Ignored premature client disconnection ")
break
except RuntimeError as exc:
if self.debug:
self.log_exception("Unhandled runtime exception", exc_info=exc)
self.force_close()
except Exception as exc:
self.log_exception("Unhandled exception", exc_info=exc)
self.force_close()
finally:
if self.transport is None and resp is not None:
self.log_debug("Ignored premature client disconnection.")
elif not self._force_close:
if self._keepalive and not self._close:
# start keep-alive timer
if keepalive_timeout is not None:
now = self._loop.time()
self._keepalive_time = now
if self._keepalive_handle is None:
self._keepalive_handle = loop.call_at(
now + keepalive_timeout, self._process_keepalive
)
else:
break
# remove handler, close transport if no handlers left
if not self._force_close:
self._task_handler = None
if self.transport is not None and self._error_handler is None:
self.transport.close()
async def finish_response(
self, request: BaseRequest, resp: StreamResponse, start_time: float
) -> bool:
"""
Prepare the response and write_eof, then log access. This has to
be called within the context of any exception so the access logger
can get exception information. Returns True if the client disconnects
prematurely.
"""
if self._request_parser is not None:
self._request_parser.set_upgraded(False)
self._upgrade = False
if self._message_tail:
self._request_parser.feed_data(self._message_tail)
self._message_tail = b""
try:
prepare_meth = resp.prepare
except AttributeError:
if resp is None:
raise RuntimeError("Missing return " "statement on request handler")
else:
raise RuntimeError(
"Web-handler should return "
"a response instance, "
"got {!r}".format(resp)
)
try:
await prepare_meth(request)
await resp.write_eof()
except ConnectionError:
self.log_access(request, resp, start_time)
return True
else:
self.log_access(request, resp, start_time)
return False
def handle_error(
self,
request: BaseRequest,
status: int = 500,
exc: Optional[BaseException] = None,
message: Optional[str] = None,
) -> StreamResponse:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
self.log_exception("Error handling request", exc_info=exc)
ct = "text/plain"
if status == HTTPStatus.INTERNAL_SERVER_ERROR:
title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
tb = None
if self.debug:
with suppress(Exception):
tb = traceback.format_exc()
if "text/html" in request.headers.get("Accept", ""):
if tb:
tb = html_escape(tb)
msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
message = (
"<html><head>"
"<title>{title}</title>"
"</head><body>\n<h1>{title}</h1>"
"\n{msg}\n</body></html>\n"
).format(title=title, msg=msg)
ct = "text/html"
else:
if tb:
msg = tb
message = title + "\n\n" + msg
resp = Response(status=status, text=message, content_type=ct)
resp.force_close()
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close()
return resp
async def handle_parse_error(
self,
writer: AbstractStreamWriter,
status: int,
exc: Optional[BaseException] = None,
message: Optional[str] = None,
) -> None:
task = current_task()
assert task is not None
request = BaseRequest(
ERROR, EMPTY_PAYLOAD, self, writer, task, self._loop # type: ignore
)
resp = self.handle_error(request, status, exc, message)
await resp.prepare(request)
await resp.write_eof()
if self.transport is not None:
self.transport.close()
self._error_handler = None
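
RequestHandler is the per-connection protocol that the aiohttp web server drives internally; application code does not normally instantiate it. For orientation, a minimal server using upstream aiohttp's public web API, which exercises this machinery indirectly; whether the vendored Tea.vendored.aiohttp copy exposes the same public surface is an assumption, so this sketch targets the upstream package.

```python
from aiohttp import web


async def hello(request: web.Request) -> web.Response:
    # Each request that RequestHandler.start() pulls off the connection
    # ends up dispatched to a handler like this one.
    return web.Response(text="hello")


app = web.Application()
app.router.add_get("/", hello)

if __name__ == "__main__":
    web.run_app(app, port=8080)  # serves until interrupted
```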

avg_line_length 34.850299 | max_line_length 88 | alphanum_fraction 0.582861

hexsha 66b5763ae851aa07864ad8c740f3bb241f113001 | size 305 | ext py | lang Python
max_stars: src/sentry_wxwork/forms.py | liangxg787/sentry-for-wxwork | 192123496b1e5c4fa61de0c1c0742e786826ff5c | ["MIT"] | count null | dates null
max_issues: src/sentry_wxwork/forms.py | liangxg787/sentry-for-wxwork | 192123496b1e5c4fa61de0c1c0742e786826ff5c | ["MIT"] | count 1 | 2022-01-28T03:46:17.000Z .. 2022-01-28T03:46:17.000Z
max_forks: src/sentry_wxwork/forms.py | liangxg787/sentry-for-wxwork | 192123496b1e5c4fa61de0c1c0742e786826ff5c | ["MIT"] | count null | dates null

# -*- coding: UTF-8 -*-
"""
@Time : 2022/1/20 2:34 PM
@Author : xiaoguangliang
@File : forms.py
@Project : sentry-for-wxwork
"""
from django import forms
class WxWorkOptionsForm(forms.Form):
access_token = forms.CharField(
max_length=255,
help_text='WxWork robot access_token'
)

avg_line_length 19.0625 | max_line_length 45 | alphanum_fraction 0.662295

hexsha 3631465d918cb98dc5eb3be2da7388a4b389cdff | size 1,014 | ext py | lang Python
max_stars: src/Python/Tutorial/Basic/rgbd_tum.py | cnheider/Open3D | eb0267dee7c50a824d4f94e9bd0f18dccbd3eb5d | ["MIT"] | count 1 | 2019-01-26T05:41:10.000Z .. 2019-01-26T05:41:10.000Z
max_issues: src/Python/Tutorial/Basic/rgbd_tum.py | fate3439/open3D | 2698eac71993ad135acc17b5115d700735517197 | ["MIT"] | count null | dates null
max_forks: src/Python/Tutorial/Basic/rgbd_tum.py | fate3439/open3D | 2698eac71993ad135acc17b5115d700735517197 | ["MIT"] | count 1 | 2021-01-31T07:02:30.000Z .. 2021-01-31T07:02:30.000Z

# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
#conda install pillow matplotlib
from open3d import *
import matplotlib.pyplot as plt
if __name__ == "__main__":
print("Read TUM dataset")
color_raw = read_image("../../TestData/RGBD/other_formats/TUM_color.png")
depth_raw = read_image("../../TestData/RGBD/other_formats/TUM_depth.png")
    rgbd_image = create_rgbd_image_from_tum_format(color_raw, depth_raw)
print(rgbd_image)
plt.subplot(1, 2, 1)
plt.title('TUM grayscale image')
plt.imshow(rgbd_image.color)
plt.subplot(1, 2, 2)
plt.title('TUM depth image')
plt.imshow(rgbd_image.depth)
plt.show()
pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic(
PinholeCameraIntrinsicParameters.PrimeSenseDefault))
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
draw_geometries([pcd])
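
The final transform call flips the cloud, presumably because the pinhole-camera convention points +y down and +z forward while the viewer expects +y up. A tiny NumPy check of what that 4x4 matrix does to a homogeneous point, independent of Open3D:

```python
import numpy as np

flip = np.array([[1, 0, 0, 0],
                 [0, -1, 0, 0],
                 [0, 0, -1, 0],
                 [0, 0, 0, 1]], dtype=float)

point = np.array([0.5, 1.0, 2.0, 1.0])  # homogeneous [x, y, z, 1]
assert np.allclose(flip @ point, [0.5, -1.0, -2.0, 1.0])
```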

avg_line_length 36.214286 | max_line_length 80 | alphanum_fraction 0.698225

hexsha 06108a962903f6503895af7355f92731c41bd052 | size 400 | ext py | lang Python
max_stars: plugins/addtodb.py | mrcentimetre/gpytranslator-bot | d3a77a58e3e0a8ff098f16dd74af2559c242e5f4 | ["MIT"] | count 14 | 2021-03-05T19:35:29.000Z .. 2021-11-08T09:52:18.000Z
max_issues: plugins/addtodb.py | telegrambotdev/gpytranslator-bot | a762b16da45ed2e8bc27f6f36a821b796a44951a | ["MIT"] | count 7 | 2021-03-07T02:32:28.000Z .. 2021-11-14T21:04:54.000Z
max_forks: plugins/addtodb.py | telegrambotdev/gpytranslator-bot | a762b16da45ed2e8bc27f6f36a821b796a44951a | ["MIT"] | count 19 | 2021-03-06T03:27:54.000Z .. 2021-12-01T15:17:43.000Z

from pyrogram import Client, filters
from pyrogram.types import Message
from db import functions as db
@Client.on_message(filters.private, group=-1)
async def check_chat(bot: Client, message: Message):
chat_id = message.chat.id
check_if_chat_exists = await db.chat_exists(chat_id)
if not check_if_chat_exists:
await db.add_chat(chat_id)
await db.set_lang(chat_id, "en")

avg_line_length 30.769231 | max_line_length 56 | alphanum_fraction 0.7475

hexsha 3d30e892e3a81ef8299a341cee65a3491ab93f47 | size 15,844 | ext py | lang Python
max_stars: hctiws.py | zms21/hctiws | a416e42a18625d2c69477932db582c661df0c954 | ["MIT"] | count 1 | 2018-05-07T07:57:53.000Z .. 2018-05-07T07:57:53.000Z
max_issues: hctiws.py | zms21/hctiws | a416e42a18625d2c69477932db582c661df0c954 | ["MIT"] | count null | dates null
max_forks: hctiws.py | zms21/hctiws | a416e42a18625d2c69477932db582c661df0c954 | ["MIT"] | count null | dates null

import sys
import os
import csv
import toml
import xlrd
from PIL import Image, ImageFont, ImageDraw
def find_file(filename, style_dir=""):
"""Finding file in possible directories"""
tmp_filename = os.path.join(INPUT_DIR, filename)
if os.path.exists(tmp_filename):
return tmp_filename
tmp_filename = os.path.join(style_dir, filename)
if os.path.exists(tmp_filename):
return tmp_filename
tmp_filename = os.path.join(os.path.dirname(
os.path.dirname(os.getcwd())), filename)
if os.path.exists(tmp_filename):
return tmp_filename
raise NameError("File not found")
def get_style(style_name):
"""Getting the style information"""
style_filename = os.path.join("config", style_name, "layout.")
if os.path.exists(style_filename + "toml"):
style_filename += "toml"
elif os.path.exists(style_filename + "ini"):
style_filename += "ini"
else:
raise NameError("Layout file not found")
return toml.load(style_filename)
def get_text_size(text, font, size, offset=0):
"""Getting the proper size in point of the text"""
tmp_size = size[1]
tmp_font = ImageFont.truetype(font, tmp_size)
# while tmp_font.getsize(text)[0] <= size[0] and \
# tmp_font.getsize(tmp_text)[1] <= size[1] / (1 - offset):
# tmp_size += 1
# tmp_font = ImageFont.truetype(font, tmp_size)
#tmp_size -= 1
#tmp_font = ImageFont.truetype(font, tmp_size)
#print (text, " ", tmp_size, " ", size)
while tmp_font.getsize(text)[0] > size[0]:
if tmp_size == 1:
break
tmp_size -= 1
tmp_font = ImageFont.truetype(font, tmp_size)
return [tmp_font, tmp_size]
def draw_text(s_img, text, font, color, pos, size, h_align="left", v_align="top", offset=0):
"""Drawing the text to the image"""
new_img = s_img
try:
font_loc = find_file(font)
except NameError:
font_loc = font
tmp_font = get_text_size(text, font_loc, size, offset)[0]
text_size = [tmp_font.getsize(text)[0],
tmp_font.getsize(text)[1] * (1 - offset)]
tmp_pos = [pos[0], pos[1] - tmp_font.getsize(text)[1] * offset]
if h_align == "center":
tmp_pos[0] += (size[0] - text_size[0]) / 2
elif h_align == "right":
tmp_pos[0] += size[0] - text_size[0]
if v_align == "center":
tmp_pos[1] += (size[1] - text_size[1]) / 2
elif v_align == "bottom":
tmp_pos[1] += size[1] - text_size[1]
text += " " #for Source Han Sans
#if all(ord(c) < 128 for c in text): #for Source Han Sans
# tmp_pos[1] -= 2
ImageDraw.Draw(new_img).text(tmp_pos, text, fill=color, font=tmp_font)
#print (tmp_size," ",text_size," ", size[0],"*",size[1])
return new_img
def generate_text(s_img, item_list, index, para_list):
"""Generating image of 'text' type"""
#print(index, ",", para_list["position"])
new_img = draw_text(s_img, item_list[index],
para_list["font"], para_list["color"], para_list["position"],
[para_list["width"], para_list["height"]],
para_list["horizontal_align"],
para_list["vertical_align"], para_list["offset"])
return [new_img, index + 1]
def generate_colortext(s_img, item_list, index, para_list):
"""Generating image of 'colortext' type"""
new_img = draw_text(s_img, item_list[index + 1],
para_list["font"], item_list[index], para_list["position"],
[para_list["width"], para_list["height"]],
para_list["horizontal_align"],
para_list["vertical_align"], para_list["offset"])
return [new_img, index + 2]
def generate_vertitext(s_img, item_list, index, para_list):
"""Generating image of 'vertitext' type"""
text = item_list[index]
text_len = len(text)
[tmp_font, tmp_size] = get_text_size(text[0], para_list["font"],
[para_list["width"], int(para_list["height"] /
text_len)],
para_list["offset"])
for i in text[1:]:
tmp_fontset = get_text_size(i, para_list["font"],
[para_list["width"], int(para_list["height"] /
text_len)],
para_list["offset"])
if tmp_fontset[1] >= tmp_size:
[tmp_font, tmp_size] = tmp_fontset
text_size = [0, 0]
v_step = []
for i in text:
single_size = [tmp_font.getsize(i)[0],
tmp_font.getsize(i)[1] * (1 - para_list["offset"])]
text_size[0] = max(text_size[0], single_size[0])
text_size[1] += single_size[1]
v_step.append(single_size[1])
if i != text[-1]:
text_size[1] += para_list["space"]
v_step[-1] += para_list["space"]
if para_list["vertical_align"] == "center":
cur_v_pos = int((para_list["height"] - text_size[1]) / 2)
elif para_list["vertical_align"] == "bottom":
cur_v_pos = para_list["height"] - text_size[1]
else:
cur_v_pos = 0
cur_v_pos += para_list["position"][1]
for i in range(text_len):
new_img = draw_text(s_img, text[i], para_list["font"], para_list["color"],
[para_list["position"][0], int(cur_v_pos)],
[para_list["width"], int(v_step[i])],
para_list["horizontal_align"], "center",
para_list["offset"])
cur_v_pos += v_step[i]
return [new_img, index + 1]
def generate_doubletext_nl(s_img, item_list, index, para_list):
"""Generating image of 'doubletext_nl' type"""
single_height = int((para_list["height"] - para_list["space"])/2)
new_img = draw_text(s_img, item_list[index],
para_list["font"], para_list["color"], para_list["position"],
[para_list["width"],
single_height], para_list["horizontal_align"],
para_list["vertical_align"], para_list["offset"])
new_img = draw_text(s_img, item_list[index + 1],
para_list["font"], para_list["color"],
[para_list["position"][0], para_list["position"]
[1] + single_height + para_list["space"]],
[para_list["width"],
single_height], para_list["horizontal_align"],
para_list["vertical_align"], para_list["offset"])
return [new_img, index + 2]
def generate_doubletext(s_img, item_list, index, para_list):
"""Generating image of 'doubletext' type"""
if item_list[index + 1] == "":
return [generate_text(s_img, item_list, index, para_list)[0], index + 2]
i_size = [para_list["width"], para_list["height"]]
[major_font, major_size] = get_text_size(item_list[index], para_list["font"],
i_size, para_list["offset"])
major_text_size = major_font.getsize(item_list[index])
[minor_font, minor_size] = get_text_size(item_list[index + 1], para_list["font"],
[i_size[0] - major_text_size[0] - para_list["space"],
i_size[1]], para_list["offset"])
while minor_size < para_list["minimum_point"] or \
major_size - minor_size > para_list["maximum_diff"]:
major_size -= 1
major_font = ImageFont.truetype(para_list["font"], major_size)
major_text_size = major_font.getsize(item_list[index])
[minor_font, minor_size] = get_text_size(item_list[index + 1], para_list["font"],
[i_size[0] - major_text_size[0] -
para_list["space"], i_size[1]],
para_list["offset"])
if major_size - minor_size <= para_list["minimum_diff"]:
minor_size = max(para_list["minimum_point"],
major_size - para_list["minimum_diff"])
minor_font = major_font = ImageFont.truetype(para_list["font"], minor_size)
minor_text_size = minor_font.getsize(item_list[index + 1])
new_img = draw_text(s_img, item_list[index],
para_list["font"], para_list["color"], para_list["position"],
[major_text_size[0], para_list["height"]], "left",
para_list["vertical_align"], para_list["offset"])
minor_pos = para_list["position"].copy()
minor_pos[0] += major_text_size[0] + para_list["space"]
minor_textsize = [minor_text_size[0], para_list["height"]].copy()
new_img2 = draw_text(new_img, item_list[index + 1],
para_list["font"], para_list["color"], minor_pos,
minor_textsize, para_list["horizontal_align_right"],
para_list["vertical_align"], para_list["offset"])
#print (major_size,", ",minor_size,", ",item_list[index])
return [new_img2, index + 2]
def generate_figure(s_img, item_list, index, para_list):
"""Generating image of 'figure' type"""
try:
if item_list[index] == "":
return [s_img, index + 1]
except KeyError:
return [s_img, index + 1]
#print(para_list, ", ", item_list[index])
new_img = s_img
try:
imgfile = find_file(item_list[index])
except NameError:
imgfile = para_list["figure_alias"][item_list[index]]
fig = Image.open(imgfile)
if para_list["keep_aspect_ratio"] == 0:
fig = fig.resize((para_list["width"], para_list["height"]))
pos = para_list["position"]
else:
fig.thumbnail((para_list["width"], para_list["height"]))
pos = para_list["position"].copy()
if para_list["horizontal_align"] == "center":
pos[0] += int((para_list["width"] - fig.size[0]) / 2)
elif para_list["horizontal_align"] == "right":
pos[0] += para_list["width"] - fig.size[0]
if para_list["vertical_align"] == "center":
pos[1] += int((para_list["height"] - fig.size[1]) / 2)
elif para_list["vertical_align"] == "bottom":
pos[1] += para_list["height"] - fig.size[1]
new_img.paste(fig, pos, fig)
return [new_img, index + 1]
# def generate_figuregroup(s_img, item_list, index, para_list):
# """Generate image of 'figuregroup' type"""
# new_img = s_img
# return [new_img, index+1]
def process_section(style, layout_type, item_list):
"""Processing a single section"""
switchfunc = {
"text": generate_text,
"colortext": generate_colortext,
"vertitext": generate_vertitext,
"doubletext_nl": generate_doubletext_nl,
"doubletext": generate_doubletext,
"figure": generate_figure
#"figuregroup": generate_figuregroup
}
if layout_type == "figure_alias":
raise NameError("Invalid type name")
layout = style[layout_type]
s_img = Image.open(find_file(layout["background"]))
index = 0
for i in layout["item"]:
try:
i_conf = layout["default_" + i["type"]].copy()
i_conf.update(i)
except KeyError:
i_conf = i.copy()
try:
f_alias = style["figure_alias"]
i_conf.update({"figure_alias": f_alias})
except KeyError:
pass
[s_img, index] = switchfunc[i["type"]](
s_img, item_list, index, i_conf)
return s_img
def process_sheet(s_sheet):
"""Processing a single sheet"""
s_index = 0
s_len = len(s_sheet)
while s_sheet[s_index][0] == "//":
s_index += 1
if s_index == s_len:
raise NameError("No valid style")
style_name = s_sheet[s_index][0]
style = get_style(style_name)
os.chdir(os.path.join("config", style_name))
s_index += 1
current_type = "default"
main_img = None
main_size = [0, 0]
for i in s_sheet[s_index:]:
if i[0] == "//":
continue
if i[0] != "":
current_type = i[0]
s_img = process_section(
style, current_type, i[1:])
s_size = list(s_img.size)
if main_img is None:
main_img = s_img
main_size = s_size
else:
new_size = [max(main_size[0], s_size[0]), main_size[1] + s_size[1]]
main_img = main_img.crop([0, 0] + new_size)
new_box = [0, main_size[1], s_size[0], new_size[1]]
main_img.paste(s_img, new_box)
main_size = new_size
os.chdir(os.path.dirname(os.path.dirname(os.getcwd())))
return main_img
def read_csv_file(csv_filename):
"""Reading CSV file"""
c_file = open(csv_filename, "r")
content = list(csv.reader(c_file))
c_file.close()
return {"csvsheet": content}
def read_excel_file(excel_filename):
"""Reading .xlsx/.xls file"""
x_book = xlrd.open_workbook(excel_filename)
content = {}
tmp_sheetname = x_book.sheet_names()
for i in range(x_book.nsheets):
tmp_content = []
tmp_sheet = x_book.sheet_by_index(i)
for j in range(tmp_sheet.nrows):
tmp_content += [tmp_sheet.row_values(j)]
content[tmp_sheetname[i]] = tmp_content
return content
def valid_filename(input_filename):
"""Making filename valid"""
return input_filename.translate(str.maketrans("*/\\<>:\"|", "--------"))
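# For example (illustrative only): valid_filename('a/b*c.xlsx') returns 'a-b-c.xlsx', since each
# character that is not allowed in file names is replaced by '-'.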
def main(argv=None):
"""Main function of HCTIWS"""
global INPUT_DIR
if argv is None:
argv = sys.argv
# display the version info
print("HCTIWS Creates the Image with Sheets")
print(" Made by ZMSOFT")
print("version 2.67-3\n")
# get input filename
if len(argv) == 1:
print("Usage: hctiws [INPUT_FILE [OUTPUT_DIRECTORY]]\n")
input_filename = input("Input file (.csv, .xlsx, .xls): ")
INPUT_DIR = os.path.dirname(input_filename)
elif len(argv) == 2:
input_filename = argv[1]
INPUT_DIR = os.path.dirname(input_filename)
else:
input_filename = argv[1]
INPUT_DIR = argv[2]
# open worksheet/sheet file
if input_filename[-4:] == ".csv":
content = read_csv_file(input_filename)
    elif input_filename[-5:] == ".xlsx" or input_filename[-4:] == ".xls":
        content = read_excel_file(input_filename)
    else:
        raise NameError("Unsupported input file type: " + input_filename)
# process and save the result of each sheet
for i in content:
tmp_img = process_sheet(content[i])
# tmp_img.show() # show the image before saving for debug
tmp_name = os.path.join(
INPUT_DIR, valid_filename(
os.path.basename(input_filename)).
rsplit(".", 1)[0] + "_" + valid_filename(i))
if os.path.exists(tmp_name + ".png"):
for j in range(1, 100):
if not os.path.exists(tmp_name + "-" + str(j) + ".png"):
tmp_name += "-" + str(j) + ".png"
break
else:
raise NameError("Too many duplicated file names")
else:
tmp_name += ".png"
tmp_img.save(tmp_name)
print(tmp_name, "DONE")
return 0
INPUT_DIR = ""
if __name__ == "__main__":
sys.exit(main())
[previous file stats: avg_line_length 40.940568 | max_line_length 99 | alphanum_fraction 0.558129]
[file: src/models/gru.py | repo: geektoni/concept-tagging-with-neural-networks | license: Apache-2.0 | size: 7568 | lang: Python | stars: 7]
import torch
import torch.nn as nn
import torch.nn.functional as F
import data_manager
class GRU(nn.Module):
def __init__(self, device, w2v_weights, hidden_dim, tagset_size, drop_rate, bidirectional=False,
freeze=True, embedding_norm=10., c2v_weights=None, pad_word_length=16):
"""
:param device: Device to which to map tensors (GPU or CPU).
        :param w2v_weights: Matrix of word-embedding weights; the ith row contains the embedding for the word mapped to the ith
        index, and the last row should correspond to the padding token, <padding>.
        :param hidden_dim: Size of the hidden dimension of the recurrent layer.
:param tagset_size: Number of possible classes, this will be the dimension of the output vector.
:param drop_rate: Drop rate for regularization.
:param bidirectional: If the recurrent should be bidirectional.
:param freeze: If the embedding parameters should be frozen or trained during training.
:param embedding_norm: Max norm of the embeddings.
        :param c2v_weights: Matrix of character-embedding weights; the ith row contains the embedding for the character mapped to
        the ith index, and the last row should correspond to the padding character. When this is passed, the network runs a
        convolutional network over the character representations and adds the resulting feature vector to the token embedding.
:param pad_word_length: Length to which each word is padded to, only used if c2v_weights has been passed and
the network is going to use char representations, it is needed for the size of the maxpooling window.
"""
super(GRU, self).__init__()
self.device = device
self.hidden_dim = hidden_dim
self.tagset_size = tagset_size
self.embedding_dim = w2v_weights.shape[1]
self.w2v_weights = w2v_weights
self.c2v_weights = c2v_weights
self.bidirectional = bidirectional
self.pad_word_length = pad_word_length
self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(w2v_weights), freeze=freeze)
self.embedding.max_norm = embedding_norm
self.drop_rate = drop_rate
self.drop = nn.Dropout(self.drop_rate)
# The recurrent layer takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim.
self.recurrent = nn.GRU(self.embedding_dim, self.hidden_dim // (1 if not self.bidirectional else 2),
batch_first=True, bidirectional=self.bidirectional)
self.hidden2tag = nn.Sequential(
nn.BatchNorm2d(1),
nn.Dropout(self.drop_rate),
nn.Linear(self.hidden_dim, self.tagset_size),
nn.ReLU(inplace=True)
)
# setup convolution on characters if c2v_weights are passed
if self.c2v_weights is not None:
self.char_embedding_dim = c2v_weights.shape[1]
self.char_embedding = nn.Embedding.from_pretrained(torch.FloatTensor(c2v_weights), freeze=freeze)
self.char_embedding.max_norm = embedding_norm
self.feats = 20 # for the output channels of the conv layers
self.recurrent = nn.GRU(self.embedding_dim + 50,
self.hidden_dim // (1 if not self.bidirectional else 2),
batch_first=True, bidirectional=self.bidirectional)
# conv layers for single character, pairs of characters, 3x characters
self.ngram1 = nn.Sequential(
nn.Conv2d(1, self.feats * 1, kernel_size=(1, self.char_embedding_dim),
stride=(1, self.char_embedding_dim),
padding=0),
nn.Dropout2d(p=self.drop_rate),
nn.MaxPool2d(kernel_size=(self.pad_word_length, 1)),
nn.Tanh(),
)
self.ngram2 = nn.Sequential(
nn.Conv2d(1, self.feats * 2, kernel_size=(2, self.char_embedding_dim),
stride=(1, self.char_embedding_dim),
padding=0),
nn.Dropout2d(p=self.drop_rate),
nn.MaxPool2d(kernel_size=(self.pad_word_length - 1, 1)),
nn.Tanh(),
)
self.ngram3 = nn.Sequential(
nn.Conv2d(1, self.feats * 3, kernel_size=(3, self.char_embedding_dim),
stride=(1, self.char_embedding_dim),
padding=0),
nn.Dropout2d(p=self.drop_rate),
nn.MaxPool2d(kernel_size=(self.pad_word_length - 2, 1)),
nn.Tanh(),
)
# seq layers to elaborate on the output of conv layers
self.fc1 = nn.Sequential(
nn.Linear(self.feats, 10),
)
self.fc2 = nn.Sequential(
nn.Linear(self.feats * 2, 20),
)
self.fc3 = nn.Sequential(
nn.Linear(self.feats * 3, 20),
)
def init_hidden(self, batch_size):
"""
Inits the hidden state of the recurrent layer.
        :param batch_size: Number of sequences in the batch.
:return: Initialized hidden state of the recurrent encoder.
"""
if self.bidirectional:
state = torch.zeros(self.recurrent.num_layers * 2, batch_size, self.hidden_dim // 2).to(self.device)
else:
state = torch.zeros(self.recurrent.num_layers, batch_size, self.hidden_dim).to(self.device)
return state
def forward(self, batch):
"""
Forward pass given data.
:param batch: List of samples containing data as transformed by the init transformer of this class.
:return: A (batch of) vectors of length equal to tagset, scoring each possible class for each word in a sentence,
for all sentences; a tensor containing the true label for each word and a tensor containing the lengths
of the sequences in descending order.
"""
hidden = self.init_hidden(len(batch))
# pack sentences and pass through rnn
data, labels, char_data = data_manager.batch_sequence(batch, self.device)
data = self.embedding(data)
data = self.drop(data)
if self.c2v_weights is not None:
batched_conv = []
char_data = self.char_embedding(char_data)
char_data = self.drop(char_data)
num_words = char_data.size()[2]
for i in range(num_words):
# get word for each batch, then convolute on the ith word of each batch and concatenate
c = char_data[:, 0, i, :, :].unsqueeze(1)
ngram1 = self.ngram1(c).view(char_data.size()[0], 1, 1, -1)
ngram2 = self.ngram2(c).view(char_data.size()[0], 1, 1, -1)
ngram3 = self.ngram3(c).view(char_data.size()[0], 1, 1, -1)
ngram1 = self.fc1(ngram1)
ngram2 = self.fc2(ngram2)
ngram3 = self.fc3(ngram3)
batched_conv.append(torch.cat([ngram1, ngram2, ngram3], dim=3))
batched_conv = torch.cat(batched_conv, dim=1).squeeze(2)
data = torch.cat([data, batched_conv], dim=2)
rec_out, hidden = self.recurrent(data, hidden)
# send output to fc layer(s)
tag_space = self.hidden2tag(rec_out.unsqueeze(1).contiguous())
tag_scores = F.log_softmax(tag_space, dim=3)
return tag_scores.view(-1, self.tagset_size), labels.view(-1)
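# Illustrative usage sketch (not part of the original module). The exact batch layout comes from
# data_manager.batch_sequence, so `batch` and the hyperparameters below are assumptions rather
# than the project's actual training code:
#
#   model = GRU(device, w2v_weights, hidden_dim=200, tagset_size=24,
#               drop_rate=0.5, bidirectional=True).to(device)
#   scores, labels = model(batch)  # scores: (num_tokens, tagset_size) log-probabilities
#   loss = F.nll_loss(scores, labels)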
[previous file stats: avg_line_length 47.898734 | max_line_length 127 | alphanum_fraction 0.614693]
[file: lib/model/config.py | repo: zmskye/pytorch-faster-rcnn | license: MIT | size: 10743 | lang: Python]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default pooling mode
__C.POOLING_MODE = 'align_2'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8,16,32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5,1,2]
# Number of filters for the RPN layer
__C.RPN_CHANNELS = 512
def get_output_dir(imdb, weights_filename):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
"""Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.safe_load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
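# Illustrative usage (not part of the original file): overriding defaults from a command-line
# style key/value list; both keys used here are defined in the defaults above.
#
#   cfg_from_list(['TRAIN.LEARNING_RATE', '0.0005', 'TRAIN.SCALES', '(800,)'])
#   assert cfg.TRAIN.LEARNING_RATE == 0.0005 and cfg.TRAIN.SCALES == (800,)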
[previous file stats: avg_line_length 28.049608 | max_line_length 91 | alphanum_fraction 0.725403]
[file: conf.py | repo: brianjo/pygallery | license: BSD-3-Clause | size: 7066 | lang: Python | stars: 1]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch Tutorials documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 8 22:38:10 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
import pytorch_sphinx_theme
import torch
import glob
import shutil
from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective
try:
import torchvision
except ImportError:
import warnings
warnings.warn('unable to load "torchvision" package')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery']
# -- Sphinx-gallery configuration --------------------------------------------
sphinx_gallery_conf = {
'examples_dirs': ['beginner_source'],
'gallery_dirs': ['beginner'],
'filename_pattern': 'tutorial.py',
'backreferences_dir': False
}
for i in range(len(sphinx_gallery_conf['examples_dirs'])):
gallery_dir = sphinx_gallery_conf['gallery_dirs'][i]
source_dir = sphinx_gallery_conf['examples_dirs'][i]
    # Create the gallery dir if it doesn't exist
try:
os.mkdir(gallery_dir)
except OSError:
pass
# Copy rst files from source dir to gallery dir
for f in glob.glob(os.path.join(source_dir, '*.rst')):
shutil.copy(f, gallery_dir)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyTorch Tutorials'
copyright = '2017, PyTorch'
author = 'PyTorch contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = torch.__version__
# The full version, including alpha/beta/rc tags.
release = torch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
exclude_patterns += sphinx_gallery_conf['examples_dirs']
exclude_patterns += ['*/index.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# # Theme options are theme-specific and customize the look and feel of a theme
# # further. For a list of options available for each theme, see the
# # documentation.
# #
# html_theme_options = {
# 'page_width': '1000px',
# 'fixed_sidebar': True,
# 'code_font_size': '0.87em',
# 'sidebar_includehidden': True
# }
# # Add any paths that contain custom static files (such as style sheets) here,
# # relative to this directory. They are copied after the builtin static files,
# # so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# # Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# 'index': ['sidebarlogo.html', 'globaltoc.html', 'searchbox.html', 'sourcelink.html'],
# '**': ['sidebarlogo.html', 'globaltoc.html', 'searchbox.html', 'sourcelink.html']
# }
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_logo = '_static/img/pytorch-logo-dark.svg'
html_theme_options = {
'pytorch_project': 'tutorials',
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchTutorialsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyTorchTutorials.tex', 'PyTorch Tutorials',
'Sasank, PyTorch contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pytorchtutorials', 'PyTorch Tutorials',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyTorchTutorials', 'PyTorch Tutorials',
author, 'PyTorchTutorials', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
# Custom CSS
# app.add_stylesheet('css/pytorch_theme.css')
# app.add_stylesheet('https://fonts.googleapis.com/css?family=Lato')
# Custom directives
app.add_directive('includenodoc', IncludeDirective)
app.add_directive('galleryitem', GalleryItemDirective)
app.add_directive('customgalleryitem', CustomGalleryItemDirective)
[previous file stats: avg_line_length 30.721739 | max_line_length 96 | alphanum_fraction 0.689499]
[file: meerk40t/gui/scenewidgets/guidewidget.py | repo: joerlane/meerk40t | license: MIT | size: 29705 | lang: Python]
import math
import wx
from meerk40t.core.units import Length
from meerk40t.gui.laserrender import DRAW_MODE_GUIDES
from meerk40t.gui.scene.sceneconst import HITCHAIN_HIT, RESPONSE_CHAIN, RESPONSE_CONSUME
from meerk40t.gui.scene.widget import Widget
class GuideWidget(Widget):
"""
Interface Widget
Guidelines drawn at along the scene edges.
"""
def __init__(self, scene):
Widget.__init__(self, scene, all=False)
self.scene.context.setting(bool, "show_negative_guide", True)
self.edge_gap = 5
self.line_length = 20
self.calc_area(True, 0, 0)
self.scaled_conversion_x = 0
self.scaled_conversion_y = 0
self.units = None
self.options = []
self.pen_guide1 = wx.Pen()
self.pen_guide2 = wx.Pen()
self.pen_magnets = wx.Pen()
self.color_guide1 = None
self.color_guide2 = None
self.set_colors()
def set_colors(self):
self.color_guide1 = self.scene.colors.color_guide
self.color_guide2 = self.scene.colors.color_guide2
self.pen_guide1.SetColour(self.color_guide1)
self.pen_guide2.SetColour(self.color_guide2)
self.pen_magnets.SetColour(self.scene.colors.color_magnetline)
self.pen_magnets.SetWidth(2)
def hit(self):
return HITCHAIN_HIT
def calc_area(self, lower, w, h):
if lower:
self.scale_x_lower = 0
self.scale_x_upper = self.edge_gap + self.line_length
self.scale_y_lower = 0
self.scale_y_upper = self.edge_gap + self.line_length
# Set secondary to primary initially
self.scale_x2_lower = self.scale_x_lower
self.scale_x2_upper = self.scale_x_upper
self.scale_y2_lower = self.scale_y_lower
self.scale_y2_upper = self.scale_y_upper
else:
self.scale_x2_lower = w - self.edge_gap - self.line_length
self.scale_x2_upper = w
self.scale_y2_lower = h - self.edge_gap - self.line_length
self.scale_y2_upper = h
def contains(self, x, y=None):
# Slightly more complex than usual due to left, top area
value = False
if y is None:
y = x.y
x = x.x
if (
self.scale_x_lower <= x <= self.scale_x_upper
or self.scale_y_lower <= y <= self.scale_y_upper
or self.scale_x2_lower <= x <= self.scale_x2_upper
or self.scale_y2_lower <= y <= self.scale_y2_upper
):
value = True
return value
def set_auto_tick(self, value):
if value == 0:
self.scene.auto_tick = True
else:
self.scene.auto_tick = False
self.scene.tick_distance = value
self.scene._signal_widget(self.scene.widget_root, "grid")
self.scene.request_refresh()
def change_tick_event(self, idx):
value = self.options[idx]
self.set_auto_tick(value)
def attract_event(self, value):
self.scene.magnet_attraction = value
def affect_event(self, value):
if value == 0:
self.scene.magnet_attract_x = not self.scene.magnet_attract_x
elif value == 1:
self.scene.magnet_attract_y = not self.scene.magnet_attract_y
elif value == 2:
self.scene.magnet_attract_c = not self.scene.magnet_attract_c
def toggle_circles(self):
# toggle circular grid
self.scene.context("scene grid circular\n")
def toggle_rect(self):
# toggle primary grid
self.scene.context("scene grid primary\n")
def toggle_secondary(self):
# toggle secondary grid
self.scene.context("scene grid secondary\n")
def fill_magnets(self):
# Let's set the full grid
p = self.scene.context
if self.scene.draw_grid_primary:
tlen = float(
Length(
"{value}{units}".format(
value=self.scene.tick_distance, units=p.units_name
)
)
)
x = 0
while x <= p.device.unit_width:
self.scene.toggle_x_magnet(x)
x += tlen
y = 0
while y <= p.device.unit_height:
self.scene.toggle_y_magnet(y)
y += tlen
elif self.scene.draw_grid_secondary:
# Placeholder for a use case, as you can define them manually...
pass
def event(self, window_pos=None, space_pos=None, event_type=None):
"""
        Capture and deal with events on the guide rulers: a right-click opens the grid and magnet
        context menu, and a double-click on a ruler toggles magnet lines at the clicked position.
"""
_ = self.scene.context._
def add_scale_options(self, menu):
kind = wx.ITEM_CHECK if self.scene.auto_tick else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Auto-Scale"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.set_auto_tick(0),
id=item.GetId(),
)
menu.AppendSeparator()
units = self.scene.context.units_name
if units == "mm":
self.options = [1, 5, 10, 25]
elif units == "cm":
self.options = [0.1, 0.5, 1, 5]
elif units == "inch":
self.options = [0.1, 0.25, 0.5, 1]
else: # mils
self.options = [100, 250, 500, 1000]
            # Not elegant, but written as a plain loop the lambda would capture the last value of the
            # loop variable for every entry (see the illustrative sketch at the end of this function).
kind = (
wx.ITEM_CHECK
if self.scene.tick_distance == self.options[0]
and not self.scene.auto_tick
else wx.ITEM_NORMAL
)
item = menu.Append(
wx.ID_ANY,
"{amount:.2f}{units}".format(amount=self.options[0], units=units),
"",
kind,
)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.change_tick_event(0),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK
if self.scene.tick_distance == self.options[1]
and not self.scene.auto_tick
else wx.ITEM_NORMAL
)
item = menu.Append(
wx.ID_ANY,
"{amount:.2f}{units}".format(amount=self.options[1], units=units),
"",
kind,
)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.change_tick_event(1),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK
if self.scene.tick_distance == self.options[2]
and not self.scene.auto_tick
else wx.ITEM_NORMAL
)
item = menu.Append(
wx.ID_ANY,
"{amount:.2f}{units}".format(amount=self.options[2], units=units),
"",
kind,
)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.change_tick_event(2),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK
if self.scene.tick_distance == self.options[3]
and not self.scene.auto_tick
else wx.ITEM_NORMAL
)
item = menu.Append(
wx.ID_ANY,
"{amount:.2f}{units}".format(amount=self.options[3], units=units),
"",
kind,
)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.change_tick_event(3),
id=item.GetId(),
)
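            # Illustrative sketch (not part of the original widget): the four blocks above could be
            # collapsed into a loop by binding the loop index through a default argument, which avoids
            # the late-binding lambda problem mentioned earlier, e.g.:
            #
            #   for idx, amount in enumerate(self.options):
            #       kind = (
            #           wx.ITEM_CHECK
            #           if self.scene.tick_distance == amount and not self.scene.auto_tick
            #           else wx.ITEM_NORMAL
            #       )
            #       item = menu.Append(
            #           wx.ID_ANY,
            #           "{amount:.2f}{units}".format(amount=amount, units=units),
            #           "",
            #           kind,
            #       )
            #       if kind == wx.ITEM_CHECK:
            #           menu.Check(item.GetId(), True)
            #       self.scene.context.gui.Bind(
            #           wx.EVT_MENU,
            #           lambda e, i=idx: self.change_tick_event(i),
            #           id=item.GetId(),
            #       )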
def add_attraction_strength_menu(self, menu):
item = menu.Append(
wx.ID_ANY, _("Attraction strength..."), "", wx.ITEM_NORMAL
)
menu.Enable(item.GetId(), False)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 0 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Off"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(0),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 1 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Weak"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(1),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 2 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Normal"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(2),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 3 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Strong"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(3),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 4 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Very Strong"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(4),
id=item.GetId(),
)
kind = (
wx.ITEM_CHECK if self.scene.magnet_attraction == 5 else wx.ITEM_NORMAL
)
item = menu.Append(wx.ID_ANY, _("Enormous"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.attract_event(5),
id=item.GetId(),
)
def add_attraction_options_menu(self, menu):
item = menu.Append(wx.ID_ANY, _("Attraction areas..."), "", wx.ITEM_NORMAL)
menu.Enable(item.GetId(), False)
kind = wx.ITEM_CHECK if self.scene.magnet_attract_x else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Left/Right Side"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.affect_event(0),
id=item.GetId(),
)
kind = wx.ITEM_CHECK if self.scene.magnet_attract_y else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Top/Bottom Side"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.affect_event(1),
id=item.GetId(),
)
kind = wx.ITEM_CHECK if self.scene.magnet_attract_c else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Center"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.affect_event(2),
id=item.GetId(),
)
def add_grid_draw_options(self, menu):
menu.AppendSeparator()
kind = wx.ITEM_CHECK if self.scene.draw_grid_primary else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Draw primary grid"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.toggle_rect(),
id=item.GetId(),
)
kind = wx.ITEM_CHECK if self.scene.draw_grid_secondary else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Draw secondary grid"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.toggle_secondary(),
id=item.GetId(),
)
# DISABLE, AS NOT YET READY
# menu.Enable(item.GetId(), False)
kind = wx.ITEM_CHECK if self.scene.draw_grid_circular else wx.ITEM_NORMAL
item = menu.Append(wx.ID_ANY, _("Draw circular grid"), "", kind)
if kind == wx.ITEM_CHECK:
menu.Check(item.GetId(), True)
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.toggle_circles(),
id=item.GetId(),
)
def process_doubleclick(self):
# Primary Guide
secondary = False
is_y = self.scale_x_lower <= space_pos[0] <= self.scale_x_upper
if not is_y:
if self.scene.draw_grid_secondary:
is_y = self.scale_x2_lower <= space_pos[0] <= self.scale_x2_upper
secondary = True
is_x = self.scale_y_lower <= space_pos[1] <= self.scale_y_upper
if not is_x:
if self.scene.draw_grid_secondary:
is_x = self.scale_y2_lower <= space_pos[1] <= self.scale_y2_upper
secondary = True
# print ("is_x=%s, is_y=%s, secondary=%s" % (is_x, is_y, secondary))
if not (is_x or is_y):
return
value = 0
p = self.scene.context
if self.scaled_conversion_x == 0:
return
sx = 0
sy = 0
tick_distance_x = self.scene.tick_distance
tick_distance_y = self.scene.tick_distance
if secondary:
if not self.scene.grid_secondary_cx is None:
sx = self.scene.grid_secondary_cx
if not self.scene.grid_secondary_cy is None:
sy = self.scene.grid_secondary_cy
if not self.scene.grid_secondary_scale_x is None:
tick_distance_x *= self.scene.grid_secondary_scale_x
if not self.scene.grid_secondary_scale_y is None:
tick_distance_y *= self.scene.grid_secondary_scale_y
ox, oy = self.scene.convert_scene_to_window([sx, sy])
# print(
# "Device-origin=%.1f, %.1f \n ox, oy=%.1f, %.1f"
# % (p.device.origin_x, p.device.origin_y, ox, oy)
# )
mark_point_x = (window_pos[0] - ox) / self.scaled_conversion_x
mark_point_y = (window_pos[1] - oy) / self.scaled_conversion_y
# print(
# "OX=%.1f, Oy=%.1f, Mark before x=%.1f, y=%.1f"
# % (
# ox / self.scaled_conversion_x,
# oy / self.scaled_conversion_y,
# mark_point_x,
# mark_point_y,
# )
# )
# Make positions stick on ticks (or exactly inbetween)
mark_point_x = (
round(2.0 * mark_point_x / tick_distance_x) * 0.5 * tick_distance_x
)
mark_point_y = (
round(2.0 * mark_point_y / tick_distance_y) * 0.5 * tick_distance_y
)
if is_x and is_y:
if self.scene.has_magnets():
self.scene.clear_magnets()
else:
self.fill_magnets()
elif is_x:
# Get the X coordinate from space_pos [0]
value = float(Length("%.1f%s" % (mark_point_x, self.units)))
self.scene.toggle_x_magnet(value)
elif is_y:
# Get the Y coordinate from space_pos [1]
value = float(Length("%.1f%s" % (mark_point_y, self.units)))
self.scene.toggle_y_magnet(value)
self.scene.request_refresh()
if event_type == "hover":
return RESPONSE_CHAIN
elif event_type == "rightdown":
menu = wx.Menu()
add_scale_options(self, menu)
menu.AppendSeparator()
if self.scene.has_magnets():
item = menu.Append(wx.ID_ANY, _("Clear all magnets"), "")
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.scene.clear_magnets(),
id=item.GetId(),
)
menu.AppendSeparator()
add_attraction_strength_menu(self, menu)
menu.AppendSeparator()
add_attraction_options_menu(self, menu)
else:
item = menu.Append(wx.ID_ANY, _("Create magnets along grid"), "")
self.scene.context.gui.Bind(
wx.EVT_MENU,
lambda e: self.fill_magnets(),
id=item.GetId(),
)
add_grid_draw_options(self, menu)
self.scene.context.gui.PopupMenu(menu)
menu.Destroy()
self.scene.request_refresh()
return RESPONSE_CONSUME
elif event_type == "doubleclick":
process_doubleclick(self)
return RESPONSE_CONSUME
else:
return RESPONSE_CHAIN
def process_draw(self, gc):
"""
Draw the guidelines
"""
if self.scene.context.draw_mode & DRAW_MODE_GUIDES != 0:
return
# print ("GuideWidget Draw")
w, h = gc.Size
self.calc_area(False, w, h)
p = self.scene.context
self.scaled_conversion_x = (
p.device.length(str(1) + p.units_name, as_float=True)
* self.scene.widget_root.scene_widget.matrix.value_scale_x()
)
self.scaled_conversion_y = (
p.device.length(str(1) + p.units_name, as_float=True)
* self.scene.widget_root.scene_widget.matrix.value_scale_y()
)
if self.scaled_conversion_x == 0:
return
# Establish the delta for about 15 ticks
# print ("set scene_tick_distance to %f" % delta)
points_x_primary = self.scene.tick_distance * self.scaled_conversion_x
points_y_primary = self.scene.tick_distance * self.scaled_conversion_y
if self.scene.grid_secondary_scale_x is None:
factor_x_secondary = 1.0
else:
factor_x_secondary = self.scene.grid_secondary_scale_x
if self.scene.grid_secondary_scale_y is None:
factor_y_secondary = 1.0
else:
factor_y_secondary = self.scene.grid_secondary_scale_y
points_x_secondary = factor_x_secondary * points_x_primary
points_y_secondary = factor_y_secondary * points_y_primary
self.units = p.units_name
# Calculate center position for primary grid
x = p.device.unit_width * p.device.show_origin_x
y = p.device.unit_height * p.device.show_origin_y
sx_primary, sy_primary = self.scene.convert_scene_to_window([x, y])
# ... and now for secondary
if not self.scene.grid_secondary_cx is None:
x = self.scene.grid_secondary_cx
relative_x = self.scene.grid_secondary_cx / p.device.unit_width
else:
relative_x = p.device.show_origin_x
if not self.scene.grid_secondary_cy is None:
y = self.scene.grid_secondary_cy
relative_y = self.scene.grid_secondary_cy / p.device.unit_height
else:
relative_y = p.device.show_origin_y
sx_secondary, sy_secondary = self.scene.convert_scene_to_window([x, y])
# Do we need to show the guide regardless of the 'show negative guide' setting?
show_x_primary = p.device.show_origin_x not in (0.0, 1.0)
show_y_primary = p.device.show_origin_y not in (0.0, 1.0)
show_x_secondary = relative_x not in (0.0, 1.0)
show_y_secondary = relative_y not in (0.0, 1.0)
if points_x_primary == 0:
return
offset_x_primary = float(sx_primary) % points_x_primary
offset_y_primary = float(sy_primary) % points_y_primary
offset_x_secondary = float(sx_secondary) % points_x_secondary
offset_y_secondary = float(sy_secondary) % points_y_secondary
# print ("The intended scale is in {units} with a tick every {delta} {units}]".format(delta=self.scene.tick_distance, units=self.units))
# print("Ticks start for x at %.1f, for y at %.1f with a step-size of %.1f, %.1f" % (offset_x_primary, offset_y_primary, points_x_primary, points_y_primary))
# print("Start-location is at %.1f, %.1f" % (sx_primary, sy_primary))
length = self.line_length
edge_gap = self.edge_gap
gc.SetPen(self.pen_guide1)
font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
gc.SetFont(font, self.color_guide1)
gc.DrawText(self.units, edge_gap, edge_gap)
(t_width, t_height) = gc.GetTextExtent("0")
starts = []
ends = []
x = offset_x_primary
while x < w:
if x >= 45:
mark_point = (x - sx_primary) / self.scaled_conversion_x
if round(float(mark_point) * 1000) == 0:
mark_point = 0.0 # prevents -0
if p.device.flip_x:
mark_point *= -1
if mark_point >= 0 or p.show_negative_guide or show_x_primary:
starts.append((x, edge_gap))
ends.append((x, length + edge_gap))
starts.append((x, h - edge_gap))
ends.append((x, h - length - edge_gap))
# Show half distance as well if there's enough room
if t_height < 0.5 * points_x_primary:
starts.append((x - 0.5 * points_x_primary, edge_gap))
ends.append(
(x - 0.5 * points_x_primary, 0.25 * length + edge_gap)
)
if not self.scene.draw_grid_secondary:
starts.append((x, h - edge_gap))
ends.append((x, h - length - edge_gap))
starts.append((x - 0.5 * points_x_primary, h - edge_gap))
ends.append(
(x - 0.5 * points_x_primary, h - 0.25 * length - edge_gap)
)
gc.DrawText("%g" % mark_point, x, edge_gap, -math.tau / 4)
x += points_x_primary
y = offset_y_primary
while y < h:
if y >= 20:
mark_point = (y - sy_primary) / self.scaled_conversion_y
if round(float(mark_point) * 1000) == 0:
mark_point = 0.0 # prevents -0
if p.device.flip_y:
mark_point *= -1
if mark_point >= 0 or p.show_negative_guide or show_y_primary:
starts.append((edge_gap, y))
ends.append((length + edge_gap, y))
# if there is enough room for a mid-distance stroke...
if t_height < 0.5 * points_y_primary:
starts.append((edge_gap, y - 0.5 * points_y_primary))
ends.append(
(0.25 * length + edge_gap, y - 0.5 * points_y_primary)
)
if not self.scene.draw_grid_secondary:
starts.append((w - edge_gap, y))
ends.append((w - length - edge_gap, y))
starts.append((w - edge_gap, y - 0.5 * points_y_primary))
ends.append(
(w - 0.25 * length - edge_gap, y - 0.5 * points_y_primary)
)
# gc.DrawText("%g %s" % (mark_point + 0, p.units_name), 0, y + 0)
gc.DrawText("%g" % (mark_point + 0), edge_gap, y + 0)
y += points_y_primary
if len(starts) > 0:
gc.StrokeLineSegments(starts, ends)
# Now the guide for the secondary grid...
if self.scene.draw_grid_secondary:
gc.SetPen(self.pen_guide2)
gc.SetFont(font, self.color_guide2)
starts = []
ends = []
x = offset_x_secondary
while x < w:
if x >= 45:
mark_point = (x - sx_secondary) / (
factor_x_secondary * self.scaled_conversion_x
)
if round(float(mark_point) * 1000) == 0:
mark_point = 0.0 # prevents -0
if p.device.flip_x:
mark_point *= -1
if mark_point >= 0 or p.show_negative_guide or show_x_secondary:
starts.append((x, edge_gap))
ends.append((x, length + edge_gap))
starts.append((x, h - edge_gap))
ends.append((x, h - length - edge_gap))
# Show half distance as well if there's enough room
if t_height < 0.5 * points_x_secondary:
starts.append((x - 0.5 * points_x_secondary, h - edge_gap))
ends.append(
(
x - 0.5 * points_x_secondary,
h - 0.25 * length - edge_gap,
)
)
info = "%g" % mark_point
(t_w, t_h) = gc.GetTextExtent(info)
gc.DrawText(info, x, h - edge_gap - t_w, -math.tau / 4)
x += points_x_secondary
y = offset_y_secondary
while y < h:
if y >= 20:
mark_point = (y - sy_secondary) / (
factor_y_secondary * self.scaled_conversion_y
)
if round(float(mark_point) * 1000) == 0:
mark_point = 0.0 # prevents -0
if p.device.flip_y:
mark_point *= -1
if mark_point >= 0 or p.show_negative_guide or show_y_secondary:
starts.append((w - edge_gap, y))
ends.append((w - length - edge_gap, y))
# if there is enough room for a mid-distance stroke...
if t_height < 0.5 * points_y_secondary:
starts.append((w - edge_gap, y - 0.5 * points_y_secondary))
ends.append(
(
w - 0.25 * length - edge_gap,
y - 0.5 * points_y_secondary,
)
)
info = "%g" % (mark_point + 0)
(t_w, t_h) = gc.GetTextExtent(info)
gc.DrawText(info, w - edge_gap - t_w, y + 0)
y += points_y_secondary
gc.StrokeLineSegments(starts, ends)
starts_hi = []
ends_hi = []
for x in self.scene.magnet_x:
sx, sy = self.scene.convert_scene_to_window([x, 0])
starts_hi.append((sx, length + edge_gap))
ends_hi.append((sx, h - length - edge_gap))
for y in self.scene.magnet_y:
sx, sy = self.scene.convert_scene_to_window([0, y])
starts_hi.append((length + edge_gap, sy))
ends_hi.append((w - length - edge_gap, sy))
gc.SetPen(self.pen_magnets)
if starts_hi and ends_hi:
gc.StrokeLineSegments(starts_hi, ends_hi)
def signal(self, signal, *args, **kwargs):
"""
Process guide signal to delete the current guidelines and force them to be recalculated.
"""
if signal == "guide":
pass
elif signal == "theme":
self.set_colors()
[previous file stats: avg_line_length 40.196211 | max_line_length 165 | alphanum_fraction 0.512237]
[file: economy/commands/deposit.py | repo: Ful5hii/SkellyMod | license: MIT | size: 1224 | lang: Python | stars: 3]
import json
import discord
# Note: `bot`, `open_account` and `get_bank_data` are assumed to be defined elsewhere in this project.
@bot.command(aliases=['dep'])
async def deposit(ctx,amount = None):
await open_account(ctx.author)
    if amount is None:
em = discord.Embed(title="Please enter an amount", color=discord.Color.teal())
await ctx.reply(embed=em)
return
bal = await update_bank(ctx.author)
amount=int(amount)
if amount > bal[0]:
em = discord.Embed(title="You don't have that much money!", color=discord.Color.teal())
await ctx.reply(embed=em)
return
if amount < 0:
em = discord.Embed(title="Amount must be positive!", color=discord.Color.teal())
await ctx.reply(embed=em)
return
    # Net effect: move `amount` coins from the wallet into the bank
    await update_bank(ctx.author, amount, "Bank")
    await update_bank(ctx.author, -1 * amount, "Wallet")
depositem = discord.Embed(title=f"You deposited {amount} coins!", color=discord.Color.teal())
await ctx.reply(embed=depositem)
async def update_bank(user,change = 0,mode = "Wallet"):
users = await get_bank_data()
users[str(user.id)][mode] += change
with open("bank.json", 'w') as f:
json.dump(users, f)
bal = [users[str(user.id)]["Wallet"],users[str(user.id)]["Bank"]]
return bal
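# Illustrative call (not part of the original cog): `await update_bank(user, 50, "Bank")` adds 50
# coins to the user's bank balance and returns the updated [Wallet, Bank] pair.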
[previous file stats: avg_line_length 31.384615 | max_line_length 97 | alphanum_fraction 0.64134]
[file: scripts/dominated.py | repo: snurk/meta-strains | license: MIT | size: 4438 | lang: Python]
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
# import seaborn as sns
import warnings
import scipy
import scipy.cluster
warnings.filterwarnings('ignore')
import sys
def p2f(x):
# percents to float
if x == '-':
return None
else:
return float(x.strip('%')) / 100
def find_margin(VAFs, sample_name=None, within_margin=0.05, eps=0.01):
print("Num of SNVs:", len(VAFs))
margin = eps
while ((0.5 - margin < VAFs) & (VAFs < 0.5 + margin)).sum() < within_margin * len(VAFs) and 0.5 > margin:
margin += eps
if margin > 0.5:
margin = 0.5
if len(VAFs) < 500:
print("Very few SNVs")
else:
print("%.2f - %.2f" % (0.5 - margin, 0.5 + margin))
print('margin = %.2f' % margin)
if 0.5 + margin >= 0.7 or len(VAFs) < 500:
color = 'red'
print('DOMINATED')
res = True
else:
color = 'blue'
print('NOT dominated')
res = False
plt.hist(VAFs, 50, alpha=0.5, color=color)
plt.xlim((0, 1))
plt.xlabel('SNV frequencies')
plt.title(sample_name)
plt.savefig("hists/%s.png" % sample_name)
return res
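# Illustrative reading (not part of the original script): find_margin grows a window around a VAF
# of 0.5 until at least 5% of the SNVs fall inside it; the sample is reported as DOMINATED when
# that window has to extend to 0.7 or beyond, or when fewer than 500 SNVs are available.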
def filter_by_coverage(depth, vafs, bad_percentile=0.3, good_samples_percent=0.8):
q1 = depth.quantile(bad_percentile)
q2 = depth.quantile(1 - bad_percentile)
cur_n_samples = depth.shape[1]
necessary_amount = int(cur_n_samples * good_samples_percent)
ind = ((depth > q1) & (depth < q2)).sum(axis=1) >= necessary_amount
return ind
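# Illustrative reading (not part of the original script): with the default parameters an SNV is
# kept only if its depth lies strictly between that sample's 30th and 70th depth percentiles in
# at least 80% of the samples, e.g.:
#
#   keep = filter_by_coverage(depth_df, vaf_df)  # depth_df / vaf_df are hypothetical names
#   filtered_vafs = vaf_df[keep]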
def main():
df = pd.read_csv(sys.argv[1], sep='\t')
df_samples = df.iloc[:, -1].str.split(pat=" ", expand=True)
n_samples = df_samples.shape[1]
df_samples_cov = df_samples.apply(lambda x: x.str.split(pat=":", expand=True)[1]).astype("int64")
df_samples_VAF = df_samples.apply(lambda x: x.str.split(pat=":", expand=True)[4]).applymap(p2f)
sample_names = sys.argv[2].split(',')
if not os.path.exists("hists"):
os.makedirs("hists")
dominated_samples = []
for i in range(n_samples):
print(sample_names[i])
cur_depth = df_samples_cov.iloc[:, i].copy()
q1 = cur_depth.quantile(0.3)
q2 = cur_depth.quantile(0.7)
print("Coverage median: %i" % cur_depth.median())
print("Q30: %i" % q1)
print("Q70: %i" % q2)
cur_VAFs = df_samples_VAF.iloc[:, i]
selected_VAFs = cur_VAFs[(q1 < cur_depth) & (cur_depth < q2)]
selected_VAFs = selected_VAFs[(selected_VAFs > 0.02) & (selected_VAFs < 0.98)]
plt.figure(i)
if find_margin(selected_VAFs, sample_name=sample_names[i]):
dominated_samples.append(i)
print()
df_dominated_cov = df_samples_cov.iloc[:, dominated_samples]
df_dominated_VAF = df_samples_VAF.iloc[:, dominated_samples]
selected_SNVs = filter_by_coverage(df_dominated_cov, df_dominated_VAF)
# clustering of genotypes in dominated samples
genotypes = df_dominated_VAF[selected_SNVs] > 0.5
# genotypes = genotypes[
# (genotypes.sum(axis=1) > 0) & (genotypes.sum(axis=1) < len(dominated_samples))] # remove non-informative sites
genotypes = genotypes.T
# percent of non-matching SNVs
dists = scipy.spatial.distance.pdist(genotypes, 'hamming')
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
Z = scipy.cluster.hierarchy.linkage(dists, 'complete')
clusters = {}
for i in range(len(dominated_samples)):
clusters[i] = {sample_names[dominated_samples[i]]}
last_cluster = len(dominated_samples) - 1
for i in range(len(Z)):
if Z[i, 2] > 0.01:
break
u, v = Z[i, 0], Z[i, 1]
last_cluster += 1
clusters[last_cluster] = clusters[u] | clusters[v] # union
clusters.pop(u)
clusters.pop(v)
print("Clustering results:")
for clustered_samples in clusters.values():
print(", ".join(clustered_samples))
# g = sns.clustermap(genotypes.T,
# xticklabels = False,
# yticklabels=[sample_names[i] for i in dominated_samples])
# plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0)
# g.cax.set_visible(False)
# plt.suptitle('%i SNVs' % len(genotypes))
# plt.savefig("dominated_genotypes.png")
if __name__ == "__main__":
main()
[previous file stats: avg_line_length 28.632258 | max_line_length 120 | alphanum_fraction 0.619198]
[file: python/ccxt/foxbit.py | repo: RusEu/ccxt | license: MIT | size: 9786 | lang: Python | stars: 3]
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.precise import Precise
class foxbit(Exchange):
def describe(self):
return self.deep_extend(super(foxbit, self).describe(), {
'id': 'foxbit',
'name': 'FoxBit',
'countries': ['BR'],
'has': {
'cancelOrder': True,
'CORS': False,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTrades': True,
},
'rateLimit': 1000,
'version': 'v1',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87443320-01c0d080-c5fe-11ea-92e2-4ef56d32b026.jpg',
'api': {
'public': 'https://api.blinktrade.com/api',
'private': 'https://api.blinktrade.com/tapi',
},
'www': 'https://foxbit.com.br/exchange',
'doc': 'https://foxbit.com.br/api/',
},
'comment': 'Blinktrade API',
'api': {
'public': {
'get': [
'{currency}/ticker', # ?crypto_currency=BTC
'{currency}/orderbook', # ?crypto_currency=BTC
'{currency}/trades', # ?crypto_currency=BTC&since=<TIMESTAMP>&limit=<NUMBER>
],
},
'private': {
'post': [
'D', # order
'F', # cancel order
'U2', # balance
'U4', # my orders
'U6', # withdraw
'U18', # deposit
'U24', # confirm withdrawal
'U26', # list withdrawals
'U30', # list deposits
'U34', # ledger
'U70', # cancel withdrawal
],
},
},
'markets': {
'BTC/VEF': {'id': 'BTCVEF', 'symbol': 'BTC/VEF', 'base': 'BTC', 'quote': 'VEF', 'brokerId': 1, 'broker': 'SurBitcoin'},
'BTC/VND': {'id': 'BTCVND', 'symbol': 'BTC/VND', 'base': 'BTC', 'quote': 'VND', 'brokerId': 3, 'broker': 'VBTC'},
'BTC/BRL': {'id': 'BTCBRL', 'symbol': 'BTC/BRL', 'base': 'BTC', 'quote': 'BRL', 'brokerId': 4, 'broker': 'FoxBit'},
'BTC/PKR': {'id': 'BTCPKR', 'symbol': 'BTC/PKR', 'base': 'BTC', 'quote': 'PKR', 'brokerId': 8, 'broker': 'UrduBit'},
'BTC/CLP': {'id': 'BTCCLP', 'symbol': 'BTC/CLP', 'base': 'BTC', 'quote': 'CLP', 'brokerId': 9, 'broker': 'ChileBit'},
},
'options': {
'brokerId': '4', # https://blinktrade.com/docs/#brokers
},
})
def fetch_balance(self, params={}):
self.load_markets()
request = {
'BalanceReqID': self.nonce(),
}
response = self.privatePostU2(self.extend(request, params))
balances = self.safe_value(response['Responses'], self.options['brokerId'])
result = {'info': response}
if balances is not None:
currencyIds = list(self.currencies_by_id.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
# we only set the balance for the currency if that currency is present in response
# otherwise we will lose the info if the currency balance has been funded or traded or not
if currencyId in balances:
account = self.account()
used = self.safe_string(balances, currencyId + '_locked')
used = Precise.string_div(used, '1e8')
total = self.safe_string(balances, currencyId)
total = Precise.string_div(total, '1e8')
account['used'] = used
account['total'] = total
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['quote'],
'crypto_currency': market['base'],
}
response = self.publicGetCurrencyOrderbook(self.extend(request, params))
return self.parse_order_book(response)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['quote'],
'crypto_currency': market['base'],
}
ticker = self.publicGetCurrencyTicker(self.extend(request, params))
timestamp = self.milliseconds()
lowercaseQuote = market['quote'].lower()
quoteVolume = 'vol_' + lowercaseQuote
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'vol'),
'quoteVolume': self.safe_number(ticker, quoteVolume),
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'date')
id = self.safe_string(trade, 'tid')
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_string(trade, 'side')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currency': market['quote'],
'crypto_currency': market['base'],
}
response = self.publicGetCurrencyTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
market = self.market(symbol)
orderSide = '1' if (side == 'buy') else '2'
request = {
'ClOrdID': self.nonce(),
'Symbol': market['id'],
'Side': orderSide,
'OrdType': '2',
'Price': price,
'OrderQty': amount,
'BrokerID': market['brokerId'],
}
response = self.privatePostD(self.extend(request, params))
indexed = self.index_by(response['Responses'], 'MsgType')
execution = indexed['8']
return {
'info': response,
'id': execution['OrderID'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
return self.privatePostF(self.extend({
'ClOrdID': id,
}, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
request = self.extend({'MsgType': path}, query)
body = self.json(request)
headers = {
'APIKey': self.apiKey,
'Nonce': nonce,
'Signature': self.hmac(self.encode(nonce), self.encode(self.secret)),
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'Status' in response:
if response['Status'] != 200:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 40.945607
| 135
| 0.504394
|
388c7ecc841cbfbd4dd6f29389c665b83e8c1839
| 1,600
|
py
|
Python
|
api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
|
Corey-ONeal/opentrons-app_ws-remote
|
a255b76c8a07457787d575da12b2d5bdb6220a91
|
[
"Apache-2.0"
] | null | null | null |
api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
|
Corey-ONeal/opentrons-app_ws-remote
|
a255b76c8a07457787d575da12b2d5bdb6220a91
|
[
"Apache-2.0"
] | null | null | null |
api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
|
Corey-ONeal/opentrons-app_ws-remote
|
a255b76c8a07457787d575da12b2d5bdb6220a91
|
[
"Apache-2.0"
] | null | null | null |
"""Test load pipette commands."""
from mock import AsyncMock # type: ignore[attr-defined]
from opentrons.types import MountType
from opentrons.protocol_engine.types import PipetteName
from opentrons.protocol_engine.execution import LoadedPipette
from opentrons.protocol_engine.commands import (
LoadPipetteRequest,
LoadPipetteResult,
)
def test_load_pipette_request() -> None:
"""It should have a LoadPipetteRequest model."""
request = LoadPipetteRequest(
pipetteName=PipetteName.P300_SINGLE,
mount=MountType.LEFT,
)
assert request.pipetteName == "p300_single"
assert request.mount == MountType.LEFT
assert request.pipetteId is None
def test_load_pipette_result() -> None:
"""It should have a LoadPipetteResult model."""
result = LoadPipetteResult(pipetteId="pipette-id")
assert result.pipetteId == "pipette-id"
async def test_load_pipette_implementation(mock_handlers: AsyncMock) -> None:
"""A LoadPipetteRequest should have an execution implementation."""
mock_handlers.equipment.load_pipette.return_value = LoadedPipette(
pipette_id="pipette-id",
)
request = LoadPipetteRequest(
pipetteName=PipetteName.P300_SINGLE,
mount=MountType.LEFT,
pipetteId="some id"
)
impl = request.get_implementation()
result = await impl.execute(mock_handlers)
assert result == LoadPipetteResult(pipetteId="pipette-id")
mock_handlers.equipment.load_pipette.assert_called_with(
pipette_name="p300_single",
mount=MountType.LEFT,
pipette_id="some id",
)
| 30.769231
| 77
| 0.729375
|
26b81653e71ca4f4e282b12aa87765f908bdf8e0
| 1,548
|
py
|
Python
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListCustomJobs
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_sync]
from google.cloud import aiplatform_v1beta1
def sample_list_custom_jobs():
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListCustomJobsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_custom_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_sync]
| 32.93617
| 85
| 0.76615
|
11c2f786fdfb21cc00e7cc1331bd09d987aeb053
| 3,713
|
py
|
Python
|
flexget/utils/json.py
|
guillaumelamirand/Flexget
|
51945105c253a64c079eb5a62680392ce9013794
|
[
"MIT"
] | 2
|
2017-03-25T10:39:25.000Z
|
2019-12-11T03:46:26.000Z
|
flexget/utils/json.py
|
guillaumelamirand/Flexget
|
51945105c253a64c079eb5a62680392ce9013794
|
[
"MIT"
] | null | null | null |
flexget/utils/json.py
|
guillaumelamirand/Flexget
|
51945105c253a64c079eb5a62680392ce9013794
|
[
"MIT"
] | null | null | null |
"""
Helper module that can load whatever version of the json module is available.
Plugins can just import the methods from this module.
Also allows date and datetime objects to be encoded/decoded.
"""
import datetime
from flexget.plugin import DependencyError
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
try:
# Google Appengine offers simplejson via django
from django.utils import simplejson as json
except ImportError:
raise DependencyError(missing='simplejson')
DATE_FMT = '%Y-%m-%d'
ISO8601_FMT = '%Y-%m-%dT%H:%M:%SZ'
class DTDecoder(json.JSONDecoder):
def decode(self, obj, **kwargs):
# The built-in `json` library returns `unicode` for strings, except for empty strings.
# Patch this for consistency so that an empty document decodes to '' (in Python 3 the
# decoder receives `str`, so compare against '' rather than b'').
if obj == '':
return ''
if isinstance(obj, str):
dt_str = obj.strip('"')
try:
return datetime.datetime.strptime(dt_str, ISO8601_FMT)
except (ValueError, TypeError):
try:
return datetime.datetime.strptime(dt_str, DATE_FMT)
except (ValueError, TypeError):
pass
return super().decode(obj, **kwargs)
def _datetime_encoder(obj):
if isinstance(obj, datetime.datetime):
return obj.strftime(ISO8601_FMT)
elif isinstance(obj, datetime.date):
return obj.strftime(DATE_FMT)
raise TypeError
def _datetime_decoder(dict_):
for key, value in dict_.items():
# The built-in `json` library returns `unicode` for strings, except for empty strings.
# Patch this for consistency so that an empty value is normalised to '' (compare against
# '' rather than b'' under Python 3).
if value == '':
dict_[key] = ''
continue
try:
datetime_obj = datetime.datetime.strptime(value, ISO8601_FMT)
dict_[key] = datetime_obj
except (ValueError, TypeError):
try:
date_obj = datetime.datetime.strptime(value, DATE_FMT)
dict_[key] = date_obj.date()
except (ValueError, TypeError):
continue
return dict_
def _empty_unicode_decoder(dict_):
for key, value in dict_.items():
# The built-in `json` library returns `unicode` for strings, except for empty strings.
# Patch this for consistency so that an empty value is normalised to '' (compare against
# '' rather than b'' under Python 3).
if value == '':
dict_[key] = ''
continue
return dict_
def dumps(*args, **kwargs):
if kwargs.pop('encode_datetime', False):
kwargs['default'] = _datetime_encoder
return json.dumps(*args, **kwargs)
def dump(*args, **kwargs):
if kwargs.pop('encode_datetime', False):
kwargs['default'] = _datetime_encoder
return json.dump(*args, **kwargs)
def loads(*args, **kwargs):
"""
:param bool decode_datetime: If `True`, dates in ISO8601 format will be deserialized to :class:`datetime.datetime`
objects.
"""
if kwargs.pop('decode_datetime', False):
kwargs['object_hook'] = _datetime_decoder
kwargs['cls'] = DTDecoder
else:
kwargs['object_hook'] = _empty_unicode_decoder
return json.loads(*args, **kwargs)
def load(*args, **kwargs):
"""
:param bool decode_datetime: If `True`, dates in ISO8601 format will be deserialized to :class:`datetime.datetime`
objects.
"""
if kwargs.pop('decode_datetime', False):
kwargs['object_hook'] = _datetime_decoder
kwargs['cls'] = DTDecoder
else:
kwargs['object_hook'] = _empty_unicode_decoder
return json.load(*args, **kwargs)
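# Editorial usage sketch (not part of the original module): round-tripping a datetime
# through the helpers above using the optional encode_datetime/decode_datetime flags.
if __name__ == '__main__':
    original = {'when': datetime.datetime(2020, 1, 2, 3, 4, 5)}
    payload = dumps(original, encode_datetime=True)   # '{"when": "2020-01-02T03:04:05Z"}'
    restored = loads(payload, decode_datetime=True)   # datetime restored by the object hook
    assert restored['when'] == original['when']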
| 30.434426
| 118
| 0.623216
|
3ab46f72a22166367457872e618dda97b1177f15
| 553
|
py
|
Python
|
clock/shifts/migrations/0004_shift_key.py
|
chgad/django-clock
|
f855cd1253574c0582ed53a0ac34206c242f04c9
|
[
"MIT"
] | null | null | null |
clock/shifts/migrations/0004_shift_key.py
|
chgad/django-clock
|
f855cd1253574c0582ed53a0ac34206c242f04c9
|
[
"MIT"
] | null | null | null |
clock/shifts/migrations/0004_shift_key.py
|
chgad/django-clock
|
f855cd1253574c0582ed53a0ac34206c242f04c9
|
[
"MIT"
] | 1
|
2020-03-13T14:42:11.000Z
|
2020-03-13T14:42:11.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-08 12:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shifts', '0003_shift_bool_finished'),
]
operations = [
migrations.AddField(
model_name='shift',
name='key',
field=models.CharField(blank=True, choices=[('S', 'Sick'), ('V', 'Vacation')], max_length=2,
verbose_name='Key'),
),
]
| 26.333333
| 104
| 0.56962
|
b719fd643f77bed23220ba3bf652da81540bc5af
| 865
|
py
|
Python
|
python-threatexchange/threatexchange/signal_type/tests/test_tlsh_hash_and_match.py
|
crhird/ThreatExchange
|
bb1d80c414fd44fb94ade28dc332126da2db4e51
|
[
"BSD-3-Clause"
] | 2
|
2021-04-04T19:52:32.000Z
|
2021-07-12T15:53:29.000Z
|
python-threatexchange/threatexchange/signal_type/tests/test_tlsh_hash_and_match.py
|
crhird/ThreatExchange
|
bb1d80c414fd44fb94ade28dc332126da2db4e51
|
[
"BSD-3-Clause"
] | null | null | null |
python-threatexchange/threatexchange/signal_type/tests/test_tlsh_hash_and_match.py
|
crhird/ThreatExchange
|
bb1d80c414fd44fb94ade28dc332126da2db4e51
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from threatexchange.signal_type import tlsh_pdf
TEST_PDF_COMPLETE_TLSH = (
"T145B2859FE708266211A3026277C7AEE5FF76402C636AD5BA2C2CC11C23A1F2957773D5"
)
class TLSHHasherModuleUnitTest(unittest.TestCase):
def test_tlsh_from_file(self):
tlsh_complete_data_hash = tlsh_pdf.TLSHSignal.hash_from_file(
"data/test_pdf_complete.pdf"
)
tlsh_half_data_hash = tlsh_pdf.TLSHSignal.hash_from_file(
"data/test_pdf_half.pdf"
)
# ToDo find way to have sub in signal data here
tlsh_complete_match = tlsh_pdf.TLSHSignal().match_hash(tlsh_complete_data_hash)
tlsh_half_complete_match = tlsh_pdf.TLSHSignal().match_hash(tlsh_half_data_hash)
assert tlsh_complete_data_hash == TEST_PDF_COMPLETE_TLSH
| 37.608696
| 88
| 0.756069
|
5c4542f7405f15067732b7ae3ef929be611f6a1c
| 2,890
|
py
|
Python
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GrantSagInstanceToVbrRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GrantSagInstanceToVbrRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GrantSagInstanceToVbrRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class GrantSagInstanceToVbrRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'GrantSagInstanceToVbr','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_VbrRegionId(self): # String
return self.get_query_params().get('VbrRegionId')
def set_VbrRegionId(self, VbrRegionId): # String
self.add_query_param('VbrRegionId', VbrRegionId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VbrInstanceId(self): # String
return self.get_query_params().get('VbrInstanceId')
def set_VbrInstanceId(self, VbrInstanceId): # String
self.add_query_param('VbrInstanceId', VbrInstanceId)
def get_SmartAGId(self): # String
return self.get_query_params().get('SmartAGId')
def set_SmartAGId(self, SmartAGId): # String
self.add_query_param('SmartAGId', SmartAGId)
def get_VbrUid(self): # Long
return self.get_query_params().get('VbrUid')
def set_VbrUid(self, VbrUid): # Long
self.add_query_param('VbrUid', VbrUid)
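# Editorial usage sketch (not part of the generated SDK file). The access key, region and
# resource IDs below are placeholders; the send pattern via AcsClient is the standard
# aliyunsdkcore flow.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = GrantSagInstanceToVbrRequest()
    request.set_SmartAGId('sag-xxxxxxxxxxxx')       # placeholder smart access gateway ID
    request.set_VbrInstanceId('vbr-xxxxxxxxxxxx')   # placeholder VBR instance ID
    request.set_VbrRegionId('cn-hangzhou')
    response = client.do_action_with_exception(request)
    print(response)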
| 39.054054
| 88
| 0.761938
|
58bb492d3afedce1826c50a50a0849ede1d732ba
| 11,113
|
py
|
Python
|
tools/build/test/link.py
|
lijgame/boost
|
ec2214a19cdddd1048058321a8105dd0231dac47
|
[
"BSL-1.0"
] | 1
|
2018-12-15T19:57:24.000Z
|
2018-12-15T19:57:24.000Z
|
thirdparty-cpp/boost_1_62_0/tools/build/test/link.py
|
nxplatform/nx-mobile
|
0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5
|
[
"Apache-2.0"
] | null | null | null |
thirdparty-cpp/boost_1_62_0/tools/build/test/link.py
|
nxplatform/nx-mobile
|
0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2014-2015 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests the link-directory rule used to create the
# common boost/ directory in the new git layout.
import BoostBuild
def ignore_config(t):
"""These files are created by the configuration logic in link.jam
They may or may not exist, depending on the system."""
t.ignore("bin/symlink/test-hardlink")
t.ignore("bin/test-hardlink-source")
t.ignore("bin/test-symlink")
t.ignore("bin/test-symlink-source")
def test_basic():
"""Test creation of a single link"""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.run_build_system()
t.expect_addition("include/file1.h")
t.expect_content("include/file1.h", "file1")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_merge_two():
"""Test merging two directories"""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/file2.h", "file2")
t.run_build_system()
t.expect_addition("include/file1.h")
t.expect_content("include/file1.h", "file1")
t.expect_addition("include/file2.h")
t.expect_content("include/file2.h", "file2")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_merge_existing(group1, group2):
"""Test adding a link when a different symlink already exists"""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/file2.h", "file2")
t.run_build_system(group1)
if "dir1-link" in group1:
t.expect_addition("include/file1.h")
t.expect_content("include/file1.h", "file1")
if "dir2-link" in group1:
t.expect_addition("include/file2.h")
t.expect_content("include/file2.h", "file2")
ignore_config(t)
t.expect_nothing_more()
t.run_build_system(group2)
if "dir1-link" in group2:
if "dir1-link" not in group1:
t.expect_addition("include/file1.h")
t.expect_content("include/file1.h", "file1")
else:
t.ignore_removal("include/file1.h")
if "dir2-link" in group2:
if "dir2-link" not in group1:
t.expect_addition("include/file2.h")
t.expect_content("include/file2.h", "file2")
else:
t.ignore_removal("include/file2.h")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_merge_existing_all():
test_merge_existing(["dir1-link"], ["dir2-link"])
test_merge_existing(["dir2-link"], ["dir1-link"])
test_merge_existing(["dir1-link"], ["dir1-link", "dir2-link"])
test_merge_existing(["dir2-link"], ["dir1-link", "dir2-link"])
def test_merge_recursive():
"Test merging several directories including common prefixes"
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
link-directory dir3-link : src/dir3/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/file2.h", "file2")
t.write("src/dir2/include/nested/file3.h", "file3")
t.write("src/dir3/include/nested/file4.h", "file4")
t.run_build_system()
t.expect_addition("include/file1.h")
t.expect_content("include/file1.h", "file1")
t.expect_addition("include/file2.h")
t.expect_content("include/file2.h", "file2")
t.expect_addition("include/nested/file3.h")
t.expect_content("include/nested/file3.h", "file3")
t.expect_addition("include/nested/file4.h")
t.expect_content("include/nested/file4.h", "file4")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_merge_recursive_existing(group1, group2):
"Test merging several directories including common prefixes."
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
link-directory dir3-link : src/dir3/include : <location>. ;
link-directory dir4-link : src/dir4/include : <location>. ;
link-directory dir5-link : src/dir5/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/nested/file2.h", "file2")
t.write("src/dir3/include/nested/file3.h", "file3")
t.write("src/dir4/include/nested/xxx/yyy/file4.h", "file4")
t.write("src/dir5/include/nested/xxx/yyy/file5.h", "file5")
t.run_build_system(group1)
t.run_build_system(group2 + ["-d+12"])
t.ignore_addition("include/file1.h")
t.ignore_addition("include/nested/file2.h")
t.ignore_addition("include/nested/file3.h")
t.ignore_addition("include/nested/xxx/yyy/file4.h")
t.ignore_addition("include/nested/xxx/yyy/file5.h")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_merge_recursive_existing_all():
# These should create a link
test_merge_recursive_existing(["dir2-link"], ["dir2-link", "dir1-link"])
test_merge_recursive_existing(["dir2-link"], ["dir1-link", "dir2-link"])
# These should create a directory
test_merge_recursive_existing(["dir2-link"], ["dir2-link", "dir3-link"])
test_merge_recursive_existing(["dir2-link"], ["dir3-link", "dir2-link"])
# It should work even if we have to create many intermediate subdirectories
test_merge_recursive_existing(["dir4-link"], ["dir4-link", "dir5-link"])
test_merge_recursive_existing(["dir4-link"], ["dir5-link", "dir4-link"])
def test_include_scan():
"""Make sure that the #include scanner finds the headers"""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
obj test : test.cpp :
<include>include
<implicit-dependency>dir1-link
<implicit-dependency>dir2-link ;
""")
t.write("src/dir1/include/file1.h", "#include <file2.h>\n")
t.write("src/dir2/include/file2.h", "int f();\n")
t.write("test.cpp", """\
#include <file1.h>
int main() { f(); }
""");
t.run_build_system(["test"])
t.expect_addition("bin/$toolset/debug/test.obj")
t.run_build_system()
t.expect_nothing_more()
t.cleanup()
def test_include_scan_merge_existing():
"""Make sure that files are replaced if needed when merging in
a new directory"""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
obj test : test.cpp :
<include>include
<implicit-dependency>dir1-link
<implicit-dependency>dir2-link ;
""")
t.write("src/dir1/include/file1.h", "int f();")
t.write("src/dir2/include/file2.h", "#include <file1.h>")
t.write("test.cpp", """\
#include <file2.h>
int main() { f(); }
""")
t.run_build_system(["dir2-link"])
t.run_build_system(["test"])
t.expect_addition("include/file1.h")
t.expect_addition("bin/$toolset/debug/test.obj")
t.expect_nothing_more()
t.cleanup()
def test_update_file_link(params1, params2):
"""Tests the behavior of updates when changing the link mode.
The link needs to be updated iff the original was a copy."""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
import project ;
import property-set ;
import modules ;
if --no-symlinks in [ modules.peek : ARGV ]
{
modules.poke link : .can-symlink : false ;
}
if --no-hardlinks in [ modules.peek : ARGV ]
{
modules.poke link : .can-hardlink : false ;
}
.project = [ project.current ] ;
.has-files = [ glob include/file1.h ] ;
rule can-link ( properties * ) {
if ( ! [ link.can-symlink $(.project) : [ property-set.empty ] ] ) &&
( ! [ link.can-hardlink $(.project) : [ property-set.empty ] ] )
{
ECHO links unsupported ;
}
}
# Use two directories so that we link to individual files.
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
alias check-linking : : <conditional>@can-link ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/file2.h", "file2")
t.run_build_system(params1)
ignore_config(t)
t.expect_addition("include/file1.h")
t.expect_addition("include/file2.h")
t.expect_nothing_more()
using_links = "links unsupported" not in t.stdout()
t.touch("src/dir1/include/file1.h")
t.run_build_system(params2)
if not using_links: t.expect_touch("include/file1.h")
ignore_config(t)
t.expect_nothing_more()
t.cleanup()
def test_update_file_link_all():
"""Test all nine possible combinations of two runs."""
possible_args = [[], ["--no-symlinks"], ["--no-symlinks", "--no-hardlinks"]]
for arg1 in possible_args:
for arg2 in possible_args:
test_update_file_link(arg1, arg2)
def test_error_duplicate():
"""Test that linking a single file from
multiple sources causes a hard error."""
t = BoostBuild.Tester()
t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
t.write("src/dir1/include/file1.h", "file1")
t.write("src/dir2/include/file1.h", "file2")
t.run_build_system(status=1)
t.expect_output_lines(
["error: Cannot create link include/file1.h to src/dir2/include/file1.h.",
"error: Link previously defined to another file, src/dir1/include/file1.h."])
t.cleanup()
test_basic()
test_merge_two()
test_merge_existing_all()
test_merge_recursive()
test_merge_recursive_existing_all()
test_include_scan()
test_include_scan_merge_existing()
test_update_file_link_all()
test_error_duplicate()
| 33.173134
| 87
| 0.632233
|
aafafab425325678319f0ddba6a28351a784e5d6
| 1,174
|
py
|
Python
|
Class 12/Python Programs/Stacks & Queues/stacks with class.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
Class 12/Python Programs/Stacks & Queues/stacks with class.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
Class 12/Python Programs/Stacks & Queues/stacks with class.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
class stack:
s=[]
def push(self):
a=input("Enter any number :")
stack.s.append(a)
def display(self):
if (stack.s==[]):
print("Stack Empty")
else:
l=len(stack.s)
for i in range(l-1,-1,-1):
print (stack.s[i])
def peek(self):
if (stack.s==[]):
print("Stack Empty")
else:
print("Top Most Element :- ",stack.s[-1])
a=stack()
c="y"
while(c=="y"):
print ("Enter 1. To PUSH ")
print ("Enter 2. To POP ")
print ("Enter 3. To PEEK ")
print ("Enter 4. To Display ")
print("________________________________________________________________")
choice=int(input("Enter Your Choice :- "))
if (choice==1):
a.push()
elif (choice==2):
if (a.s==[]):
print ("Stack Empty")
else:
print ("Deleted element is : ",a.s.pop())
elif (choice==3):
a.peek()
elif (choice==4):
a.display()
else:
print("Wrong Input")
c=input("If You Wanna Continue Enter 'y' :- ")
if c!='y':
print("Bye")
break # exit the menu loop (a bare `quit` here was a no-op)
| 26.681818
| 78
| 0.471891
|
24929a436ece3d781e5f57e2213df8c8998eb753
| 17,600
|
py
|
Python
|
ERAN/tf_verify/deepg.py
|
ISCAS-PMC/deepg
|
528beda0fb4e7381b3b64a26ff72a582e5754b58
|
[
"Apache-2.0"
] | null | null | null |
ERAN/tf_verify/deepg.py
|
ISCAS-PMC/deepg
|
528beda0fb4e7381b3b64a26ff72a582e5754b58
|
[
"Apache-2.0"
] | null | null | null |
ERAN/tf_verify/deepg.py
|
ISCAS-PMC/deepg
|
528beda0fb4e7381b3b64a26ff72a582e5754b58
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import csv
import os
import copy
import numpy as np
import sys
import tensorflow as tf
from read_net_file import read_net
sys.path.insert(0, '../ELINA/python_interface/')
from eran import ERAN
from elina_coeff import *
from elina_linexpr0 import *
import time
EPS = 10**(-9)
n_rows, n_cols, n_channels = 0, 0, 0
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def normalize(image, means, stds, dataset, is_conv):
if dataset == 'mnist' or dataset == 'fashion':
for i in range(len(image)):
image[i] = (image[i] - means[0])/stds[0]
else:
for i in range(3072):
image[i] = (image[i] - means[i % 3]) / stds[i % 3]
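# e.g. with typical MNIST statistics mean = 0.1307 and std = 0.3081 (the actual values are
# read from the network file), a pixel value of 0.5 maps to (0.5 - 0.1307) / 0.3081 ≈ 1.199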
def normalize_poly(num_params, lexpr_cst, lexpr_weights, lexpr_dim, uexpr_cst, uexpr_weights, uexpr_dim, means, stds, dataset):
if dataset == 'mnist' or dataset == 'fashion':
for i in range(len(lexpr_cst)):
lexpr_cst[i] = (lexpr_cst[i] - means[0]) / stds[0]
uexpr_cst[i] = (uexpr_cst[i] - means[0]) / stds[0]
for i in range(len(lexpr_weights)):
lexpr_weights[i] /= stds[0]
uexpr_weights[i] /= stds[0]
else:
for i in range(len(lexpr_cst)):
lexpr_cst[i] = (lexpr_cst[i] - means[i % 3]) / stds[i % 3]
uexpr_cst[i] = (uexpr_cst[i] - means[i % 3]) / stds[i % 3]
for i in range(len(lexpr_weights)):
lexpr_weights[i] /= stds[(i // num_params) % 3]
uexpr_weights[i] /= stds[(i // num_params) % 3]
def show_ascii_spec(lb, ub):
print('==================================================================')
for i in range(n_rows):
print(' ', end='')
for j in range(n_cols):
print('#' if lb[n_cols*n_channels*i+j*n_channels] >= 0.5 else ' ', end='')
print(' | ', end='')
for j in range(n_cols):
print('#' if ub[n_cols*n_channels*i+j*n_channels] >= 0.5 else ' ', end='')
print(' | ')
print('==================================================================')
def main():
parser = argparse.ArgumentParser(description='Analyze NN.')
parser.add_argument('--net', type=str, help='Neural network to analyze')
parser.add_argument('--dataset', type=str, default='mnist', help='Dataset')
parser.add_argument('--data_dir', type=str, help='Directory which contains data')
parser.add_argument('--data_root', type=str, help='Directory which contains data')
parser.add_argument('--num_params', type=int, default=0, help='Number of transformation parameters')
parser.add_argument('--num_tests', type=int, default=None, help='Number of images to test')
parser.add_argument('--from_test', type=int, default=0, help='Number of images to test')
parser.add_argument('--test_idx', type=int, default=None, help='Index to test')
parser.add_argument('--debug', action='store_true', help='Whether to display debug info')
parser.add_argument('--attack', action='store_true', help='Whether to attack')
parser.add_argument('--timeout_lp', type=float, default=1, help='timeout for the LP solver')
parser.add_argument('--timeout_milp', type=float, default=1, help='timeout for the MILP solver')
parser.add_argument('--use_area_heuristic', type=str2bool, default=True, help='whether to use area heuristic for the DeepPoly ReLU approximation')
args = parser.parse_args()
#LRJ add
deepsymbol_input_folder=args.data_dir+'/Batch_Input_DeepG'
#print(deepsymbol_input_folder)
if not os.path.exists(deepsymbol_input_folder):
os.makedirs(deepsymbol_input_folder)
else:
for root,dirs,files in os.walk(deepsymbol_input_folder):
for name in files:
if name.endswith('.in'):
os.remove(os.path.join(root,name))
#print("delete "+os.path.join(root,name))
global n_rows, n_cols, n_channels
if args.dataset == 'cifar10':
n_rows, n_cols, n_channels = 32, 32, 3
else:
n_rows, n_cols, n_channels = 28, 28, 1
filename, file_extension = os.path.splitext(args.net)
is_trained_with_pytorch = False
is_saved_tf_model = False
if(file_extension == ".net" or file_extension == ".pyt"):
is_trained_with_pytorch = True
elif(file_extension == ".meta"):
is_saved_tf_model = True
elif(file_extension != ".tf"):
print("file extension not supported")
exit(1)
is_conv = False
if(is_saved_tf_model):
netfolder = os.path.dirname(args.net)
tf.logging.set_verbosity(tf.logging.ERROR)
sess = tf.Session()
saver = tf.train.import_meta_graph(args.net)
saver.restore(sess, tf.train.latest_checkpoint(netfolder+'/'))
eran = ERAN(sess.graph.get_tensor_by_name('logits:0'), sess)
else:
if args.dataset == 'mnist' or args.dataset == 'fashion':
num_pixels = 784
else:
num_pixels = 3072
model, is_conv, means, stds = read_net(args.net, num_pixels, is_trained_with_pytorch)
eran = ERAN(model)
csvfile = open('../../code/datasets/{}_test.csv'.format(args.dataset), 'r')
tests = csv.reader(csvfile, delimiter=',')
total, attacked, standard_correct, tot_time = 0, 0, 0, 0
correct_box, correct_poly = 0, 0
cver_box, cver_poly = [], []
for i, test in enumerate(tests):
if args.test_idx is not None and i != args.test_idx:
continue
attacks_file = os.path.join(args.data_dir, 'attack_{}.csv'.format(i))
if args.num_tests is not None and i >= args.num_tests:
break
print('Test {}:'.format(i))
#LRJ add
current_test = i
if args.dataset == 'mnist' or args.dataset == 'fashion':
image = np.float64(test[1:len(test)])
else:
if is_trained_with_pytorch:
image = np.float64(test[1:len(test)])
else:
image = np.float64(test[1:len(test)]) - 0.5
spec_lb = np.copy(image)
spec_ub = np.copy(image)
if(is_trained_with_pytorch):
normalize(spec_lb, means, stds, args.dataset, is_conv)
normalize(spec_ub, means, stds, args.dataset, is_conv)
label, nn, nlb, nub = eran.analyze_box(spec_lb, spec_ub, 'deeppoly', args.timeout_lp, args.timeout_milp, args.use_area_heuristic)
print('Label: ', label)
if label != int(test[0]):
print('Label {}, but true label is {}, skipping...'.format(label, int(test[0])))
print('Standard accuracy: {} percent'.format(standard_correct/float(i+1)*100))
continue
else:
standard_correct += 1
print('Standard accuracy: {} percent'.format(standard_correct/float(i+1)*100))
dim = n_rows * n_cols * n_channels
ok_box, ok_poly = True, True
k = args.num_params + 1 + 1 + dim
attack_imgs, checked, attack_pass = [], [], 0
cex_found = False
if args.attack:
with open(attacks_file, 'r') as fin:
lines = fin.readlines()
for j in range(0, len(lines), args.num_params+1):
params = [float(line[:-1]) for line in lines[j:j+args.num_params]]
tokens = lines[j+args.num_params].split(',')
values = np.array(list(map(float, tokens)))
attack_lb = values[::2]
attack_ub = values[1::2]
if is_trained_with_pytorch:
normalize(attack_lb, means, stds, args.dataset, is_conv)
normalize(attack_ub, means, stds, args.dataset, is_conv)
else:
attack_lb -= 0.5
attack_ub -= 0.5
attack_imgs.append((params, attack_lb, attack_ub))
checked.append(False)
predict_label, _, _, _ = eran.analyze_box(
attack_lb[:dim], attack_ub[:dim], 'deeppoly',
args.timeout_lp, args.timeout_milp, args.use_area_heuristic, 0)
if predict_label != int(test[0]):
print('counter-example, params: ', params, ', predicted label: ', predict_label)
cex_found = True
break
else:
attack_pass += 1
print('tot attacks: ', len(attack_imgs))
specs_file = os.path.join(args.data_dir, '{}.csv'.format(i))
#LRJ add
spec_lb_all=[]
spec_ub_all=[]
begtime = time.time()
with open(specs_file, 'r') as fin:
lines = fin.readlines()
print('Number of lines: ', len(lines))
assert len(lines) % k == 0
spec_lb = np.zeros(args.num_params + dim)
spec_ub = np.zeros(args.num_params + dim)
expr_size = args.num_params
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
ver_chunks_box, ver_chunks_poly, tot_chunks = 0, 0, 0
for i, line in enumerate(lines):
if i % k < args.num_params:
# read specs for the parameters
values = np.array(list(map(float, line[:-1].split(' '))))
assert values.shape[0] == 2
param_idx = i % k
spec_lb[dim + param_idx] = values[0]
spec_ub[dim + param_idx] = values[1]
if args.debug:
print('parameter %d: [%.4f, %.4f]' % (param_idx, values[0], values[1]))
elif i % k == args.num_params:
# read interval bounds for image pixels
values = np.array(list(map(float, line[:-1].split(','))))
#print(values)
#print(len(values))
spec_lb[:dim] = values[::2]
spec_ub[:dim] = values[1::2]
# if args.debug:
# show_ascii_spec(spec_lb, spec_ub)
elif i % k < k - 1:
# read polyhedra constraints for image pixels
tokens = line[:-1].split(' ')
assert len(tokens) == 2 + 2*args.num_params + 1
bias_lower, weights_lower = float(tokens[0]), list(map(float, tokens[1:1+args.num_params]))
assert tokens[args.num_params+1] == '|'
bias_upper, weights_upper = float(tokens[args.num_params+2]), list(map(float, tokens[3+args.num_params:]))
assert len(weights_lower) == args.num_params
assert len(weights_upper) == args.num_params
lexpr_cst.append(bias_lower)
uexpr_cst.append(bias_upper)
for j in range(args.num_params):
lexpr_dim.append(dim + j)
uexpr_dim.append(dim + j)
lexpr_weights.append(weights_lower[j])
uexpr_weights.append(weights_upper[j])
else:
assert(line == 'SPEC_FINISHED\n')
for p_idx in range(args.num_params):
lexpr_cst.append(spec_lb[dim + p_idx])
for l in range(args.num_params):
lexpr_weights.append(0)
lexpr_dim.append(dim + l)
uexpr_cst.append(spec_ub[dim + p_idx])
for l in range(args.num_params):
uexpr_weights.append(0)
uexpr_dim.append(dim + l)
#LRJ Add Data before normalize
if len(spec_lb_all):
spec_lb_all=np.minimum(spec_lb_all,spec_lb[:dim])
else:
spec_lb_all=list(spec_lb[:dim])
if len(spec_ub_all):
spec_ub_all=np.maximum(spec_ub_all,spec_ub[:dim])
else:
spec_ub_all=list(spec_ub[:dim])
#print(spec_lb_all)
if(is_trained_with_pytorch):
normalize(spec_lb[:dim], means, stds, args.dataset, is_conv)
normalize(spec_ub[:dim], means, stds, args.dataset, is_conv)
normalize_poly(args.num_params, lexpr_cst, lexpr_weights, lexpr_dim, uexpr_cst, uexpr_weights, uexpr_dim, means, stds, args.dataset)
for attack_idx, (attack_params, attack_lb, attack_ub) in enumerate(attack_imgs):
ok_attack = True
for j in range(num_pixels):
low, up = lexpr_cst[j], uexpr_cst[j]
for idx in range(args.num_params):
low += lexpr_weights[j * args.num_params + idx] * attack_params[idx]
up += uexpr_weights[j * args.num_params + idx] * attack_params[idx]
if low > attack_lb[j] + EPS or attack_ub[j] > up + EPS:
ok_attack = False
if ok_attack:
checked[attack_idx] = True
# print('checked ', attack_idx)
if args.debug:
print('Running the analysis...')
t_begin = time.time()
perturbed_label_poly, _, _, _ = eran.analyze_box(
spec_lb, spec_ub, 'deeppoly',
args.timeout_lp, args.timeout_milp, args.use_area_heuristic, 0,
lexpr_weights, lexpr_cst, lexpr_dim,
uexpr_weights, uexpr_cst, uexpr_dim,
expr_size)
perturbed_label_box, _, _, _ = eran.analyze_box(
spec_lb[:dim], spec_ub[:dim], 'deeppoly',
args.timeout_lp, args.timeout_milp, args.use_area_heuristic, 0)
t_end = time.time()
#LRJ add normalized data below
#print(spec_lb[:dim])
#if len(spec_lb_all):
# spec_lb_all=np.minimum(spec_lb_all,spec_lb[:dim])
#else:
# spec_lb_all=spec_lb[:dim]
#if len(spec_ub_all):
# spec_ub_all=np.maximum(spec_ub_all,spec_ub[:dim])
#else:
# spec_ub_all=spec_ub[:dim]
print('DeepG: ', perturbed_label_poly, '\tInterval: ', perturbed_label_box, '\tlabel: ', label, '[Time: %.4f]' % (t_end - t_begin))
tot_chunks += 1
if perturbed_label_box != label:
ok_box = False
else:
ver_chunks_box += 1
if perturbed_label_poly != label:
ok_poly = False
else:
ver_chunks_poly += 1
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
#LRJ add
#print(len(spec_lb_all))
#print(len(spec_ub_all))
in_file = os.path.join(deepsymbol_input_folder, 'in_{}.in'.format(current_test))
fp=open(in_file,"w")
for index in range(len(spec_lb_all)):
fp.write(str(spec_lb_all[index])+' '+str(spec_ub_all[index])+'\n')
fp.close()
total += 1
if ok_box:
correct_box += 1
if ok_poly:
correct_poly += 1
if cex_found:
assert (not ok_box) and (not ok_poly)
attacked += 1
cver_poly.append(ver_chunks_poly / float(tot_chunks))
cver_box.append(ver_chunks_box / float(tot_chunks))
tot_time += time.time() - begtime
print('Verified[box]: {}, Verified[poly]: {}, CEX found: {}'.format(ok_box, ok_poly, cex_found))
assert not cex_found or not ok_box, 'ERROR! Found counter-example, but image was verified with box!'
assert not cex_found or not ok_poly, 'ERROR! Found counter-example, but image was verified with poly!'
print('Attacks found: %.2f percent, %d/%d' % (100.0*attacked/total, attacked, total))
print('[Box] Provably robust: %.2f percent, %d/%d' % (100.0*correct_box/total, correct_box, total))
print('[Poly] Provably robust: %.2f percent, %d/%d' % (100.0*correct_poly/total, correct_poly, total))
print('Empirically robust: %.2f percent, %d/%d' % (100.0*(total-attacked)/total, total-attacked, total))
print('[Box] Average chunks verified: %.2f percent' % (100.0*np.mean(cver_box)))
print('[Poly] Average chunks verified: %.2f percent' % (100.0*np.mean(cver_poly)))
print('Average time: ', tot_time/total)
print('Verified:%d Box Robust:%d Poly Robust:%d Box Robust Percentage:%.2f Poly Robust Percentage:%.2f' % (total,correct_box,correct_poly,100.0*correct_box/total,100.0*correct_poly/total))
if __name__ == '__main__':
main()
| 44.783715
| 196
| 0.535625
|
56c153ddf68516fcf301d238de44b5c1aa00d098
| 15,611
|
py
|
Python
|
.history/src/Simulador_20200711171152.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200711171152.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200711171152.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
SADIO = 0
INFECTADO_TIPO_1 = 1 # asymptomatic individuals and the initial infected one
INFECTADO_TIPO_2 = 2 # symptomatic
CURADO = 3
MORTO = 4
def __init__(
self,
tamanho_matriz, # number of rows and columns of the spherical matrix
percentual_inicial_tipo1, # initial fraction of the population infected as type 1
percentual_inicial_tipo2, # initial fraction of the population infected as type 2
chance_infeccao, # chance that a type 2 infected individual has of infecting a healthy one
chance_infeccao_tipo2, # chance that an infected individual becomes contagious (type 2)
chance_morte, # chance that a type 2 individual dies at the end of an update
atualizacoes_cura): # number of updates needed to cure a type 1 or type 2 individual
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
self.matriz_atualizacoes_cura = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
#self.matriz_status = self.df_individuos.to_numpy()
self.popular(tamanho_matriz)
self.lista_matrizes_status = []
# object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
# dataframe that stores the results of each update
self.dataframe = pd.DataFrame(dict,index = [0])
self.salvar_posicionamento()
def criar_individuo(self, status, posicao):
self.matriz_status[posicao[0], posicao[1]] = status
if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura
def salvar_posicionamento(self):
self.lista_matrizes_status.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
# iterate over the list of infectious individuals; each one tries to infect its neighbours
for indice_infectante in lista_infectantes:
# fetch the neighbours of the current infectious individual
lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)
# For each neighbour, if it is healthy, draw a random number to check whether it gets infected
for indice_vizinho in lista_vizinhos:
# check for HEALTHY (SADIO) status
if self.verifica_status(indice_vizinho) == self.SADIO:
# determine the new status (bug fix: use the instance parameters, not module-level globals)
novo_status = self.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
# if it became a type 1 infected
if novo_status == Individuo.INFECTADO_TIPO_1:
# add to the list of new type 1 infected
lista_novos_infectados_tipo1.append(indice_vizinho)
self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice_vizinho)
if novo_status == Individuo.INFECTADO_TIPO_2:
# add to the list of new type 2 infected
lista_novos_infectados_tipo2.append(indice_vizinho)
self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice_vizinho)
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def checagem_morte_individual(self, chance_morte, indice):
rng_morte = random.random()
if rng_morte <= chance_morte:
self.matriz_status[indice[0], indice[1]] = self.MORTO
return self.MORTO
else:
return self.checar_cura_individual(indice)
def checar_cura_individual(self, indice):
self.matriz_atualizacoes_cura[indice[0], indice[1]] = self.matriz_atualizacoes_cura[indice[0], indice[1]] - 1
if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:
self.matriz_status[indice[0], indice[1]] = self.CURADO
return self.CURADO
else:
return self.matriz_status[indice[0], indice[1]]
def checagem_morte_cura_lista(self, lista_infectantes_tipo2):
lista_curados = []
lista_mortos = []
for indice_infectante in lista_infectantes_tipo2:
self.checagem_morte_individual(self.chance_morte, indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.MORTO:
lista_mortos.append(indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_mortos, lista_curados
def checagem_cura_lista(self, lista_infectantes):
lista_curados = []
for indice_infectante in lista_infectantes:
self.checar_cura_individual(indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_curados
def iterar(self):
# Check for new infections caused by type 1 and type 2 infectious individuals
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
# Check death/cure of the type 2 infected
lista_mortos, lista_curados_t2 = self.checagem_morte_cura_lista(self.lista_infectados_tipo_2)
# Check cure of the type 1 infected
lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)
# remove the dead and the cured from the type 1 and type 2 infectious lists
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
if indice not in lista_mortos:
if indice not in lista_curados_t2:
nova_lista_infectados_t2.append(indice)
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
if indice not in lista_curados_t1:
nova_lista_infectados_t1.append(indice)
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
# update the number of dead (count cells whose status is MORTO)
# self.num_mortos = self.num_mortos + len(lista_mortos)
self.num_mortos = int(np.sum(self.matriz_status.toarray() == self.MORTO))
# update the number of cured (count cells whose status is CURADO)
print("cured this round")
print(len(lista_curados_t1) + len(lista_curados_t2))
# self.num_curados = self.num_curados + len(lista_curados_t1) + len(lista_curados_t2)
self.num_curados = int(np.sum(self.matriz_status.toarray() == self.CURADO))
# move the infectious individuals:
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
nova_lista_infectados_t1.append(self.mover_infectante(indice))
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
nova_lista_infectados_t2.append(self.mover_infectante(indice))
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
# add the new type 1 and type 2 infected to their respective lists
self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1
#populacao_sadia = self.dataframe.iloc[-1]['num_sadios'] - len(lista_novos_infectados_tipo2+lista_novos_infectados_tipo1+lista_curados_t1+lista_curados_t2+)
dict = {
'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - len(self.lista_infectados_tipo_1) - len(self.lista_infectados_tipo_2) ,
'num_infect_t1':len(self.lista_infectados_tipo_1),
'num_infect_t2':len(self.lista_infectados_tipo_2),
'num_curados':self.num_curados,
'num_mortos':self.num_mortos}
# dict = {
# 'num_sadios':self.dataframe.iloc[-1]['num_sadios'] - np.sum(self.matriz_status[self.matriz_status != 0].toarray()),
# 'num_infect_t1':np.sum(self.matriz_status[self.matriz_status == 1].toarray()),
# 'num_infect_t2':np.sum(self.matriz_status[self.matriz_status == 2].toarray()),
# 'num_curados':np.sum(self.matriz_status[self.matriz_status == 3].toarray()),
# 'num_mortos':np.sum(self.matriz_status[self.matriz_status == 4].toarray())}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
# print("num t1: ", len(self.lista_infectados_tipo_1))
# print("num t2: ", len(self.lista_infectados_tipo_2))
# print("num curados: ", self.num_curados)
# print("num mortos: ", self.num_mortos)
# print("---------")
# save the new status matrix
self.salvar_posicionamento()
# add 1 to the number of updates performed on the matrix
self.num_atualizacoes +=1
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
# random number for the chance of infecting the neighbour
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
# random number for the chance of a type 1 vs type 2 infection
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
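# Editorial note: with the defaults used at the bottom of this script
# (chance_infeccao = 0.3, chance_infeccao_tipo2 = 0.3), a healthy neighbour becomes
# type 2 with probability 0.3 * 0.3 = 0.09 and type 1 with probability 0.3 * 0.7 = 0.21
# per infectious contact.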
def popular(self, tamanho_matriz):
# list of possible index combinations of the data matrix
permutacoes = permutations(list(range(tamanho_matriz)),2)
# conversion to a list of (x, y) tuples
lista_indices = list(permutacoes)
# shuffle the indices
random.shuffle(lista_indices)
# create the first type 1 infected:
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
self.lista_infectados_tipo_1.append(indice)
# create the remaining type 1 infected
for i in range(1,self.num_inicial_tipo1):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice)
self.lista_infectados_tipo_1.append(indice)
# create the remaining type 2 infected:
for indice in range(self.num_inicial_tipo2):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice)
self.lista_infectados_tipo_2.append(indice)
def trocar(self,matriz,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux = matriz[x_fin,y_fin]
matriz[x_fin,y_fin] = matriz[x_ini,y_ini]
matriz[x_ini,y_ini] = aux
def verifica_status(self, indice):
return self.matriz_status[indice[0], indice[1]]
def mover_infectante(self, posicao_inicial):
pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
rng_posicao = random.random()
if rng_posicao <=0.25:
# move up
pos_x -= 1
elif rng_posicao <=0.5:
# move down
pos_x += 1
elif rng_posicao <=0.75:
# move left
pos_y -= 1
else:
# move right
pos_y += 1
posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
self.trocar(self.matriz_status, posicao_inicial, posicao_final)
self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)
return posicao_final
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.3
chance_morte = 0.1
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.0
sim = Simulador(
5,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
while (sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2']) > 0:
#plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
print(sim.dataframe.iloc[-1])
sim.iterar()
#print(sim.dataframe.iloc[-1])
#print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
print(sim.dataframe)
#plt.show()
# for i in range(12):
# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)
# print(i)
# print("Status")
# print(sim.matriz_status.toarray())
# print("Cura")
# print(sim.matriz_atualizacoes_cura.toarray())
# sim.iterar()
# m = sim.matriz_atualizacoes_cura[sim.matriz_status == 1 or sim.matriz_status == 2].toarray()
# print(m)
#plt.show()
#print(sim.dataframe)
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# sim.iterar()
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# print(sim.dataframe)
# print("status inicial: ", sim.df_individuos[1][0].status)
# print("Novos infectados: ", sim.verificar_infeccao(sim.lista_infectados_tipo_1))
# plt.show()
| 40.443005
| 170
| 0.652681
|
467cc39770037319aa8935bbd96bb39e69359504
| 3,323
|
py
|
Python
|
src/pytorch_adapt/hooks/optimizer.py
|
MarkusSagen/pytorch-adapt
|
947b9f1b748d2078cecbf4a00c34f73108d9ecde
|
[
"MIT"
] | 1
|
2021-12-15T19:36:01.000Z
|
2021-12-15T19:36:01.000Z
|
src/pytorch_adapt/hooks/optimizer.py
|
MarkusSagen/pytorch-adapt
|
947b9f1b748d2078cecbf4a00c34f73108d9ecde
|
[
"MIT"
] | null | null | null |
src/pytorch_adapt/hooks/optimizer.py
|
MarkusSagen/pytorch-adapt
|
947b9f1b748d2078cecbf4a00c34f73108d9ecde
|
[
"MIT"
] | null | null | null |
from typing import Dict, List
import torch
from ..utils import common_functions as c_f
from ..weighters import BaseWeighter, MeanWeighter
from .base import BaseHook
from .reducers import BaseReducer, MeanReducer
class OptimizerHook(BaseHook):
"""
1. Executes the wrapped hook
2. Zeros all gradients
3. Backpropagates the loss
4. Steps the optimizer
"""
def __init__(
self,
hook: BaseHook,
optimizers: List[torch.optim.Optimizer],
weighter: BaseWeighter = None,
reducer: BaseReducer = None,
**kwargs
):
"""
Arguments:
hook: the hook that computes the losses
optimizers: a list of optimizers that will be used
to update model weights
weighter: weights the returned losses and outputs a
single value on which ```.backward()``` is called.
If ```None```, then it defaults to
[```MeanWeighter```][pytorch_adapt.weighters.mean_weighter.MeanWeighter].
reducer: a hook that reduces any unreduced losses to a single value.
If ```None```, then it defaults to
[```MeanReducer```][pytorch_adapt.hooks.reducers.MeanReducer].
"""
super().__init__(**kwargs)
self.hook = hook
self.optimizers = optimizers
self.weighter = c_f.default(weighter, MeanWeighter, {})
self.reducer = c_f.default(reducer, MeanReducer, {})
self.loss_components = {}
def call(self, losses, inputs):
""""""
losses, outputs = self.hook(losses, inputs)
combined = c_f.assert_dicts_are_disjoint(inputs, outputs)
losses, new_outputs = self.reducer(losses, combined)
outputs.update(new_outputs)
loss, self.loss_components = self.weighter(losses)
optimizers = self.optimizers
if isinstance(optimizers[0], str):
optimizers = c_f.extract(inputs, optimizers)
c_f.zero_back_step(loss, optimizers, inputs.get("custom_backward"))
return {}, outputs
def _loss_keys(self):
""""""
return []
def _out_keys(self):
""""""
return c_f.join_lists([self.hook.out_keys, self.reducer.out_keys])
def extra_repr(self):
return c_f.extra_repr(self, ["optimizers", "weighter"])
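
# --- Hedged usage sketch (added; not part of the original pytorch_adapt file) -
# Minimal illustration of wiring a loss-producing hook into OptimizerHook.
# _ToyLossHook and the input keys ("model", "src_imgs", "src_labels") are
# hypothetical; only the call/_loss_keys/_out_keys contract visible in this
# module is assumed, and the exact behaviour of BaseHook/MeanWeighter/
# MeanReducer internals is an assumption, not a guarantee.  Nothing here runs
# at import time.
def _optimizer_hook_usage_sketch():
    from torch import nn

    class _ToyLossHook(BaseHook):
        def call(self, losses, inputs):
            logits = inputs["model"](inputs["src_imgs"])
            loss = nn.functional.cross_entropy(logits, inputs["src_labels"])
            return {"src_loss": loss}, {}

        def _loss_keys(self):
            return ["src_loss"]

        def _out_keys(self):
            return []

    model = nn.Linear(4, 2)
    opt_hook = OptimizerHook(
        hook=_ToyLossHook(), optimizers=[torch.optim.SGD(model.parameters(), lr=0.1)]
    )
    inputs = {
        "model": model,
        "src_imgs": torch.randn(8, 4),
        "src_labels": torch.randint(0, 2, (8,)),
    }
    # one training step: forward, zero_grad, backward, optimizer step (see call() above)
    _, outputs = opt_hook({}, inputs)
    return opt_hook.loss_components, outputs
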
class SummaryHook(BaseHook):
"""
Repackages losses into a dictionary format useful for logging.
This should be used only at the very end of each
iteration, i.e. it should be the last sub-hook
in a [ChainHook][pytorch_adapt.hooks.utils.ChainHook].
"""
def __init__(self, optimizers: Dict[str, OptimizerHook], **kwargs):
"""
Arguments:
optimizers: A dictionary of optimizer hooks.
The losses computed inside these hooks
will be packaged into nested dictionaries.
"""
super().__init__(**kwargs)
self.optimizers = optimizers
def call(self, losses, inputs):
""""""
losses = {}
for k, v in self.optimizers.items():
losses[k] = v.loss_components
return losses, {}
def _loss_keys(self):
""""""
return list(self.optimizers.keys())
def _out_keys(self):
""""""
return []
| 31.951923
| 89
| 0.605477
|
8989cb6c4d3fd8878724bcad4cc76856053623ba
| 1,471
|
py
|
Python
|
flashcards/admin.py
|
ubuntustan/nyumba-kumi
|
bb8c8b3159e16d6c246ff758764e858cfba3a864
|
[
"MIT"
] | null | null | null |
flashcards/admin.py
|
ubuntustan/nyumba-kumi
|
bb8c8b3159e16d6c246ff758764e858cfba3a864
|
[
"MIT"
] | null | null | null |
flashcards/admin.py
|
ubuntustan/nyumba-kumi
|
bb8c8b3159e16d6c246ff758764e858cfba3a864
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Card_Set, Card
def push_live(modeladmin, request, query_set):
'''
    From the Action: dropdown on the Admin Panel,
    lets the admin select multiple card sets at once and mark them as active
'''
rows_updated = query_set.update(is_active = True)
# Message sent to admin - displaying how many sets were updated as active
if rows_updated == 1:
message = '1 set was'
else:
        message = '%s sets were' % rows_updated
    modeladmin.message_user(request, '%s successfully updated' % message)
push_live.short_description = 'Mark selected card sets as active.'
# Customizing the Admin Panel
class Card_Set_Admin(admin.ModelAdmin):
# 'is_active' adds column to the Admin Panel - displaying if topic is active (inactive means there are no cards in the card set)
# 'get_card_count' adds column to the Admin Panel - displaying total number of cards in each card set
list_display = ('topic', 'is_active', 'get_card_count')
# Add filter box on the Admin Panel, letting admin filter to active/inactive card sets
list_filter = ('is_active',)
# Admin can search for topic or description
search_fields = ['topic', 'description']
actions = [push_live]
class Card_Admin(admin.ModelAdmin):
pass
# Register your models here.
admin.site.register(Card_Set, Card_Set_Admin)
admin.site.register(Card, Card_Admin)
| 33.431818
| 133
| 0.719918
|
8c3d56027dec65b6f7f8fbb8e52003434c809cb5
| 210
|
py
|
Python
|
ykdl/extractors/baidu/__init__.py
|
Miloxing/ykdl
|
2d485cbb326327ad1d28c5cea4dffcace777032d
|
[
"MIT"
] | 3
|
2018-09-04T09:33:51.000Z
|
2021-11-01T09:03:27.000Z
|
ykdl/extractors/baidu/__init__.py
|
MichioY/bilibiliupload
|
623e0d06e6acb4b5f2c3d6291450f27bbf83667e
|
[
"MIT"
] | null | null | null |
ykdl/extractors/baidu/__init__.py
|
MichioY/bilibiliupload
|
623e0d06e6acb4b5f2c3d6291450f27bbf83667e
|
[
"MIT"
] | 1
|
2019-12-26T18:00:47.000Z
|
2019-12-26T18:00:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
def get_extractor(url):
if re.search("music.baidu", url):
from . import music as s
return s.site
raise NotImplementedError(url)
| 19.090909
| 37
| 0.62381
|
e89f9ef0949b31f327818a84e967f8686269ad87
| 466
|
py
|
Python
|
src/colusa/plugins/etr_cs_rutgers_edu.py
|
huuhoa/symphony
|
f8a364649634b4d864771b2c8a3103b714b6b9e2
|
[
"MIT"
] | 6
|
2020-08-29T04:14:15.000Z
|
2020-09-18T10:53:59.000Z
|
src/colusa/plugins/etr_cs_rutgers_edu.py
|
huuhoa/colusa
|
07a0a60680c8085c5dca522e0237f7b5a5181dcb
|
[
"MIT"
] | 34
|
2021-09-07T15:17:38.000Z
|
2022-03-25T15:16:40.000Z
|
src/colusa/plugins/etr_cs_rutgers_edu.py
|
huuhoa/colusa
|
07a0a60680c8085c5dca522e0237f7b5a5181dcb
|
[
"MIT"
] | 2
|
2020-08-29T04:21:35.000Z
|
2020-09-13T17:36:06.000Z
|
from colusa.etr import Extractor, register_extractor
@register_extractor('www.cs.rutgers.edu/~pxk/')
class CSRutgersEduExtractor(Extractor):
def _find_main_content(self):
return self.bs.find('div', attrs={'id': 'main'})
def cleanup(self):
self.remove_tag(self.main_content, 'div', attrs={'id': 'downloadmsg'})
self.remove_tag(self.main_content, 'div', attrs={'id': 'headline'})
super(CSRutgersEduExtractor, self).cleanup()
| 35.846154
| 78
| 0.690987
|
f4ddf7606c78b2f28ef9729567af079d8545d686
| 15,955
|
py
|
Python
|
aries_cloudagent/core/conductor.py
|
adamsc64/aries-cloudagent-python
|
d09f6085b248a68c95822ae6b2aa06bb0053675b
|
[
"Apache-2.0"
] | 1
|
2021-01-15T01:04:43.000Z
|
2021-01-15T01:04:43.000Z
|
aries_cloudagent/core/conductor.py
|
adamsc64/aries-cloudagent-python
|
d09f6085b248a68c95822ae6b2aa06bb0053675b
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/core/conductor.py
|
adamsc64/aries-cloudagent-python
|
d09f6085b248a68c95822ae6b2aa06bb0053675b
|
[
"Apache-2.0"
] | 1
|
2021-01-15T01:04:31.000Z
|
2021-01-15T01:04:31.000Z
|
"""
The Conductor.
The conductor is responsible for coordinating messages that are received
over the network, communicating with the ledger, passing messages to handlers,
instantiating concrete implementations of required modules and storing data in the
wallet.
"""
import hashlib
import logging
from ..admin.base_server import BaseAdminServer
from ..admin.server import AdminServer
from ..config.default_context import ContextBuilder
from ..config.injection_context import InjectionContext
from ..config.ledger import ledger_config
from ..config.logging import LoggingConfigurator
from ..config.wallet import wallet_config, BaseWallet
from ..ledger.error import LedgerConfigError, LedgerTransactionError
from ..messaging.responder import BaseResponder
from ..protocols.connections.v1_0.manager import (
ConnectionManager,
ConnectionManagerError,
)
from ..transport.inbound.manager import InboundTransportManager
from ..transport.inbound.message import InboundMessage
from ..transport.outbound.base import OutboundDeliveryError
from ..transport.outbound.manager import OutboundTransportManager, QueuedOutboundMessage
from ..transport.outbound.message import OutboundMessage
from ..transport.wire_format import BaseWireFormat
from ..utils.task_queue import CompletedTask, TaskQueue
from ..utils.stats import Collector
from .dispatcher import Dispatcher
LOGGER = logging.getLogger(__name__)
class Conductor:
"""
Conductor class.
Class responsible for initializing concrete implementations
of our require interfaces and routing inbound and outbound message data.
"""
def __init__(self, context_builder: ContextBuilder) -> None:
"""
Initialize an instance of Conductor.
Args:
            context_builder: ContextBuilder instance used to build the injection context
"""
self.admin_server = None
self.context: InjectionContext = None
self.context_builder = context_builder
self.dispatcher: Dispatcher = None
self.inbound_transport_manager: InboundTransportManager = None
self.outbound_transport_manager: OutboundTransportManager = None
async def setup(self):
"""Initialize the global request context."""
context = await self.context_builder.build()
self.dispatcher = Dispatcher(context)
await self.dispatcher.setup()
wire_format = await context.inject(BaseWireFormat, required=False)
if wire_format and hasattr(wire_format, "task_queue"):
wire_format.task_queue = self.dispatcher.task_queue
# Register all inbound transports
self.inbound_transport_manager = InboundTransportManager(
context, self.inbound_message_router, self.handle_not_returned
)
await self.inbound_transport_manager.setup()
# Register all outbound transports
self.outbound_transport_manager = OutboundTransportManager(
context, self.handle_not_delivered
)
await self.outbound_transport_manager.setup()
# Configure the wallet
public_did = await wallet_config(context)
# Configure the ledger
if not await ledger_config(context, public_did):
LOGGER.warning("No ledger configured")
# Admin API
if context.settings.get("admin.enabled"):
try:
admin_host = context.settings.get("admin.host", "0.0.0.0")
admin_port = context.settings.get("admin.port", "80")
self.admin_server = AdminServer(
admin_host,
admin_port,
context,
self.outbound_message_router,
self.webhook_router,
self.stop,
self.dispatcher.task_queue,
self.get_stats,
)
webhook_urls = context.settings.get("admin.webhook_urls")
if webhook_urls:
for url in webhook_urls:
self.admin_server.add_webhook_target(url)
context.injector.bind_instance(BaseAdminServer, self.admin_server)
if "http" not in self.outbound_transport_manager.registered_schemes:
self.outbound_transport_manager.register("http")
except Exception:
LOGGER.exception("Unable to register admin server")
raise
# Fetch stats collector, if any
collector = await context.inject(Collector, required=False)
if collector:
# add stats to our own methods
collector.wrap(
self,
(
# "inbound_message_router",
"outbound_message_router",
# "create_inbound_session",
),
)
            # wrapping at the class level (!) should not be performed multiple times
collector.wrap(
ConnectionManager,
(
# "get_connection_targets",
"fetch_did_document",
"find_inbound_connection",
),
)
self.context = context
async def start(self) -> None:
"""Start the agent."""
context = self.context
# Start up transports
try:
await self.inbound_transport_manager.start()
except Exception:
LOGGER.exception("Unable to start inbound transports")
raise
try:
await self.outbound_transport_manager.start()
except Exception:
LOGGER.exception("Unable to start outbound transports")
raise
# Start up Admin server
if self.admin_server:
try:
await self.admin_server.start()
except Exception:
LOGGER.exception("Unable to start administration API")
# Make admin responder available during message parsing
# This allows webhooks to be called when a connection is marked active,
# for example
context.injector.bind_instance(BaseResponder, self.admin_server.responder)
# Get agent label
default_label = context.settings.get("default_label")
# Get public did
wallet: BaseWallet = await context.inject(BaseWallet)
public_did = await wallet.get_public_did()
# Show some details about the configuration to the user
LoggingConfigurator.print_banner(
default_label,
self.inbound_transport_manager.registered_transports,
self.outbound_transport_manager.registered_transports,
public_did.did if public_did else None,
self.admin_server,
)
# Create a static connection for use by the test-suite
if context.settings.get("debug.test_suite_endpoint"):
mgr = ConnectionManager(self.context)
their_endpoint = context.settings["debug.test_suite_endpoint"]
test_conn = await mgr.create_static_connection(
my_seed=hashlib.sha256(b"aries-protocol-test-subject").digest(),
their_seed=hashlib.sha256(b"aries-protocol-test-suite").digest(),
their_endpoint=their_endpoint,
alias="test-suite",
)
print("Created static connection for test suite")
print(" - My DID:", test_conn.my_did)
print(" - Their DID:", test_conn.their_did)
print(" - Their endpoint:", their_endpoint)
print()
# Print an invitation to the terminal
if context.settings.get("debug.print_invitation"):
try:
mgr = ConnectionManager(self.context)
_connection, invitation = await mgr.create_invitation(
my_label=context.settings.get("debug.invite_label"),
multi_use=context.settings.get("debug.invite_multi_use", False),
public=context.settings.get("debug.invite_public", False),
)
base_url = context.settings.get("invite_base_url")
invite_url = invitation.to_url(base_url)
print("Invitation URL:")
print(invite_url, flush=True)
except Exception:
LOGGER.exception("Error creating invitation")
async def stop(self, timeout=1.0):
"""Stop the agent."""
shutdown = TaskQueue()
if self.dispatcher:
shutdown.run(self.dispatcher.complete())
if self.admin_server:
shutdown.run(self.admin_server.stop())
if self.inbound_transport_manager:
shutdown.run(self.inbound_transport_manager.stop())
if self.outbound_transport_manager:
shutdown.run(self.outbound_transport_manager.stop())
await shutdown.complete(timeout)
def inbound_message_router(
self, message: InboundMessage, can_respond: bool = False
):
"""
Route inbound messages.
Args:
message: The inbound message instance
can_respond: If the session supports return routing
"""
if message.receipt.direct_response_requested and not can_respond:
LOGGER.warning(
"Direct response requested, but not supported by transport: %s",
message.transport_type,
)
# Note: at this point we could send the message to a shared queue
# if this pod is too busy to process it
try:
self.dispatcher.queue_message(
message,
self.outbound_message_router,
self.admin_server and self.admin_server.send_webhook,
lambda completed: self.dispatch_complete(message, completed),
)
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
def dispatch_complete(self, message: InboundMessage, completed: CompletedTask):
"""Handle completion of message dispatch."""
if completed.exc_info:
LOGGER.exception(
"Exception in message handler:", exc_info=completed.exc_info
)
if isinstance(completed.exc_info[1], LedgerConfigError) or isinstance(
completed.exc_info[1], LedgerTransactionError
):
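                # logs either "Shutdown on ledger error ..." (admin server
                # present) or "No admin server to shutdown on ledger error ..."
                # via the shared "%shutdown ..." format string below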
LOGGER.error(
"%shutdown on ledger error %s",
"S" if self.admin_server else "No admin server to s",
str(completed.exc_info[1]),
)
if self.admin_server:
self.admin_server.notify_fatal_error()
else:
LOGGER.error(
"DON'T shutdown on %s %s",
completed.exc_info[0].__name__,
str(completed.exc_info[1]),
)
self.inbound_transport_manager.dispatch_complete(message, completed)
async def get_stats(self) -> dict:
"""Get the current stats tracked by the conductor."""
stats = {
"in_sessions": len(self.inbound_transport_manager.sessions),
"out_encode": 0,
"out_deliver": 0,
"task_active": self.dispatcher.task_queue.current_active,
"task_done": self.dispatcher.task_queue.total_done,
"task_failed": self.dispatcher.task_queue.total_failed,
"task_pending": self.dispatcher.task_queue.current_pending,
}
for m in self.outbound_transport_manager.outbound_buffer:
if m.state == QueuedOutboundMessage.STATE_ENCODE:
stats["out_encode"] += 1
if m.state == QueuedOutboundMessage.STATE_DELIVER:
stats["out_deliver"] += 1
return stats
async def outbound_message_router(
self,
context: InjectionContext,
outbound: OutboundMessage,
inbound: InboundMessage = None,
) -> None:
"""
Route an outbound message.
Args:
context: The request context
            outbound: An outbound message to be sent
inbound: The inbound message that produced this response, if available
"""
if not outbound.target and outbound.reply_to_verkey:
if not outbound.reply_from_verkey and inbound:
outbound.reply_from_verkey = inbound.receipt.recipient_verkey
# return message to an inbound session
if self.inbound_transport_manager.return_to_session(outbound):
return
if not outbound.to_session_only:
await self.queue_outbound(context, outbound, inbound)
def handle_not_returned(self, context: InjectionContext, outbound: OutboundMessage):
"""Handle a message that failed delivery via an inbound session."""
try:
self.dispatcher.run_task(self.queue_outbound(context, outbound))
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
async def queue_outbound(
self,
context: InjectionContext,
outbound: OutboundMessage,
inbound: InboundMessage = None,
):
"""
Queue an outbound message.
Args:
context: The request context
            outbound: An outbound message to be sent
inbound: The inbound message that produced this response, if available
"""
# populate connection target(s)
if not outbound.target and not outbound.target_list and outbound.connection_id:
# using provided request context
mgr = ConnectionManager(context)
try:
outbound.target_list = await self.dispatcher.run_task(
mgr.get_connection_targets(connection_id=outbound.connection_id)
)
except ConnectionManagerError:
LOGGER.exception("Error preparing outbound message for transmission")
return
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
try:
self.outbound_transport_manager.enqueue_message(context, outbound)
except OutboundDeliveryError:
LOGGER.warning("Cannot queue message for delivery, no supported transport")
self.handle_not_delivered(context, outbound)
def handle_not_delivered(
self, context: InjectionContext, outbound: OutboundMessage
):
"""Handle a message that failed delivery via outbound transports."""
self.inbound_transport_manager.return_undelivered(outbound)
def webhook_router(
self, topic: str, payload: dict, endpoint: str, max_attempts: int = None
):
"""
Route a webhook through the outbound transport manager.
Args:
topic: The webhook topic
payload: The webhook payload
endpoint: The endpoint of the webhook target
max_attempts: The maximum number of attempts
"""
try:
self.outbound_transport_manager.enqueue_webhook(
topic, payload, endpoint, max_attempts
)
except OutboundDeliveryError:
LOGGER.warning(
"Cannot queue message webhook for delivery, no supported transport"
)
| 39.00978
| 88
| 0.621561
|
5f7a60bba8beb66729f954e9ca2bdbe61a01003b
| 22,107
|
py
|
Python
|
tools/scons/scons-local-2.2.0/SCons/Executor.py
|
tristanpenman/gameutils
|
d4695922c6a8ba201a67fc8e5319da53e0d25323
|
[
"BSD-2-Clause"
] | null | null | null |
tools/scons/scons-local-2.2.0/SCons/Executor.py
|
tristanpenman/gameutils
|
d4695922c6a8ba201a67fc8e5319da53e0d25323
|
[
"BSD-2-Clause"
] | null | null | null |
tools/scons/scons-local-2.2.0/SCons/Executor.py
|
tristanpenman/gameutils
|
d4695922c6a8ba201a67fc8e5319da53e0d25323
|
[
"BSD-2-Clause"
] | null | null | null |
"""SCons.Executor
A module for executing actions with specific lists of target and source
Nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Executor.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import collections
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
class Batch(object):
"""Remembers exact association between targets
and sources of executor."""
def __init__(self, targets=[], sources=[]):
self.targets = targets
self.sources = sources
class TSList(collections.UserList):
"""A class that implements $TARGETS or $SOURCES expansions by wrapping
an executor Method. This class is used in the Executor.lvars()
to delay creation of NodeList objects until they're needed.
Note that we subclass collections.UserList purely so that the
is_Sequence() function will identify an object of this class as
a list during variable expansion. We're not really using any
collections.UserList methods in practice.
"""
def __init__(self, func):
self.func = func
def __getattr__(self, attr):
nl = self.func()
return getattr(nl, attr)
def __getitem__(self, i):
nl = self.func()
return nl[i]
def __getslice__(self, i, j):
nl = self.func()
i = max(i, 0); j = max(j, 0)
return nl[i:j]
def __str__(self):
nl = self.func()
return str(nl)
def __repr__(self):
nl = self.func()
return repr(nl)
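
# --- Illustrative sketch (added; not part of the original SCons source) -------
# TSList above defers building the NodeList until an attribute, item or string
# conversion is actually requested, by delegating to the factory function it
# was given.  The stand-alone helper below (hypothetical, independent of SCons
# Nodes) shows that delegation pattern with a plain list.
def _tslist_pattern_demo():
    calls = []

    def make_list():
        calls.append(1)              # records each time the factory is invoked
        return ["target1", "target2"]

    lazy = TSList(make_list)
    assert len(calls) == 0           # nothing has been built yet
    first = lazy[0]                  # __getitem__ triggers the factory
    as_text = str(lazy)              # __str__ triggers it again (no caching)
    return first, as_text, len(calls)
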
class TSObject(object):
"""A class that implements $TARGET or $SOURCE expansions by wrapping
an Executor method.
"""
def __init__(self, func):
self.func = func
def __getattr__(self, attr):
n = self.func()
return getattr(n, attr)
def __str__(self):
n = self.func()
if n:
return str(n)
return ''
def __repr__(self):
n = self.func()
if n:
return repr(n)
return ''
def rfile(node):
"""
A function to return the results of a Node's rfile() method,
if it exists, and the Node itself otherwise (if it's a Value
Node, e.g.).
"""
try:
rfile = node.rfile
except AttributeError:
return node
else:
return rfile()
class Executor(object):
"""A class for controlling instances of executing an action.
This largely exists to hold a single association of an action,
environment, list of environment override dictionaries, targets
and sources for later processing as needed.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action, env=None, overridelist=[{}],
targets=[], sources=[], builder_kw={}):
if __debug__: logInstanceCreation(self, 'Executor.Executor')
self.set_action_list(action)
self.pre_actions = []
self.post_actions = []
self.env = env
self.overridelist = overridelist
if targets or sources:
self.batches = [Batch(targets[:], sources[:])]
else:
self.batches = []
self.builder_kw = builder_kw
self._memo = {}
def get_lvars(self):
try:
return self.lvars
except AttributeError:
self.lvars = {
'CHANGED_SOURCES' : TSList(self._get_changed_sources),
'CHANGED_TARGETS' : TSList(self._get_changed_targets),
'SOURCE' : TSObject(self._get_source),
'SOURCES' : TSList(self._get_sources),
'TARGET' : TSObject(self._get_target),
'TARGETS' : TSList(self._get_targets),
'UNCHANGED_SOURCES' : TSList(self._get_unchanged_sources),
'UNCHANGED_TARGETS' : TSList(self._get_unchanged_targets),
}
return self.lvars
def _get_changes(self):
cs = []
ct = []
us = []
ut = []
for b in self.batches:
if b.targets[0].is_up_to_date():
us.extend(list(map(rfile, b.sources)))
ut.extend(b.targets)
else:
cs.extend(list(map(rfile, b.sources)))
ct.extend(b.targets)
self._changed_sources_list = SCons.Util.NodeList(cs)
self._changed_targets_list = SCons.Util.NodeList(ct)
self._unchanged_sources_list = SCons.Util.NodeList(us)
self._unchanged_targets_list = SCons.Util.NodeList(ut)
def _get_changed_sources(self, *args, **kw):
try:
return self._changed_sources_list
except AttributeError:
self._get_changes()
return self._changed_sources_list
def _get_changed_targets(self, *args, **kw):
try:
return self._changed_targets_list
except AttributeError:
self._get_changes()
return self._changed_targets_list
def _get_source(self, *args, **kw):
#return SCons.Util.NodeList([rfile(self.batches[0].sources[0]).get_subst_proxy()])
return rfile(self.batches[0].sources[0]).get_subst_proxy()
def _get_sources(self, *args, **kw):
return SCons.Util.NodeList([rfile(n).get_subst_proxy() for n in self.get_all_sources()])
def _get_target(self, *args, **kw):
#return SCons.Util.NodeList([self.batches[0].targets[0].get_subst_proxy()])
return self.batches[0].targets[0].get_subst_proxy()
def _get_targets(self, *args, **kw):
return SCons.Util.NodeList([n.get_subst_proxy() for n in self.get_all_targets()])
def _get_unchanged_sources(self, *args, **kw):
try:
return self._unchanged_sources_list
except AttributeError:
self._get_changes()
return self._unchanged_sources_list
def _get_unchanged_targets(self, *args, **kw):
try:
return self._unchanged_targets_list
except AttributeError:
self._get_changes()
return self._unchanged_targets_list
def get_action_targets(self):
if not self.action_list:
return []
targets_string = self.action_list[0].get_targets(self.env, self)
if targets_string[0] == '$':
targets_string = targets_string[1:]
return self.get_lvars()[targets_string]
def set_action_list(self, action):
import SCons.Util
if not SCons.Util.is_List(action):
if not action:
import SCons.Errors
raise SCons.Errors.UserError("Executor must have an action.")
action = [action]
self.action_list = action
def get_action_list(self):
return self.pre_actions + self.action_list + self.post_actions
def get_all_targets(self):
"""Returns all targets for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.targets)
return result
def get_all_sources(self):
"""Returns all sources for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.sources)
return result
def get_all_children(self):
"""Returns all unique children (dependencies) for all batches
of this Executor.
The Taskmaster can recognize when it's already evaluated a
Node, so we don't have to make this list unique for its intended
canonical use case, but we expect there to be a lot of redundancy
(long lists of batched .cc files #including the same .h files
over and over), so removing the duplicates once up front should
save the Taskmaster a lot of work.
"""
result = SCons.Util.UniqueList([])
for target in self.get_all_targets():
result.extend(target.children())
return result
def get_all_prerequisites(self):
"""Returns all unique (order-only) prerequisites for all batches
of this Executor.
"""
result = SCons.Util.UniqueList([])
for target in self.get_all_targets():
result.extend(target.prerequisites)
return result
def get_action_side_effects(self):
"""Returns all side effects for all batches of this
Executor used by the underlying Action.
"""
result = SCons.Util.UniqueList([])
for target in self.get_action_targets():
result.extend(target.side_effects)
return result
memoizer_counters.append(SCons.Memoize.CountValue('get_build_env'))
def get_build_env(self):
"""Fetch or create the appropriate build Environment
for this Executor.
"""
try:
return self._memo['get_build_env']
except KeyError:
pass
# Create the build environment instance with appropriate
# overrides. These get evaluated against the current
# environment's construction variables so that users can
# add to existing values by referencing the variable in
# the expansion.
overrides = {}
for odict in self.overridelist:
overrides.update(odict)
import SCons.Defaults
env = self.env or SCons.Defaults.DefaultEnvironment()
build_env = env.Override(overrides)
self._memo['get_build_env'] = build_env
return build_env
def get_build_scanner_path(self, scanner):
"""Fetch the scanner path for this executor's targets and sources.
"""
env = self.get_build_env()
try:
cwd = self.batches[0].targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd,
self.get_all_targets(),
self.get_all_sources())
def get_kw(self, kw={}):
result = self.builder_kw.copy()
result.update(kw)
result['executor'] = self
return result
def do_nothing(self, target, kw):
return 0
def do_execute(self, target, kw):
"""Actually execute the action list."""
env = self.get_build_env()
kw = self.get_kw(kw)
status = 0
for act in self.get_action_list():
#args = (self.get_all_targets(), self.get_all_sources(), env)
args = ([], [], env)
status = act(*args, **kw)
if isinstance(status, SCons.Errors.BuildError):
status.executor = self
raise status
elif status:
msg = "Error %s" % status
raise SCons.Errors.BuildError(
errstr=msg,
node=self.batches[0].targets,
executor=self,
action=act)
return status
# use extra indirection because with new-style objects (Python 2.2
# and above) we can't override special methods, and nullify() needs
# to be able to do this.
def __call__(self, target, **kw):
return self.do_execute(target, kw)
def cleanup(self):
self._memo = {}
def add_sources(self, sources):
"""Add source files to this Executor's list. This is necessary
for "multi" Builders that can be called repeatedly to build up
a source file list for a given target."""
# TODO(batch): extend to multiple batches
assert (len(self.batches) == 1)
# TODO(batch): remove duplicates?
sources = [x for x in sources if x not in self.batches[0].sources]
self.batches[0].sources.extend(sources)
def get_sources(self):
return self.batches[0].sources
def add_batch(self, targets, sources):
"""Add pair of associated target and source to this Executor's list.
This is necessary for "batch" Builders that can be called repeatedly
to build up a list of matching target and source files that will be
used in order to update multiple target files at once from multiple
corresponding source files, for tools like MSVC that support it."""
self.batches.append(Batch(targets, sources))
def prepare(self):
"""
Preparatory checks for whether this Executor can go ahead
and (try to) build its targets.
"""
for s in self.get_all_sources():
if s.missing():
msg = "Source `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (s, self.batches[0].targets[0]))
def add_pre_action(self, action):
self.pre_actions.append(action)
def add_post_action(self, action):
self.post_actions.append(action)
# another extra indirection for new-style objects and nullify...
def my_str(self):
env = self.get_build_env()
return "\n".join([action.genstring(self.get_all_targets(),
self.get_all_sources(),
env)
for action in self.get_action_list()])
def __str__(self):
return self.my_str()
def nullify(self):
self.cleanup()
self.do_execute = self.do_nothing
self.my_str = lambda: ''
memoizer_counters.append(SCons.Memoize.CountValue('get_contents'))
def get_contents(self):
"""Fetch the signature contents. This is the main reason this
class exists, so we can compute this once and cache it regardless
of how many target or source Nodes there are.
"""
try:
return self._memo['get_contents']
except KeyError:
pass
env = self.get_build_env()
result = "".join([action.get_contents(self.get_all_targets(),
self.get_all_sources(),
env)
for action in self.get_action_list()])
self._memo['get_contents'] = result
return result
def get_timestamp(self):
"""Fetch a time stamp for this Executor. We don't have one, of
course (only files do), but this is the interface used by the
timestamp module.
"""
return 0
def scan_targets(self, scanner):
# TODO(batch): scan by batches
self.scan(scanner, self.get_all_targets())
def scan_sources(self, scanner):
# TODO(batch): scan by batches
if self.batches[0].sources:
self.scan(scanner, self.get_all_sources())
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
env = self.get_build_env()
        # TODO(batch): scan by batches
deps = []
if scanner:
for node in node_list:
node.disambiguate()
s = scanner.select(node)
if not s:
continue
path = self.get_build_scanner_path(s)
deps.extend(node.get_implicit_deps(env, s, path))
else:
kw = self.get_kw()
for node in node_list:
node.disambiguate()
scanner = node.get_env_scanner(env, kw)
if not scanner:
continue
scanner = scanner.select(node)
if not scanner:
continue
path = self.get_build_scanner_path(scanner)
deps.extend(node.get_implicit_deps(env, scanner, path))
deps.extend(self.get_implicit_deps())
for tgt in self.get_all_targets():
tgt.add_to_implicit(deps)
def _get_unignored_sources_key(self, node, ignore=()):
return (node,) + tuple(ignore)
memoizer_counters.append(SCons.Memoize.CountDict('get_unignored_sources', _get_unignored_sources_key))
def get_unignored_sources(self, node, ignore=()):
key = (node,) + tuple(ignore)
try:
memo_dict = self._memo['get_unignored_sources']
except KeyError:
memo_dict = {}
self._memo['get_unignored_sources'] = memo_dict
else:
try:
return memo_dict[key]
except KeyError:
pass
if node:
# TODO: better way to do this (it's a linear search,
# but it may not be critical path)?
sourcelist = []
for b in self.batches:
if node in b.targets:
sourcelist = b.sources
break
else:
sourcelist = self.get_all_sources()
if ignore:
idict = {}
for i in ignore:
idict[i] = 1
sourcelist = [s for s in sourcelist if s not in idict]
memo_dict[key] = sourcelist
return sourcelist
def get_implicit_deps(self):
"""Return the executor's implicit dependencies, i.e. the nodes of
the commands to be executed."""
result = []
build_env = self.get_build_env()
for act in self.get_action_list():
deps = act.get_implicit_deps(self.get_all_targets(),
self.get_all_sources(),
build_env)
result.extend(deps)
return result
_batch_executors = {}
def GetBatchExecutor(key):
return _batch_executors[key]
def AddBatchExecutor(key, executor):
assert key not in _batch_executors
_batch_executors[key] = executor
nullenv = None
def get_NullEnvironment():
"""Use singleton pattern for Null Environments."""
global nullenv
import SCons.Util
class NullEnvironment(SCons.Util.Null):
import SCons.CacheDir
_CacheDir_path = None
_CacheDir = SCons.CacheDir.CacheDir(None)
def get_CacheDir(self):
return self._CacheDir
if not nullenv:
nullenv = NullEnvironment()
return nullenv
class Null(object):
"""A null Executor, with a null build Environment, that does
nothing when the rest of the methods call it.
    This might be able to disappear when we refactor things to
disassociate Builders from Nodes entirely, so we're not
going to worry about unit tests for this--at least for now.
"""
def __init__(self, *args, **kw):
if __debug__: logInstanceCreation(self, 'Executor.Null')
self.batches = [Batch(kw['targets'][:], [])]
def get_build_env(self):
return get_NullEnvironment()
def get_build_scanner_path(self):
return None
def cleanup(self):
pass
def prepare(self):
pass
def get_unignored_sources(self, *args, **kw):
return tuple(())
def get_action_targets(self):
return []
def get_action_list(self):
return []
def get_all_targets(self):
return self.batches[0].targets
def get_all_sources(self):
return self.batches[0].targets[0].sources
def get_all_children(self):
return self.batches[0].targets[0].children()
def get_all_prerequisites(self):
return []
def get_action_side_effects(self):
return []
def __call__(self, *args, **kw):
return 0
def get_contents(self):
return ''
def _morph(self):
"""Morph this Null executor to a real Executor object."""
batches = self.batches
self.__class__ = Executor
self.__init__([])
self.batches = batches
# The following methods require morphing this Null Executor to a
# real Executor object.
def add_pre_action(self, action):
self._morph()
self.add_pre_action(action)
def add_post_action(self, action):
self._morph()
self.add_post_action(action)
def set_action_list(self, action):
self._morph()
self.set_action_list(action)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.869085
| 108
| 0.59013
|
87b28dd04578ced725b110bb1e359f6a39bde056
| 7,246
|
py
|
Python
|
apps/ticket/migrations/0001_initial.py
|
x315904752/loonflow
|
c65151916c6f5914e8edf50ae8350df53860c640
|
[
"MIT"
] | null | null | null |
apps/ticket/migrations/0001_initial.py
|
x315904752/loonflow
|
c65151916c6f5914e8edf50ae8350df53860c640
|
[
"MIT"
] | null | null | null |
apps/ticket/migrations/0001_initial.py
|
x315904752/loonflow
|
c65151916c6f5914e8edf50ae8350df53860c640
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2020-02-22 09:37
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TicketCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=50, verbose_name='创建人')),
('gmt_created', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('gmt_modified', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_deleted', models.BooleanField(default=False, verbose_name='已删除')),
('name', models.CharField(max_length=50, verbose_name='字段名')),
('field_key', models.CharField(max_length=50, verbose_name='字段标识')),
('ticket_id', models.IntegerField(verbose_name='工单id')),
('field_type_id', models.IntegerField(help_text='见service.constant_service中定义', verbose_name='字段类型')),
('char_value', models.CharField(blank=True, default='', max_length=1000, verbose_name='字符串值')),
('int_value', models.IntegerField(blank=True, default=0, verbose_name='整形值')),
('float_value', models.FloatField(blank=True, default=0.0, verbose_name='浮点值')),
('bool_value', models.BooleanField(default=False, verbose_name='布尔值')),
('date_value', models.DateField(blank=True, default=datetime.datetime(1, 1, 1, 0, 0), verbose_name='日期值')),
('datetime_value', models.DateTimeField(blank=True, default=datetime.datetime(1, 1, 1, 0, 0), verbose_name='日期时间值')),
('time_value', models.TimeField(blank=True, default=datetime.datetime(1900, 1, 1, 0, 0, 1), verbose_name='时间值')),
('radio_value', models.CharField(blank=True, default='', max_length=50, verbose_name='radio值')),
('checkbox_value', models.CharField(blank=True, default='', help_text='逗号隔开多个选项', max_length=50, verbose_name='checkbox值')),
('select_value', models.CharField(blank=True, default='', max_length=50, verbose_name='下拉列表值')),
('multi_select_value', models.CharField(blank=True, default='', help_text='逗号隔开多个选项', max_length=50, verbose_name='多选下拉列表值')),
('text_value', models.TextField(blank=True, default='', verbose_name='文本值')),
('username_value', models.CharField(blank=True, default='', max_length=50, verbose_name='用户名')),
('multi_username_value', models.CharField(blank=True, default='', max_length=1000, verbose_name='多选用户名')),
],
options={
'verbose_name': '工单自定义字段',
'verbose_name_plural': '工单自定义字段',
},
),
migrations.CreateModel(
name='TicketFlowLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=50, verbose_name='创建人')),
('gmt_created', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('gmt_modified', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_deleted', models.BooleanField(default=False, verbose_name='已删除')),
('ticket_id', models.IntegerField(verbose_name='工单id')),
('transition_id', models.IntegerField(help_text='与worklow.Transition关联, 为0时表示认为干预的操作', verbose_name='流转id')),
('suggestion', models.CharField(blank=True, default='', max_length=1000, verbose_name='处理意见')),
('participant_type_id', models.IntegerField(help_text='见service.constant_service中定义', verbose_name='处理人类型')),
('participant', models.CharField(blank=True, default='', max_length=50, verbose_name='处理人')),
('state_id', models.IntegerField(blank=True, default=0, verbose_name='当前状态id')),
('intervene_type_id', models.IntegerField(default=0, help_text='0.非人为干预的流转,1.转交操作 2.加签操作 3.加签处理完成', verbose_name='干预类型')),
('ticket_data', models.CharField(blank=True, default='', help_text='可以用于记录当前表单数据,json格式', max_length=10000, verbose_name='工单数据')),
],
options={
'verbose_name': '工单流转日志',
'verbose_name_plural': '工单流转日志',
},
),
migrations.CreateModel(
name='TicketRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=50, verbose_name='创建人')),
('gmt_created', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('gmt_modified', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_deleted', models.BooleanField(default=False, verbose_name='已删除')),
('title', models.CharField(blank=True, default='', help_text='工单的标题', max_length=500, verbose_name='标题')),
('workflow_id', models.IntegerField(help_text='与workflow.Workflow流程关联', verbose_name='关联的流程id')),
('sn', models.CharField(help_text='工单的流水号', max_length=25, verbose_name='流水号')),
('state_id', models.IntegerField(help_text='与workflow.State关联', verbose_name='当前状态')),
('parent_ticket_id', models.IntegerField(default=0, help_text='与ticket.TicketRecord关联', verbose_name='父工单id')),
('parent_ticket_state_id', models.IntegerField(default=0, help_text='与workflow.State关联,子工单是关联到父工单的某个状态下的', verbose_name='对应父工单状态id')),
('participant_type_id', models.IntegerField(default=0, help_text='0.无处理人,1.个人,2.多人,3.部门,4.角色', verbose_name='当前处理人类型')),
('participant', models.CharField(blank=True, default='', help_text='可以为空(无处理人的情况,如结束状态)、username\\多个username(以,隔开)\\部门id\\角色id\\脚本文件名等', max_length=100, verbose_name='当前处理人')),
('relation', models.CharField(blank=True, default='', help_text='工单流转过程中将保存所有相关的人(包括创建人、曾经的待处理人),用于查询', max_length=1000, verbose_name='工单关联人')),
('in_add_node', models.BooleanField(default=False, help_text='是否处于加签状态下', verbose_name='加签状态中')),
('add_node_man', models.CharField(blank=True, default='', help_text='加签操作的人,工单当前处理人处理完成后会回到该处理人,当处于加签状态下才有效', max_length=50, verbose_name='加签人')),
('script_run_last_result', models.BooleanField(default=True, verbose_name='脚本最后一次执行结果')),
('is_end', models.BooleanField(default=False, help_text='工单是否已处于结束状态', verbose_name='已结束')),
('is_rejected', models.BooleanField(default=False, help_text='工单是否处于被拒绝状态', verbose_name='被拒绝')),
('multi_all_person', models.CharField(blank=True, default='{}', help_text='需要当前状态处理人全部处理时实际的处理结果,json格式', max_length=1000, verbose_name='全部处理的结果')),
],
options={
'verbose_name': '工单记录',
'verbose_name_plural': '工单记录',
},
),
]
| 73.191919
| 192
| 0.628209
|
4956d1589cc4535a73bcf5fbdc1448a03757d661
| 3,388
|
py
|
Python
|
src/pynn/bin/decode_ctc.py
|
enesyugan/yapay-nn
|
b6e0740e7c1ae829abd7cd1bd447a172291538b1
|
[
"Apache-2.0"
] | null | null | null |
src/pynn/bin/decode_ctc.py
|
enesyugan/yapay-nn
|
b6e0740e7c1ae829abd7cd1bd447a172291538b1
|
[
"Apache-2.0"
] | null | null | null |
src/pynn/bin/decode_ctc.py
|
enesyugan/yapay-nn
|
b6e0740e7c1ae829abd7cd1bd447a172291538b1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Thai-Son Nguyen
# Licensed under the Apache License, Version 2.0 (the "License")
import time
import argparse
import torch
from pynn.util import load_object
from pynn.decoder.ctc import beam_search
from pynn.util.text import load_dict, write_hypo
from pynn.io.audio_seq import SpectroDataset
parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--model-dic', help='model dictionary', required=True)
parser.add_argument('--lm-dic', help='language model dictionary', default=None)
parser.add_argument('--lm-scale', help='language model scale', type=float, default=0.5)
parser.add_argument('--dict', help='dictionary file', default=None)
parser.add_argument('--word-dict', help='word dictionary file', default=None)
parser.add_argument('--data-scp', help='path to data scp', required=True)
parser.add_argument('--downsample', help='concated frames', type=int, default=1)
parser.add_argument('--mean-sub', help='mean subtraction', action='store_true')
parser.add_argument('--batch-size', help='batch size', type=int, default=32)
parser.add_argument('--blank', help='blank', type=int, default=0)
parser.add_argument('--blank-scale', help='blank scaling', type=float, default=1.0)
parser.add_argument('--beam-size', help='beam size', type=int, default=10)
parser.add_argument('--pruning', help='pruning size', type=float, default=1.5)
parser.add_argument('--fp16', help='float 16 bits', action='store_true')
parser.add_argument('--output', help='output file', type=str, default='hypos/H_1_LV.ctm')
parser.add_argument('--format', help='output format', type=str, default='ctm')
parser.add_argument('--space', help='space token', type=str, default='<space>')
if __name__ == '__main__':
args = parser.parse_args()
dic, word_dic = load_dict(args.dict, args.word_dict)
use_gpu = torch.cuda.is_available()
device = torch.device('cuda' if use_gpu else 'cpu')
mdic = torch.load(args.model_dic)
model = load_object(mdic['class'], mdic['module'], mdic['params'])
model = model.to(device)
model.load_state_dict(mdic['state'])
model.eval()
if args.fp16: model.half()
lm = None
if args.lm_dic is not None:
mdic = torch.load(args.lm_dic)
lm = load_object(mdic['class'], mdic['module'], mdic['params'])
lm = lm.to(device)
lm.load_state_dict(mdic['state'])
lm.eval()
if args.fp16: lm.half()
reader = SpectroDataset(args.data_scp, mean_sub=args.mean_sub, fp16=args.fp16,
sort_src=True, sek=False, downsample=args.downsample)
since = time.time()
fout = open(args.output, 'w')
with torch.no_grad():
while True:
seqs, masks, utts = reader.read_batch_utt(args.batch_size)
if not utts: break
seqs, masks = seqs.to(device), masks.to(device)
hypos = beam_search(model, seqs, masks, device, lm, args.lm_scale,
args.beam_size, args.pruning, args.blank, args.blank_scale)
hypos = [[el+2-args.blank for el in hypo] + [2] for hypo in hypos]
write_hypo(hypos, None, fout, utts, dic, word_dic, args.space, args.format)
fout.close()
time_elapsed = time.time() - since
print(" Elapsed Time: %.0fm %.0fs" % (time_elapsed // 60, time_elapsed % 60))
| 42.886076
| 91
| 0.674439
|
97f69381dc5f71b32069099a5c9932fdf36f9897
| 264
|
py
|
Python
|
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
"""定义pizzerias的url模式"""
from . import views
from django.conf.urls import url
urlpatterns = [
    # Home page
url(r'^$',views.index,name = 'index'),
    # Show all pizzas
url(r'^pizzas/$',views.pizzas,name = 'pizzas'),
url(r'^pizzas/(?P<pizza_id>\d+)/$',views.pizza,name = 'pizza'),
]
| 22
| 64
| 0.647727
|
4954c1c5fbd9ec1404a785e2e866c6452534ef62
| 41,399
|
py
|
Python
|
monai/apps/deepgrow/transforms.py
|
tatuanb/monai_V1
|
41e492b61c78bb3c303f38b03fe9fdc74a3c2e96
|
[
"Apache-2.0"
] | 1
|
2020-11-13T23:13:23.000Z
|
2020-11-13T23:13:23.000Z
|
monai/apps/deepgrow/transforms.py
|
catherine1996cn/MONAI
|
ff9bbfa82763de46cbac75553e340633e3d84ecb
|
[
"Apache-2.0"
] | 2
|
2020-11-13T23:15:00.000Z
|
2020-11-16T14:54:08.000Z
|
monai/apps/deepgrow/transforms.py
|
catherine1996cn/MONAI
|
ff9bbfa82763de46cbac75553e340633e3d84ecb
|
[
"Apache-2.0"
] | 1
|
2021-11-18T22:37:40.000Z
|
2021-11-18T22:37:40.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Callable, Dict, Hashable, List, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import IndexSelection, KeysCollection
from monai.networks.layers import GaussianFilter
from monai.transforms import Resize, SpatialCrop
from monai.transforms.transform import MapTransform, Randomizable, Transform
from monai.transforms.utils import generate_spatial_bounding_box, is_positive
from monai.utils import InterpolateMode, deprecated_arg, ensure_tuple, ensure_tuple_rep, min_version, optional_import
measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
# Transforms to support Training for Deepgrow models
class FindAllValidSlicesd(Transform):
"""
Find/List all valid slices in the label.
Label is assumed to be a 4D Volume with shape CDHW, where C=1.
Args:
label: key to the label source.
sids: key to store slices indices having valid label map.
"""
def __init__(self, label: str = "label", sids: str = "sids"):
self.label = label
self.sids = sids
def _apply(self, label):
sids = []
for sid in range(label.shape[1]): # Assume channel is first
if np.sum(label[0][sid]) != 0:
sids.append(sid)
return np.asarray(sids)
def __call__(self, data):
d: Dict = dict(data)
label = d[self.label]
if label.shape[0] != 1:
raise ValueError("Only supports single channel labels!")
if len(label.shape) != 4: # only for 3D
raise ValueError("Only supports label with shape CDHW!")
sids = self._apply(label)
if sids is not None and len(sids):
d[self.sids] = sids
return d
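
# --- Hedged usage sketch (added; not part of the original MONAI module) -------
# FindAllValidSlicesd above scans a single-channel CDHW label and records the
# depth indices that contain any foreground.  The toy example below builds a
# (1, 3, 2, 2) label with foreground only in slice 1 and shows that "sids"
# comes back as [1]; it is illustrative only.
def _find_valid_slices_demo():
    label = np.zeros((1, 3, 2, 2), dtype=np.float32)
    label[0, 1, 0, 0] = 1.0                      # only slice index 1 has foreground
    out = FindAllValidSlicesd(label="label", sids="sids")({"label": label})
    return out["sids"]                           # -> array([1])
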
class AddInitialSeedPointd(Randomizable, Transform):
"""
Add random guidance as initial seed point for a given label.
Note that the label is of size (C, D, H, W) or (C, H, W)
The guidance is of size (2, N, # of dims) where N is number of guidance added.
# of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W)
Args:
label: label source.
guidance: key to store guidance.
sids: key that represents list of valid slice indices for the given label.
sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
connected_regions: maximum connected regions to use for adding initial points.
"""
def __init__(
self,
label: str = "label",
guidance: str = "guidance",
sids: str = "sids",
sid: str = "sid",
connected_regions: int = 5,
):
self.label = label
self.sids_key = sids
self.sid_key = sid
self.sid = None
self.guidance = guidance
self.connected_regions = connected_regions
def randomize(self, data):
sid = data.get(self.sid_key, None)
sids = data.get(self.sids_key, None)
if sids is not None:
if sid is None or sid not in sids:
sid = self.R.choice(sids, replace=False)
else:
sid = None
self.sid = sid
def _apply(self, label, sid):
dimensions = 3 if len(label.shape) > 3 else 2
default_guidance = [-1] * (dimensions + 1)
dims = dimensions
if sid is not None and dimensions == 3:
dims = 2
label = label[0][sid][np.newaxis] # Assume channel is first
label = (label > 0.5).astype(np.float32)
blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label
if np.max(blobs_labels) <= 0:
raise AssertionError("Not a valid Label")
pos_guidance = []
for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
if dims == 2:
label = (blobs_labels == ridx).astype(np.float32)
if np.sum(label) == 0:
pos_guidance.append(default_guidance)
continue
distance = distance_transform_cdt(label).flatten()
probability = np.exp(distance) - 1.0
idx = np.where(label.flatten() > 0)[0]
seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
dst = distance[seed]
g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
g[0] = dst[0] # for debug
if dimensions == 2 or dims == 3:
pos_guidance.append(g)
else:
pos_guidance.append([g[0], sid, g[-2], g[-1]])
return np.asarray([pos_guidance, [default_guidance] * len(pos_guidance)])
def __call__(self, data):
d = dict(data)
self.randomize(data)
d[self.guidance] = json.dumps(self._apply(d[self.label], self.sid).astype(int, copy=False).tolist())
return d
class AddGuidanceSignald(Transform):
"""
Add Guidance signal for input image.
Based on the "guidance" points, apply gaussian to them and add them as new channel for input image.
Args:
image: key to the image source.
guidance: key to store guidance.
sigma: standard deviation for Gaussian kernel.
number_intensity_ch: channel index.
"""
def __init__(self, image: str = "image", guidance: str = "guidance", sigma: int = 2, number_intensity_ch: int = 1):
self.image = image
self.guidance = guidance
self.sigma = sigma
self.number_intensity_ch = number_intensity_ch
def _get_signal(self, image, guidance):
dimensions = 3 if len(image.shape) > 3 else 2
guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
guidance = json.loads(guidance) if isinstance(guidance, str) else guidance
if dimensions == 3:
signal = np.zeros((len(guidance), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
else:
signal = np.zeros((len(guidance), image.shape[-2], image.shape[-1]), dtype=np.float32)
sshape = signal.shape
for i, g_i in enumerate(guidance):
for point in g_i:
if np.any(np.asarray(point) < 0):
continue
if dimensions == 3:
p1 = max(0, min(int(point[-3]), sshape[-3] - 1))
p2 = max(0, min(int(point[-2]), sshape[-2] - 1))
p3 = max(0, min(int(point[-1]), sshape[-1] - 1))
signal[i, p1, p2, p3] = 1.0
else:
p1 = max(0, min(int(point[-2]), sshape[-2] - 1))
p2 = max(0, min(int(point[-1]), sshape[-1] - 1))
signal[i, p1, p2] = 1.0
if np.max(signal[i]) > 0:
signal_tensor = torch.tensor(signal[i])
pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma)
signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0))
signal_tensor = signal_tensor.squeeze(0).squeeze(0)
signal[i] = signal_tensor.detach().cpu().numpy()
signal[i] = (signal[i] - np.min(signal[i])) / (np.max(signal[i]) - np.min(signal[i]))
return signal
def _apply(self, image, guidance):
signal = self._get_signal(image, guidance)
image = image[0 : 0 + self.number_intensity_ch, ...]
return np.concatenate([image, signal], axis=0)
def __call__(self, data):
d = dict(data)
image = d[self.image]
guidance = d[self.guidance]
d[self.image] = self._apply(image, guidance)
return d
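
# --- Hedged usage sketch (added; not part of the original MONAI module) -------
# AddGuidanceSignald above turns click coordinates into extra image channels:
# one channel per guidance group (positive / negative), each a normalised
# Gaussian blob centred on the clicked points.  The toy 2D example below uses
# a (1, 16, 16) image with one positive click at (8, 8); the negative group is
# encoded with negative coordinates, so its channel stays all-zero.
def _add_guidance_signal_demo():
    image = np.zeros((1, 16, 16), dtype=np.float32)
    guidance = [[[8, 8]], [[-1, -1]]]            # [positive clicks, negative clicks]
    data = {"image": image, "guidance": guidance}
    out = AddGuidanceSignald(image="image", guidance="guidance", sigma=2)(data)
    return out["image"].shape                    # -> (3, 16, 16): 1 intensity + 2 signal channels
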
class FindDiscrepancyRegionsd(Transform):
"""
Find discrepancy between prediction and actual during click interactions during training.
Args:
label: key to label source.
pred: key to prediction source.
discrepancy: key to store discrepancies found between label and prediction.
"""
def __init__(self, label: str = "label", pred: str = "pred", discrepancy: str = "discrepancy"):
self.label = label
self.pred = pred
self.discrepancy = discrepancy
@staticmethod
def disparity(label, pred):
label = (label > 0.5).astype(np.float32)
pred = (pred > 0.5).astype(np.float32)
disparity = label - pred
pos_disparity = (disparity > 0).astype(np.float32)
neg_disparity = (disparity < 0).astype(np.float32)
return [pos_disparity, neg_disparity]
def _apply(self, label, pred):
return self.disparity(label, pred)
def __call__(self, data):
d = dict(data)
label = d[self.label]
pred = d[self.pred]
d[self.discrepancy] = self._apply(label, pred)
return d
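
# --- Hedged usage sketch (added; not part of the original MONAI module) -------
# FindDiscrepancyRegionsd above thresholds label and prediction at 0.5 and
# splits their difference into two maps: voxels the model missed (label 1,
# pred 0) and voxels it over-segmented (label 0, pred 1).  A toy 1D check:
def _find_discrepancy_demo():
    label = np.array([1.0, 1.0, 0.0, 0.0], dtype=np.float32)
    pred = np.array([1.0, 0.0, 1.0, 0.0], dtype=np.float32)
    missed, extra = FindDiscrepancyRegionsd.disparity(label, pred)
    return missed.tolist(), extra.tolist()       # -> ([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0])
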
class AddRandomGuidanced(Randomizable, Transform):
"""
Add random guidance based on discrepancies that were found between label and prediction.
input shape is as below:
Guidance is of shape (2, N, # of dim)
Discrepancy is of shape (2, C, D, H, W) or (2, C, H, W)
Probability is of shape (1)
Args:
guidance: key to guidance source.
discrepancy: key that represents discrepancies found between label and prediction.
probability: key that represents click/interaction probability.
"""
def __init__(self, guidance: str = "guidance", discrepancy: str = "discrepancy", probability: str = "probability"):
self.guidance = guidance
self.discrepancy = discrepancy
self.probability = probability
self._will_interact = None
def randomize(self, data=None):
probability = data[self.probability]
self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])
def find_guidance(self, discrepancy):
distance = distance_transform_cdt(discrepancy).flatten()
probability = np.exp(distance) - 1.0
idx = np.where(discrepancy.flatten() > 0)[0]
if np.sum(discrepancy > 0) > 0:
seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
dst = distance[seed]
g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]
g[0] = dst[0]
return g
return None
def add_guidance(self, discrepancy, will_interact):
if not will_interact:
return None, None
pos_discr = discrepancy[0]
neg_discr = discrepancy[1]
can_be_positive = np.sum(pos_discr) > 0
can_be_negative = np.sum(neg_discr) > 0
correct_pos = np.sum(pos_discr) >= np.sum(neg_discr)
if correct_pos and can_be_positive:
return self.find_guidance(pos_discr), None
if not correct_pos and can_be_negative:
return None, self.find_guidance(neg_discr)
return None, None
def _apply(self, guidance, discrepancy):
guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
guidance = json.loads(guidance) if isinstance(guidance, str) else guidance
pos, neg = self.add_guidance(discrepancy, self._will_interact)
if pos:
guidance[0].append(pos)
guidance[1].append([-1] * len(pos))
if neg:
guidance[0].append([-1] * len(neg))
guidance[1].append(neg)
return json.dumps(np.asarray(guidance, dtype=int).tolist())
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
discrepancy = d[self.discrepancy]
self.randomize(data)
d[self.guidance] = self._apply(guidance, discrepancy)
return d
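# Usage sketch (illustrative only; the dummy discrepancy shapes are assumptions): with
# probability 1.0 the transform always interacts and samples one click inside the positive
# discrepancy region.
def _example_add_random_guidance():
    import numpy as np
    pos = np.zeros((1, 8, 8), dtype=np.float32)
    pos[0, 4, 4] = 1.0
    neg = np.zeros((1, 8, 8), dtype=np.float32)
    data = {"guidance": [[], []], "discrepancy": [pos, neg], "probability": 1.0}
    out = AddRandomGuidanced()(data)
    # guidance is re-serialised as a JSON string of [positive, negative] click lists
    assert isinstance(out["guidance"], str)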
class SpatialCropForegroundd(MapTransform):
"""
Crop only the foreground object of the expected images.
    Differences from :py:class:`monai.transforms.CropForegroundd`:
1. If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
object using box's center and spatial_size.
2. This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
in data[{key}_{meta_key_postfix}]
The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
The valid part can be determined by any field in the data with `source_key`, for example:
- Select values > 0 in image field as the foreground and crop on all fields specified by `keys`.
- Select label = 3 in label field as the foreground to crop on all fields specified by `keys`.
- Select label > 0 in the third channel of a One-Hot label field as the foreground to crop all `keys` fields.
Users can define arbitrary function to select expected foreground from the whole source image or specified
channels. And it can also add margin to every dim of the bounding box of foreground object.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.MapTransform`
source_key: data source to generate the bounding box of foreground, can be image or label, etc.
spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
select_fn: function to select expected foreground, default is to select values > 0.
channel_indices: if defined, select foreground only on the specified channels
of image. if None, select foreground on the whole image.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `{key}_{meta_key_postfix}` to fetch/store the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
original_shape_key: key to record original shape for foreground.
cropped_shape_key: key to record cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
source_key: str,
spatial_size: Union[Sequence[int], np.ndarray],
select_fn: Callable = is_positive,
channel_indices: Optional[IndexSelection] = None,
margin: int = 0,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix="meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.source_key = source_key
self.spatial_size = list(spatial_size)
self.select_fn = select_fn
self.channel_indices = channel_indices
self.margin = margin
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
box_start, box_end = generate_spatial_bounding_box(
d[self.source_key], self.select_fn, self.channel_indices, self.margin
)
center = list(np.mean([box_start, box_end], axis=0).astype(int, copy=False))
current_size = list(np.subtract(box_end, box_start).astype(int, copy=False))
if np.all(np.less(current_size, self.spatial_size)):
cropper = SpatialCrop(roi_center=center, roi_size=self.spatial_size)
box_start = np.array([s.start for s in cropper.slices])
box_end = np.array([s.stop for s in cropper.slices])
else:
cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
meta_key = meta_key or f"{key}_{meta_key_postfix}"
d[meta_key][self.start_coord_key] = box_start
d[meta_key][self.end_coord_key] = box_end
d[meta_key][self.original_shape_key] = d[key].shape
image = cropper(d[key])
d[meta_key][self.cropped_shape_key] = image.shape
d[key] = image
return d
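# Usage sketch (illustrative only; dummy shapes are assumptions): the foreground blob is smaller
# than spatial_size, so a fixed-size patch centred on the blob is returned and the crop
# coordinates are recorded in the meta dict.
def _example_spatial_crop_foreground():
    import numpy as np
    img = np.zeros((1, 8, 8), dtype=np.float32)
    img[0, 3:5, 3:5] = 1.0
    data = {"image": img, "image_meta_dict": {}}
    out = SpatialCropForegroundd(keys="image", source_key="image", spatial_size=[4, 4])(data)
    assert out["image"].shape == (1, 4, 4)
    assert "foreground_start_coord" in out["image_meta_dict"]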
# Transforms to support Inference for Deepgrow models
class AddGuidanceFromPointsd(Transform):
"""
Add guidance based on user clicks.
We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
Clicks always specify the coordinates in (H, W, D)
If depth_first is True:
Input is now of shape (D, H, W), will return guidance that specifies the coordinates in (D, H, W)
else:
Input is now of shape (H, W, D), will return guidance that specifies the coordinates in (H, W, D)
Args:
ref_image: key to reference image to fetch current and original image details.
guidance: output key to store guidance.
foreground: key that represents user foreground (+ve) clicks.
background: key that represents user background (-ve) clicks.
axis: axis that represents slices in 3D volume. (axis to Depth)
depth_first: if depth (slices) is positioned at first dimension.
spatial_dims: dimensions based on model used for deepgrow (2D vs 3D).
slice_key: key that represents applicable slice to add guidance.
meta_keys: explicitly indicate the key of the meta data dictionary of `ref_image`.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
.. deprecated:: 0.6.0
``dimensions`` is deprecated, use ``spatial_dims`` instead.
"""
@deprecated_arg(name="dimensions", since="0.6", msg_suffix="Please use `spatial_dims` instead.")
def __init__(
self,
ref_image,
guidance: str = "guidance",
foreground: str = "foreground",
background: str = "background",
axis: int = 0,
depth_first: bool = True,
spatial_dims: int = 2,
slice_key: str = "slice",
meta_keys: Optional[str] = None,
meta_key_postfix: str = "meta_dict",
dimensions: Optional[int] = None,
):
self.ref_image = ref_image
self.guidance = guidance
self.foreground = foreground
self.background = background
self.axis = axis
self.depth_first = depth_first
self.dimensions = spatial_dims if dimensions is None else dimensions
self.slice = slice_key
self.meta_keys = meta_keys
self.meta_key_postfix = meta_key_postfix
def _apply(self, pos_clicks, neg_clicks, factor, slice_num):
pos = neg = []
if self.dimensions == 2:
points = list(pos_clicks)
points.extend(neg_clicks)
points = np.array(points)
slices = list(np.unique(points[:, self.axis]))
slice_idx = slices[0] if slice_num is None else next(x for x in slices if x == slice_num)
if len(pos_clicks):
pos_clicks = np.array(pos_clicks)
pos = (pos_clicks[np.where(pos_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
if len(neg_clicks):
neg_clicks = np.array(neg_clicks)
neg = (neg_clicks[np.where(neg_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
guidance = [pos, neg, slice_idx]
else:
if len(pos_clicks):
pos = np.multiply(pos_clicks, factor).astype(int, copy=False).tolist()
if len(neg_clicks):
neg = np.multiply(neg_clicks, factor).astype(int, copy=False).tolist()
guidance = [pos, neg]
return guidance
def __call__(self, data):
d = dict(data)
meta_dict_key = self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"
if meta_dict_key not in d:
raise RuntimeError(f"Missing meta_dict {meta_dict_key} in data!")
if "spatial_shape" not in d[meta_dict_key]:
raise RuntimeError('Missing "spatial_shape" in meta_dict!')
original_shape = d[meta_dict_key]["spatial_shape"]
current_shape = list(d[self.ref_image].shape)
if self.depth_first:
if self.axis != 0:
raise RuntimeError("Depth first means the depth axis should be 0.")
# in here we assume the depth dimension was in the last dimension of "original_shape"
original_shape = np.roll(original_shape, 1)
factor = np.array(current_shape) / original_shape
fg_bg_clicks = []
for key in [self.foreground, self.background]:
clicks = d[key]
clicks = list(np.array(clicks, dtype=int))
if self.depth_first:
for i in range(len(clicks)):
clicks[i] = list(np.roll(clicks[i], 1))
fg_bg_clicks.append(clicks)
d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice))
return d
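# Usage sketch (illustrative only; shapes and click values are assumptions): clicks given in the
# original (H, W, D) space are mapped to a 2D guidance tuple (positive clicks, negative clicks,
# slice index).
def _example_add_guidance_from_points():
    import numpy as np
    data = {
        "image": np.zeros((4, 8, 8), dtype=np.float32),  # (D, H, W) after depth-first loading
        "image_meta_dict": {"spatial_shape": np.array([8, 8, 4])},  # original (H, W, D)
        "foreground": [[2, 3, 1]],  # one positive click in (H, W, D)
        "background": [],
    }
    out = AddGuidanceFromPointsd(ref_image="image", guidance="guidance", spatial_dims=2)(data)
    assert out["guidance"][0] == [[2, 3]]  # in-plane coordinates
    assert out["guidance"][2] == 1  # applicable slice index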
class SpatialCropGuidanced(MapTransform):
"""
Crop image based on guidance with minimal spatial size.
- If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
object using box's center and spatial_size.
- This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
in data[{key}_{meta_key_postfix}]
Input data is of shape (C, spatial_1, [spatial_2, ...])
Args:
keys: keys of the corresponding items to be transformed.
guidance: key to the guidance. It is used to generate the bounding box of foreground
spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
original_shape_key: key to record original shape for foreground.
cropped_shape_key: key to record cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
guidance: str,
spatial_size,
margin=20,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix="meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.guidance = guidance
self.spatial_size = list(spatial_size)
self.margin = margin
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def bounding_box(self, points, img_shape):
ndim = len(img_shape)
margin = ensure_tuple_rep(self.margin, ndim)
for m in margin:
if m < 0:
raise ValueError("margin value should not be negative number.")
box_start = [0] * ndim
box_end = [0] * ndim
for di in range(ndim):
dt = points[..., di]
min_d = max(min(dt - margin[di]), 0)
max_d = min(img_shape[di], max(dt + margin[di] + 1))
box_start[di], box_end[di] = min_d, max_d
return box_start, box_end
def __call__(self, data):
d: Dict = dict(data)
first_key: Union[Hashable, List] = self.first_key(d)
if first_key == []:
return d
guidance = d[self.guidance]
original_spatial_shape = d[first_key].shape[1:] # type: ignore
box_start, box_end = self.bounding_box(np.array(guidance[0] + guidance[1]), original_spatial_shape)
center = list(np.mean([box_start, box_end], axis=0).astype(int, copy=False))
spatial_size = self.spatial_size
box_size = list(np.subtract(box_end, box_start).astype(int, copy=False))
spatial_size = spatial_size[-len(box_size) :]
if len(spatial_size) < len(box_size):
# If the data is in 3D and spatial_size is specified as 2D [256,256]
# Then we will get all slices in such case
diff = len(box_size) - len(spatial_size)
spatial_size = list(original_spatial_shape[1 : (1 + diff)]) + spatial_size
if np.all(np.less(box_size, spatial_size)):
if len(center) == 3:
# 3D Deepgrow: set center to be middle of the depth dimension (D)
center[0] = spatial_size[0] // 2
cropper = SpatialCrop(roi_center=center, roi_size=spatial_size)
else:
cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
# update bounding box in case it was corrected by the SpatialCrop constructor
box_start = np.array([s.start for s in cropper.slices])
box_end = np.array([s.stop for s in cropper.slices])
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
if not np.array_equal(d[key].shape[1:], original_spatial_shape):
raise RuntimeError("All the image specified in keys should have same spatial shape")
meta_key = meta_key or f"{key}_{meta_key_postfix}"
d[meta_key][self.start_coord_key] = box_start
d[meta_key][self.end_coord_key] = box_end
d[meta_key][self.original_shape_key] = d[key].shape
image = cropper(d[key])
d[meta_key][self.cropped_shape_key] = image.shape
d[key] = image
pos_clicks, neg_clicks = guidance[0], guidance[1]
pos = np.subtract(pos_clicks, box_start).tolist() if len(pos_clicks) else []
neg = np.subtract(neg_clicks, box_start).tolist() if len(neg_clicks) else []
d[self.guidance] = [pos, neg]
return d
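# Usage sketch (illustrative only; shapes, margin and click values are assumptions): the click's
# bounding box plus margin is smaller than spatial_size, so an 8x8 patch is cropped and the click
# is re-expressed in the cropped frame.
def _example_spatial_crop_guidance():
    import numpy as np
    data = {
        "image": np.zeros((1, 16, 16), dtype=np.float32),
        "image_meta_dict": {},
        "guidance": [[[8, 8]], []],  # one positive click, no negative clicks
    }
    out = SpatialCropGuidanced(keys="image", guidance="guidance", spatial_size=[8, 8], margin=2)(data)
    assert out["image"].shape == (1, 8, 8)
    assert out["guidance"] == [[[4, 4]], []]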
class ResizeGuidanced(Transform):
"""
Resize the guidance based on cropped vs resized image.
    This transform assumes that the images have been cropped and resized, and that the cropped shape is stored inside
    the meta dict of the ref image.
Args:
guidance: key to guidance
ref_image: key to reference image to fetch current and original image details
meta_keys: explicitly indicate the key of the meta data dictionary of `ref_image`.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
cropped_shape_key: key that records cropped shape for foreground.
"""
def __init__(
self,
guidance: str,
ref_image: str,
meta_keys: Optional[str] = None,
meta_key_postfix: str = "meta_dict",
cropped_shape_key: str = "foreground_cropped_shape",
) -> None:
self.guidance = guidance
self.ref_image = ref_image
self.meta_keys = meta_keys
self.meta_key_postfix = meta_key_postfix
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
meta_dict: Dict = d[self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"]
current_shape = d[self.ref_image].shape[1:]
cropped_shape = meta_dict[self.cropped_shape_key][1:]
factor = np.divide(current_shape, cropped_shape)
pos_clicks, neg_clicks = guidance[0], guidance[1]
pos = np.multiply(pos_clicks, factor).astype(int, copy=False).tolist() if len(pos_clicks) else []
neg = np.multiply(neg_clicks, factor).astype(int, copy=False).tolist() if len(neg_clicks) else []
d[self.guidance] = [pos, neg]
return d
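# Usage sketch (illustrative only; shapes and click values are assumptions): clicks recorded on
# the cropped image are rescaled by the ratio between the current (resized) shape and the
# recorded cropped shape.
def _example_resize_guidance():
    import numpy as np
    data = {
        "image": np.zeros((1, 4, 4), dtype=np.float32),  # image after resizing
        "image_meta_dict": {"foreground_cropped_shape": (1, 8, 8)},  # shape right after cropping
        "guidance": [[[4, 6]], []],
    }
    out = ResizeGuidanced(guidance="guidance", ref_image="image")(data)
    assert out["guidance"] == [[[2, 3]], []]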
class RestoreLabeld(MapTransform):
"""
Restores label based on the ref image.
    The ref_image is assumed to have gone through the following transforms:
1. Fetch2DSliced (If 2D)
2. Spacingd
3. SpatialCropGuidanced
4. Resized
And its shape is assumed to be (C, D, H, W)
    This transform tries to undo these operations so that the resulting label can be overlaid on the original volume.
It does the following operation:
1. Undo Resized
2. Undo SpatialCropGuidanced
3. Undo Spacingd
4. Undo Fetch2DSliced
The resulting label is of shape (D, H, W)
Args:
keys: keys of the corresponding items to be transformed.
ref_image: reference image to fetch current and original image details
slice_only: apply only to an applicable slice, in case of 2D model/prediction
mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of bool, each element corresponds to a key in ``keys``.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `key_{meta_key_postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key that records the start coordinate of spatial bounding box for foreground.
end_coord_key: key that records the end coordinate of spatial bounding box for foreground.
original_shape_key: key that records original shape for foreground.
cropped_shape_key: key that records cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
ref_image: str,
slice_only: bool = False,
mode: Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str] = InterpolateMode.NEAREST,
align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
meta_keys: Optional[str] = None,
meta_key_postfix: str = "meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.ref_image = ref_image
self.slice_only = slice_only
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = meta_key_postfix
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
for key, mode, align_corners, meta_key in self.key_iterator(d, self.mode, self.align_corners, self.meta_keys):
image = d[key]
# Undo Resize
current_shape = image.shape
cropped_shape = meta_dict[self.cropped_shape_key]
if np.any(np.not_equal(current_shape, cropped_shape)):
resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
image = resizer(image, mode=mode, align_corners=align_corners)
# Undo Crop
original_shape = meta_dict[self.original_shape_key]
result = np.zeros(original_shape, dtype=np.float32)
box_start = meta_dict[self.start_coord_key]
box_end = meta_dict[self.end_coord_key]
spatial_dims = min(len(box_start), len(image.shape[1:]))
slices = [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
slices = tuple(slices)
result[slices] = image
# Undo Spacing
current_size = result.shape[1:]
# change spatial_shape from HWD to DHW
spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
spatial_size = spatial_shape[-len(current_size) :]
if np.any(np.not_equal(current_size, spatial_size)):
resizer = Resize(spatial_size=spatial_size, mode=mode)
result = resizer(result, mode=mode, align_corners=align_corners)
# Undo Slicing
slice_idx = meta_dict.get("slice_idx")
if slice_idx is None or self.slice_only:
final_result = result if len(result.shape) <= 3 else result[0]
else:
slice_idx = meta_dict["slice_idx"][0]
final_result = np.zeros(tuple(spatial_shape))
final_result[slice_idx] = result
d[key] = final_result
meta_key = meta_key or f"{key}_{self.meta_key_postfix}"
meta = d.get(meta_key)
if meta is None:
meta = dict()
d[meta_key] = meta
meta["slice_idx"] = slice_idx
meta["affine"] = meta_dict["original_affine"]
return d
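# Usage sketch (illustrative only; all meta values below are assumptions): a prediction made on a
# 4x4 crop is pasted back into the original 8x8 canvas using the recorded crop coordinates.
def _example_restore_label():
    import numpy as np
    meta = {
        "foreground_cropped_shape": (1, 4, 4),
        "foreground_original_shape": (1, 8, 8),
        "foreground_start_coord": np.array([2, 2]),
        "foreground_end_coord": np.array([6, 6]),
        "spatial_shape": np.array([8, 8]),
        "original_affine": np.eye(3),
    }
    data = {"pred": np.ones((1, 4, 4), dtype=np.float32), "image": np.zeros((1, 4, 4)), "image_meta_dict": meta}
    out = RestoreLabeld(keys="pred", ref_image="image")(data)
    assert out["pred"].shape == (1, 8, 8)
    assert out["pred"][0, 2:6, 2:6].sum() == 16.0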
class Fetch2DSliced(MapTransform):
"""
Fetch one slice in case of a 3D volume.
The volume only contains spatial coordinates.
Args:
keys: keys of the corresponding items to be transformed.
guidance: key that represents guidance.
axis: axis that represents slice in 3D volume.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: use `key_{meta_key_postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys,
guidance="guidance",
axis: int = 0,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
allow_missing_keys: bool = False,
):
super().__init__(keys, allow_missing_keys)
self.guidance = guidance
self.axis = axis
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
def _apply(self, image, guidance):
slice_idx = guidance[2] # (pos, neg, slice_idx)
idx = []
for i, size_i in enumerate(image.shape):
idx.append(slice_idx) if i == self.axis else idx.append(slice(0, size_i))
idx = tuple(idx)
return image[idx], idx
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
if len(guidance) < 3:
raise RuntimeError("Guidance does not container slice_idx!")
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
img_slice, idx = self._apply(d[key], guidance)
d[key] = img_slice
d[meta_key or f"{key}_{meta_key_postfix}"]["slice_idx"] = idx
return d
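# Usage sketch (illustrative only; shapes and the guidance tuple are assumptions): guidance
# carries the slice index as its third element, so the transform reduces the (D, H, W) volume
# to a single (H, W) slice.
def _example_fetch_2d_slice():
    import numpy as np
    data = {
        "image": np.zeros((4, 8, 8), dtype=np.float32),  # (D, H, W)
        "image_meta_dict": {},
        "guidance": [[[2, 3]], [], 1],  # (positive clicks, negative clicks, slice index)
    }
    out = Fetch2DSliced(keys="image")(data)
    assert out["image"].shape == (8, 8)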
| 44.041489
| 120
| 0.641392
|
1af3e3bbff860c59256992d72f2375654f57069a
| 738
|
py
|
Python
|
gu/delay.py
|
gf0842wf/gu
|
dff87afe1e51d636f69cd2d689a71a20360bb08a
|
[
"MIT"
] | 3
|
2015-03-15T12:15:33.000Z
|
2015-03-15T12:20:00.000Z
|
gu/delay.py
|
gf0842wf/gu
|
dff87afe1e51d636f69cd2d689a71a20360bb08a
|
[
"MIT"
] | null | null | null |
gu/delay.py
|
gf0842wf/gu
|
dff87afe1e51d636f69cd2d689a71a20360bb08a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""延时,循环,超时等gevent工具"""
import gevent
import time
import logging
logger = logging.getLogger(__name__)
class Timeout(gevent.Timeout):
def __init__(self, seconds=None, exception=None):
gevent.Timeout.__init__(self, seconds, exception)
self.start_time = None
def start(self):
self.start_time = time.time()
gevent.Timeout.start(self)
@property
def passed(self):
        if self.start_time is None:
            return 0
        now = time.time()
        return now - self.start_time
@property
def rest(self):
        if self.start_time is None: return 0
        if self.seconds is None: return 0
        now = time.time()
        return self.seconds - (now - self.start_time)
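# Usage sketch (illustrative only; assumes a running gevent hub): the timer records its own start
# time so callers can query elapsed and remaining seconds while it is armed.
def _example_timeout():
    t = Timeout(5)
    t.start()
    gevent.sleep(1)
    elapsed, remaining = t.passed, t.rest  # roughly 1 and 4 seconds
    t.cancel()
    return elapsed, remaining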
| 22.363636
| 57
| 0.612466
|
27e8499a052a60a0e43e570f24d4bd6eb82d7461
| 528
|
py
|
Python
|
bad_mason/covid19getter.py
|
Mason-Lin/bad_mason
|
1500e5b99c249ee14e55916bac280daf838e6413
|
[
"MIT"
] | null | null | null |
bad_mason/covid19getter.py
|
Mason-Lin/bad_mason
|
1500e5b99c249ee14e55916bac280daf838e6413
|
[
"MIT"
] | 1
|
2020-07-09T15:54:32.000Z
|
2020-07-09T16:12:34.000Z
|
bad_mason/covid19getter.py
|
Mason-Lin/bad_mason
|
1500e5b99c249ee14e55916bac280daf838e6413
|
[
"MIT"
] | null | null | null |
import subprocess
from pathlib import Path
from pprint import pprint
__all__ = ["get_covid19_data"]
def get_covid19_data():
# init_helper
helper_path = Path(__file__).absolute().parent.joinpath("datahelper.zip").resolve()
try:
subprocess.call(["py", f"{helper_path}"])
except FileNotFoundError:
print("please reinstall latest version")
raise
data = helper()
print("{:*^30}".format("FAKE DATA"))
pprint(data)
def helper():
# do something ...
return "Hello DATA!"
| 21.12
| 87
| 0.653409
|
7bce53f879e4b13debc226ec8d2ead282d32a0da
| 244
|
py
|
Python
|
blog/sitemap.py
|
florent6001/django-simple-blog
|
a8a9a38ac16d2ac91698f5ca1247bcaa5db83ffd
|
[
"MIT"
] | 2
|
2020-03-17T12:42:56.000Z
|
2020-04-19T12:10:02.000Z
|
blog/sitemap.py
|
florent6001/django-simple-blog
|
a8a9a38ac16d2ac91698f5ca1247bcaa5db83ffd
|
[
"MIT"
] | 9
|
2021-03-30T12:51:05.000Z
|
2022-03-12T00:19:03.000Z
|
blog/sitemap.py
|
florent6001/django-simple-blog
|
a8a9a38ac16d2ac91698f5ca1247bcaa5db83ffd
|
[
"MIT"
] | null | null | null |
from django.contrib.sitemaps import Sitemap
from blog.models import Post, Category
class PostSitemap(Sitemap):
def items(self):
return Post.objects.all()
class CategorySitemap(Sitemap):
def items(self):
return Category.objects.all()
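# Usage sketch (illustrative only; the project-level URL configuration shown here is an
# assumption): how these sitemap classes are typically wired into Django's sitemap view.
def _example_urlpatterns():
    from django.contrib.sitemaps.views import sitemap
    from django.urls import path
    sitemaps = {"posts": PostSitemap, "categories": CategorySitemap}
    return [
        path("sitemap.xml", sitemap, {"sitemaps": sitemaps}, name="sitemap"),
    ]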
| 20.333333
| 43
| 0.770492
|
d0a53f5f4a599a4201f63359ac6b78a34629ff7e
| 3,589
|
py
|
Python
|
monitoring/google/cloud/monitoring_v3/proto/span_context_pb2.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 2
|
2021-11-26T07:08:43.000Z
|
2022-03-07T20:20:04.000Z
|
monitoring/google/cloud/monitoring_v3/proto/span_context_pb2.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
monitoring/google/cloud/monitoring_v3/proto/span_context_pb2.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/monitoring_v3/proto/span_context.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/monitoring_v3/proto/span_context.proto",
package="google.monitoring.v3",
syntax="proto3",
serialized_options=_b(
"\n\030com.google.monitoring.v3B\020SpanContextProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3"
),
serialized_pb=_b(
'\n3google/cloud/monitoring_v3/proto/span_context.proto\x12\x14google.monitoring.v3" \n\x0bSpanContext\x12\x11\n\tspan_name\x18\x01 \x01(\tB\xa8\x01\n\x18\x63om.google.monitoring.v3B\x10SpanContextProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3b\x06proto3'
),
)
_SPANCONTEXT = _descriptor.Descriptor(
name="SpanContext",
full_name="google.monitoring.v3.SpanContext",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="span_name",
full_name="google.monitoring.v3.SpanContext.span_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=77,
serialized_end=109,
)
DESCRIPTOR.message_types_by_name["SpanContext"] = _SPANCONTEXT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SpanContext = _reflection.GeneratedProtocolMessageType(
"SpanContext",
(_message.Message,),
dict(
DESCRIPTOR=_SPANCONTEXT,
__module__="google.cloud.monitoring_v3.proto.span_context_pb2",
__doc__="""The context of a span, attached to google.api.Distribution.Exemplars in
google.api.Distribution values during aggregation.
It contains the name of a span with format:
projects/[PROJECT\_ID]/traces/[TRACE\_ID]/spans/[SPAN\_ID]
Attributes:
span_name:
The resource name of the span in the following format: ::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE\_ID] is a unique identifier for a trace within a
project; it is a 32-character hexadecimal encoding of a
16-byte array. [SPAN\_ID] is a unique identifier for a span
within a trace; it is a 16-character hexadecimal encoding of
an 8-byte array.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.SpanContext)
),
)
_sym_db.RegisterMessage(SpanContext)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
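# Usage sketch (illustrative only; the resource name below is a made-up example of the documented
# format): the generated message is used like any other protobuf message.
def _example_span_context():
    ctx = SpanContext(
        span_name="projects/my-project/traces/0123456789abcdef0123456789abcdef/spans/0123456789abcdef"
    )
    return ctx.SerializeToString()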
| 34.84466
| 370
| 0.696851
|
f0c6b03a4d1927845ea56c58a2fcc3478357b29f
| 709
|
py
|
Python
|
2019-11-19_0.py
|
yanorei32/trash
|
0b489e1eef48c51a13d2644f79377c9d05354e7a
|
[
"Unlicense"
] | null | null | null |
2019-11-19_0.py
|
yanorei32/trash
|
0b489e1eef48c51a13d2644f79377c9d05354e7a
|
[
"Unlicense"
] | 1
|
2020-01-21T06:40:38.000Z
|
2020-01-21T06:40:38.000Z
|
2019-11-19_0.py
|
yanorei32/trash
|
0b489e1eef48c51a13d2644f79377c9d05354e7a
|
[
"Unlicense"
] | 1
|
2020-01-21T07:15:58.000Z
|
2020-01-21T07:15:58.000Z
|
import random
import sys
import time
def main():
targetStr = list("ポプテピピック")
targetStrLen = len(targetStr)
matchCounter = 0
tryCounter = 0
begin = time.time()
while True:
tryCounter += 1
c = random.choice(targetStr)
sys.stdout.write(c)
if c != targetStr[matchCounter]:
matchCounter = 0
continue
if matchCounter == targetStrLen - 1:
break
matchCounter += 1
deltaTime = time.time() - begin
sys.stdout.write('\n')
sys.stdout.flush()
print(f'Done ({tryCounter} chars printed. Δt={deltaTime:.3f} sec. {tryCounter/deltaTime:.1f} chars/sec)')
if __name__ == '__main__':
main()
| 19.162162
| 109
| 0.586742
|
2124250c12034cd3ea09daca98f17c564c3c2604
| 6,804
|
py
|
Python
|
train/ssl_rot_trainer.py
|
mmaaz60/ssl_for_fgvc
|
9a4bf0a112b818caca8794868a903dc736839a43
|
[
"MIT"
] | 10
|
2021-05-24T13:23:52.000Z
|
2022-03-24T06:54:02.000Z
|
train/ssl_rot_trainer.py
|
mmaaz60/ssl_for_fgvc
|
9a4bf0a112b818caca8794868a903dc736839a43
|
[
"MIT"
] | null | null | null |
train/ssl_rot_trainer.py
|
mmaaz60/ssl_for_fgvc
|
9a4bf0a112b818caca8794868a903dc736839a43
|
[
"MIT"
] | 3
|
2021-06-10T13:59:57.000Z
|
2022-02-05T08:54:40.000Z
|
import torch
from test.base_tester import BaseTester
import logging
from utils.util import preprocess_input_data_rotation
from utils.util import save_model_checkpoints
logger = logging.getLogger(f"train/ssl_rot_trainer.py")
class SSLROTTrainer:
def __init__(self, model, dataloader, class_loss_function, rot_loss_function, rotation_loss_weight, optimizer,
epochs, lr_scheduler=None, val_dataloader=None, device="cuda", log_step=50, checkpoints_dir_path=None):
"""
Constructor, the function initializes the training related parameters.
:param model: The model to train
:param dataloader: The dataloader to get training samples from
:param class_loss_function: The CUB classification loss function
:param rot_loss_function: The rotation classification loss function
:param rotation_loss_weight: The lambda value, specifying the contribution of rotation loss
:param optimizer: The optimizer to be used for training
:param epochs: Number of epochs
:param lr_scheduler: Learning rate scheduler
:param val_dataloader: Validation dataloader to get the validation samples
:param device: The execution device
        :param log_step: Logging step; a log entry is recorded after every log_step batches
        :param checkpoints_dir_path: Checkpoints directory to save the model training progress and checkpoints
"""
self.model = model
self.dataloader = dataloader
self.class_loss = class_loss_function()
self.rot_loss = rot_loss_function()
self.rotation_loss_weight = rotation_loss_weight # Decides contribution of rotation loss to total loss
self.optimizer = optimizer
self.epochs = epochs
self.lr_scheduler = lr_scheduler
self.device = device
self.log_step = log_step
self.checkpoints_dir_path = checkpoints_dir_path
self.validator = BaseTester(val_dataloader, class_loss_function) \
if val_dataloader else None
self.metrics = {}
def train_epoch(self, epoch):
"""
The function trains the model for one epoch.
"""
total_cls_loss = 0
total_rot_loss = 0
total_loss = 0
        # allocate metrics for the classification head
total_predictions_head1 = 0
total_correct_predictions_head1 = 0
        # allocate metrics for the rotation head
total_predictions_head2 = 0
total_correct_predictions_head2 = 0
self.model.train()
for batch_idx, d in enumerate(self.dataloader):
inputs, labels = d
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# Generates rotation augmented images and corresponding labels
# Augmented labels: Repeats of original class labels for each rotation of image
augmented_inputs, augmented_labels, rot_labels = preprocess_input_data_rotation(
inputs, labels, rotation=True)
class_outputs, rot_outputs = self.model(augmented_inputs, train=True)
# Computing total loss from loss for classification head and rotation head
classification_loss = self.class_loss(class_outputs, augmented_labels)
total_cls_loss += classification_loss
rot_loss = self.rot_loss(rot_outputs, rot_labels)
total_rot_loss += rot_loss
# Limits contribution of rotation loss by rotation_loss_weight
loss = (1 - self.rotation_loss_weight) * classification_loss + self.rotation_loss_weight * rot_loss
total_loss += loss
# Metrics for classification head - head1
_, preds_head1 = torch.max(class_outputs, 1)
total_predictions_head1 += len(preds_head1)
total_correct_predictions_head1 += torch.sum(preds_head1 == augmented_labels.data)
# Metrics for rotation head - head2
_, preds_head2 = torch.max(rot_outputs, 1)
total_predictions_head2 += len(preds_head2)
total_correct_predictions_head2 += torch.sum(preds_head2 == rot_labels.data)
# optimization
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (batch_idx % self.log_step == 0) and (batch_idx != 0):
logger.info(
f"Train Epoch: {epoch}, Step, {batch_idx}/{len(self.dataloader)}, "
f"Cls Loss: {total_cls_loss / batch_idx}, Rot Loss: {total_rot_loss / batch_idx} "
f"Total Loss: {total_loss / batch_idx}")
self.metrics[epoch] = {}
self.metrics[epoch]["train"] = {}
self.metrics[epoch]["train"]["loss"] = float(total_loss / batch_idx)
self.metrics[epoch]["train"]["cls_loss"] = float(total_cls_loss / batch_idx)
self.metrics[epoch]["train"]["rot_loss"] = float(total_rot_loss / batch_idx)
self.metrics[epoch]["train"]["class_accuracy"] = float(
total_correct_predictions_head1) / float(total_predictions_head1)
self.metrics[epoch]["train"]["rot_accuracy"] = float(
total_correct_predictions_head2) / float(total_predictions_head2)
logger.info(f"Epoch {epoch} cls loss: {self.metrics[epoch]['train']['cls_loss']}, "
f"Epoch {epoch} rot loss: {self.metrics[epoch]['train']['rot_loss']}, "
f"Epoch {epoch} loss: {self.metrics[epoch]['train']['loss']}, "
f"class_accuracy:{self.metrics[epoch]['train']['class_accuracy']} "
f"rot_accuracy:{self.metrics[epoch]['train']['rot_accuracy']}")
def train_and_validate(self, start_epoch, end_epoch=None):
"""
The function implements the overall training pipeline.
:param start_epoch: Start epoch number
:param end_epoch: End epoch number
"""
self.model = self.model.to(self.device) # Transfer the model to the execution device
best_accuracy = 0 # Variable to keep track of the best test accuracy to save the best model
# Train and validate the model for (end_epoch - start_epoch)
for i in range(start_epoch, end_epoch + 1 if end_epoch else self.epochs + 1):
self.train_epoch(i)
if self.validator:
val_metrics = self.validator.test(self.model)
self.metrics[i]["val"] = {}
self.metrics[i]["val"] = val_metrics
# Save the checkpoints
best_accuracy = save_model_checkpoints(self.checkpoints_dir_path, i, self.model.state_dict(),
self.metrics[i], best_accuracy)
if self.lr_scheduler:
self.lr_scheduler.step()
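# Illustrative sketch of the loss weighting used in train_epoch (dummy loss values and an assumed
# weight): the rotation head contributes rotation_loss_weight of the total loss, the classifier
# the rest.
def _example_loss_weighting():
    rotation_loss_weight = 0.3
    classification_loss = torch.tensor(2.0)
    rot_loss = torch.tensor(1.0)
    total = (1 - rotation_loss_weight) * classification_loss + rotation_loss_weight * rot_loss
    return total  # 0.7 * 2.0 + 0.3 * 1.0 = 1.7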
| 51.938931
| 120
| 0.648589
|
037cd8a335220f5d372b13603e151aa37b014912
| 8,942
|
py
|
Python
|
reconcile/terraform_resources.py
|
apahim/qontract-reconcile
|
b540660da1ee5b2f843d42cf8754381c0487b6a9
|
[
"Apache-2.0"
] | null | null | null |
reconcile/terraform_resources.py
|
apahim/qontract-reconcile
|
b540660da1ee5b2f843d42cf8754381c0487b6a9
|
[
"Apache-2.0"
] | null | null | null |
reconcile/terraform_resources.py
|
apahim/qontract-reconcile
|
b540660da1ee5b2f843d42cf8754381c0487b6a9
|
[
"Apache-2.0"
] | null | null | null |
import sys
import shutil
import semver
import logging
import utils.gql as gql
import utils.threaded as threaded
import utils.vault_client as vault_client
import reconcile.openshift_base as ob
import reconcile.queries as queries
from utils.terrascript_client import TerrascriptClient as Terrascript
from utils.terraform_client import OR, TerraformClient as Terraform
from utils.openshift_resource import ResourceInventory
from utils.oc import OC_Map
from utils.defer import defer
from reconcile.aws_iam_keys import run as disable_keys
from utils.oc import StatusCodeError
from textwrap import indent
TF_RESOURCE = """
provider
... on NamespaceTerraformResourceRDS_v1 {
account
identifier
defaults
availability_zone
parameter_group
overrides
output_resource_name
enhanced_monitoring
replica_source
output_resource_db_name
}
... on NamespaceTerraformResourceS3_v1 {
account
region
identifier
defaults
overrides
sqs_identifier
s3_events
output_resource_name
storage_class
}
... on NamespaceTerraformResourceElastiCache_v1 {
account
identifier
defaults
parameter_group
region
overrides
output_resource_name
}
... on NamespaceTerraformResourceServiceAccount_v1 {
account
identifier
variables
policies
user_policy
output_resource_name
}
... on NamespaceTerraformResourceSQS_v1 {
account
region
identifier
output_resource_name
specs {
defaults
queues {
key
value
}
}
}
... on NamespaceTerraformResourceDynamoDB_v1 {
account
region
identifier
output_resource_name
specs {
defaults
tables {
key
value
}
}
}
... on NamespaceTerraformResourceECR_v1 {
account
identifier
region
output_resource_name
}
... on NamespaceTerraformResourceS3CloudFront_v1 {
account
region
identifier
defaults
output_resource_name
storage_class
}
... on NamespaceTerraformResourceS3SQS_v1 {
account
region
identifier
defaults
output_resource_name
storage_class
}
... on NamespaceTerraformResourceCloudWatch_v1 {
account
region
identifier
defaults
es_identifier
filter_pattern
output_resource_name
}
... on NamespaceTerraformResourceKMS_v1 {
account
region
identifier
defaults
overrides
output_resource_name
}
... on NamespaceTerraformResourceElasticSearch_v1 {
account
region
identifier
defaults
output_resource_name
}
... on NamespaceTerraformResourceACM_v1 {
account
region
identifier
secret {
path
field
}
output_resource_name
}
"""
TF_NAMESPACES_QUERY = """
{
namespaces: namespaces_v1 {
name
managedTerraformResources
terraformResources {
%s
}
cluster {
name
serverUrl
jumpHost {
hostname
knownHosts
user
port
identity {
path
field
format
}
}
automationToken {
path
field
format
}
internal
}
}
}
""" % (indent(TF_RESOURCE, 6*' '))
QONTRACT_INTEGRATION = 'terraform_resources'
QONTRACT_INTEGRATION_VERSION = semver.format_version(0, 5, 2)
QONTRACT_TF_PREFIX = 'qrtf'
def populate_oc_resources(spec, ri):
if spec.oc is None:
return
logging.debug("[populate_oc_resources] cluster: " + spec.cluster
+ " namespace: " + spec.namespace
+ " resource: " + spec.resource)
try:
for item in spec.oc.get_items(spec.resource,
namespace=spec.namespace):
openshift_resource = OR(item,
QONTRACT_INTEGRATION,
QONTRACT_INTEGRATION_VERSION)
ri.add_current(
spec.cluster,
spec.namespace,
spec.resource,
openshift_resource.name,
openshift_resource
)
except StatusCodeError as e:
ri.register_error()
msg = 'cluster: {},'
msg += 'namespace: {},'
msg += 'resource: {},'
msg += 'exception: {}'
msg = msg.format(spec.cluster, spec.namespace, spec.resource, str(e))
logging.error(msg)
def fetch_current_state(namespaces, thread_pool_size, internal, use_jump_host):
ri = ResourceInventory()
settings = queries.get_app_interface_settings()
oc_map = OC_Map(namespaces=namespaces, integration=QONTRACT_INTEGRATION,
settings=settings, internal=internal,
use_jump_host=use_jump_host,
thread_pool_size=thread_pool_size)
state_specs = \
ob.init_specs_to_fetch(
ri,
oc_map,
namespaces=namespaces,
override_managed_types=['Secret']
)
threaded.run(populate_oc_resources, state_specs, thread_pool_size, ri=ri)
return ri, oc_map
def init_working_dirs(accounts, thread_pool_size,
print_only=False, oc_map=None, settings=None):
ts = Terrascript(QONTRACT_INTEGRATION,
QONTRACT_TF_PREFIX,
thread_pool_size,
accounts,
oc_map,
settings=settings)
working_dirs = ts.dump(print_only)
return ts, working_dirs
def setup(print_only, thread_pool_size, internal, use_jump_host,
account_name):
gqlapi = gql.get_api()
accounts = queries.get_aws_accounts()
if account_name:
accounts = [n for n in accounts
if n['name'] == account_name]
if not accounts:
raise ValueError(f"aws account {account_name} is not found")
settings = queries.get_app_interface_settings()
namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
tf_namespaces = [namespace_info for namespace_info in namespaces
if namespace_info.get('managedTerraformResources')]
ri, oc_map = fetch_current_state(tf_namespaces, thread_pool_size,
internal, use_jump_host)
ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
print_only=print_only,
oc_map=oc_map,
settings=settings)
tf = Terraform(QONTRACT_INTEGRATION,
QONTRACT_INTEGRATION_VERSION,
QONTRACT_TF_PREFIX,
working_dirs,
thread_pool_size)
existing_secrets = tf.get_terraform_output_secrets()
ts.populate_resources(tf_namespaces, existing_secrets, account_name)
ts.dump(print_only, existing_dirs=working_dirs)
return ri, oc_map, tf
def cleanup_and_exit(tf=None, status=False, working_dirs={}):
if tf is None:
for wd in working_dirs.values():
shutil.rmtree(wd)
else:
tf.cleanup()
sys.exit(status)
def write_outputs_to_vault(vault_path, ri):
integration_name = QONTRACT_INTEGRATION.replace('_', '-')
for cluster, namespace, _, data in ri:
for name, d_item in data['desired'].items():
secret_path = \
f"{vault_path}/{integration_name}/{cluster}/{namespace}/{name}"
secret = {'path': secret_path, 'data': d_item.body['data']}
vault_client.write(secret)
@defer
def run(dry_run, print_only=False,
enable_deletion=False, io_dir='throughput/',
thread_pool_size=10, internal=None, use_jump_host=True,
light=False, vault_output_path='',
account_name=None, defer=None):
ri, oc_map, tf = \
setup(print_only, thread_pool_size, internal, use_jump_host,
account_name)
defer(lambda: oc_map.cleanup())
if print_only:
cleanup_and_exit()
if tf is None:
err = True
cleanup_and_exit(tf, err)
if not light:
deletions_detected, err = tf.plan(enable_deletion)
if err:
cleanup_and_exit(tf, err)
if deletions_detected:
if enable_deletion:
tf.dump_deleted_users(io_dir)
else:
cleanup_and_exit(tf, deletions_detected)
if dry_run:
cleanup_and_exit(tf)
if not light:
err = tf.apply()
if err:
cleanup_and_exit(tf, err)
    # Temporarily skip applying secrets when running tf-r for a single account locally.
    # The integration running on the cluster will manage the secrets
    # after any manual run.
    # This will be refactored together with the caller for the future operator implementation.
if account_name:
cleanup_and_exit(tf)
tf.populate_desired_state(ri, oc_map)
ob.realize_data(dry_run, oc_map, ri)
disable_keys(dry_run, thread_pool_size,
disable_service_account_keys=True)
if vault_output_path:
write_outputs_to_vault(vault_output_path, ri)
if ri.has_error_registered():
sys.exit(1)
cleanup_and_exit(tf)
| 24.977654
| 79
| 0.646164
|
a0465c9f8827cef8f5e3ba3720f2a24e4221ea5c
| 7,606
|
py
|
Python
|
backbone/effnet.py
|
YLFF/2004P_Pytorch-Networks
|
2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05
|
[
"MIT"
] | 454
|
2019-12-26T15:04:02.000Z
|
2020-10-24T13:57:35.000Z
|
backbone/effnet.py
|
YLFF/2004P_Pytorch-Networks
|
2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05
|
[
"MIT"
] | 1
|
2020-05-14T06:01:18.000Z
|
2020-05-14T06:01:18.000Z
|
backbone/effnet.py
|
YLFF/2004P_Pytorch-Networks
|
2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05
|
[
"MIT"
] | 17
|
2019-12-27T12:19:47.000Z
|
2020-02-03T07:42:05.000Z
|
import torch
import torch.nn as nn
class EffHead(nn.Module):
"""EfficientNet head: 1x1, BN, Swish, AvgPool, Dropout, FC."""
def __init__(self, w_in, w_out, nc):
super(EffHead, self).__init__()
self.conv = nn.Conv2d(w_in, w_out, 1, stride=1, padding=0, bias=False)
self.conv_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.conv_swish = Swish()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
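        # NOTE: the dropout ratio is hard-coded to 0 here, so the branch below never creates a
        # dropout layer and forward() falls through its hasattr() guard.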
if 0 > 0.0:
self.dropout = nn.Dropout(0)
self.fc = nn.Linear(w_out, nc, bias=True)
def forward(self, x):
x = self.conv_swish(self.conv_bn(self.conv(x)))
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x) if hasattr(self, "dropout") else x
x = self.fc(x)
return x
# @staticmethod
# def complexity(cx, w_in, w_out, nc):
# cx = net.complexity_conv2d(cx, w_in, w_out, 1, 1, 0)
# cx = net.complexity_batchnorm2d(cx, w_out)
# cx["h"], cx["w"] = 1, 1
# cx = net.complexity_conv2d(cx, w_out, nc, 1, 1, 0, bias=True)
# return cx
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def __init__(self, w_in, w_se):
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.f_ex = nn.Sequential(
nn.Conv2d(w_in, w_se, 1, bias=True),
Swish(),
nn.Conv2d(w_se, w_in, 1, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
# @staticmethod
# def complexity(cx, w_in, w_se):
# h, w = cx["h"], cx["w"]
# cx["h"], cx["w"] = 1, 1
# cx = net.complexity_conv2d(cx, w_in, w_se, 1, 1, 0, bias=True)
# cx = net.complexity_conv2d(cx, w_se, w_in, 1, 1, 0, bias=True)
# cx["h"], cx["w"] = h, w
# return cx
class MBConv(nn.Module):
"""Mobile inverted bottleneck block w/ SE (MBConv)."""
def __init__(self, w_in, exp_r, kernel, stride, se_r, w_out):
# expansion, 3x3 dwise, BN, Swish, SE, 1x1, BN, skip_connection
super(MBConv, self).__init__()
self.exp = None
w_exp = int(w_in * exp_r)
if w_exp != w_in:
self.exp = nn.Conv2d(w_in, w_exp, 1, stride=1, padding=0, bias=False)
self.exp_bn = nn.BatchNorm2d(w_exp, eps=1e-5, momentum=0.1)
self.exp_swish = Swish()
dwise_args = {"groups": w_exp, "padding": (kernel - 1) // 2, "bias": False}
self.dwise = nn.Conv2d(w_exp, w_exp, kernel, stride=stride, **dwise_args)
self.dwise_bn = nn.BatchNorm2d(w_exp, eps=1e-5, momentum=0.1)
self.dwise_swish = Swish()
self.se = SE(w_exp, int(w_in * se_r))
self.lin_proj = nn.Conv2d(w_exp, w_out, 1, stride=1, padding=0, bias=False)
self.lin_proj_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
# Skip connection if in and out shapes are the same (MN-V2 style)
self.has_skip = stride == 1 and w_in == w_out
def forward(self, x):
f_x = x
if self.exp:
f_x = self.exp_swish(self.exp_bn(self.exp(f_x)))
f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
f_x = self.se(f_x)
f_x = self.lin_proj_bn(self.lin_proj(f_x))
if self.has_skip:
f_x = x + f_x
return f_x
# @staticmethod
# def complexity(cx, w_in, exp_r, kernel, stride, se_r, w_out):
# w_exp = int(w_in * exp_r)
# if w_exp != w_in:
# cx = net.complexity_conv2d(cx, w_in, w_exp, 1, 1, 0)
# cx = net.complexity_batchnorm2d(cx, w_exp)
# padding = (kernel - 1) // 2
# cx = net.complexity_conv2d(cx, w_exp, w_exp, kernel, stride, padding, w_exp)
# cx = net.complexity_batchnorm2d(cx, w_exp)
# cx = SE.complexity(cx, w_exp, int(w_in * se_r))
# cx = net.complexity_conv2d(cx, w_exp, w_out, 1, 1, 0)
# cx = net.complexity_batchnorm2d(cx, w_out)
# return cx
class EffStage(nn.Module):
"""EfficientNet stage."""
def __init__(self, w_in, exp_r, kernel, stride, se_r, w_out, d):
super(EffStage, self).__init__()
for i in range(d):
b_stride = stride if i == 0 else 1
b_w_in = w_in if i == 0 else w_out
name = "b{}".format(i + 1)
self.add_module(name, MBConv(b_w_in, exp_r, kernel, b_stride, se_r, w_out))
def forward(self, x):
for block in self.children():
x = block(x)
return x
# @staticmethod
# def complexity(cx, w_in, exp_r, kernel, stride, se_r, w_out, d):
# for i in range(d):
# b_stride = stride if i == 0 else 1
# b_w_in = w_in if i == 0 else w_out
# cx = MBConv.complexity(cx, b_w_in, exp_r, kernel, b_stride, se_r, w_out)
# return cx
class StemIN(nn.Module):
"""EfficientNet stem for ImageNet: 3x3, BN, Swish."""
def __init__(self, w_in, w_out):
super(StemIN, self).__init__()
self.conv = nn.Conv2d(w_in, w_out, 3, stride=2, padding=1, bias=False)
self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.swish = Swish()
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
# @staticmethod
# def complexity(cx, w_in, w_out):
# cx = net.complexity_conv2d(cx, w_in, w_out, 3, 2, 1)
# cx = net.complexity_batchnorm2d(cx, w_out)
# return cx
class EffNet(nn.Module):
"""EfficientNet model."""
@staticmethod
def get_args(cfg):
return {
"stem_w": cfg.EFFNET.STEM_W,
"ds": cfg.EFFNET.DEPTHS,
"ws": cfg.EFFNET.WIDTHS,
"exp_rs": cfg.EFFNET.EXP_RATIOS,
"se_r": cfg.EFFNET.SE_R,
"ss": cfg.EFFNET.STRIDES,
"ks": cfg.EFFNET.KERNELS,
"head_w": cfg.EFFNET.HEAD_W,
"nc": cfg.CLASS_NUM,
}
def __init__(self,cfg,logger):
super(EffNet, self).__init__()
self._construct(**EffNet.get_args(cfg))
def _construct(self, stem_w, ds, ws, exp_rs, se_r, ss, ks, head_w, nc):
stage_params = list(zip(ds, ws, exp_rs, ss, ks))
self.stem = StemIN(3, stem_w)
prev_w = stem_w
for i, (d, w, exp_r, stride, kernel) in enumerate(stage_params):
name = "s{}".format(i + 1)
self.add_module(name, EffStage(prev_w, exp_r, kernel, stride, se_r, w, d))
prev_w = w
self.head = EffHead(prev_w, head_w, nc)
def forward(self, x):
for module in self.children():
x = module(x)
return x
# @staticmethod
# def complexity(cx):
# """Computes model complexity. If you alter the model, make sure to update."""
# return EffNet._complexity(cx, **EffNet.get_args())
# @staticmethod
# def _complexity(cx, stem_w, ds, ws, exp_rs, se_r, ss, ks, head_w, nc):
# stage_params = list(zip(ds, ws, exp_rs, ss, ks))
# cx = StemIN.complexity(cx, 3, stem_w)
# prev_w = stem_w
# for d, w, exp_r, stride, kernel in stage_params:
# cx = EffStage.complexity(cx, prev_w, exp_r, kernel, stride, se_r, w, d)
# prev_w = w
# cx = EffHead.complexity(cx, prev_w, head_w, nc)
# return cx
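# Construction sketch (illustrative only): a hypothetical config object whose values approximate
# EfficientNet-B0; the real project reads these fields from its own cfg module.
def _example_build_effnet():
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        EFFNET=SimpleNamespace(
            STEM_W=32,
            DEPTHS=[1, 2, 2, 3, 3, 4, 1],
            WIDTHS=[16, 24, 40, 80, 112, 192, 320],
            EXP_RATIOS=[1, 6, 6, 6, 6, 6, 6],
            SE_R=0.25,
            STRIDES=[1, 2, 2, 2, 1, 2, 1],
            KERNELS=[3, 3, 5, 3, 5, 5, 3],
            HEAD_W=1280,
        ),
        CLASS_NUM=1000,
    )
    model = EffNet(cfg, logger=None)
    out = model(torch.randn(1, 3, 224, 224))
    assert out.shape == (1, 1000)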
| 35.050691
| 87
| 0.56508
|
b2057b6fb9429780a75729d8ff20887b544cb93e
| 2,474
|
py
|
Python
|
vendor/github.com/open-policy-agent/opa/build/changelog.py
|
chronidev/opa-docker-authz
|
d2a0ea0be813d4327604da05fe69bf89d35bd13d
|
[
"Apache-2.0"
] | 5
|
2019-03-16T13:06:05.000Z
|
2020-04-25T00:19:16.000Z
|
vendor/github.com/open-policy-agent/opa/build/changelog.py
|
chronidev/opa-docker-authz
|
d2a0ea0be813d4327604da05fe69bf89d35bd13d
|
[
"Apache-2.0"
] | 1
|
2019-08-15T15:57:05.000Z
|
2019-08-15T15:57:05.000Z
|
vendor/github.com/open-policy-agent/opa/build/changelog.py
|
chronidev/opa-docker-authz
|
d2a0ea0be813d4327604da05fe69bf89d35bd13d
|
[
"Apache-2.0"
] | 2
|
2021-03-17T03:01:26.000Z
|
2021-12-17T20:41:17.000Z
|
#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message, repo_url):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} ([#{issue_id}]({repo_url}/issues/{issue_id}))".format(subject=subject, issue_id=issue_id, repo_url=repo_url)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_url", default="https://github.com/open-policy-agent/opa")
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message, args.repo_url)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in sorted(changelog["Fixes"]):
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in sorted(changelog[None]):
print "- {}".format(line)
if __name__ == "__main__":
main()
| 27.797753
| 143
| 0.649151
|
ff69457de5259e3e04575822a38114f0462330a3
| 1,949
|
py
|
Python
|
backend/app/db/crud.py
|
qew21/react-diary
|
8d7ac6455f2a639724ffeea8a50fc2299d7ca7ea
|
[
"MIT"
] | null | null | null |
backend/app/db/crud.py
|
qew21/react-diary
|
8d7ac6455f2a639724ffeea8a50fc2299d7ca7ea
|
[
"MIT"
] | null | null | null |
backend/app/db/crud.py
|
qew21/react-diary
|
8d7ac6455f2a639724ffeea8a50fc2299d7ca7ea
|
[
"MIT"
] | null | null | null |
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
import typing as t
from . import models, schemas
from app.core.security import get_password_hash
def get_user(db: Session, user_id: int):
user = db.query(models.User).filter(models.User.id == user_id).first()
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
def get_user_by_email(db: Session, email: str) -> schemas.UserBase:
return db.query(models.User).filter(models.User.email == email).first()
def get_users(
db: Session, skip: int = 0, limit: int = 100
) -> t.List[schemas.UserOut]:
return db.query(models.User).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
hashed_password = get_password_hash(user.password)
db_user = models.User(
first_name=user.first_name,
last_name=user.last_name,
email=user.email,
is_active=user.is_active,
is_superuser=user.is_superuser,
hashed_password=hashed_password,
)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def delete_user(db: Session, user_id: int):
user = get_user(db, user_id)
if not user:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail="User not found")
db.delete(user)
db.commit()
return user
def edit_user(
db: Session, user_id: int, user: schemas.UserEdit
) -> schemas.User:
db_user = get_user(db, user_id)
if not db_user:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail="User not found")
update_data = user.dict(exclude_unset=True)
if "password" in update_data:
update_data["hashed_password"] = get_password_hash(user.password)
del update_data["password"]
for key, value in update_data.items():
setattr(db_user, key, value)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
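# Illustrative sketch (not part of the original module, and assuming that, as in the
# upstream FastAPI template this file follows, schemas.UserEdit only requires "email"):
# edit_user touches just the columns the client sent, because dict(exclude_unset=True)
# drops fields that were never assigned on the Pydantic model.
def _example_partial_update_payload():
    payload = schemas.UserEdit(email="ada@example.com")  # other fields left unset
    return payload.dict(exclude_unset=True)  # -> {"email": "ada@example.com"}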
| 27.069444
| 79
| 0.691124
|
44559a0ff689765ad1d80ebeba1cc0ffa51622d2
| 1,123
|
py
|
Python
|
var/spack/repos/builtin/packages/r-genelendatabase/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/r-genelendatabase/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/r-genelendatabase/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGenelendatabase(RPackage):
"""Lengths of mRNA transcripts for a number of genomes.
Length of mRNA transcripts for a number of genomes and gene ID formats,
largely based on UCSC table browser"""
homepage = "https://bioconductor.org/packages/geneLenDataBase"
git = "https://git.bioconductor.org/packages/geneLenDataBase.git"
version('1.20.0', commit='70a1abed00ee68f7bfa07c42c011f9edae9915e4')
version('1.18.0', commit='77db87e5a4819bf94761fabef0d2ff741a1c5d07')
version('1.16.0', commit='c2a8b2359c6c59388853d6f6d15d71dffb17a198')
version('1.14.0', commit='b456b3ffb04eaf335893fdec2bb10f6795dd7e08')
version('1.12.0', commit='85d6536763c12850e6c01da9e2f9e0b9c07601fe')
depends_on('r@2.11.0:', type=('build', 'run'))
depends_on('r-rtracklayer', type=('build', 'run'))
depends_on('r-genomicfeatures@1.3.15:', type=('build', 'run'))
| 41.592593
| 78
| 0.730187
|
08e8b80fd22d1a8572c6c61a9735c4acdf6cf27e
| 25,553
|
py
|
Python
|
orphicx_graph.py
|
WanyuGroup/CVPR2022-OrphicX
|
98d8d8259439c45661573e575cf956331df16abc
|
[
"MIT"
] | null | null | null |
orphicx_graph.py
|
WanyuGroup/CVPR2022-OrphicX
|
98d8d8259439c45661573e575cf956331df16abc
|
[
"MIT"
] | null | null | null |
orphicx_graph.py
|
WanyuGroup/CVPR2022-OrphicX
|
98d8d8259439c45661573e575cf956331df16abc
|
[
"MIT"
] | null | null | null |
""" explainer_main.py
Main user interface for the explainer module.
"""
import argparse
import os
from networkx.algorithms.components.connected import connected_components
import sklearn.metrics as metrics
from functools import partial
from tensorboardX import SummaryWriter
import sys
import time
import math
import pickle
import shutil
import torch
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import networkx as nx
import torch.nn.functional as F
import causaleffect
from torch import nn, optim
from gae.model import VGAE3MLP
from gae.optimizer import loss_function as gae_loss
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(dir_path, 'gnnexp'))
import models
import utils.io_utils as io_utils
import utils.parser_utils as parser_utils
from explainer import explain
decimal_round = lambda x: round(x, 5)
color_map = ['gray', 'blue', 'purple', 'red', 'brown', 'green', 'orange', 'olive']
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='Mutagenicity', help='Name of dataset.')
parser.add_argument('--output', type=str, default=None, help='output path.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('-e', '--epoch', type=int, default=300, help='Number of training epochs.')
parser.add_argument('-b', '--batch_size', type=int, default=128, help='Number of samples in a minibatch.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--max_grad_norm', type=float, default=1, help='Maximum gradient norm for clipping.')
parser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')
parser.add_argument('--encoder_hidden1', type=int, default=32, help='Number of units in hidden layer 1.')
parser.add_argument('--encoder_hidden2', type=int, default=16, help='Number of units in hidden layer 2.')
parser.add_argument('--encoder_output', type=int, default=16, help='Dim of output of VGAE encoder.')
parser.add_argument('--decoder_hidden1', type=int, default=16, help='Number of units in decoder hidden layer 1.')
parser.add_argument('--decoder_hidden2', type=int, default=16, help='Number of units in decoder hidden layer 2.')
parser.add_argument('--K', type=int, default=8, help='Number of causal factors.')
parser.add_argument('--coef_lambda', type=float, default=0.01, help='Coefficient of gae loss.')
parser.add_argument('--coef_kl', type=float, default=0.01, help='Coefficient of KL divergence loss.')
parser.add_argument('--coef_causal', type=float, default=1.0, help='Coefficient of causal loss.')
parser.add_argument('--coef_size', type=float, default=0.0, help='Coefficient of size loss.')
parser.add_argument('--NX', type=int, default=1, help='Number of graphs sampled per batch for the causal loss.')
parser.add_argument('--NA', type=int, default=1, help='Number of adjacency matrices sampled per graph for the causal loss.')
parser.add_argument('--Nalpha', type=int, default=25, help='Number of monte-carlo samples per causal factor.')
parser.add_argument('--Nbeta', type=int, default=100, help='Number of monte-carlo samples per noncausal factor.')
parser.add_argument('--node_perm', action="store_true", help='Use node permutation as data augmentation for causal training.')
parser.add_argument('--load_ckpt', default=None, help='Load parameters from checkpoint.')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--retrain', action='store_true')
parser.add_argument('--patient', type=int, default=100, help='Patience (epochs) for early stopping.')
parser.add_argument('--plot_info_flow', action='store_true')
args = parser.parse_args()
if args.gpu and torch.cuda.is_available():
print("Use cuda")
device = torch.device("cuda")
else:
device = torch.device("cpu")
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
random.seed(args.seed)
def graph_labeling(G):
for node in G:
G.nodes[node]['string'] = 1
old_strings = tuple([G.nodes[node]['string'] for node in G])
for iter_num in range(100):
for node in G:
string = sorted([G.nodes[neigh]['string'] for neigh in G.neighbors(node)])
G.nodes[node]['concat_string'] = tuple([G.nodes[node]['string']] + string)
d = nx.get_node_attributes(G,'concat_string')
nodes,strings = zip(*{k: d[k] for k in sorted(d, key=d.get)}.items())
map_string = dict([[string, i+1] for i, string in enumerate(sorted(set(strings)))])
for node in nodes:
G.nodes[node]['string'] = map_string[G.nodes[node]['concat_string']]
new_strings = tuple([G.nodes[node]['string'] for node in G])
if old_strings == new_strings:
break
else:
old_strings = new_strings
return G
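# Illustrative usage sketch (not part of the original script): graph_labeling refines
# integer node labels from neighbour labels (Weisfeiler-Lehman style); these labels are
# later one-hot encoded and concatenated to the node features. The graph is made up.
def _example_graph_labeling():
    toy_graph = graph_labeling(nx.path_graph(4))  # 0-1-2-3 chain
    return [toy_graph.nodes[n]['string'] for n in toy_graph]  # endpoints share a label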
def preprocess_graph(adj):
adj_ = adj + np.eye(adj.shape[0])
rowsum = np.array(adj_.sum(1))
degree_mat_inv_sqrt = np.diag(np.power(rowsum, -0.5).flatten())
adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt)
return torch.from_numpy(adj_normalized).float()
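# Illustrative usage sketch (not part of the original script): preprocess_graph applies
# the symmetric normalization D^-1/2 (A + I) D^-1/2 used by GCN-style encoders.
# The 3-node adjacency matrix below is made up for the example.
def _example_preprocess_graph():
    toy_adj = np.array([[0., 1., 0.],
                        [1., 0., 1.],
                        [0., 1., 0.]])
    return preprocess_graph(toy_adj)  # torch.FloatTensor of shape (3, 3)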
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def gaeloss(x,mu,logvar,data):
return gae_loss(preds=x, labels=data['adj_label'],
mu=mu, logvar=logvar, n_nodes=data['n_nodes'],
norm=data['norm'], pos_weight=data['pos_weight'])
softmax = torch.nn.Softmax(dim=1)
ce = torch.nn.CrossEntropyLoss(reduction='mean')
def main():
# Load a model checkpoint
ckpt = torch.load('ckpt/%s_base_h20_o20.pth.tar'%(args.dataset))
cg_dict = ckpt["cg"] # get computation graph
input_dim = cg_dict["feat"].shape[2]
num_classes = cg_dict["pred"].shape[2]
print("input dim: ", input_dim, "; num classes: ", num_classes)
# Explain Graph prediction
classifier = models.GcnEncoderGraph(
input_dim=input_dim,
hidden_dim=20,
embedding_dim=20,
label_dim=num_classes,
num_layers=3,
bn=False,
args=argparse.Namespace(gpu=args.gpu,bias=True,method=None),
).to(device)
# load state_dict (obtained by model.state_dict() when saving checkpoint)
classifier.load_state_dict(ckpt["model_state"])
classifier.eval()
print("Number of graphs:", cg_dict["adj"].shape[0])
if args.output is None:
args.output = args.dataset
K = args.K
L = args.encoder_output - K
ceparams = {
'Nalpha': args.Nalpha,
'Nbeta' : args.Nbeta,
'K' : K,
'L' : L,
'z_dim' : args.encoder_output,
'M' : num_classes}
model = VGAE3MLP(
input_dim + 100, args.encoder_hidden1, args.encoder_hidden1,
args.encoder_output, args.decoder_hidden1, args.decoder_hidden2,
args.K, args.dropout
).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = gaeloss
label_onehot = torch.eye(100, dtype=torch.float)
class GraphSampler(torch.utils.data.Dataset):
""" Sample graphs and nodes in graph
"""
def __init__(
self,
graph_idxs
):
self.graph_idxs = graph_idxs
self.graph_data = []
for graph_idx in graph_idxs:
adj = cg_dict["adj"][graph_idx].float()
label = cg_dict["label"][graph_idx].long()
feat = cg_dict["feat"][graph_idx, :].float()
G = graph_labeling(nx.from_numpy_array(cg_dict["adj"][graph_idx].numpy()))
graph_label = np.array([G.nodes[node]['string'] for node in G])
graph_label_onehot = label_onehot[graph_label]
sub_feat = torch.cat((feat, graph_label_onehot), dim=1)
adj_label = adj + np.eye(adj.shape[0])
n_nodes = adj.shape[0]
graph_size = torch.count_nonzero(adj.sum(-1))
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
pos_weight = torch.from_numpy(np.array(pos_weight))
norm = torch.tensor(adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2))
self.graph_data += [{
"graph_idx": graph_idx,
"graph_size": graph_size,
"sub_adj": adj.to(device),
"feat": feat.to(device).float(),
"sub_feat": sub_feat.to(device).float(),
"sub_label": label.to(device).float(),
"adj_label": adj_label.to(device).float(),
"n_nodes": torch.Tensor([n_nodes])[0].to(device),
"pos_weight": pos_weight.to(device),
"norm": norm.to(device)
}]
def __len__(self):
return len(self.graph_idxs)
def __getitem__(self, idx):
return self.graph_data[idx]
train_idxs = np.array(cg_dict['train_idx'])
val_idxs = np.array(cg_dict['val_idx'])
test_idxs = np.array(cg_dict['test_idx'])
train_graphs = GraphSampler(train_idxs)
train_dataset = torch.utils.data.DataLoader(
train_graphs,
batch_size=args.batch_size,
shuffle=True,
num_workers=0,
)
val_graphs = GraphSampler(val_idxs)
val_dataset = torch.utils.data.DataLoader(
val_graphs,
batch_size=1000,
shuffle=False,
num_workers=0,
)
test_graphs = GraphSampler(test_idxs)
test_dataset = torch.utils.data.DataLoader(
test_graphs,
batch_size=1000,
shuffle=False,
num_workers=0,
)
def eval_model(dataset, prefix=''):
model.eval()
with torch.no_grad():
for data in dataset:
labels = cg_dict['label'][data['graph_idx'].long()].long().to(device)
recovered, mu, logvar = model(data['sub_feat'], data['sub_adj'])
recovered_adj = torch.sigmoid(recovered)
nll_loss = criterion(recovered, mu, logvar, data).mean()
org_adjs = data['sub_adj']
org_logits = classifier(data['feat'], data['sub_adj'])[0]
org_probs = F.softmax(org_logits, dim=1)
org_log_probs = F.log_softmax(org_logits, dim=1)
masked_recovered_adj = recovered_adj * data['sub_adj']
recovered_logits = classifier(data['feat'], masked_recovered_adj)[0]
recovered_probs = F.softmax(recovered_logits, dim=1)
recovered_log_probs = F.log_softmax(recovered_logits, dim=1)
alpha_mu = torch.zeros_like(mu)
alpha_mu[:,:,:args.K] = mu[:,:,:args.K]
alpha_adj = torch.sigmoid(model.dc(alpha_mu))
masked_alpha_adj = alpha_adj * data['sub_adj']
alpha_logits = classifier(data['feat'], masked_alpha_adj)[0]
beta_mu = torch.zeros_like(mu)
beta_mu[:,:,args.K:] = mu[:,:,args.K:]
beta_adj = torch.sigmoid(model.dc(beta_mu))
masked_beta_adj = beta_adj * data['sub_adj']
beta_logits = classifier(data['feat'], masked_beta_adj)[0]
causal_loss = []
beta_info = []
for idx in random.sample(range(0, data['feat'].shape[0]), args.NX):
_causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)
_beta_info, _ = causaleffect.beta_info_flow(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)
causal_loss += [_causal_loss]
beta_info += [_beta_info]
for A_idx in random.sample(range(0, data['feat'].shape[0]), args.NA-1):
if args.node_perm:
perm = torch.randperm(data['graph_size'][idx])
perm_adj = data['sub_adj'][idx].clone().detach()
perm_adj[:data['graph_size'][idx]] = perm_adj[perm]
else:
perm_adj = data['sub_adj'][A_idx]
_causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)
_beta_info, _ = causaleffect.beta_info_flow(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)
causal_loss += [_causal_loss]
beta_info += [_beta_info]
causal_loss = torch.stack(causal_loss).mean()
alpha_info = causal_loss
beta_info = torch.stack(beta_info).mean()
klloss = F.kl_div(F.log_softmax(alpha_logits, dim=1), org_probs, reduction='mean')
pred_labels = torch.argmax(org_probs,axis=1)
org_acc = (torch.argmax(org_probs,axis=1) == torch.argmax(recovered_probs,axis=1)).float().mean()
pred_acc = (torch.argmax(recovered_probs,axis=1) == labels).float().mean()
kl_pred_org = F.kl_div(recovered_log_probs, org_probs, reduction='mean')
alpha_probs = F.softmax(alpha_logits, dim=1)
alpha_log_probs = F.log_softmax(alpha_logits, dim=1)
beta_probs = F.softmax(beta_logits, dim=1)
beta_log_probs = F.log_softmax(beta_logits, dim=1)
alpha_gt_acc = (torch.argmax(alpha_probs,axis=1) == labels).float().mean()
alpha_pred_acc = (torch.argmax(alpha_probs,axis=1) == pred_labels).float().mean()
alpha_kld = F.kl_div(alpha_log_probs, org_probs, reduction='mean')
beta_gt_acc = (torch.argmax(beta_probs,axis=1) == labels).float().mean()
beta_pred_acc = (torch.argmax(beta_probs,axis=1) == pred_labels).float().mean()
beta_kld = F.kl_div(beta_log_probs, org_probs, reduction='mean')
alpha_sparsity = masked_alpha_adj.mean((1,2))/org_adjs.mean((1,2))
loss = args.coef_lambda * nll_loss + \
args.coef_causal * causal_loss + \
args.coef_kl * klloss + \
args.coef_size * alpha_sparsity.mean()
writer.add_scalar("%s/total_loss"%prefix, loss, epoch)
writer.add_scalar("%s/nll"%prefix, nll_loss, epoch)
writer.add_scalar("%s/causal"%prefix, causal_loss, epoch)
writer.add_scalar("%s/alpha_info_flow"%prefix, alpha_info/(alpha_info+beta_info), epoch)
writer.add_scalar("%s/beta_info_flow"%prefix, beta_info/(alpha_info+beta_info), epoch)
writer.add_scalar("%s/acc(Y_rec, Y_org)"%prefix, org_acc, epoch)
writer.add_scalar("%s/acc(Y_rec, labels)"%prefix, pred_acc, epoch)
writer.add_scalar("%s/kld(Y_rec, Y_org)"%prefix, kl_pred_org, epoch)
writer.add_scalar("%s/kld(Y_alpha, Y_org)"%prefix, alpha_kld, epoch)
writer.add_scalar("%s/kld(Y_beta, Y_org)"%prefix, beta_kld, epoch)
writer.add_scalar("%s/alpha_sparsity"%prefix, alpha_sparsity.mean(), epoch)
writer.add_scalar("%s/acc(Y_alpha, labels)"%prefix, alpha_gt_acc, epoch)
writer.add_scalar("%s/acc(Y_beta, labels)"%prefix, beta_gt_acc, epoch)
writer.add_scalar("%s/acc(Y_alpha, Y_org)"%prefix, alpha_pred_acc, epoch)
writer.add_scalar("%s/acc(Y_beta, Y_org)"%prefix, beta_pred_acc, epoch)
return loss.item()
def save_checkpoint(filename):
torch.save({
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'best_loss': best_loss,
'epoch': epoch
}, filename)
if args.load_ckpt:
ckpt_path = args.load_ckpt
else:
ckpt_path = os.path.join('explanation', args.output, 'model.ckpt')
if os.path.exists(ckpt_path) and not args.retrain:
print("Load checkpoint from {}".format(ckpt_path))
checkpoint = torch.load(ckpt_path)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
best_loss = checkpoint['best_loss']
else:
args.retrain = True
start_epoch = 1
best_loss = 100
if args.resume or args.retrain:
patient = args.patient
model.train()
start_time = time.time()
writer = SummaryWriter(comment=args.output)
os.makedirs('explanation/%s' % args.output, exist_ok=True)
for epoch in tqdm(range(start_epoch, args.epoch+1)):
# print("------- Epoch %2d ------" % epoch)
model.train()
train_losses = []
for batch_idx, data in enumerate(train_dataset):
optimizer.zero_grad()
mu, logvar = model.encode(data['sub_feat'], data['sub_adj'])
sample_mu = model.reparameterize(mu, logvar)
recovered = model.dc(sample_mu)
org_logit = classifier(data['feat'], data['sub_adj'])[0]
org_probs = F.softmax(org_logit, dim=1)
if args.coef_lambda:
nll_loss = args.coef_lambda * criterion(recovered, mu, logvar, data).mean()
else:
nll_loss = 0
alpha_mu = torch.zeros_like(sample_mu)
alpha_mu[:,:,:args.K] = sample_mu[:,:,:args.K]
alpha_adj = torch.sigmoid(model.dc(alpha_mu))
masked_alpha_adj = alpha_adj * data['sub_adj']
alpha_logit = classifier(data['feat'], masked_alpha_adj)[0]
alpha_sparsity = masked_alpha_adj.mean((1,2))/data['sub_adj'].mean((1,2))
if args.coef_causal:
causal_loss = []
NX = min(data['feat'].shape[0], args.NX)
NA = min(data['feat'].shape[0], args.NA)
for idx in random.sample(range(0, data['feat'].shape[0]), NX):
_causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)
causal_loss += [_causal_loss]
for A_idx in random.sample(range(0, data['feat'].shape[0]), NA-1):
if args.node_perm:
perm = torch.randperm(data['graph_size'][idx])
perm_adj = data['sub_adj'][idx].clone().detach()
perm_adj[:data['graph_size'][idx]] = perm_adj[perm]
else:
perm_adj = data['sub_adj'][A_idx]
_causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)
causal_loss += [_causal_loss]
causal_loss = args.coef_causal * torch.stack(causal_loss).mean()
else:
causal_loss = 0
if args.coef_kl:
klloss = args.coef_kl * F.kl_div(F.log_softmax(alpha_logit,dim=1), org_probs, reduction='mean')
else:
klloss = 0
if args.coef_size:
size_loss = args.coef_size * alpha_sparsity.mean()
else:
size_loss = 0
loss = nll_loss + causal_loss + klloss + size_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
train_losses += [[nll_loss, causal_loss, klloss, size_loss]]
sys.stdout.flush()
# train_loss = (torch.cat(train_losses)).mean().item()
nll_loss, causal_loss, klloss, size_loss = torch.tensor(train_losses).mean(0)
writer.add_scalar("train/nll", nll_loss, epoch)
writer.add_scalar("train/causal", causal_loss, epoch)
writer.add_scalar("train/kld(Y_alpha,Y_org)", klloss, epoch)
writer.add_scalar("train/alpha_sparsity", size_loss, epoch)
writer.add_scalar("train/total_loss", nll_loss + causal_loss + klloss + size_loss, epoch)
val_loss = eval_model(val_dataset, 'val')
patient -= 1
if val_loss < best_loss:
best_loss = val_loss
save_checkpoint('explanation/%s/model.ckpt' % args.output)
test_loss = eval_model(test_dataset, 'test')
patient = 100
elif patient <= 0:
print("Early stopping!")
break
if epoch % 100 == 0:
save_checkpoint('explanation/%s/model-%depoch.ckpt' % (args.output,epoch))
print("Train time:", time.time() - start_time)
writer.close()
checkpoint = torch.load('explanation/%s/model.ckpt' % args.output)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("Start evaluation.")
model.eval()
results = []
with torch.no_grad():
for data in test_dataset:
labels = cg_dict['label'][data['graph_idx'].long()].long().to(device)
mu, logvar = model.encode(data['sub_feat'], data['sub_adj'])
org_logits = classifier(data['feat'], data['sub_adj'])[0]
org_probs = F.softmax(org_logits, dim=1)
pred_labels = torch.argmax(org_probs,axis=1)
alpha_mu = torch.zeros_like(mu)
std = torch.exp(logvar)
eps = torch.randn_like(std)
alpha_mu[:,:,:args.K] = eps.mul(std).add_(mu)[:,:,:args.K]
alpha_adj = torch.sigmoid(model.dc(alpha_mu))
masked_alpha_adj = alpha_adj * data['sub_adj']
flatten_alpha_adj = masked_alpha_adj.flatten(1)
for sparsity in np.arange(0, 1, 0.05):
topk = torch.round(data['sub_adj'].sum((1,2)) * sparsity).long().unsqueeze(-1)
threshold = torch.gather(flatten_alpha_adj.sort(1,descending=True).values, 1, topk)
threshold = torch.maximum(threshold, torch.ones_like(threshold)*1E-6)
topk_alpha_adj = (flatten_alpha_adj > threshold).float().view(data['sub_adj'].shape)
alpha_logits = classifier(data['feat'], topk_alpha_adj)[0]
alpha_log_probs = F.log_softmax(alpha_logits, dim=1)
results += [{
"sparsity": sparsity,
"alpha_topk": topk_alpha_adj.sum((1,2)).mean().item()/2,
"alpha_sparsity": (topk_alpha_adj.sum((1,2))/data['sub_adj'].sum((1,2))).mean().item(),
"alpha_gt_acc": (torch.argmax(alpha_logits,axis=1) == labels).float().mean().item(),
"alpha_pred_acc": (torch.argmax(alpha_logits,axis=1) == pred_labels).float().mean().item(),
"alpha_kld": F.kl_div(alpha_log_probs, org_probs, reduction='batchmean').item()
}]
columns = results[0].keys()
df = pd.DataFrame(results, columns = columns)
df.to_csv(os.path.join('explanation', args.output, 'results.csv'))
print(df)
if args.plot_info_flow:
print("Calculating information flow...")
with torch.no_grad():
infos = [
[
- causaleffect.joint_uncond_singledim(
ceparams, model.dc, classifier,
data['sub_adj'][idx], data['feat'][idx],
dim, act=torch.sigmoid, device=device
)[0] for dim in range(ceparams['z_dim'])
] for idx in tqdm(range(data['feat'].shape[0]))
]
infos = torch.tensor(infos, device=device)
infos = F.normalize(infos, p=1, dim=1)
print(infos.mean(0))
results = []
for info in infos:
for dim in range(ceparams['z_dim']):
results += [{'dim': dim+1, 'info': info[dim].item()}]
df = pd.DataFrame(results, columns = results[0].keys())
df.to_csv(os.path.join('explanation', args.output, 'info_flow.csv'))
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
colors = ["red", "blue", "orange", "green"]
        customPalette = sns.color_palette(colors)
        sns.set_palette(customPalette)
matplotlib.rcParams.update({'font.size': 16})
plt.rcParams["font.family"] = "Times New Roman"
f = plt.figure(figsize=(7,5))
ax = sns.barplot(data=df, x='dim', y='info', palette=customPalette)
plt.xlabel('Z [i]')
plt.ylabel('Information Measurements')
f.savefig(os.path.join('explanation', args.output, 'info_flow.pdf'))
plt.show()
if __name__ == "__main__":
main()
| 49.617476
| 174
| 0.60083
|
c639fb5e6a696e82c5f9543ad52f343753589979
| 13,614
|
py
|
Python
|
aerostructures/number_formatting/field_writer_8.py
|
joanmasco/aerostructures
|
4dcf598a126d7e419e08d518d552861744b48bcd
|
[
"Apache-2.0"
] | 5
|
2019-06-22T15:56:04.000Z
|
2021-09-12T15:31:55.000Z
|
aerostructures/number_formatting/field_writer_8.py
|
joanmasco/aerostructures
|
4dcf598a126d7e419e08d518d552861744b48bcd
|
[
"Apache-2.0"
] | null | null | null |
aerostructures/number_formatting/field_writer_8.py
|
joanmasco/aerostructures
|
4dcf598a126d7e419e08d518d552861744b48bcd
|
[
"Apache-2.0"
] | 1
|
2018-11-16T08:14:18.000Z
|
2018-11-16T08:14:18.000Z
|
# -*- coding: utf-8 -*-
"""
Modified by Joan Mas Colomer,
from the code written by the pyNastran Developers.
Copyright (c) 2017 pyNastran Developers. All rights reserved.
Copyright (c) 2018, ONERA
All rights reserved.
Copyright (c) 2018, ISAE
All rights reserved.
Copyright (c) 2018, Joan Mas Colomer
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. c. Neither the name of the pyNastran developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Defines functions for single precision 8 character field writing.
"""
from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
from six import string_types, integer_types
from six.moves import range
import sys
from numpy import float32, isnan
def set_string8_blank_if_default(value, default):
"""helper method for writing BDFs"""
val = set_blank_if_default(value, default)
if val is None:
return ' '
return '%8s' % val
def is_same(value1, value2):
"""
Checks to see if 2 values are the same
.. note:: this method is used by almost every card when printing
"""
if isinstance(value1, string_types) or value1 is None:
return True if value1 == value2 else False
if value1 == value2:
return True
return False
def set_blank_if_default(value, default):
"""
Used when setting the output data of a card to clear default values
Parameters
----------
value : int/float/str
the field value the may be set to None (blank)
if value=default, the default value for the field
default : int/float/str
the default value
.. note:: this method is used by almost every card when printing
"""
if isinstance(value, (float, float32)) and isnan(value):
return None
return None if is_same(value, default) else value
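# Illustrative usage sketch (not part of the original module): values equal to their
# default are blanked so the printed card stays compact. The numbers are made up.
def _example_blank_if_default():
    assert set_blank_if_default(0.0, 0.0) is None   # default value -> blank field
    assert set_blank_if_default(2.5, 0.0) == 2.5    # non-default value is kept
    return True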
def set_default_if_blank(value, default):
"""
Used when initializing a card and the default value isn't set
Used on PBARL
"""
return default if value is None or value == '' else value
def print_scientific_8(value):
"""
Prints a value in 8-character scientific notation.
    This is a sub-method and shouldn't typically be called directly.
    .. seealso:: :func:`print_float_8` for a better method
"""
if value == 0.0:
return '%8s' % '0.'
python_value = '%8.11e' % value
svalue, sexponent = python_value.strip().split('e')
exponent = int(sexponent) # removes 0s
sign = '-' if abs(value) < 1. else '+'
# the exponent will be added later...
exp2 = str(exponent).strip('-+')
value2 = float(svalue)
leftover = 4 - len(exp2)
if value < 0:
fmt = "%%1.%sf" % (leftover - 1)
else:
fmt = "%%1.%sf" % leftover
svalue3 = fmt % value2
svalue4 = svalue3.strip('0')
field = "%8s" % (svalue4 + 'E' + sign + exp2)
return field
def print_float_8(value):
"""
Prints a float in nastran 8-character width syntax using the
    highest precision possible.
"""
if isnan(value):
return ' '
elif value == 0.0:
return '%8s' % '0.'
elif value > 0.: # positive, not perfect...
if value < 5e-8:
field = print_scientific_8(value)
return field
elif value < 0.0001:
field = print_scientific_8(value)
field2 = "%8.7f" % value # small value
field2 = field2.strip('0 ')
field1 = field
if field2 == '.':
return print_scientific_8(value)
if len(field2) <= 8 and float(field1) == float(field2):
field = field2
field = field.strip(' 0')
elif value < 0.1:
field = "%8.7f" % value
elif value < 1.:
field = "%8.7f" % value # same as before...
elif value < 10.:
field = "%8.6f" % value
elif value < 100.:
field = "%8.5f" % value
elif value < 1000.:
field = "%8.4f" % value
elif value < 10000.:
field = "%8.3f" % value
elif value < 100000.:
field = "%8.2f" % value
elif value < 1000000.:
field = "%8.1f" % value
else: # big value
field = "%8.1f" % value
if field.index('.') < 8:
field = '%8.1f' % round(value)
field = field[0:8]
#assert '.' != field[0], field
else:
field = print_scientific_8(value)
return field
else:
if value > -5e-7:
field = print_scientific_8(value)
return field
elif value > -0.001: # -0.001
field = print_scientific_8(value)
field2 = "%8.6f" % value # small value
field2 = field2.strip('0 ')
# get rid of the first minus sign, add it on afterwards
field1 = field
if len(field2) <= 8 and float(field1) == float(field2):
field = field2.rstrip(' 0')
field = field.replace('-0.', '-.')
elif value > -0.1:
# -0.01 >x>-0.1...should be 5 (maybe scientific...)
field = "%8.6f" % value
field = field.replace('-0.', '-.')
elif value > -1.:
# -0.1 >x>-1.....should be 6, but the baseline 0 is kept...
field = "%8.6f" % value
field = field.replace('-0.', '-.')
elif value > -10.:
field = "%8.5f" % value # -1 >x>-10
elif value > -100.:
field = "%8.4f" % value # -10 >x>-100
elif value > -1000.:
field = "%8.3f" % value # -100 >x>-1000
elif value > -10000.:
field = "%8.2f" % value # -1000 >x>-10000
elif value > -100000.:
field = "%8.1f" % value # -10000>x>-100000
else:
field = "%8.1f" % value
try:
ifield = field.index('.')
except ValueError:
raise ValueError('error printing float; cant find decimal; field=%r value=%s' % (field, value))
if ifield < 8:
field = '%7s.' % int(round(value, 0))
#assert '.' != field[0], field
else:
field = print_scientific_8(value)
return field
field = field.strip(' 0')
field = '%8s' % field
#assert len(field) == 8, ('value=|%s| field=|%s| is not 8 characters '
# 'long, its %s' % (value, field, len(field)))
return field
def print_float_or_int_8(value):
"""
Prints a 8-character width field
Parameters
----------
value : int/float
the value to print
Returns
-------
field : str
an 8-character string
"""
if isinstance(value, (float, float32)):
field = print_float_8(value)
elif isinstance(value, integer_types):
field = "%8i" % value
else:
msg = 'Invalid Type: value=%r type=%s' % (value, type(value))
        raise TypeError(msg)
    return field
def print_field_8(value):
"""
Prints a 8-character width field
Parameters
----------
value : int/float/str
the value to print
Returns
-------
field : str
an 8-character string
"""
if isinstance(value, integer_types):
field = "%8i" % value
elif isinstance(value, (float, float32)):
field = print_float_8(value)
elif value is None:
field = " "
else:
field = "%8s" % value
if len(field) != 8:
msg = 'field=%r is not 8 characters long...raw_value=%r' % (field, value)
raise RuntimeError(msg)
return field
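# Illustrative usage sketch (not part of the original module): every value routed
# through print_field_8 comes back as an exactly 8-character string, which is what
# keeps a small-field card aligned. The sample values below are made up.
def _example_field_widths():
    for value in (3, 0.0, 1234.5678, -0.000123, 'GRID'):
        assert len(print_field_8(value)) == 8
    return True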
def print_card_8(fields):
"""
Prints a nastran-style card with 8-character width fields.
Parameters
----------
fields : List[int/float/str/None]
all the fields in the BDF card (no trailing Nones)
Returns
-------
card : str
string representation of the card in small field format
.. note:: An internal field value of None or '' will be treated as
a blank field
.. note:: A small field format follows the 8-8-8-8-8-8-8-8 = 80
format where the first 8 is the card name or
blank (continuation). The last 8-character field indicates
an optional continuation, but because it's a left-justified
        unnecessary field, print_card doesn't use it.
.. code-block:: python
>>> fields = ['DUMMY', 1, 2, 3, None, 4, 5, 6, 7, 8.]
>>> print_card_8(fields)
DUMMY 1 2 3 4 5 6 7
8.
"""
try:
out = '%-8s' % fields[0]
except:
print("ERROR! fields=%s" % fields)
sys.stdout.flush()
raise
for i in range(1, len(fields)):
field = fields[i]
try:
out += print_field_8(field)
except:
print("bad fields = %s" % fields)
raise
if i % 8 == 0: # allow 1+8 fields per line
out = out.rstrip(' ')
if out[-1] == '\n': # empty line
out += '+'
out += '\n '
out = out.rstrip(' \n+') + '\n' # removes blank lines at the end of cards
return out
def print_int_card(fields):
"""
Prints a nastran-style card with 8-character width fields.
All fields (other than the first field) must be integers.
This is used to speed up SET cards.
Parameters
----------
fields : List[int/float/str/None]
The list of fields to write to a nastran card.
.. warning::
Blanks are not allowed!
Floats and strings are not allowed.
.. code-block:: python
fields = ['SET', 1, 2, 3, 4, 5, 6, ..., n]
"""
try:
out = '%-8s' % fields[0]
except:
print("ERROR! fields=%s" % fields)
sys.stdout.flush()
raise
for i in range(1, len(fields)):
field = fields[i]
try:
out += "%8i" % field # balks if you have None or string fields
except:
print("bad fields = %s" % fields)
raise
if i % 8 == 0: # allow 1+8 fields per line
out = out.rstrip(' ')
out += '\n '
out = out.rstrip(' \n+') + '\n' # removes blank lines at the end of cards
return out
def print_int_card_blocks(fields_blocks):
"""
Prints a nastran-style card with 8-character width fields.
All fields other than the card name must be written in "block" format.
This is used to speed up SET cards.
Parameters
----------
fields_blocks : List[int]
The fields written in "block" notation.
Returns
-------
msg : str
the field blocks as a 8-character width Nastran card
.. note:: Blanks are allowed in the False block.
.. code-block:: python
fields_blocks = [
'SET1',
[['a', 1.0, 3], False], # these are not all integers
[[1, 2, 3], True], # these are all integers
]
msg = print_int_card_blocks(fields_blocks)
print(msg)
>>> 'SET1 a 1. 3 1 2 3\n'
"""
card_name = fields_blocks[0]
try:
out = '%-8s' % card_name
except:
print("ERROR! fields_blocks=%s" % fields_blocks)
sys.stdout.flush()
raise
i = 0
for block in fields_blocks[1:]:
(fields, is_all_ints) = block
if is_all_ints is True:
for field in fields:
out += "%8i" % field
i += 1
if i == 8: # allow 1+8 fields per line
out += '\n '
i = 0
elif is_all_ints is False:
for field in fields:
out += print_field_8(field)
i += 1
if i == 8: # allow 1+8 fields per line
out += '\n '
i = 0
else:
raise SyntaxError('is_all_ints must be a boolean. is_all_ints=%r' % is_all_ints)
out = out.rstrip(' \n') + '\n' # removes blank lines at the end of cards
return out
| 32.491647
| 756
| 0.555237
|
750edb96d36eaed2336e31f32eb77380a76d4970
| 505
|
py
|
Python
|
room_assistance/indico_room_assistance/__init__.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
room_assistance/indico_room_assistance/__init__.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
room_assistance/indico_room_assistance/__init__.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2022 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from indico.core import signals
from indico.util.i18n import make_bound_gettext
_ = make_bound_gettext('room_assistance')
@signals.core.import_tasks.connect
def _import_tasks(sender, **kwargs):
import indico_room_assistance.tasks # noqa: F401
| 28.055556
| 65
| 0.774257
|
10fa3bfd9d23465dc410be3fa2525c900d5475a9
| 2,005
|
py
|
Python
|
nsc/spikes.py
|
GuilhermeToso/masters-project
|
01d5acfddaedb3cbf7fa9247a88108530547e155
|
[
"MIT"
] | 1
|
2021-08-01T20:13:15.000Z
|
2021-08-01T20:13:15.000Z
|
nsc/spikes.py
|
GuilhermeToso/masters-project
|
01d5acfddaedb3cbf7fa9247a88108530547e155
|
[
"MIT"
] | null | null | null |
nsc/spikes.py
|
GuilhermeToso/masters-project
|
01d5acfddaedb3cbf7fa9247a88108530547e155
|
[
"MIT"
] | null | null | null |
"""
Author: Guilherme M. Toso
Title: spikes.py
Project: Semi-Supervised Learning Using Competition for Neurons' Synchronization
Description:
    This module contains several methods that can perform some trajectory analyses
"""
""" Dependencies """
import numpy as np
__all__ = ['discrete_potential', 'get_peak_ind', 'get_periods', 'get_spikes_periods']
def discrete_potential(function, threshold):
"""
Returns the trajectories array as an array with
1 (V > threshold) and 0 (otherwise)
Args:
function: array of the potential trajectories in time of all neurons
(shape: neurons, time)
        threshold: value that indicates there is a spike
"""
return np.where(function >= threshold, 1, 0)
def get_peak_ind(discrete_array):
"""
Get the indexes of the potential peaks
Args:
        discrete_array: binarized (0/1) trajectory of a single neuron
"""
indexes = [j for j in range(discrete_array.size) if discrete_array[j-1]==0 and\
discrete_array[j]==1]
return indexes
def get_periods(indexes, step):
"""
    Return the inter-spike periods of the trajectory of a neuron
    Args:
        indexes: indexes of the peaks of the neuron
        step: time step
"""
period = np.array([indexes[j+1] - indexes[j] for j in range(len(indexes) -1)])*step
return period
def get_spikes_periods(function, threshold, step):
""" Get the spike indexes and the periods of all neurons """
spikes = discrete_potential(function, threshold)
index_list = []
periods_list = []
for neuron in range(len(function)):
indexes = get_peak_ind(spikes[neuron])
periods = get_periods(indexes, step)
if len(indexes) > 1 and len(periods) > 0:
index_list.append(indexes)
periods_list.append(periods)
return index_list, periods_list
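# Illustrative usage sketch (not part of the original module): end-to-end use of the
# helpers above on a made-up two-neuron "potential" array sampled with time step dt.
def _example_spike_analysis():
    dt = 0.1
    time = np.arange(0, 100, dt)
    potentials = np.vstack([30 * np.sin(2 * np.pi * 0.05 * time),
                            30 * np.sin(2 * np.pi * 0.08 * time)])
    indexes, periods = get_spikes_periods(potentials, threshold=20.0, step=dt)
    # indexes[i]: samples where neuron i crosses the threshold upward
    # periods[i]: the corresponding inter-spike intervals (same unit as dt)
    return indexes, periods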
| 21.329787
| 87
| 0.638903
|
95a8975a3a4f3abff47702d545ce50f29a48a3be
| 33,208
|
py
|
Python
|
pyleecan/Classes/OutGeo.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | 4
|
2017-11-27T10:14:34.000Z
|
2018-09-20T11:30:32.000Z
|
pyleecan/Classes/OutGeo.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Classes/OutGeo.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Output/OutGeo.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Output/OutGeo
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from copy import deepcopy
from ._frozen import FrozenClass
from numpy import isnan
from ._check import InitUnKnowClassError
class OutGeo(FrozenClass):
"""Gather the geometrical and the global outputs"""
VERSION = 1
# generic save method is available in all object
save = save
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
stator=None,
rotor=None,
Wgap_mec=None,
Wgap_mag=None,
Rgap_mec=None,
Lgap=None,
logger_name="Pyleecan.OutGeo",
angle_rotor_initial=None,
rot_dir=None,
per_a=None,
is_antiper_a=None,
per_t_S=None,
is_antiper_t_S=None,
axes_dict=None,
per_t_R=None,
is_antiper_t_R=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "stator" in list(init_dict.keys()):
stator = init_dict["stator"]
if "rotor" in list(init_dict.keys()):
rotor = init_dict["rotor"]
if "Wgap_mec" in list(init_dict.keys()):
Wgap_mec = init_dict["Wgap_mec"]
if "Wgap_mag" in list(init_dict.keys()):
Wgap_mag = init_dict["Wgap_mag"]
if "Rgap_mec" in list(init_dict.keys()):
Rgap_mec = init_dict["Rgap_mec"]
if "Lgap" in list(init_dict.keys()):
Lgap = init_dict["Lgap"]
if "logger_name" in list(init_dict.keys()):
logger_name = init_dict["logger_name"]
if "angle_rotor_initial" in list(init_dict.keys()):
angle_rotor_initial = init_dict["angle_rotor_initial"]
if "rot_dir" in list(init_dict.keys()):
rot_dir = init_dict["rot_dir"]
if "per_a" in list(init_dict.keys()):
per_a = init_dict["per_a"]
if "is_antiper_a" in list(init_dict.keys()):
is_antiper_a = init_dict["is_antiper_a"]
if "per_t_S" in list(init_dict.keys()):
per_t_S = init_dict["per_t_S"]
if "is_antiper_t_S" in list(init_dict.keys()):
is_antiper_t_S = init_dict["is_antiper_t_S"]
if "axes_dict" in list(init_dict.keys()):
axes_dict = init_dict["axes_dict"]
if "per_t_R" in list(init_dict.keys()):
per_t_R = init_dict["per_t_R"]
if "is_antiper_t_R" in list(init_dict.keys()):
is_antiper_t_R = init_dict["is_antiper_t_R"]
        # Set the properties (value check and conversion are done in setter)
self.parent = None
self.stator = stator
self.rotor = rotor
self.Wgap_mec = Wgap_mec
self.Wgap_mag = Wgap_mag
self.Rgap_mec = Rgap_mec
self.Lgap = Lgap
self.logger_name = logger_name
self.angle_rotor_initial = angle_rotor_initial
self.rot_dir = rot_dir
self.per_a = per_a
self.is_antiper_a = is_antiper_a
self.per_t_S = per_t_S
self.is_antiper_t_S = is_antiper_t_S
self.axes_dict = axes_dict
self.per_t_R = per_t_R
self.is_antiper_t_R = is_antiper_t_R
# The class is frozen, for now it's impossible to add new properties
self._freeze()
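    # Illustrative sketch (not part of the generated class): the three construction
    # modes described in the docstring above, with made-up values and path.
    #     geo = OutGeo(Wgap_mec=1e-3, Rgap_mec=0.05)      # keyword arguments
    #     geo2 = OutGeo(init_dict=geo.as_dict())          # from a property dict
    #     geo3 = OutGeo(init_str="path/to/OutGeo.json")   # from a saved file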
def __str__(self):
"""Convert this object in a readeable string (for print)"""
OutGeo_str = ""
if self.parent is None:
OutGeo_str += "parent = None " + linesep
else:
OutGeo_str += "parent = " + str(type(self.parent)) + " object" + linesep
if self.stator is not None:
tmp = self.stator.__str__().replace(linesep, linesep + "\t").rstrip("\t")
OutGeo_str += "stator = " + tmp
else:
OutGeo_str += "stator = None" + linesep + linesep
if self.rotor is not None:
tmp = self.rotor.__str__().replace(linesep, linesep + "\t").rstrip("\t")
OutGeo_str += "rotor = " + tmp
else:
OutGeo_str += "rotor = None" + linesep + linesep
OutGeo_str += "Wgap_mec = " + str(self.Wgap_mec) + linesep
OutGeo_str += "Wgap_mag = " + str(self.Wgap_mag) + linesep
OutGeo_str += "Rgap_mec = " + str(self.Rgap_mec) + linesep
OutGeo_str += "Lgap = " + str(self.Lgap) + linesep
OutGeo_str += 'logger_name = "' + str(self.logger_name) + '"' + linesep
OutGeo_str += "angle_rotor_initial = " + str(self.angle_rotor_initial) + linesep
OutGeo_str += "rot_dir = " + str(self.rot_dir) + linesep
OutGeo_str += "per_a = " + str(self.per_a) + linesep
OutGeo_str += "is_antiper_a = " + str(self.is_antiper_a) + linesep
OutGeo_str += "per_t_S = " + str(self.per_t_S) + linesep
OutGeo_str += "is_antiper_t_S = " + str(self.is_antiper_t_S) + linesep
OutGeo_str += "axes_dict = " + str(self.axes_dict) + linesep + linesep
OutGeo_str += "per_t_R = " + str(self.per_t_R) + linesep
OutGeo_str += "is_antiper_t_R = " + str(self.is_antiper_t_R) + linesep
return OutGeo_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.stator != self.stator:
return False
if other.rotor != self.rotor:
return False
if other.Wgap_mec != self.Wgap_mec:
return False
if other.Wgap_mag != self.Wgap_mag:
return False
if other.Rgap_mec != self.Rgap_mec:
return False
if other.Lgap != self.Lgap:
return False
if other.logger_name != self.logger_name:
return False
if other.angle_rotor_initial != self.angle_rotor_initial:
return False
if other.rot_dir != self.rot_dir:
return False
if other.per_a != self.per_a:
return False
if other.is_antiper_a != self.is_antiper_a:
return False
if other.per_t_S != self.per_t_S:
return False
if other.is_antiper_t_S != self.is_antiper_t_S:
return False
if other.axes_dict != self.axes_dict:
return False
if other.per_t_R != self.per_t_R:
return False
if other.is_antiper_t_R != self.is_antiper_t_R:
return False
return True
def compare(self, other, name="self", ignore_list=None, is_add_value=False):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (other.stator is None and self.stator is not None) or (
other.stator is not None and self.stator is None
):
diff_list.append(name + ".stator None mismatch")
elif self.stator is not None:
diff_list.extend(
self.stator.compare(
other.stator,
name=name + ".stator",
ignore_list=ignore_list,
is_add_value=is_add_value,
)
)
if (other.rotor is None and self.rotor is not None) or (
other.rotor is not None and self.rotor is None
):
diff_list.append(name + ".rotor None mismatch")
elif self.rotor is not None:
diff_list.extend(
self.rotor.compare(
other.rotor,
name=name + ".rotor",
ignore_list=ignore_list,
is_add_value=is_add_value,
)
)
if (
other._Wgap_mec is not None
and self._Wgap_mec is not None
and isnan(other._Wgap_mec)
and isnan(self._Wgap_mec)
):
pass
elif other._Wgap_mec != self._Wgap_mec:
if is_add_value:
val_str = (
" (self="
+ str(self._Wgap_mec)
+ ", other="
+ str(other._Wgap_mec)
+ ")"
)
diff_list.append(name + ".Wgap_mec" + val_str)
else:
diff_list.append(name + ".Wgap_mec")
if (
other._Wgap_mag is not None
and self._Wgap_mag is not None
and isnan(other._Wgap_mag)
and isnan(self._Wgap_mag)
):
pass
elif other._Wgap_mag != self._Wgap_mag:
if is_add_value:
val_str = (
" (self="
+ str(self._Wgap_mag)
+ ", other="
+ str(other._Wgap_mag)
+ ")"
)
diff_list.append(name + ".Wgap_mag" + val_str)
else:
diff_list.append(name + ".Wgap_mag")
if (
other._Rgap_mec is not None
and self._Rgap_mec is not None
and isnan(other._Rgap_mec)
and isnan(self._Rgap_mec)
):
pass
elif other._Rgap_mec != self._Rgap_mec:
if is_add_value:
val_str = (
" (self="
+ str(self._Rgap_mec)
+ ", other="
+ str(other._Rgap_mec)
+ ")"
)
diff_list.append(name + ".Rgap_mec" + val_str)
else:
diff_list.append(name + ".Rgap_mec")
if (
other._Lgap is not None
and self._Lgap is not None
and isnan(other._Lgap)
and isnan(self._Lgap)
):
pass
elif other._Lgap != self._Lgap:
if is_add_value:
val_str = (
" (self=" + str(self._Lgap) + ", other=" + str(other._Lgap) + ")"
)
diff_list.append(name + ".Lgap" + val_str)
else:
diff_list.append(name + ".Lgap")
if other._logger_name != self._logger_name:
if is_add_value:
val_str = (
" (self="
+ str(self._logger_name)
+ ", other="
+ str(other._logger_name)
+ ")"
)
diff_list.append(name + ".logger_name" + val_str)
else:
diff_list.append(name + ".logger_name")
if (
other._angle_rotor_initial is not None
and self._angle_rotor_initial is not None
and isnan(other._angle_rotor_initial)
and isnan(self._angle_rotor_initial)
):
pass
elif other._angle_rotor_initial != self._angle_rotor_initial:
if is_add_value:
val_str = (
" (self="
+ str(self._angle_rotor_initial)
+ ", other="
+ str(other._angle_rotor_initial)
+ ")"
)
diff_list.append(name + ".angle_rotor_initial" + val_str)
else:
diff_list.append(name + ".angle_rotor_initial")
if other._rot_dir != self._rot_dir:
if is_add_value:
val_str = (
" (self="
+ str(self._rot_dir)
+ ", other="
+ str(other._rot_dir)
+ ")"
)
diff_list.append(name + ".rot_dir" + val_str)
else:
diff_list.append(name + ".rot_dir")
if other._per_a != self._per_a:
if is_add_value:
val_str = (
" (self=" + str(self._per_a) + ", other=" + str(other._per_a) + ")"
)
diff_list.append(name + ".per_a" + val_str)
else:
diff_list.append(name + ".per_a")
if other._is_antiper_a != self._is_antiper_a:
if is_add_value:
val_str = (
" (self="
+ str(self._is_antiper_a)
+ ", other="
+ str(other._is_antiper_a)
+ ")"
)
diff_list.append(name + ".is_antiper_a" + val_str)
else:
diff_list.append(name + ".is_antiper_a")
if other._per_t_S != self._per_t_S:
if is_add_value:
val_str = (
" (self="
+ str(self._per_t_S)
+ ", other="
+ str(other._per_t_S)
+ ")"
)
diff_list.append(name + ".per_t_S" + val_str)
else:
diff_list.append(name + ".per_t_S")
if other._is_antiper_t_S != self._is_antiper_t_S:
if is_add_value:
val_str = (
" (self="
+ str(self._is_antiper_t_S)
+ ", other="
+ str(other._is_antiper_t_S)
+ ")"
)
diff_list.append(name + ".is_antiper_t_S" + val_str)
else:
diff_list.append(name + ".is_antiper_t_S")
if (other.axes_dict is None and self.axes_dict is not None) or (
other.axes_dict is not None and self.axes_dict is None
):
diff_list.append(name + ".axes_dict None mismatch")
elif self.axes_dict is None:
pass
elif len(other.axes_dict) != len(self.axes_dict):
diff_list.append("len(" + name + "axes_dict)")
else:
for key in self.axes_dict:
diff_list.extend(
self.axes_dict[key].compare(
other.axes_dict[key],
name=name + ".axes_dict[" + str(key) + "]",
ignore_list=ignore_list,
is_add_value=is_add_value,
)
)
if other._per_t_R != self._per_t_R:
if is_add_value:
val_str = (
" (self="
+ str(self._per_t_R)
+ ", other="
+ str(other._per_t_R)
+ ")"
)
diff_list.append(name + ".per_t_R" + val_str)
else:
diff_list.append(name + ".per_t_R")
if other._is_antiper_t_R != self._is_antiper_t_R:
if is_add_value:
val_str = (
" (self="
+ str(self._is_antiper_t_R)
+ ", other="
+ str(other._is_antiper_t_R)
+ ")"
)
diff_list.append(name + ".is_antiper_t_R" + val_str)
else:
diff_list.append(name + ".is_antiper_t_R")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
S += getsizeof(self.stator)
S += getsizeof(self.rotor)
S += getsizeof(self.Wgap_mec)
S += getsizeof(self.Wgap_mag)
S += getsizeof(self.Rgap_mec)
S += getsizeof(self.Lgap)
S += getsizeof(self.logger_name)
S += getsizeof(self.angle_rotor_initial)
S += getsizeof(self.rot_dir)
S += getsizeof(self.per_a)
S += getsizeof(self.is_antiper_a)
S += getsizeof(self.per_t_S)
S += getsizeof(self.is_antiper_t_S)
if self.axes_dict is not None:
for key, value in self.axes_dict.items():
S += getsizeof(value) + getsizeof(key)
S += getsizeof(self.per_t_R)
S += getsizeof(self.is_antiper_t_R)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
        Convert this object into a json serializable dict (can be used in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
OutGeo_dict = dict()
if self.stator is None:
OutGeo_dict["stator"] = None
else:
OutGeo_dict["stator"] = self.stator.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.rotor is None:
OutGeo_dict["rotor"] = None
else:
OutGeo_dict["rotor"] = self.rotor.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OutGeo_dict["Wgap_mec"] = self.Wgap_mec
OutGeo_dict["Wgap_mag"] = self.Wgap_mag
OutGeo_dict["Rgap_mec"] = self.Rgap_mec
OutGeo_dict["Lgap"] = self.Lgap
OutGeo_dict["logger_name"] = self.logger_name
OutGeo_dict["angle_rotor_initial"] = self.angle_rotor_initial
OutGeo_dict["rot_dir"] = self.rot_dir
OutGeo_dict["per_a"] = self.per_a
OutGeo_dict["is_antiper_a"] = self.is_antiper_a
OutGeo_dict["per_t_S"] = self.per_t_S
OutGeo_dict["is_antiper_t_S"] = self.is_antiper_t_S
if self.axes_dict is None:
OutGeo_dict["axes_dict"] = None
else:
OutGeo_dict["axes_dict"] = dict()
for key, obj in self.axes_dict.items():
if obj is not None:
OutGeo_dict["axes_dict"][key] = obj.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
else:
OutGeo_dict["axes_dict"][key] = None
OutGeo_dict["per_t_R"] = self.per_t_R
OutGeo_dict["is_antiper_t_R"] = self.is_antiper_t_R
# The class name is added to the dict for deserialisation purpose
OutGeo_dict["__class__"] = "OutGeo"
return OutGeo_dict
def copy(self):
"""Creates a deepcopy of the object"""
# Handle deepcopy of all the properties
if self.stator is None:
stator_val = None
else:
stator_val = self.stator.copy()
if self.rotor is None:
rotor_val = None
else:
rotor_val = self.rotor.copy()
Wgap_mec_val = self.Wgap_mec
Wgap_mag_val = self.Wgap_mag
Rgap_mec_val = self.Rgap_mec
Lgap_val = self.Lgap
logger_name_val = self.logger_name
angle_rotor_initial_val = self.angle_rotor_initial
rot_dir_val = self.rot_dir
per_a_val = self.per_a
is_antiper_a_val = self.is_antiper_a
per_t_S_val = self.per_t_S
is_antiper_t_S_val = self.is_antiper_t_S
if self.axes_dict is None:
axes_dict_val = None
else:
axes_dict_val = dict()
for key, obj in self.axes_dict.items():
axes_dict_val[key] = obj.copy()
per_t_R_val = self.per_t_R
is_antiper_t_R_val = self.is_antiper_t_R
# Creates new object of the same type with the copied properties
obj_copy = type(self)(
stator=stator_val,
rotor=rotor_val,
Wgap_mec=Wgap_mec_val,
Wgap_mag=Wgap_mag_val,
Rgap_mec=Rgap_mec_val,
Lgap=Lgap_val,
logger_name=logger_name_val,
angle_rotor_initial=angle_rotor_initial_val,
rot_dir=rot_dir_val,
per_a=per_a_val,
is_antiper_a=is_antiper_a_val,
per_t_S=per_t_S_val,
is_antiper_t_S=is_antiper_t_S_val,
axes_dict=axes_dict_val,
per_t_R=per_t_R_val,
is_antiper_t_R=is_antiper_t_R_val,
)
return obj_copy
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
if self.stator is not None:
self.stator._set_None()
if self.rotor is not None:
self.rotor._set_None()
self.Wgap_mec = None
self.Wgap_mag = None
self.Rgap_mec = None
self.Lgap = None
self.logger_name = None
self.angle_rotor_initial = None
self.rot_dir = None
self.per_a = None
self.is_antiper_a = None
self.per_t_S = None
self.is_antiper_t_S = None
self.axes_dict = None
self.per_t_R = None
self.is_antiper_t_R = None
def _get_stator(self):
"""getter of stator"""
return self._stator
def _set_stator(self, value):
"""setter of stator"""
if isinstance(value, str): # Load from file
try:
value = load_init_dict(value)[1]
except Exception as e:
self.get_logger().error(
"Error while loading " + value + ", setting None instead"
)
value = None
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"pyleecan.Classes", value.get("__class__"), "stator"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
OutGeoLam = import_class("pyleecan.Classes", "OutGeoLam", "stator")
value = OutGeoLam()
check_var("stator", value, "OutGeoLam")
self._stator = value
if self._stator is not None:
self._stator.parent = self
stator = property(
fget=_get_stator,
fset=_set_stator,
doc=u"""Geometry output of the stator
:Type: OutGeoLam
""",
)
def _get_rotor(self):
"""getter of rotor"""
return self._rotor
def _set_rotor(self, value):
"""setter of rotor"""
if isinstance(value, str): # Load from file
try:
value = load_init_dict(value)[1]
except Exception as e:
self.get_logger().error(
"Error while loading " + value + ", setting None instead"
)
value = None
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"pyleecan.Classes", value.get("__class__"), "rotor"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
OutGeoLam = import_class("pyleecan.Classes", "OutGeoLam", "rotor")
value = OutGeoLam()
check_var("rotor", value, "OutGeoLam")
self._rotor = value
if self._rotor is not None:
self._rotor.parent = self
rotor = property(
fget=_get_rotor,
fset=_set_rotor,
doc=u"""Geometry output of the rotor
:Type: OutGeoLam
""",
)
def _get_Wgap_mec(self):
"""getter of Wgap_mec"""
return self._Wgap_mec
def _set_Wgap_mec(self, value):
"""setter of Wgap_mec"""
check_var("Wgap_mec", value, "float")
self._Wgap_mec = value
Wgap_mec = property(
fget=_get_Wgap_mec,
fset=_set_Wgap_mec,
doc=u"""mechanical airgap width (minimal distance between the lamination including magnet)
:Type: float
""",
)
def _get_Wgap_mag(self):
"""getter of Wgap_mag"""
return self._Wgap_mag
def _set_Wgap_mag(self, value):
"""setter of Wgap_mag"""
check_var("Wgap_mag", value, "float")
self._Wgap_mag = value
Wgap_mag = property(
fget=_get_Wgap_mag,
fset=_set_Wgap_mag,
doc=u"""the magnetic airgap width (distance beetween the two Laminations bore radius)
:Type: float
""",
)
def _get_Rgap_mec(self):
"""getter of Rgap_mec"""
return self._Rgap_mec
def _set_Rgap_mec(self, value):
"""setter of Rgap_mec"""
check_var("Rgap_mec", value, "float")
self._Rgap_mec = value
Rgap_mec = property(
fget=_get_Rgap_mec,
fset=_set_Rgap_mec,
doc=u"""radius of the center of the mecanical airgap
:Type: float
""",
)
def _get_Lgap(self):
"""getter of Lgap"""
return self._Lgap
def _set_Lgap(self, value):
"""setter of Lgap"""
check_var("Lgap", value, "float")
self._Lgap = value
Lgap = property(
fget=_get_Lgap,
fset=_set_Lgap,
doc=u"""Airgap active length
:Type: float
""",
)
def _get_logger_name(self):
"""getter of logger_name"""
return self._logger_name
def _set_logger_name(self, value):
"""setter of logger_name"""
check_var("logger_name", value, "str")
self._logger_name = value
logger_name = property(
fget=_get_logger_name,
fset=_set_logger_name,
doc=u"""Name of the logger to use
:Type: str
""",
)
def _get_angle_rotor_initial(self):
"""getter of angle_rotor_initial"""
return self._angle_rotor_initial
def _set_angle_rotor_initial(self, value):
"""setter of angle_rotor_initial"""
check_var("angle_rotor_initial", value, "float")
self._angle_rotor_initial = value
angle_rotor_initial = property(
fget=_get_angle_rotor_initial,
fset=_set_angle_rotor_initial,
doc=u"""Difference between the d axis angle of the stator and the rotor
:Type: float
""",
)
def _get_rot_dir(self):
"""getter of rot_dir"""
return self._rot_dir
def _set_rot_dir(self, value):
"""setter of rot_dir"""
check_var("rot_dir", value, "int", Vmin=-1, Vmax=1)
self._rot_dir = value
rot_dir = property(
fget=_get_rot_dir,
fset=_set_rot_dir,
doc=u"""Rotation direction of the rotor (rot_dir*N0, default value given by ROT_DIR_REF)
:Type: int
:min: -1
:max: 1
""",
)
def _get_per_a(self):
"""getter of per_a"""
return self._per_a
def _set_per_a(self, value):
"""setter of per_a"""
check_var("per_a", value, "int")
self._per_a = value
per_a = property(
fget=_get_per_a,
fset=_set_per_a,
doc=u"""Number of spatial periodicities of the machine
:Type: int
""",
)
def _get_is_antiper_a(self):
"""getter of is_antiper_a"""
return self._is_antiper_a
def _set_is_antiper_a(self, value):
"""setter of is_antiper_a"""
check_var("is_antiper_a", value, "bool")
self._is_antiper_a = value
is_antiper_a = property(
fget=_get_is_antiper_a,
fset=_set_is_antiper_a,
doc=u"""True if an spatial anti-periodicity is possible after the periodicities
:Type: bool
""",
)
def _get_per_t_S(self):
"""getter of per_t_S"""
return self._per_t_S
def _set_per_t_S(self, value):
"""setter of per_t_S"""
check_var("per_t_S", value, "int")
self._per_t_S = value
per_t_S = property(
fget=_get_per_t_S,
fset=_set_per_t_S,
doc=u"""Number of time periodicities of the machine in static referential
:Type: int
""",
)
def _get_is_antiper_t_S(self):
"""getter of is_antiper_t_S"""
return self._is_antiper_t_S
def _set_is_antiper_t_S(self, value):
"""setter of is_antiper_t_S"""
check_var("is_antiper_t_S", value, "bool")
self._is_antiper_t_S = value
is_antiper_t_S = property(
fget=_get_is_antiper_t_S,
fset=_set_is_antiper_t_S,
doc=u"""True if an time anti-periodicity is possible after the periodicities in static referential
:Type: bool
""",
)
def _get_axes_dict(self):
"""getter of axes_dict"""
if self._axes_dict is not None:
for key, obj in self._axes_dict.items():
if obj is not None:
obj.parent = self
return self._axes_dict
def _set_axes_dict(self, value):
"""setter of axes_dict"""
if type(value) is dict:
for key, obj in value.items():
if isinstance(obj, str): # Load from file
try:
obj = load_init_dict(obj)[1]
except Exception as e:
self.get_logger().error(
"Error while loading " + obj + ", setting None instead"
)
obj = None
value[key] = None
if type(obj) is dict:
class_obj = import_class(
"SciDataTool.Classes", obj.get("__class__"), "axes_dict"
)
value[key] = class_obj(init_dict=obj)
if type(value) is int and value == -1:
value = dict()
check_var("axes_dict", value, "{Data}")
self._axes_dict = value
axes_dict = property(
fget=_get_axes_dict,
fset=_set_axes_dict,
doc=u"""Dict containing axes data without periodicities used for plots and to have simulation full time/angle vectors
:Type: {SciDataTool.Classes.DataND.Data}
""",
)
def _get_per_t_R(self):
"""getter of per_t_R"""
return self._per_t_R
def _set_per_t_R(self, value):
"""setter of per_t_R"""
check_var("per_t_R", value, "int")
self._per_t_R = value
per_t_R = property(
fget=_get_per_t_R,
fset=_set_per_t_R,
doc=u"""Number of time periodicities of the machine in rotating referential
:Type: int
""",
)
def _get_is_antiper_t_R(self):
"""getter of is_antiper_t_R"""
return self._is_antiper_t_R
def _set_is_antiper_t_R(self, value):
"""setter of is_antiper_t_R"""
check_var("is_antiper_t_R", value, "bool")
self._is_antiper_t_R = value
is_antiper_t_R = property(
fget=_get_is_antiper_t_R,
fset=_set_is_antiper_t_R,
doc=u"""True if an time anti-periodicity is possible after the periodicities in rotating referential
:Type: bool
""",
)
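# Minimal usage sketch (illustrative only, not part of the generated class). It
# assumes the generated __init__ accepts the same keyword arguments that copy()
# passes above, as is the case for the pyleecan generated classes.
if __name__ == "__main__":
    demo = OutGeo(Wgap_mec=1e-3, Wgap_mag=1.5e-3, Rgap_mec=50e-3, Lgap=0.1)
    demo_copy = demo.copy()  # deep copy, independent of the original object
    assert demo_copy.Wgap_mec == demo.Wgap_mec
    demo_copy._set_None()  # reset every property of the copy to None
    assert demo_copy.Wgap_mec is None and demo.Wgap_mec == 1e-3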
| 35.365282
| 126
| 0.524211
|
0c220948ef7c290e701b848447420eef3ec408f8
| 686
|
py
|
Python
|
backend/app/app/schemas.py
|
kaldan007/dakje
|
4a6024f7256cedec02be86eeefe6b58589ecd6a9
|
[
"MIT"
] | null | null | null |
backend/app/app/schemas.py
|
kaldan007/dakje
|
4a6024f7256cedec02be86eeefe6b58589ecd6a9
|
[
"MIT"
] | null | null | null |
backend/app/app/schemas.py
|
kaldan007/dakje
|
4a6024f7256cedec02be86eeefe6b58589ecd6a9
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, validator
class Span(BaseModel):
start: int
end: int
@validator("*")
def span_must_not_be_neg(cls, v):
if v < 0:
raise ValueError("span shouldn't be negative")
return v
@validator("end")
def end_must_not_be_less_than_start(cls, v, values, **kwargs):
if "start" in values and v < values["start"]:
raise ValueError("Span end must not be less than start")
return v
class Text(BaseModel):
content: str
class Token(BaseModel):
form: str
pos: str
class Sentence(BaseModel):
content: str
sentence_lenght: int
verb_per_sentence: int
span: Span
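# Minimal usage sketch (illustrative only, assuming the pydantic v1 validator API
# imported above): the Span validators reject negative offsets and an end smaller
# than its start.
if __name__ == "__main__":
    from pydantic import ValidationError
    ok = Span(start=3, end=10)
    print(ok.dict())  # {'start': 3, 'end': 10}
    try:
        Span(start=10, end=3)  # rejected by end_must_not_be_less_than_start
    except ValidationError as err:
        print(err)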
| 19.6
| 68
| 0.631195
|
46ceb6257aa59e43dd18595d8b084969cd0c87c0
| 679
|
py
|
Python
|
tests/datatypes/__init__.py
|
Fogapod/edgedb-python
|
377805660e3455bef536412bd5467b435753b3a5
|
[
"Apache-2.0"
] | 214
|
2019-01-19T03:56:10.000Z
|
2022-03-31T01:37:33.000Z
|
tests/datatypes/__init__.py
|
Fogapod/edgedb-python
|
377805660e3455bef536412bd5467b435753b3a5
|
[
"Apache-2.0"
] | 120
|
2019-03-19T23:01:52.000Z
|
2022-03-14T08:41:27.000Z
|
tests/datatypes/__init__.py
|
Fogapod/edgedb-python
|
377805660e3455bef536412bd5467b435753b3a5
|
[
"Apache-2.0"
] | 24
|
2019-04-29T22:41:10.000Z
|
2021-11-15T00:28:01.000Z
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 37.722222
| 74
| 0.759941
|
abee574306de0d1fb06f9bd61e412f21e5eabdb9
| 4,018
|
py
|
Python
|
RI/flask_server/tapi_server/models/tapi_oam_mip.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 57
|
2018-04-09T08:56:18.000Z
|
2022-03-23T08:31:06.000Z
|
RI/flask_server/tapi_server/models/tapi_oam_mip.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 143
|
2016-06-08T04:09:54.000Z
|
2018-02-23T10:45:59.000Z
|
RI/flask_server/tapi_server/models/tapi_oam_mip.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 64
|
2018-03-07T07:55:17.000Z
|
2022-03-28T07:14:28.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_local_class import TapiCommonLocalClass # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server import util
class TapiOamMip(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, name=None, local_id=None, layer_protocol_name=None): # noqa: E501
"""TapiOamMip - a model defined in OpenAPI
:param name: The name of this TapiOamMip. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param local_id: The local_id of this TapiOamMip. # noqa: E501
:type local_id: str
:param layer_protocol_name: The layer_protocol_name of this TapiOamMip. # noqa: E501
:type layer_protocol_name: TapiCommonLayerProtocolName
"""
self.openapi_types = {
'name': List[TapiCommonNameAndValue],
'local_id': str,
'layer_protocol_name': TapiCommonLayerProtocolName
}
self.attribute_map = {
'name': 'name',
'local_id': 'local-id',
'layer_protocol_name': 'layer-protocol-name'
}
self._name = name
self._local_id = local_id
self._layer_protocol_name = layer_protocol_name
@classmethod
def from_dict(cls, dikt) -> 'TapiOamMip':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.Mip of this TapiOamMip. # noqa: E501
:rtype: TapiOamMip
"""
return util.deserialize_model(dikt, cls)
@property
def name(self):
"""Gets the name of this TapiOamMip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiOamMip.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiOamMip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiOamMip.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def local_id(self):
"""Gets the local_id of this TapiOamMip.
none # noqa: E501
:return: The local_id of this TapiOamMip.
:rtype: str
"""
return self._local_id
@local_id.setter
def local_id(self, local_id):
"""Sets the local_id of this TapiOamMip.
none # noqa: E501
:param local_id: The local_id of this TapiOamMip.
:type local_id: str
"""
self._local_id = local_id
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiOamMip.
:return: The layer_protocol_name of this TapiOamMip.
:rtype: TapiCommonLayerProtocolName
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiOamMip.
:param layer_protocol_name: The layer_protocol_name of this TapiOamMip.
:type layer_protocol_name: TapiCommonLayerProtocolName
"""
self._layer_protocol_name = layer_protocol_name
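# Minimal usage sketch (illustrative only): building a TapiOamMip from a dict keyed
# by the wire names declared in attribute_map above. How string values such as the
# layer protocol name get deserialized depends on tapi_server.util.deserialize_model,
# so treat this as an assumption rather than the generator's documented behaviour.
if __name__ == "__main__":
    payload = {"local-id": "mip-1", "layer-protocol-name": "ETH", "name": []}
    mip = TapiOamMip.from_dict(payload)
    print(mip.local_id, mip.layer_protocol_name)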
| 32.403226
| 223
| 0.665007
|
82c446c22c8ef3921afee3ff0a8b5642c5141e8f
| 27,700
|
py
|
Python
|
tests/support/mixins.py
|
major0/salt
|
2019cc89602d296d060e24111df8f9702a8c6bc9
|
[
"Apache-2.0"
] | null | null | null |
tests/support/mixins.py
|
major0/salt
|
2019cc89602d296d060e24111df8f9702a8c6bc9
|
[
"Apache-2.0"
] | null | null | null |
tests/support/mixins.py
|
major0/salt
|
2019cc89602d296d060e24111df8f9702a8c6bc9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
=============
Class Mix-Ins
=============
Some reusable class Mixins
'''
# pylint: disable=repr-flag-used-in-string
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import types
import atexit
import pprint
import logging
import tempfile
import functools
import subprocess
import multiprocessing
# Import Salt Testing Libs
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.paths import CODE_DIR
# Import salt libs
import salt.config
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.path
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
import salt.exceptions
import salt.utils.process
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt._compat import ElementTree as etree
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
class CheckShellBinaryNameAndVersionMixin(object):
'''
Simple class mix-in to subclass in companion to :class:`ShellTestCase<tests.support.case.ShellTestCase>` which
adds a test case to verify proper version report from Salt's CLI tools.
'''
_call_binary_ = None
_call_binary_expected_version_ = None
def test_version_includes_binary_name(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
if self._call_binary_expected_version_ is None:
# Late import
self._call_binary_expected_version_ = salt.version.__version__
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(self._call_binary_, out)
self.assertIn(self._call_binary_expected_version_, out)
class AdaptedConfigurationTestCaseMixin(object):
__slots__ = ()
@staticmethod
def get_temp_config(config_for, **config_overrides):
rootdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
conf_dir = os.path.join(rootdir, 'conf')
for key in ('cachedir', 'pki_dir', 'sock_dir'):
if key not in config_overrides:
config_overrides[key] = key
if 'log_file' not in config_overrides:
config_overrides['log_file'] = 'logs/{}.log'.format(config_for)
if 'user' not in config_overrides:
config_overrides['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
config_overrides['root_dir'] = rootdir
cdict = AdaptedConfigurationTestCaseMixin.get_config(config_for, from_scratch=True)
if config_for in ('master', 'client_config'):
rdict = salt.config.apply_master_config(config_overrides, cdict)
if config_for == 'minion':
rdict = salt.config.apply_minion_config(config_overrides, cdict)
verify_env([os.path.join(rdict['pki_dir'], 'minions'),
os.path.join(rdict['pki_dir'], 'minions_pre'),
os.path.join(rdict['pki_dir'], 'minions_rejected'),
os.path.join(rdict['pki_dir'], 'minions_denied'),
os.path.join(rdict['cachedir'], 'jobs'),
os.path.join(rdict['cachedir'], 'raet'),
os.path.join(rdict['cachedir'], 'tokens'),
os.path.join(rdict['root_dir'], 'cache', 'tokens'),
os.path.join(rdict['pki_dir'], 'accepted'),
os.path.join(rdict['pki_dir'], 'rejected'),
os.path.join(rdict['pki_dir'], 'pending'),
os.path.dirname(rdict['log_file']),
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)
with salt.utils.files.fopen(rdict['conf_file'], 'w') as wfh:
salt.utils.yaml.safe_dump(rdict, wfh, default_flow_style=False)
return rdict
@staticmethod
def get_config(config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('syndic',):
return salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
if config_for not in RUNTIME_VARS.RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('syndic',):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
)
return RUNTIME_VARS.RUNTIME_CONFIGS[config_for]
@staticmethod
def get_config_dir():
return RUNTIME_VARS.TMP_CONF_DIR
@staticmethod
def get_config_file_path(filename):
if filename == 'syndic_master':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master')
if filename == 'syndic':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion')
if filename == 'sub_minion':
return os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion')
return os.path.join(RUNTIME_VARS.TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
'''
Mix-in class that provides a ``client`` attribute which returns a Salt
:class:`LocalClient<salt:salt.client.LocalClient>`.
.. code-block:: python
class LocalClientTestCase(TestCase, SaltClientTestCaseMixin):
def test_check_pub_data(self):
just_minions = {'minions': ['m1', 'm2']}
jid_no_minions = {'jid': '1234', 'minions': []}
valid_pub_data = {'minions': ['m1', 'm2'], 'jid': '1234'}
self.assertRaises(EauthAuthenticationError,
self.client._check_pub_data, None)
self.assertDictEqual({},
self.client._check_pub_data(just_minions),
'Did not handle lack of jid correctly')
self.assertDictEqual(
{},
self.client._check_pub_data({'jid': '0'}),
'Passing JID of zero is not handled gracefully')
'''
_salt_client_config_file_name_ = 'master'
@property
def client(self):
# Late import
import salt.client
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
mopts = self.get_config(self._salt_client_config_file_name_, from_scratch=True)
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(mopts=mopts)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.version import __version_info__, SaltStackVersion
git = salt.utils.path.which('git')
if not git:
self.skipTest('The git binary is not available')
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if process.returncode != 0:
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: \'{0}\''.format(
salt.utils.stringutils.to_str(err)
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
class _FixLoaderModuleMockMixinMroOrder(type):
'''
This metaclass will make sure that LoaderModuleMockMixin will always come as the first
base class in order for LoaderModuleMockMixin.setUp to actually run
'''
def __new__(mcs, cls_name, cls_bases, cls_dict):
if cls_name == 'LoaderModuleMockMixin':
return super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, cls_bases, cls_dict)
bases = list(cls_bases)
for idx, base in enumerate(bases):
if base.__name__ == 'LoaderModuleMockMixin':
bases.insert(0, bases.pop(idx))
break
# Create the class instance
instance = super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, tuple(bases), cls_dict)
# Apply our setUp function decorator
instance.setUp = LoaderModuleMockMixin.__setup_loader_modules_mocks__(instance.setUp)
return instance
class LoaderModuleMockMixin(six.with_metaclass(_FixLoaderModuleMockMixinMroOrder, object)):
'''
This class will setup salt loader dunders.
Please check `set_up_loader_mocks` above
'''
# Define our setUp function decorator
@staticmethod
def __setup_loader_modules_mocks__(setup_func):
@functools.wraps(setup_func)
def wrapper(self):
if NO_MOCK:
self.skipTest(NO_MOCK_REASON)
loader_modules_configs = self.setup_loader_modules()
if not isinstance(loader_modules_configs, dict):
raise RuntimeError(
'{}.setup_loader_modules() must return a dictionary where the keys are the '
'modules that require loader mocking setup and the values, the global module '
'variables for each of the module being mocked. For example \'__salt__\', '
'\'__opts__\', etc.'.format(self.__class__.__name__)
)
salt_dunders = (
'__opts__', '__salt__', '__runner__', '__context__', '__utils__',
'__ext_pillar__', '__thorium__', '__states__', '__serializers__', '__ret__',
'__grains__', '__pillar__', '__sdb__',
# Proxy is commented out on purpose since some code in salt expects a NameError
# and is most of the time not a required dunder
# '__proxy__'
)
for module, module_globals in six.iteritems(loader_modules_configs):
if not isinstance(module, types.ModuleType):
raise RuntimeError(
'The dictionary keys returned by {}.setup_loader_modules() '
'must be an imported module, not {}'.format(
self.__class__.__name__,
type(module)
)
)
if not isinstance(module_globals, dict):
raise RuntimeError(
'The dictionary values returned by {}.setup_loader_modules() '
'must be a dictionary, not {}'.format(
self.__class__.__name__,
type(module_globals)
)
)
module_blacklisted_dunders = module_globals.pop('blacklisted_dunders', ())
minion_funcs = {}
if '__salt__' in module_globals and module_globals['__salt__'] == 'autoload':
if '__opts__' not in module_globals:
raise RuntimeError(
'You must provide \'__opts__\' on the {} module globals dictionary '
'to auto load the minion functions'.format(module.__name__)
)
import salt.loader
ctx = {}
if '__utils__' not in module_globals:
utils = salt.loader.utils(module_globals['__opts__'],
context=module_globals.get('__context__') or ctx)
module_globals['__utils__'] = utils
minion_funcs = salt.loader.minion_mods(
module_globals['__opts__'],
context=module_globals.get('__context__') or ctx,
utils=module_globals.get('__utils__'),
)
module_globals['__salt__'] = minion_funcs
for dunder_name in salt_dunders:
if dunder_name not in module_globals:
if dunder_name in module_blacklisted_dunders:
continue
module_globals[dunder_name] = {}
sys_modules = module_globals.pop('sys.modules', None)
if sys_modules is not None:
if not isinstance(sys_modules, dict):
raise RuntimeError(
'\'sys.modules\' must be a dictionary not: {}'.format(
type(sys_modules)
)
)
patcher = patch.dict(sys.modules, sys_modules)
patcher.start()
def cleanup_sys_modules(patcher, sys_modules):
patcher.stop()
del patcher
del sys_modules
self.addCleanup(cleanup_sys_modules, patcher, sys_modules)
for key in module_globals:
if not hasattr(module, key):
if key in salt_dunders:
setattr(module, key, {})
else:
setattr(module, key, None)
if module_globals:
patcher = patch.multiple(module, **module_globals)
patcher.start()
def cleanup_module_globals(patcher, module_globals):
patcher.stop()
del patcher
del module_globals
self.addCleanup(cleanup_module_globals, patcher, module_globals)
if minion_funcs:
# Since we autoloaded the minion_funcs, let's namespace the functions with the globals
# used to patch above
import salt.utils
for func in minion_funcs:
minion_funcs[func] = salt.utils.functools.namespaced_function(
minion_funcs[func],
module_globals,
preserve_context=True
)
return setup_func(self)
return wrapper
def setup_loader_modules(self):
raise NotImplementedError(
'\'{}.setup_loader_modules()\' must be implemented'.format(self.__class__.__name__)
)
class XMLEqualityMixin(object):
def assertEqualXML(self, e1, e2):
if six.PY3 and isinstance(e1, bytes):
e1 = e1.decode('utf-8')
if six.PY3 and isinstance(e2, bytes):
e2 = e2.decode('utf-8')
if isinstance(e1, six.string_types):
e1 = etree.XML(e1)
if isinstance(e2, six.string_types):
e2 = etree.XML(e2)
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
return all(self.assertEqualXML(c1, c2) for c1, c2 in zip(e1, e2))
class SaltReturnAssertsMixin(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty dictionary.'.format(
                    ret, {}
                )
            )
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
ret_data = []
for part in six.itervalues(ret):
keys = self.__return_valid_keys(keys)
okeys = keys[:]
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
ret_data.append(ret_item)
return ret_data
def assertSaltTrueReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertTrue(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertFalse(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertIsNone(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertIn(in_comment, saltret)
def assertNotInSaltComment(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertNotIn(not_in_comment, saltret)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertIn(in_comment, saltret)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertNotIn(not_in_comment, saltret)
def assertInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertIn(item_to_check, saltret)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotIn(item_to_check, saltret)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertRegex(saltret, pattern)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertEqual(saltret, comparison)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotEqual(saltret, comparison)
def _fetch_events(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixin()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
pass
q.put(events)
class SaltMinionEventAssertsMixin(object):
'''
Asserts to verify that a given event was seen
'''
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=_fetch_events, args=(cls.q,)
)
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')
def assertMinionEventReceived(self, desired_event):
queue_wait = 5 # 2.5s
while self.q.empty():
time.sleep(0.5) # Wait for events to be pushed into the queue
queue_wait -= 1
if queue_wait <= 0:
raise AssertionError('Queue wait timer expired')
while not self.q.empty(): # This is not thread-safe and may be inaccurate
event = self.q.get()
if isinstance(event, dict):
event.pop('_stamp')
if desired_event == event:
self.fetch_proc.terminate()
return True
self.fetch_proc.terminate()
raise AssertionError('Event {0} was not received by minion'.format(desired_event))
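# Illustrative sketch (comment only, not part of the salt test suite): the contract
# that LoaderModuleMockMixin.__setup_loader_modules_mocks__ enforces above. The
# target module (salt.modules.test) and the dunder values are example choices.
#
#   import salt.modules.test as test_mod
#
#   class ExampleModuleTestCase(TestCase, LoaderModuleMockMixin):
#       def setup_loader_modules(self):
#           # keys must be imported modules, values the dunders to patch onto them
#           return {test_mod: {'__opts__': {'test': True}, '__salt__': {}}}
#
#       def test_ping(self):
#           self.assertTrue(test_mod.ping())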
| 39.458689
| 136
| 0.572924
|
68060a509a2d59d55dba88034dda13cb1b0bca04
| 6,938
|
py
|
Python
|
reddit-focal-bat.py
|
jphoulihan/reddit-focal-bat
|
06d4148972bd5c178dc26be819b22be1510e935f
|
[
"MIT"
] | null | null | null |
reddit-focal-bat.py
|
jphoulihan/reddit-focal-bat
|
06d4148972bd5c178dc26be819b22be1510e935f
|
[
"MIT"
] | null | null | null |
reddit-focal-bat.py
|
jphoulihan/reddit-focal-bat
|
06d4148972bd5c178dc26be819b22be1510e935f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import praw
import spacy
import random
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
load_dotenv()
ID = os.getenv('CLIENT_ID')
SECRET = os.getenv('CLIENT_SECRET')
PASSWORD = os.getenv('CLIENT_PASSWORD')
nlp = spacy.load('en_core_web_sm')
def main():
reddit = praw.Reddit(
user_agent= "Comment Extraction (by u/USERNAME)",
client_id= ID,
client_secret= SECRET,
username= "focal-bat",
password= PASSWORD
)
subreddit = reddit.subreddit("ireland").top("day", limit=1) #retrieves the top submission as limit is 1
submission_obj = [reddit.submission(id=f'{sub}') for sub in subreddit] # stores the top thread of the day submission object
if len(submission_obj) == 0:
sys.exit('No Thread found, exiting program')
elif len(submission_obj[0].comments) == 0:
sys.exit('Thread contains 0 comments, exiting program')
parent = reddit.comment(f'{submission_obj[0].comments[0]}')
parent.refresh()
parent.replies.replace_more()
child_comments = parent.replies.list() # all top comment replies converted to list
#wrapping algorithm in a loop which checks if the bot has already replied, if bot username is found in child comments break, else reply
for reply in child_comments:
if reply.author == 'focal-bat':
print('focal-bat reply FOUND in',len(child_comments),'replies, program exited')
break
else:
print('\nfocal-bat reply NOT FOUND in',len(child_comments),'replies, continuing program...\n')
top_comment_author = f'u/{submission_obj[0].comments[0].author}'
top_comment_body = nlp(f'{submission_obj[0].comments[0].body}')
word_pos_dict = {token.lemma_ : f'{token.pos_}' for token in top_comment_body } # populating a dictionary with key (lemmatization) value (part of speech) pairs
pos_list = ['NOUN', 'VERB', 'PROPN', 'ADJ', 'ADV', 'CCONJ']
confirm_comment_has_pos = [True for pos in pos_list if pos in word_pos_dict.values()]
if confirm_comment_has_pos.count(True) < 2: sys.exit('Not enough parts of speech in comment for dictionary search, exiting program')
random.shuffle(pos_list) #increase possibility of different results
word, formatted_translated_word, examples_scrape = dict_search(word_pos_dict, pos_list, check_verb) #makes sure the word gets valid dictionary result with examples, returns the word, html page copy and examples
print(word, 'has been translated to ', formatted_translated_word, '\n')
raw_example_list = [ex.text for ex in examples_scrape]
formatted_example_list = []
for example_str in raw_example_list:
for symbol in example_str:
            if ord(symbol) == 187: #phrases begin after the right double angle quote '»', filter by its character code (187) for formatting
formatted_example_list.append(example_str[slice(example_str.index(symbol)+1, len(example_str))].strip()) #substring of raw example added to formatted list
print('Up to 20 example phrases to choose from: \n')
print(formatted_example_list)
print('\n')
random_example_sentence = formatted_example_list[random.randrange(len(formatted_example_list))]
searched_word = f'''"{word}"'''
reply = f'{top_comment_author}, seo duit an focal {searched_word} as gaeilge: {formatted_translated_word}'
example = f'Sampla gaolmhara (Related example): {random_example_sentence}'
search_further = f'https://www.teanglann.ie/en/eid/{word}'
print('focal_bat reply: \n')
print(f'{reply}\n{example}\nLike to learn more? Go on, go on, go on...{search_further}')
lb='\n\n'
#reply to top comment
focal_bat_reply = f'{reply}{lb}{example}{lb}Like to learn more? Go on, go on, go on... {search_further}'
parent.reply(focal_bat_reply)
#checks if word is in part of speech list and ensures word returns an accurate translation and example phrases from online dictionary
def dict_search(word_dict, pos_list, check_verb):
word_search_fail = ['ireland', 'Ireland', 'Irish', 'irish']
for word, pos in word_dict.items():
if pos in pos_list:
if word in word_search_fail:
continue
page_eng_to_irish = requests.get(f'https://www.teanglann.ie/en/eid/{word}') # Getting page_eng_irish HTML through request
soup = BeautifulSoup(page_eng_to_irish.content, 'html5lib')
examples_scrape = soup.find_all('div', attrs={'class': 'ex'}, limit=20) #get example phrases here, if none add word to fail list
if len(word) <= 2 or soup.find('div', attrs={'class': 'nojoy'}) or len(examples_scrape) == 0:
word_search_fail.append(word)
continue
translation_scrape = soup.find('span', attrs={'class': 'eid trg clickable'})
translated_word_list = translation_scrape.text.split()
stopwords = ['m', 'f']
nogender_translated_word_list = [w for w in translated_word_list if w not in stopwords]
translated_word = " ".join(nogender_translated_word_list) #handles word returned with synonyms
            if word.lower() == translated_word.lower(): #handle the occasion that the English and Irish words are the same
word_search_fail.append(word)
continue
break
if word_dict.get(word) == 'VERB':
translated_word = check_verb(word, translated_word)
return word, translated_word, examples_scrape
#caveat in dictionary search result, for verbs the first person present conjugation is often returned and not the infinitive, this function should ensure the infinitive is returned
def check_verb(verb, formatted_translated_word):
page_irish_eng = requests.get(f'https://www.teanglann.ie/en/fgb/{verb}')
check_soup = BeautifulSoup(page_irish_eng.content, 'html5lib')
check_translation_scrape = check_soup.find_all('span', attrs={'class': 'head'})
check_translation_list = [trans.text for trans in check_translation_scrape]
irish_eng_list = []
for s in check_translation_list:
for symbol in s:
if ord(symbol) == 187: #irish translated result comes before this symbol, use as end marker for substring
irish_eng_list.append(s[slice(0, s.index(symbol)-1)].strip()) #substring of raw example added to formatted list
for verb_infinitive in irish_eng_list:
if verb_infinitive.lower() in formatted_translated_word.lower(): #eliminates the conjugated suffix
return verb_infinitive
return formatted_translated_word
if __name__ == "__main__":
main()
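# Illustrative sketch (comment only, not part of the bot): the '»' (character code
# 187) slicing used in main() and check_verb() to cut a teanglann.ie scrape line.
#   raw = "example » phrase after the marker"
#   for symbol in raw:
#       if ord(symbol) == 187:
#           print(raw[raw.index(symbol) + 1:].strip())  # -> "phrase after the marker"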
| 46.563758
| 219
| 0.66979
|
2e87bc1ebb7fa31a2b286543486cfc7bbc9cee8f
| 42,322
|
py
|
Python
|
round01/30_11_dDQN_light_tweak53.py
|
phunc20/rlcomp2020
|
c37f8f05cc86d55fca2648bf5491d6a2218c2cad
|
[
"MIT"
] | null | null | null |
round01/30_11_dDQN_light_tweak53.py
|
phunc20/rlcomp2020
|
c37f8f05cc86d55fca2648bf5491d6a2218c2cad
|
[
"MIT"
] | 1
|
2022-02-10T02:27:10.000Z
|
2022-02-10T02:27:10.000Z
|
round01/30_11_dDQN_light_tweak53.py
|
phunc20/rlcomp2020
|
c37f8f05cc86d55fca2648bf5491d6a2218c2cad
|
[
"MIT"
] | null | null | null |
########################################
# Changes compared to 30_05_CNN_revived_dDQN_light.py
# 01.
# n_epsilon_decay = int(n_episodes // 50)
# as opposed to
# n_epsilon_decay = int(n_episodes*.805)
# Takes around 9_900 to get from epsilon=1 to epsilon=0.01
# 02.
# slower lr: 1e-4
########################################
import sys
import numpy as np
#import pandas as pd
import datetime
import json
from array import *
import os
import math
from random import randrange
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
import tensorflow.keras as keras
#import tensorflow.compat.v1 as tf
#from tensorflow.compat.v1.keras import backend as K
#tf.disable_v2_behavior()
import tensorflow as tf
from tensorflow.keras import backend as K
import constants
import non_RL_agent
import non_RL_agent02
import non_RL_agent03
import non_RL_agent04
import non_RL_agent05
import non_RL_agent06
n_episodes = 500_000
#n_epsilon_decay = int(n_episodes*.6)
#n_epsilon_decay = int(n_episodes*.805)
#n_epsilon_decay = 10**6 / 0.99
n_epsilon_decay = int(n_episodes // 50)
n_episodes_buf_fill = 5_000
batch_size = 32
discount_rate = 0.95
lr_optimizer = 1e-4
#lr_optimizer = 7.3e-4
#loss_fn = keras.losses.mean_squared_error
loss_fn = keras.losses.Huber()
max_replay_len = 50_000
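# Hedged illustration (assumption): the actual epsilon schedule is defined further
# down this script and is not reproduced here. A linear decay over n_epsilon_decay
# episodes, as sketched below, reaches epsilon = 0.01 after 0.99 * 10_000 = 9_900
# episodes, which matches the note in the header comment.
def example_epsilon_schedule(episode, eps_min=0.01, eps_max=1.0):
    """Illustrative linear decay; not necessarily the schedule used by this script."""
    return max(eps_min, eps_max - episode / n_epsilon_decay)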
#Classes in GAME_SOCKET_DUMMY.py
class ObstacleInfo:
# initial energy for obstacles: Land (key = 0): -1, Forest(key = -1): 0 (random), Trap(key = -2): -10, Swamp (key = -3): -5
types = {0: -1, -1: 0, -2: -10, -3: -5}
def __init__(self):
self.type = 0
self.posx = 0
self.posy = 0
self.value = 0
class GoldInfo:
def __init__(self):
self.posx = 0
self.posy = 0
self.amount = 0
def loads(self, data):
golds = []
for gd in data:
g = GoldInfo()
g.posx = gd["posx"]
g.posy = gd["posy"]
g.amount = gd["amount"]
golds.append(g)
return golds
class PlayerInfo:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self, id):
self.playerId = id
self.score = 0
self.energy = 0
self.posx = 0
self.posy = 0
self.lastAction = -1
self.status = PlayerInfo.STATUS_PLAYING
self.freeCount = 0
class GameInfo:
def __init__(self):
self.numberOfPlayers = 1
self.width = 0
self.height = 0
self.steps = 100
self.golds = []
self.obstacles = []
def loads(self, data):
m = GameInfo()
m.width = data["width"]
m.height = data["height"]
m.golds = GoldInfo().loads(data["golds"])
m.obstacles = data["obstacles"]
m.numberOfPlayers = data["numberOfPlayers"]
m.steps = data["steps"]
return m
class UserMatch:
def __init__(self):
self.playerId = 1
self.posx = 0
self.posy = 0
self.energy = 50
self.gameinfo = GameInfo()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
class StepState:
def __init__(self):
self.players = []
self.golds = []
self.changedObstacles = []
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
#Main class in GAME_SOCKET_DUMMY.py
class GameSocket:
bog_energy_chain = {-5: -20, -20: -40, -40: -100, -100: -100}
def __init__(self):
self.stepCount = 0
self.maxStep = 0
self.mapdir = "Maps" # where to load all pre-defined maps
self.mapid = ""
self.userMatch = UserMatch()
self.user = PlayerInfo(1)
self.stepState = StepState()
self.maps = {} # key: map file name, value: file content
self.map = [] # running map info: 0->Land, -1->Forest, -2->Trap, -3:Swamp, >0:Gold
self.energyOnMap = [] # self.energyOnMap[x][y]: <0, amount of energy which player will consume if it move into (x,y)
self.E = 50
self.resetFlag = True
self.craftUsers = [] # players that craft at current step - for calculating amount of gold
self.bots = []
self.craftMap = {} # cells that players craft at current step, key: x_y, value: number of players that craft at (x,y)
def init_bots(self):
self.bots = [Bot1(2), Bot2(3), Bot3(4)] # use bot1(id=2), bot2(id=3), bot3(id=4)
#for (bot) in self.bots: # at the beginning, all bots will have same position, energy as player
for bot in self.bots: # at the beginning, all bots will have same position, energy as player
bot.info.posx = self.user.posx
bot.info.posy = self.user.posy
bot.info.energy = self.user.energy
bot.info.lastAction = -1
bot.info.status = PlayerInfo.STATUS_PLAYING
bot.info.score = 0
self.stepState.players.append(bot.info)
self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players)
#print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers)
def reset(self, requests): # load new game by given request: [map id (filename), posx, posy, initial energy]
# load new map
self.reset_map(requests[0])
self.userMatch.posx = int(requests[1])
self.userMatch.posy = int(requests[2])
self.userMatch.energy = int(requests[3])
self.userMatch.gameinfo.steps = int(requests[4])
self.maxStep = self.userMatch.gameinfo.steps
# init data for players
        self.user.posx = self.userMatch.posx
self.user.posy = self.userMatch.posy
self.user.energy = self.userMatch.energy
self.user.status = PlayerInfo.STATUS_PLAYING
self.user.score = 0
self.stepState.players = [self.user]
self.E = self.userMatch.energy
self.resetFlag = True
self.init_bots()
self.stepCount = 0
def reset_map(self, id): # load map info
self.mapId = id
self.map = json.loads(self.maps[self.mapId])
self.userMatch = self.map_info(self.map)
self.stepState.golds = self.userMatch.gameinfo.golds
self.map = json.loads(self.maps[self.mapId])
self.energyOnMap = json.loads(self.maps[self.mapId])
for x in range(len(self.map)):
for y in range(len(self.map[x])):
if self.map[x][y] > 0: # gold
self.energyOnMap[x][y] = -4
else: # obstacles
self.energyOnMap[x][y] = ObstacleInfo.types[self.map[x][y]]
def connect(self): # simulate player's connect request
print("Connected to server.")
for mapid in range(len(Maps)):
filename = "map" + str(mapid)
print("Found: " + filename)
self.maps[filename] = str(Maps[mapid])
def map_info(self, map): # get map info
# print(map)
userMatch = UserMatch()
userMatch.gameinfo.height = len(map)
userMatch.gameinfo.width = len(map[0])
i = 0
while i < len(map):
j = 0
while j < len(map[i]):
if map[i][j] > 0: # gold
g = GoldInfo()
g.posx = j
g.posy = i
g.amount = map[i][j]
userMatch.gameinfo.golds.append(g)
else: # obstacles
o = ObstacleInfo()
o.posx = j
o.posy = i
o.type = -map[i][j]
o.value = ObstacleInfo.types[map[i][j]]
userMatch.gameinfo.obstacles.append(o)
j += 1
i += 1
return userMatch
def receive(self): # send data to player (simulate player's receive request)
if self.resetFlag: # for the first time -> send game info
self.resetFlag = False
data = self.userMatch.to_json()
for (bot) in self.bots:
bot.new_game(data)
# print(data)
return data
else: # send step state
self.stepCount = self.stepCount + 1
if self.stepCount >= self.maxStep:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_END_STEP
data = self.stepState.to_json()
#for (bot) in self.bots: # update bots' state
for bot in self.bots: # update bots' state
bot.new_state(data)
# print(data)
return data
def send(self, message): # receive message from player (simulate send request from player)
if message.isnumeric(): # player send action
self.resetFlag = False
self.stepState.changedObstacles = []
action = int(message)
# print("Action = ", action)
self.user.lastAction = action
self.craftUsers = []
self.step_action(self.user, action)
for bot in self.bots:
if bot.info.status == PlayerInfo.STATUS_PLAYING:
action = bot.next_action()
bot.info.lastAction = action
# print("Bot Action: ", action)
self.step_action(bot.info, action)
self.action_5_craft()
for c in self.stepState.changedObstacles:
self.map[c["posy"]][c["posx"]] = -c["type"]
self.energyOnMap[c["posy"]][c["posx"]] = c["value"]
else: # reset game
requests = message.split(",")
#print("Reset game: ", requests[:3], end='')
self.reset(requests)
def step_action(self, user, action):
switcher = {
0: self.action_0_left,
1: self.action_1_right,
2: self.action_2_up,
3: self.action_3_down,
4: self.action_4_free,
5: self.action_5_craft_pre
}
func = switcher.get(action, self.invalidAction)
func(user)
def action_5_craft_pre(self, user): # collect players who craft at current step
user.freeCount = 0
if self.map[user.posy][user.posx] <= 0: # craft at the non-gold cell
user.energy -= 10
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
else:
user.energy -= 5
if user.energy > 0:
self.craftUsers.append(user)
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
count = self.craftMap[key]
self.craftMap[key] = count + 1
else:
self.craftMap[key] = 1
else:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def action_0_left(self, user): # user go left
user.freeCount = 0
user.posx = user.posx - 1
if user.posx < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_1_right(self, user): # user go right
user.freeCount = 0
user.posx = user.posx + 1
if user.posx >= self.userMatch.gameinfo.width:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_2_up(self, user): # user go up
user.freeCount = 0
user.posy = user.posy - 1
if user.posy < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
    def action_3_down(self, user): # user go down
user.freeCount = 0
user.posy = user.posy + 1
if user.posy >= self.userMatch.gameinfo.height:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_4_free(self, user): # user free
user.freeCount += 1
if user.freeCount == 1:
user.energy += int(self.E / 4)
elif user.freeCount == 2:
user.energy += int(self.E / 3)
elif user.freeCount == 3:
user.energy += int(self.E / 2)
else:
user.energy = self.E
if user.energy > self.E:
user.energy = self.E
def action_5_craft(self):
craftCount = len(self.craftUsers)
# print ("craftCount",craftCount)
if (craftCount > 0):
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
c = self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
user.score += m
# print ("user", user.playerId, m)
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
c = self.craftMap[key]
del self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
self.map[y][x] -= m * c
if self.map[y][x] < 0:
self.map[y][x] = 0
self.energyOnMap[y][x] = ObstacleInfo.types[0]
for g in self.stepState.golds:
if g.posx == x and g.posy == y:
g.amount = self.map[y][x]
if g.amount == 0:
self.stepState.golds.remove(g)
self.add_changed_obstacle(x, y, 0, ObstacleInfo.types[0])
if len(self.stepState.golds) == 0:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_EMPTY_GOLD
break;
self.craftMap = {}
def invalidAction(self, user):
user.status = PlayerInfo.STATUS_ELIMINATED_INVALID_ACTION
user.lastAction = 6 #eliminated
def go_to_pos(self, user): # player move to cell(x,y)
if self.map[user.posy][user.posx] == -1:
user.energy -= randrange(16) + 5
elif self.map[user.posy][user.posx] == 0:
user.energy += self.energyOnMap[user.posy][user.posx]
elif self.map[user.posy][user.posx] == -2:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 0, ObstacleInfo.types[0])
elif self.map[user.posy][user.posx] == -3:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 3,
self.bog_energy_chain[self.energyOnMap[user.posy][user.posx]])
else:
user.energy -= 4
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def add_changed_obstacle(self, x, y, t, v):
added = False
for o in self.stepState.changedObstacles:
if o["posx"] == x and o["posy"] == y:
added = True
break
if added == False:
o = {}
o["posx"] = x
o["posy"] = y
o["type"] = t
o["value"] = v
self.stepState.changedObstacles.append(o)
def close(self):
print("Close socket.")
class Bot1:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
#return int(greedy_policy(s))
return int(non_RL_agent.greedy_policy(s))
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
class Bot2:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
#return int(non_RL_agent03.greedy_policy(s))
return int(non_RL_agent.greedy_policy(s, how_gold=non_RL_agent.find_worthiest_gold))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = np.random.randint(0, 4)
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
        # Use this bot's own state (the original referenced an undefined `minerEnv` global).
        return [player["score"] for player in self.state.players if player["playerId"] == self.info.playerId][0]
class Bot3:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
return int(non_RL_agent02.greedy_policy(s))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = self.ACTION_GO_LEFT
# if self.info.posx % 2 == 0:
# if self.info.posy < self.state.mapInfo.max_y:
# action = self.ACTION_GO_DOWN
# else:
# if self.info.posy > 0:
# action = self.ACTION_GO_UP
# else:
# action = self.ACTION_GO_RIGHT
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
        # Use this bot's own state (the original referenced an undefined `minerEnv` global).
        return [player["score"] for player in self.state.players if player["playerId"] == self.info.playerId][0]
#MinerState.py
def str_2_json(json_str):
    # json.loads dropped its `encoding` argument in Python 3.9 (it was ignored before),
    # and the parameter is renamed so it no longer shadows the built-in `str`.
    return json.loads(json_str)
class MapInfo:
def __init__(self):
self.max_x = 0 #Width of the map
self.max_y = 0 #Height of the map
self.golds = [] #List of the golds in the map
self.obstacles = []
self.numberOfPlayers = 0
self.maxStep = 0 #The maximum number of step is set for this map
def init_map(self, gameInfo):
        #Initialize the map at the beginning of each episode
self.max_x = gameInfo["width"] - 1
self.max_y = gameInfo["height"] - 1
self.golds = gameInfo["golds"]
self.obstacles = gameInfo["obstacles"]
self.maxStep = gameInfo["steps"]
self.numberOfPlayers = gameInfo["numberOfPlayers"]
def update(self, golds, changedObstacles):
#Update the map after every step
self.golds = golds
for cob in changedObstacles:
newOb = True
for ob in self.obstacles:
if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]:
newOb = False
#print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ",
# cob["type"], " / value: ", ob["value"], " -> ", cob["value"])
ob["type"] = cob["type"]
ob["value"] = cob["value"]
break
if newOb:
self.obstacles.append(cob)
#print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ",
# cob["value"])
def get_min_x(self):
return min([cell["posx"] for cell in self.golds])
def get_max_x(self):
return max([cell["posx"] for cell in self.golds])
def get_min_y(self):
return min([cell["posy"] for cell in self.golds])
def get_max_y(self):
return max([cell["posy"] for cell in self.golds])
def is_row_has_gold(self, y):
return y in [cell["posy"] for cell in self.golds]
def is_column_has_gold(self, x):
return x in [cell["posx"] for cell in self.golds]
def gold_amount(self, x, y): #Get the amount of golds at cell (x,y)
for cell in self.golds:
if x == cell["posx"] and y == cell["posy"]:
return cell["amount"]
return 0
def get_obstacle(self, x, y): # Get the kind of the obstacle at cell(x,y)
for cell in self.obstacles:
if x == cell["posx"] and y == cell["posy"]:
return cell["type"]
return -1 # No obstacle at the cell (x,y)
class State:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self):
self.end = False
self.score = 0
self.lastAction = None
self.id = 0
self.x = 0
self.y = 0
self.energy = 0
self.energy_pre = 0
self.mapInfo = MapInfo()
self.players = []
self.stepCount = 0
self.status = State.STATUS_PLAYING
def init_state(self, data): #parse data from server into object
game_info = str_2_json(data)
self.end = False
self.score = 0
self.lastAction = None
self.id = game_info["playerId"]
self.x = game_info["posx"]
self.y = game_info["posy"]
self.energy = game_info["energy"]
self.mapInfo.init_map(game_info["gameinfo"])
self.stepCount = 0
self.status = State.STATUS_PLAYING
self.players = [{"playerId": 2, "posx": self.x, "posy": self.y},
{"playerId": 3, "posx": self.x, "posy": self.y},
{"playerId": 4, "posx": self.x, "posy": self.y}]
def update_state(self, data):
new_state = str_2_json(data)
for player in new_state["players"]:
if player["playerId"] == self.id:
self.x = player["posx"]
self.y = player["posy"]
self.energy_pre = self.energy
self.energy = player["energy"]
self.score = player["score"]
self.lastAction = player["lastAction"]
self.status = player["status"]
self.mapInfo.update(new_state["golds"], new_state["changedObstacles"])
self.players = new_state["players"]
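        # Pad the player list to four entries (placeholders at our own position) when the
        # server reports fewer players, so code iterating over players always sees four entries.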
for i in range(len(self.players), 4, 1):
self.players.append({"playerId": i, "posx": self.x, "posy": self.y})
self.stepCount = self.stepCount + 1
#MinerEnv.py
TreeID = 1
TrapID = 2
SwampID = 3
class MinerEnv:
def __init__(self):
self.socket = GameSocket()
self.state = State()
self.score_pre = self.state.score#Storing the last score for designing the reward function
def start(self): #connect to server
self.socket.connect()
def end(self): #disconnect server
self.socket.close()
def send_map_info(self, request):#tell server which map to run
self.socket.send(request)
def reset(self): #start new game
try:
message = self.socket.receive() #receive game info from server
self.state.init_state(message) #init state
except Exception as e:
import traceback
traceback.print_exc()
def step(self, action): #step process
self.socket.send(action) #send action to server
try:
message = self.socket.receive() #receive new state from server
self.state.update_state(message) #update to local state
except Exception as e:
import traceback
traceback.print_exc()
def get_state(self):
"""
Fuse `view` and `energyOnMap` into a single matrix to have a simple and concise state/observation.
We want a matrix showing the following:
`gold`: The amount of gold
`all the others`: The energy that each type of terrain is going to take if being stepped into, e.g.
`land` => -1, `trap` => -10, etc.
"""
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
energyOnMap = np.array(self.socket.energyOnMap)
# `view` will contribute only to the type of terrain of `gold`
view[view <= 0] = -9999 # Just a dummy large negative number to be got rid of later
# `energyOnMap` will contribute to the types of terrain of `land`, `trap`, `forest` and `swamp`.
# Recall. `forest` was designated by BTC to the value of 0, to mean random integer btw [5..20].
energyOnMap[energyOnMap == 0] = - constants.forest_energy
channel0 = np.maximum(view, energyOnMap)
# Finish channel 0
# Channel 1 will contain the position of the agent
channel1 = np.zeros_like(channel0)
x_agent_out_of_map = self.state.x < 0 or self.state.x >= constants.width
y_agent_out_of_map = self.state.y < 0 or self.state.y >= constants.height
if x_agent_out_of_map or y_agent_out_of_map:
pass
else:
channel1[self.state.y, self.state.x] = self.state.energy
state = np.stack((channel0, channel1), axis=-1)
return state
def get_reward(self):
# Initialize reward
reward = 0
score_action = self.state.score - self.score_pre
self.score_pre = self.state.score
if score_action > 0:
#reward += score_action*(100 - self.state.stepCount)
reward += score_action
#if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID: # Tree
# reward -= TreeID
#if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID: # Trap
# reward -= TrapID
#if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID: # Swamp
# reward -= SwampID
# if self.state.lastAction == 4:
# reward -= 40
#if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
if self.state.status == constants.agent_state_str2id["out_of_MAP"]:
#if self.state.stepCount < 50:
# reward += -5*(50 - self.state.stepCount)
reward -= 2000
else:
try:
s = self.get_state()
#print(f"self.state.x, self.state.y = {self.state.x}, {self.state.y} ")
terrain_now = s[self.state.y, self.state.x, 0]
if terrain_now < 0 and self.state.lastAction != constants.rest:
                    # This subtracts from the reward the same amount of energy lost when the agent steps into terrain_now, except for gold
reward += terrain_now
except Exception:
pass
#if self.state.status == State.STATUS_STOP_END_STEP:
if self.state.status == constants.agent_state_str2id["no_more_STEP"]:
#reward += (self.state.score/total_gold) * 100
pass
#if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY:
if self.state.status == constants.agent_state_str2id["no_more_ENERGY"]:
if self.state.lastAction != constants.rest:
reward -= 500
#if self.state.status == State.STATUS_PLAYING:
if self.state.status == constants.agent_state_str2id["PLAYing"]:
reward += 1
# We punish surplus `rest`
if self.state.energy_pre == constants.max_energy and self.state.lastAction == constants.rest:
reward -= 50
return reward
def check_terminate(self):
#Checking the status of the game
#it indicates the game ends or is playing
return self.state.status != State.STATUS_PLAYING
Maps = [constants.maps[i] for i in range(1, 6)]
env = MinerEnv() # Creating a communication environment between the DQN model and the game environment
env.start() # Connect to the game
#eliminated = []
#def pictorial_state(obs):
# pictorial = np.zeros((constants.height, constants.width, 1+4), dtype=np.float32)
# # 1+4 is +1 for map and +1 for each of the players = 5 channels
# # dtype=np.float32 because pictorial will later be carried into tensorflow CNN
# pictorial[..., 0] = obs[:constants.n_px].reshape((constants.height, constants.width))
# # position of agent: we put the energy value at the coordinate where stands the agent, the whole in channel 1, the channel for the agent.
# x_agent, y_agent = obs[constants.n_px], obs[constants.n_px+1]
# if x_agent >= constants.width or y_agent >= constants.height:
# pass
# else:
# pictorial[y_agent, x_agent, 1] = obs[constants.n_px+2]
# # position of bots: we put -1 on the coord of the bots
# for i in range(1, 3+1):
# if i in eliminated:
# continue
# y = obs[constants.n_px+(2*i+2)]
# x = obs[constants.n_px+(2*i+1)]
# if x >= constants.width or y >= constants.height:
# eliminated.append(i)
# continue
# pictorial[y, x, i+1] = -1
# return pictorial
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
tf.random.set_seed(42)
np.random.seed(42)
#input_shape = [constants.height, constants.width, 1+4]
input_shape = [constants.height, constants.width, 1+1]
n_outputs = 6
model = keras.models.Sequential([
Conv2D(2, 3, activation="relu", padding="same", input_shape=input_shape),
#MaxPooling2D(2),
Conv2D(4, 3, activation="relu", padding="same"),
#Conv2D(128, 3, activation="relu", padding="same"),
#MaxPooling2D(2),
Flatten(),
#Dense(128, activation="elu"),
Dense(64, activation="elu"),
Dense(32, activation="elu"),
Dense(16, activation="elu"),
Dense(n_outputs)
])
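# Target network for the Double DQN update below; its weights are refreshed from the
# online model every n_episodes_buf_fill episodes in the training loop.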
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
from collections import deque
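# Experience replay buffer (max_replay_len is a hyperparameter assumed to be defined earlier in this script).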
replay_memory = deque(maxlen=max_replay_len)
def sample_experiences(batch_size):
indices = np.random.randint(len(replay_memory), size=batch_size)
batch = [replay_memory[index] for index in indices]
states, actions, rewards, next_states, dones = [
np.array([experience[field_index] for experience in batch])
for field_index in range(5)]
return states, actions, rewards, next_states, dones
def epsilon_greedy_policy(state, epsilon=0, n_actions=6):
if np.random.rand() < epsilon:
return np.random.randint(n_actions)
else:
#pictorial = pictorial_state(state)
#Q_values = model.predict(pictorial[np.newaxis])
Q_values = model.predict(state[np.newaxis])
return np.argmax(Q_values[0])
def play_one_step(env, state, epsilon):
action = epsilon_greedy_policy(state, epsilon)
#next_state, reward, done, info = env.step(action)
env.step(str(action))
next_state = env.get_state()
reward = env.get_reward()
done = env.check_terminate()
replay_memory.append((state, action, reward, next_state, done))
return next_state, reward, done
#optimizer = keras.optimizers.Adam(lr=1e-3)
#optimizer = keras.optimizers.Adam(lr=2.5e-4)
optimizer = keras.optimizers.Adam(learning_rate=lr_optimizer)  # `learning_rate` replaces the deprecated `lr` argument name
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
#pictorials = np.array([pictorial_state(s) for s in states])
#next_pictorials = np.array([pictorial_state(next_s) for next_s in next_states])
#next_Q_values = model.predict(next_pictorials)
next_Q_values = model.predict(next_states)
#max_next_Q_values = np.max(next_Q_values, axis=1)
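    # Double DQN: the online model selects the best next actions, the target network evaluates them.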
best_next_actions = np.argmax(next_Q_values, axis=1)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
#target_Q_values = rewards + (1 - dones) * discount_rate * max_next_Q_values
target_Q_values = rewards + (1 - dones) * discount_rate * next_best_Q_values
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
#all_Q_values = model(pictorials)
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
np.random.seed(42)
tf.random.set_seed(42)
from constants import n_allowed_steps
now = datetime.datetime.now()
now_str = now.strftime("%Y%m%d-%H%M")
script_name = __file__.split('.')[0]
save_path = os.path.join("models", script_name)
os.makedirs(save_path, exist_ok=True)
scores = []
scores_avg = []
best_score = 0
k = 10
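# Rolling window of the k most recent episode scores; its mean (score_avg) drives the model checkpointing below.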
scores_k_most_recent = deque([0]*k, maxlen=k)
best_score_avg = 1400
with open(os.path.join(save_path, f"log-{now_str}.txt"), 'w') as log:
for episode in range(n_episodes):
eliminated = []
mapID = np.random.randint(0, 5)
posID_x = np.random.randint(constants.width)
posID_y = np.random.randint(constants.height)
request = "map{},{},{},50,100".format(mapID, posID_x, posID_y)
env.send_map_info(request)
env.reset()
obs = env.get_state()
undiscounted_return = 0
for step in range(n_allowed_steps):
epsilon = max(1 - episode / n_epsilon_decay, 0.01)
obs, reward, done = play_one_step(env, obs, epsilon)
undiscounted_return += reward
if done:
break
score = env.state.score
scores.append(score)
scores_k_most_recent.append(score)
#score_avg = np.mean(scores_k_most_recent) / k
score_avg = round(np.mean(scores_k_most_recent), 1)
scores_avg.append(score_avg)
#if score > best_score:
if score_avg > best_score_avg:
#best_weights = model.get_weights()
best_score_avg = score_avg
#best_score = score
#model.save(os.path.join(save_path, f"episode-{episode+1}-gold-{env.state.score}-avg-{score_avg:4.2f}-step-{step+1}-{now_str}.h5"))
model.save(os.path.join(save_path, f"avg-{score_avg:07.2f}-episode-{episode+1}-{__file__.split('.')[0]}-gold-{env.state.score}-step-{step+1}-{now_str}.h5"))
message = "(Episode {: 5d}/{}) Gold {: 4d} avg {: 8.1f} undisc_return {: 6d} step {: 3d} eps: {:.2f} ({})\n".format(episode+1, n_episodes, env.state.score, score_avg, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
print(message, end='')
log.write(message)
#if episode > 500:
if episode > n_episodes_buf_fill:
training_step(batch_size)
if episode % n_episodes_buf_fill == 0:
target.set_weights(model.get_weights())
#np.save(f"scores-{now_str}", np.array(scores))
#np.save(f"scores-N-scores_avg-{now_str}", np.array([scores, scores_avg]))
np.save(f"scores-N-scores_avg-{__file__.split('.')[0]}-{now_str}", np.array([scores, scores_avg]))
| 37.157155
| 271
| 0.579297
|
c987e2b17fcae547a07d7501a1414cbbf4afa891
| 1,421
|
py
|
Python
|
PiCN/Layers/NFNLayer/R2C/test/test_TimeoutR2CClient.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/NFNLayer/R2C/test/test_TimeoutR2CClient.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | 5
|
2020-07-15T09:01:42.000Z
|
2020-09-28T08:45:21.000Z
|
PiCN/Layers/NFNLayer/R2C/test/test_TimeoutR2CClient.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test the TimeoutR2CClient"""
import unittest
from PiCN.Packets import Name, Content, Interest
from PiCN.Layers.NFNLayer.R2C import TimeoutR2CHandler
from PiCN.Layers.NFNLayer.Parser import DefaultNFNParser
from PiCN.Layers.NFNLayer.NFNComputationTable import NFNComputationList
class test_TimeoutR2CClient(unittest.TestCase):
def setUp(self):
self.r2cClient = TimeoutR2CHandler()
def test_create_r2c_message(self):
"""test the creation of r2c names"""
name = Name("/test/NFN")
new_name = self.r2cClient.R2C_create_message(name)
compare_name = Name("/test/R2C/KEEPALIVE/NFN")
self.assertEqual(compare_name, new_name)
def test_get_original_r2c_message(self):
"""test the creation of r2c names"""
name = Name("/test/R2C/KEEPALIVE/NFN")
compare_name = Name("/test/NFN")
new_name = self.r2cClient.R2C_get_original_message(name)
self.assertEqual(compare_name, new_name)
def test_handle_r2c_request(self):
"""test the handling of r2c messages"""
name = Name("/test/NFN")
comp_list = NFNComputationList(self.r2cClient, DefaultNFNParser())
comp_list.add_computation(name, 1, Interest(name))
r2c_request = self.r2cClient.R2C_create_message(name)
c = self.r2cClient.R2C_handle_request(r2c_request, comp_list)
self.assertEqual(c, Content(r2c_request, "Running"))
| 38.405405
| 74
| 0.711471
|
0fe923b5a5a38799973cf50ee3dfc0f076ec349a
| 2,070
|
py
|
Python
|
profiles_api/models.py
|
iboumiza/profiles-rest-api
|
dd1c161ee4e1a2dcd941f766c0755041040e23ed
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
iboumiza/profiles-rest-api
|
dd1c161ee4e1a2dcd941f766c0755041040e23ed
|
[
"MIT"
] | 5
|
2021-03-19T00:37:39.000Z
|
2021-09-22T18:42:36.000Z
|
profiles_api/models.py
|
BlueSkyTrading/profiles-rest-api
|
dd1c161ee4e1a2dcd941f766c0755041040e23ed
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Databaase model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Retrieve string representation of our user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text
| 28.75
| 64
| 0.680193
|
a29aa9eec9067c882e58068f0640613f655750e5
| 4,566
|
py
|
Python
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/send_veri_code_for_change_pwd_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/send_veri_code_for_change_pwd_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/send_veri_code_for_change_pwd_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class SendVeriCodeForChangePwdRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_request_id': 'str',
'accept_language': 'str',
'body': 'VerifyCodeSendDTOV1'
}
attribute_map = {
'x_request_id': 'X-Request-ID',
'accept_language': 'Accept-Language',
'body': 'body'
}
def __init__(self, x_request_id=None, accept_language=None, body=None):
"""SendVeriCodeForChangePwdRequest - a model defined in huaweicloud sdk"""
self._x_request_id = None
self._accept_language = None
self._body = None
self.discriminator = None
if x_request_id is not None:
self.x_request_id = x_request_id
if accept_language is not None:
self.accept_language = accept_language
if body is not None:
self.body = body
@property
def x_request_id(self):
"""Gets the x_request_id of this SendVeriCodeForChangePwdRequest.
        Request ID, used to identify a single request for problem tracking and locating. A UUID is recommended; if not provided, the backend generates one automatically.
:return: The x_request_id of this SendVeriCodeForChangePwdRequest.
:rtype: str
"""
return self._x_request_id
@x_request_id.setter
def x_request_id(self, x_request_id):
"""Sets the x_request_id of this SendVeriCodeForChangePwdRequest.
        Request ID, used to identify a single request for problem tracking and locating. A UUID is recommended; if not provided, the backend generates one automatically.
:param x_request_id: The x_request_id of this SendVeriCodeForChangePwdRequest.
:type: str
"""
self._x_request_id = x_request_id
@property
def accept_language(self):
"""Gets the accept_language of this SendVeriCodeForChangePwdRequest.
        Language parameter. Defaults to Chinese (zh_CN); use en_US for English.
:return: The accept_language of this SendVeriCodeForChangePwdRequest.
:rtype: str
"""
return self._accept_language
@accept_language.setter
def accept_language(self, accept_language):
"""Sets the accept_language of this SendVeriCodeForChangePwdRequest.
        Language parameter. Defaults to Chinese (zh_CN); use en_US for English.
:param accept_language: The accept_language of this SendVeriCodeForChangePwdRequest.
:type: str
"""
self._accept_language = accept_language
@property
def body(self):
"""Gets the body of this SendVeriCodeForChangePwdRequest.
:return: The body of this SendVeriCodeForChangePwdRequest.
:rtype: VerifyCodeSendDTOV1
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SendVeriCodeForChangePwdRequest.
:param body: The body of this SendVeriCodeForChangePwdRequest.
:type: VerifyCodeSendDTOV1
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SendVeriCodeForChangePwdRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.01227
| 92
| 0.595926
|
6a72e7890eabef6ccafe3e01a7183871ddf78d47
| 24,840
|
py
|
Python
|
pytypes/__init__.py
|
admdev8/pytypes
|
95d58a30a8ddb665500d8e88b13a94e0e0f76373
|
[
"Apache-2.0"
] | 189
|
2016-09-17T13:45:58.000Z
|
2022-03-12T10:53:42.000Z
|
pytypes/__init__.py
|
admdev8/pytypes
|
95d58a30a8ddb665500d8e88b13a94e0e0f76373
|
[
"Apache-2.0"
] | 104
|
2017-02-23T16:43:18.000Z
|
2022-03-17T17:36:18.000Z
|
pytypes/__init__.py
|
admdev8/pytypes
|
95d58a30a8ddb665500d8e88b13a94e0e0f76373
|
[
"Apache-2.0"
] | 21
|
2017-02-17T08:05:12.000Z
|
2021-12-08T11:22:15.000Z
|
# Copyright 2017 Stefan Richthofer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created on 12.12.2016
"""
pytypes main package.
This file provides some behavioral flags and options you can modify to
control various aspects of pytypes.
Attributes
----------
version : str
Version of this pytypes distribution as a string.
checking_enabled : bool
Flag to enable or disable runtime typechecking.
Default: True, unless -o is set.
Note that you cannot change this flag later on. You must specify
this right after first import of pytypes, because typecheck decorators
are applied on function definition time and install wrapper functions.
typelogging_enabled : bool
Flag to enable or disable typelogging.
Default: True
Note that you cannot change this flag later on. You must specify
this right after first import of pytypes, because typelogging decorators
are applied on function definition time and install wrapper functions.
do_logging_in_typechecked : bool
Let the typechecked-decorator also perform typelogging.
Default: False
In contrast to checking_enabled and typelogging_enabled, this can be
switched on and off at any time.
global_typechecked_decorator : bool
Flag indicating global typechecking mode via decorators.
Default: False
Every function or method with type annotation is typechecked now.
Will affect all functions and methods imported after this flag
was set. Use enable_global_typechecked_decorator for a retrospective option.
Does not work if checking_enabled is false.
Does not work reliably if checking_enabled has ever been set to
false during current run.
global_auto_override_decorator : bool
Flag indicating global auto_override mode via decorators.
Default: False
Every method with type annotation that also has a parent method
with type annotation is now checked for type consistency with its
parent.
Will affect all functions and methods imported after this flag
was set. Use enable_global_auto_override_decorator for a retrospective option.
global_annotations_decorator : bool
Flag indicating global annotation mode via decorators.
Default: False
Methods with typestring will have type hints parsed from that
string and get them attached as __annotations__ attribute.
Methods with either a typestring or ordinary type annotations in
a stubfile will get that information attached as __annotations__
attribute. Behavior in case of collision with previously (manually)
attached __annotations__ can be controlled using the flags
annotations_override_typestring and annotations_from_typestring.
Will affect all methods imported after this flag
was set. Use enable_global_annotations_decorator for a retrospective option.
global_typelogged_decorator : bool
Flag indicating global typelog mode via decorators.
Default: False
Every function and method call is recorded. The observed type
information can be written into stubfiles by calling dump_cache.
Will affect all methods imported after this flag
was set. Use enable_global_typelogged_decorator for a retrospective option.
global_typechecked_profiler : bool
Flag indicating global typechecking mode via profiler.
Default: False
Read-only flag. Use enable_global_typechecked_profiler to change it.
global_typelogged_profiler : bool
Flag indicating global typelog mode via profiler.
Default: False
Read-only flag. Use enable_global_typelogged_profiler to change it.
warning_mode : bool
Flag indicating that typecheck errors shall be raised as warnings.
Default: False
warn_argnames : bool
Flag indicating that warnings for non-idiomatic names of first
argument of methods and classmethods shall be raised.
Idiomatic names would be 'self' and 'cls' respectively.
Default: True
check_override_at_runtime : bool
Flag indicating override consistency is checked at runtime.
Default: False
check_override_at_class_definition_time : bool
Flag indicating override consistency is checked at class definition time.
Default: True
always_check_parent_types : bool
Lets typechecked decorator also apply check like done in auto_override.
Default: True
If true, typechecked decorator always checks type consistency with
type-annotated parent methods if any exist.
check_callables : bool
Turns callables into typechecked wrappers.
Default: True
If true, callables that pass a typecheck decorator are passed on wrapped
into another typechecker that checks calls according to type info from a
Callable type object. Will only be applied if such type information exists.
check_iterables : bool
Turns iterables into typechecked wrappers.
Default: True
If true, iterables that pass a typecheck decorator are passed on wrapped
into another typechecker that checks elements returned by iterators according
to type info from an Iterable type object.
Will only be applied if such type information exists.
check_generators : bool
Turns generators into typechecked wrappers.
Default: True
If true, generators that pass a typecheck decorator are passed on wrapped
into another typechecker that checks elements returned by yield, etc. according
to type info from an Generator type object.
Will only be applied if such type information exists.
check_unbound_types : bool
If true, treat missing parameters as unknown.
Default: True
Tells pytypes to actually attempt typechecking of unbound types, e.g
things like is_subtype(List[Any], list).
If false such checks are prohibited.
If true, missing parameters are treated as unknown, which in turn is
treated according to strict_unknown_check flag.
strict_unknown_check : bool
Controls the meaning of unknown parameters.
Default: False
If false, treat unknown parameters somewhat like Any.
If true (i.e. strict mode), treat unknown parameters
somewhat like 'nothing', because no assumptions can be made.
apply_numeric_tower : bool
Lets pytypes treat int as subtype of float as subtype of complex
Default: True
If true, numeric tower like described in
https://www.python.org/dev/peps/pep-0484/#the-numeric-tower
is applied to runtime typechecking and typelogging.
covariant_Mapping : bool
For runtime checking, treat Mapping-types as covariant.
Default: True
For runtime checking it is usually okay to treat Mapping-types as covariant,
given that a Mapping here wouldn't accept every value of proper type anyway.
(Unlike a mathematical mapping that accepts all values from a certain set.)
Note that we cannot treat the key type as contravariant as one might expect,
because in Python Mappings are Iterables over the key type.
infer_default_value_types : bool
Lets pytypes take type information from default values into account.
Default: True
If true, lets pytypes apply deep_type on default values of functions and
methods. Will only be applied to parameters without type annotation.
The default values are obtained via inspect.getargspec (Python 2.7) or
inspect.getfullargspec (Python 3.x).
annotations_override_typestring : bool
A manually inserted __annotations__ will override a typestring.
Default: False
annotations_from_typestring : bool
Lets typechecked decorator work like annotations decorator.
Default: False
If true, typechecked decorator will automatically attach parsed
typestrings as __annotations__ to the according function or method.
Won't be applied if annotations_override_typestring is true.
strict_annotation_collision_check : bool
Prohibits to have __annotations__ and typestring at the same time.
Default: False
According to
https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
__annotations__ and typestring must not be present for the same
function or method at the same time. By default pytypes does not
enforce this rule, but instead asserts equivalence of such concurring
type hints.
If this flag is true, pytypes will prohibit multiple type hints.
tp_comment_parser_import_typing : bool
Lets type comment parser implicitly import typing on parsing.
Default: True
With this flag enabled it is not necessary for modules with type comments
to import the typing module. For usual production mode with typechecking
disabled, the typing module would be an unnecessary and undesired import.
default_typecheck_depth : int
Specifies maximal recursion depth of deep_type.
Default: 10
Default maximal recursion depth for inferring a structured type of
a given object.
deep_type_samplesize : int
The number of elements pytypes considers when it determines the element
type of a list, set or dict.
Default: -1
When it builds a List, Set or Dict type from a given list, set or dict,
pytypes considers all elements within by default to determine the element
type. For larger data amounts one might want to base this procedure on a
smaller, somewhat randomly drawn set of elements.
-1 lets pytypes always evaluate the whole list, set or dict, while other
positive values let it only check a somewhat random sample of that size.
canonical_type_str : bool
Forces type_util.type_str to sort parameters of Unions.
Default: True
While the order of type parameters of a Union is arbitrary, it might be
desirable to obtain a canonical type string that properly reflects equality
of the same Union with different parameter order. This is achieved by sorting
the string representations of the parameters.
Set this flag to False, if a representation of the internal type structure is
desired.
Note that this flag not only affects string representations of Unions, but of
every type that contains a Union directly or indirectly as a parameter.
dump_typelog_at_exit : bool
Lets typelogger dump typelog at exit.
Default: True
dump_typelog_at_exit_python2 : bool
Lets typelogger dump Python 2 style typelog at exit.
If used in combination with dump_typelog_at_exit, two logs are dumped -
one in Python 2 style, one in Python 3 style.
Default: False
clean_traceback : bool
If true, hides pytypes' internal part of exception traceback output.
Default: True
Use this variable only for reading. Use enable_clean_traceback function to
modify it. Disable clean_traceback, if you want to trace a bug in pytypes.
import_hook_enabled : bool
Required for some edgy situations with stubfiles and forward declarations.
Default: True
This lets pytypes hook into import.
In case this is not desired, use this flag to disable it.
Setting this flag only has effect right after first import of pytypes.
Note that with this flag disabled, global decorator mode won't work.
python3_5_executable : str
Python command used to parse Python 3.5 style stubfiles.
Default: 'python3'
Must be >= 3.5.0.
pytypes comes with the stubfile converter stub_2_convert that creates
Python 2.7 compliant stubfiles. The converter itself requires Python 3.5
to run. On Python 2.7 pytype can use this command to convert Python 3.5
stubfiles to Python 2.7, so they can be used in current execution then.
stub_path : List[str]
Search-path for stubfiles.
Default: []
Additionally to this list of paths, pytypes will look for stubfiles on
the pythonpath.
stub_gen_dir : Optional[str]
Directory to collect generated stubs.
Default: None
When pytypes uses stub_2_convert, the output files will land in this folder.
If None, tempfile.gettempdir() is used.
default_indent : str
Indentation used by typelogger when generating stubfiles.
Default: '\t'
default_typelogger_path : str
Directory where typelogger places generated stubs.
Default: 'typelogger_output'
"""
_version = '>=1.0b5' # Only used as fallback for jython-standalone.jar
# Needs to be imported before touching the Python import machinery
try:
import pkg_resources
except ImportError:
pass
import typing
typing_3_7 = False
try:
from typing import ForwardRef
typing_3_7 = True
except: pass
from .typechecker import _install_import_hook
checking_enabled = False # Will be enabled by default, unless -o is set
# Note that you cannot change this flag later on. You must specify
# this right after first import of pytypes.
typelogging_enabled = True
# Note that you cannot change this flag later on. You must specify
# this right after first import of pytypes.
do_logging_in_typechecked = False # Let the typechecked-decorator also perform logging
typelogger_include_typehint = True # Let typelogger also include info from existing typehint
global_typechecked_decorator = False
global_auto_override_decorator = False
global_annotations_decorator = False
global_typelogged_decorator = False
global_typechecked_profiler = False
global_typelogged_profiler = False
_global_type_agent = None
# Some behavior flags:
warning_mode = False
warn_argnames = True
check_override_at_runtime = False
check_override_at_class_definition_time = True
always_check_parent_types = True
check_callables = True
check_iterables = True
check_generators = True
check_unbound_types = True # if true, treat missing parameters as unknown
strict_unknown_check = False # if false, treat unknown parameters somewhat like Any
apply_numeric_tower = True # i.e. int is subtype of float is subtype of complex
# For runtime checking it is usually okay to treat Mapping-types as covariant,
# given that a Mapping here wouldn't accept every value of proper type anyway.
# (Unlike a mathematical mapping that accepts all values from a certain set.)
# Note that we cannot treat the key type as contravariant as one might expect,
# because in Python Mappings are Iterables over the Key-type.
covariant_Mapping = True
infer_default_value_types = True
annotations_override_typestring = False
annotations_from_typestring = False
strict_annotation_collision_check = False
tp_comment_parser_import_typing = True
default_typecheck_depth = 10
# -1 lets pytypes always evaluate the whole list, set or dict
deep_type_samplesize = -1
canonical_type_str = True
dump_typelog_at_exit = True
dump_typelog_at_exit_python2 = False
clean_traceback = True
import_hook_enabled = True
python3_5_executable = 'python3' # Must be >= 3.5.0
def enable_checking(flag = True):
"""Convenience function to set the checking_enabled flag. Intended
for use in an assert statement, so the call depends on -o flag.
"""
global checking_enabled
checking_enabled = flag
return checking_enabled
def enable_global_typechecked_decorator(flag = True, retrospective = True):
"""Enables or disables global typechecking mode via decorators.
See flag global_typechecked_decorator.
In contrast to setting the flag directly, this function provides
a retrospective option. If retrospective is true, this will also
affect already imported modules, not only future imports.
Does not work if checking_enabled is false.
Does not work reliably if checking_enabled has ever been set to
false during current run.
"""
global global_typechecked_decorator
global_typechecked_decorator = flag
if import_hook_enabled:
_install_import_hook()
if global_typechecked_decorator and retrospective:
_catch_up_global_typechecked_decorator()
return global_typechecked_decorator
def enable_global_auto_override_decorator(flag = True, retrospective = True):
"""Enables or disables global auto_override mode via decorators.
See flag global_auto_override_decorator.
In contrast to setting the flag directly, this function provides
a retrospective option. If retrospective is true, this will also
affect already imported modules, not only future imports.
"""
global global_auto_override_decorator
global_auto_override_decorator = flag
if import_hook_enabled:
_install_import_hook()
if global_auto_override_decorator and retrospective:
_catch_up_global_auto_override_decorator()
return global_auto_override_decorator
def enable_global_annotations_decorator(flag = True, retrospective = True):
"""Enables or disables global annotation mode via decorators.
See flag global_annotations_decorator.
In contrast to setting the flag directly, this function provides
a retrospective option. If retrospective is true, this will also
affect already imported modules, not only future imports.
"""
global global_annotations_decorator
global_annotations_decorator = flag
if import_hook_enabled:
_install_import_hook()
if global_annotations_decorator and retrospective:
_catch_up_global_annotations_decorator()
return global_annotations_decorator
def enable_global_typelogged_decorator(flag = True, retrospective = True):
"""Enables or disables global typelog mode via decorators.
See flag global_typelogged_decorator.
In contrast to setting the flag directly, this function provides
a retrospective option. If retrospective is true, this will also
affect already imported modules, not only future imports.
"""
global global_typelogged_decorator
global_typelogged_decorator = flag
if import_hook_enabled:
_install_import_hook()
if global_typelogged_decorator and retrospective:
_catch_up_global_typelogged_decorator()
return global_typelogged_decorator
def enable_global_typechecked_profiler(flag = True):
"""Enables or disables global typechecking mode via a profiler.
See flag global_typechecked_profiler.
Does not work if checking_enabled is false.
"""
global global_typechecked_profiler, _global_type_agent, global_typelogged_profiler
global_typechecked_profiler = flag
if flag and checking_enabled:
if _global_type_agent is None:
_global_type_agent = TypeAgent()
_global_type_agent.start()
elif not _global_type_agent.active:
_global_type_agent.start()
elif not flag and not global_typelogged_profiler and \
not _global_type_agent is None and _global_type_agent.active:
_global_type_agent.stop()
def enable_global_typelogged_profiler(flag = True):
"""Enables or disables global typelogging mode via a profiler.
See flag global_typelogged_profiler.
Does not work if typelogging_enabled is false.
"""
global global_typelogged_profiler, _global_type_agent, global_typechecked_profiler
global_typelogged_profiler = flag
if flag and typelogging_enabled:
if _global_type_agent is None:
_global_type_agent = TypeAgent()
_global_type_agent.start()
elif not _global_type_agent.active:
_global_type_agent.start()
elif not flag and not global_typechecked_profiler and \
not _global_type_agent is None and _global_type_agent.active:
_global_type_agent.stop()
def enable_clean_traceback(flag = True):
"""Activates traceback cleaning. This means that traceback of uncaught
TypeErrors does not include pytypes' internal calls for typechecking etc,
but instead focuses on the location of an ill-typed call itself.
"""
global clean_traceback
clean_traceback = flag
if clean_traceback:
_install_excepthook()
# This way we glue typechecking to activeness of the assert statement by default,
# no matter what conditions it depends on (or will depend on, e.g. currently -O flag).
assert(enable_checking())
def _detect_issue351():
"""Detect if github.com/python/typing/issues/351 applies
to the installed typing-version.
"""
class Tuple(typing.Generic[typing.T]):
pass
res = Tuple[str] == typing.Tuple[str]
del Tuple
return res
if _detect_issue351():
# monkeypatch the issue away...
_GenericMeta__new__ = typing.GenericMeta.__new__
def _GenericMeta__new__351(cls, *args, **kwds):
origin = None
if len(args) >= 6:
# origin is at index 5 in original signature:
# name, bases, namespace, tvars=None, args=None, origin=None, extra=None, orig_bases=None
origin = args[5]
elif 'origin' in kwds:
origin = kwds['origin']
res = _GenericMeta__new__(cls, *args, **kwds)
# we correct the hash according to the fix in https://github.com/python/typing/pull/371
res.__tree_hash__ = (hash(res._subs_tree()) if origin else
super(typing.GenericMeta, res).__hash__())
return res
typing.GenericMeta.__new__ = staticmethod(_GenericMeta__new__351)
# Search-path for stubfiles.
stub_path = []
# Directory to collect generated stubs. If None, tempfile.gettempdir() is used.
stub_gen_dir = None
# Used if get_indentation doesn't yield a result.
default_indent = '\t'
default_typelogger_path = 'typelogger_output'
# typelogger uses this to indent typestrings in output files.
# Uses get_indentation if None.
# typelogger_indent = None # currently uses default_indent always
# Monkeypatch Generic to circumvent type erasure:
# (Only applies to legacy versions of typing.
# Existence of '_generic_new' is suitable to detect whether this
# monkeypatch is required, i.e. in typing-3.5.2.2.)
if not hasattr(typing, '_generic_new') and not typing_3_7:
# This former approach has issues if self.__orig_class__ is needed in __init__:
# _Generic__new__ = typing.Generic.__new__
# def __Generic__new__(cls, *args, **kwds):
# res = _Generic__new__(cls, *args, **kwds)
# res.__orig_class__ = cls
# return res
def __Generic__new__(cls, *args, **kwds):
# this is based on Generic.__new__ from typing-3.5.2.2
if cls.__origin__ is None:
obj = cls.__next_in_mro__.__new__(cls)
obj.__orig_class__ = cls
else:
origin = typing._gorg(cls)
obj = cls.__next_in_mro__.__new__(origin)
obj.__orig_class__ = cls
obj.__init__(*args, **kwds)
return obj
typing.Generic.__new__ = __Generic__new__
# We import some public API for central access:
from .exceptions import TypeCheckError, InputTypeError, ReturnTypeError, TypeWarning, \
InputTypeWarning, ReturnTypeWarning, OverrideError, TypeSyntaxError, ForwardRefError
from .type_util import deep_type, is_builtin_type, has_type_hints, resolve_fw_decl, \
type_str, get_types, get_type_hints, is_iterable, get_iterable_itemtype, get_generator_type, \
get_generator_yield_type, is_Union, get_Union_params, get_Tuple_params, is_Tuple_ellipsis, \
get_Callable_args_res, get_Generic_itemtype, get_Mapping_key_value, get_Generic_parameters,\
get_arg_for_TypeVar, _issubclass as is_subtype, _isinstance as is_of_type, annotations, \
get_member_types, Empty, _catch_up_global_annotations_decorator, TypeAgent, restore_profiler, \
is_Tuple, is_Generic, is_Callable, _extra_dict as abc2typing_dict, _bases as type_bases, \
get_Generic_type, get_orig_class
from .util import getargspecs, get_staticmethod_qualname, get_class_qualname, mro, \
get_class_that_defined_method, is_method, is_classmethod, _pytypes_excepthook, \
_install_excepthook
from .stubfile_manager import get_stub_module, as_stub_func_if_any
from .typechecker import typechecked, typechecked_module, no_type_check, \
is_no_type_check, override, check_argument_types, auto_override, \
_catch_up_global_auto_override_decorator, _catch_up_global_typechecked_decorator, \
TypeChecker, _checkfunctype, _checkfuncresult
from .typelogger import dump_cache, log_type, typelogged, typelogged_module, \
_catch_up_global_typelogged_decorator, _register_logged_func, TypeLogger
enable_clean_traceback()
# Some exemplary overrides for this modules's global settings:
# Set custom Python3-executable like this:
#pytypes.python3_5_executable = '/data/workspace/linux/Python-3.5.2/python'
# Set custom directory to store generated stubfiles like this:
# Unlike in tmp directory mode, these are kept over distinct runs.
#stub_gen_dir = '../py2_stubs'
| 40.456026
| 101
| 0.765781
|
98bcb3c655f64f0cb1417ea6a35e58c36c8a7ae3
| 89
|
py
|
Python
|
src/example1.py
|
rummens1337/neural-network-tensorflow
|
95ec089d9c0671b7cc171f07a50c92e377a37a3c
|
[
"MIT"
] | null | null | null |
src/example1.py
|
rummens1337/neural-network-tensorflow
|
95ec089d9c0671b7cc171f07a50c92e377a37a3c
|
[
"MIT"
] | null | null | null |
src/example1.py
|
rummens1337/neural-network-tensorflow
|
95ec089d9c0671b7cc171f07a50c92e377a37a3c
|
[
"MIT"
] | 1
|
2019-12-29T11:30:38.000Z
|
2019-12-29T11:30:38.000Z
|
from generic_neural_network import GenericNeuralNetwork
network = GenericNeuralNetwork()
| 29.666667
| 55
| 0.88764
|
555fd24d98fea83f9f96be08313bd76af9502687
| 1,289
|
py
|
Python
|
setup.py
|
sCrypt-Inc/py-scryptlib
|
58aa2d8dca36b42ea032825f1bfc01e2d9a65424
|
[
"MIT"
] | 7
|
2021-11-14T20:10:29.000Z
|
2022-02-26T10:05:07.000Z
|
setup.py
|
sCrypt-Inc/py-scryptlib
|
58aa2d8dca36b42ea032825f1bfc01e2d9a65424
|
[
"MIT"
] | 1
|
2021-08-12T16:50:42.000Z
|
2021-09-08T20:13:26.000Z
|
setup.py
|
sCrypt-Inc/scryptlib-python
|
4df358e89231bf9c9698240d17e06f04b61218d3
|
[
"MIT"
] | 1
|
2021-10-16T23:46:23.000Z
|
2021-10-16T23:46:23.000Z
|
#!/usr/bin/env python
import codecs
from setuptools import setup
from os import path
def read(rel_path):
here = path.abspath(path.dirname(__file__))
with codecs.open(path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else '\''
return line.split(delim)[1]
else:
raise RuntimeError('Unable to find version string.')
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(long_description=long_description,
long_description_content_type="text/markdown",
name='scryptlib',
version=get_version('scryptlib/__init__.py'),
description='Python SDK for integration of sCrypt Bitcoin SV smart contracts.',
keywords='scrypt scryptlib bitcoin bsv blockchain',
author='Kala',
url='https://www.github.com/sCrypt-Inc/py-scryptlib',
packages=['scryptlib'],
install_requires=['bitcoinX==0.6.0'],
python_requires='>=3.7',
# Dependencies to run all tests.
extras_require = {
'testing': ['pytest', 'rabin', 'ecdsa']
}
)
| 29.295455
| 85
| 0.650892
|
a7ae2c5f7b33358803d4687636faada2fe4c0add
| 5,663
|
py
|
Python
|
model/nmt_commands.py
|
davidcliebman68/awesome-chatbot
|
64d3e201d519946c7c66a64b5143527d18506c3d
|
[
"MIT"
] | 1
|
2020-02-10T19:20:15.000Z
|
2020-02-10T19:20:15.000Z
|
model/nmt_commands.py
|
davidcliebman68/awesome-chatbot
|
64d3e201d519946c7c66a64b5143527d18506c3d
|
[
"MIT"
] | null | null | null |
model/nmt_commands.py
|
davidcliebman68/awesome-chatbot
|
64d3e201d519946c7c66a64b5143527d18506c3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
from subprocess import Popen
import re
class Commands:
def __init__(self):
self.erase_history = False
self.use_async = False
self.print_to_screen = False
self.url_search = 'https://www.google.com/search?q='
self.url_youtube = 'https://www.youtube.com/results?search_query='
self.launch_google_chrome = 'google-chrome --app='
self.launch_firefox = 'firefox --search '
self.launch_rhythmbox = 'rhythmbox '
self.launch_mail = 'thunderbird'
self.launch_office = 'libreoffice'
self.launch_file = 'nautilus'
self.launch_terminal = 'gnome-terminal'
self.text_commands = {
'play': 'any',
'media': 'any',
'google': 'search',
'search': 'search',
'song': 'music',
'video': 'video',
'movie': 'video',
'music': 'music',
'youtube': 'video',
'mail': 'mail',
'letter':'mail',
'letters':'mail',
'email': 'mail',
'emails':'mail',
'thunderbird':'mail',
'office': 'office',
'libreoffice': 'office',
'file':'file',
'files': 'file',
'directory': 'file',
'directories': 'file',
'terminal': 'terminal',
'firefox': 'firefox'
}
self.command_dict = {
'search': self.launch_google_chrome + self.url_search,
'video': self.launch_google_chrome + self.url_youtube,
'music': self.launch_rhythmbox,
'mail': self.launch_mail,
'office': self.launch_office,
'file': self.launch_file,
'terminal': self.launch_terminal,
'firefox': self.launch_firefox
}
self.command_string = ''
self.p = None
def re(self,i):
return re.sub('[.?!:;,]','', i)
def is_command(self,i):
i = self.re(i)
output = False
for x in i.split():
for xx in self.text_commands:
if x.strip().lower() == xx.strip().lower():
output = True
return output
def strip_command(self,i):
i = self.re(i)
i = i.split()
ii = i[:]
for x in i:
for xx in self.text_commands:
if x.strip().lower() == xx.strip().lower():
ii.remove(x)
return ii
    def decide_command(self, i):
i = self.re(i)
chosen = {}
any = False
for xx in self.text_commands.values():
if self.print_to_screen: print(xx,'xx')
chosen[xx] = 0
output = False
i = i.split()
ii = i[:]
if self.print_to_screen:
print(self.text_commands)
print(chosen)
for x in i:
for xx in self.text_commands:
if x.strip().lower() == xx.strip().lower() : #and x.strip().lower() in self.text_commands:
output = True
if self.text_commands[xx] in chosen:
chosen[self.text_commands[xx]] += 1
ii.remove(x)
if self.print_to_screen: print(chosen[self.text_commands[xx]], xx, x)
i = ii
#if self.print_to_screen: print(chosen)
if self.command_string == '':
high = 0
old_high = 0
for x in chosen:
high = chosen[x]
if high > old_high and x != 'any':
self.command_string = self.command_dict[x]
old_high = high
elif high > old_high and x == 'any':
any = True
if self.print_to_screen: print(chosen)
if self.command_string == '' and any is True:
self.command_string = self.command_dict['search']
if (
self.command_string == self.command_dict['video'] or
self.command_string == self.command_dict['search'] or
self.command_string == self.command_dict['firefox']
):
self.command_string += '+'.join(i)
return output
def do_command(self, i):
erase = False
self.command_string = ''
if isinstance(i,list): i = ' '.join(i)
i = self.re(i)
#if len(self.command_string) == 0:
        self.decide_command(i)
if self.print_to_screen: print(self.command_string)
if not self.use_async:
self.launch_sync(self.command_string)
else:
self.launch_async(self.command_string)
if self.erase_history:
erase = True
return erase
def launch_sync(self,i):
## if the program doesn't exist, this command will fail but chatbot will continue.
os.system(i)
pass
def launch_async(self, i):
i = i.split()
self.p = Popen(i)
pass
if __name__ == '__main__':
c = Commands()
command1 = 'play media'
command2 = 'play music like video music like a movie of the music band youtube.'
c.print_to_screen = True
z = c.is_command(command1)
for x in range(2):
if len(c.strip_command(command1)) > 0:
#command = c.strip_command(command)
print(command1, x, 'here1')
c.do_command(command1)
exit()
        elif x == 1:
#command = c.strip_command(command)
print(command2, x, 'here2')
c.do_command(command2)
print('use previous command also.')
pass
| 30.945355
| 106
| 0.511037
|
91cfedd83b2774b347adf27052b2e7bae93d6031
| 8,652
|
py
|
Python
|
source/util_data.py
|
kibernetika-ai/ciagan
|
6a4c1f09eb209b507f9ed6eee9366a63093d70b9
|
[
"MIT"
] | null | null | null |
source/util_data.py
|
kibernetika-ai/ciagan
|
6a4c1f09eb209b507f9ed6eee9366a63093d70b9
|
[
"MIT"
] | null | null | null |
source/util_data.py
|
kibernetika-ai/ciagan
|
6a4c1f09eb209b507f9ed6eee9366a63093d70b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 14:54:12 2018
@author: maximov
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
import os
from os import listdir, mkdir
from os.path import isfile, join, isdir, exists
import numpy as np
import importlib
import pickle
import random
import math
from PIL import Image
from collections import defaultdict
import cv2
import numbers
class ImageDataset(torch.utils.data.Dataset):
"""Focal place dataset."""
def __init__(self, root_dir, label_num=1200, transform_fnc=transforms.Compose([transforms.ToTensor()]),
img_size=128, flag_init=True, flag_sample=2, flag_augment=True):
self.root_dir = root_dir
with open(os.path.join(root_dir, 'landmarks.pkl'), 'rb') as f:
self.landmarks = pickle.load(f)
self.transform_fnc = transform_fnc
if isinstance(img_size, tuple):
self.img_shape = img_size
else:
self.img_shape = (img_size, img_size)
self.flag_sample = flag_sample
self.root_img = root_dir
self.im_label, self.im_paths, self.im_index = [], [], []
self.flag_augment = flag_augment
it_j = 0
for person_id in os.listdir(root_dir):
if not os.path.isdir(os.path.join(root_dir, person_id)):
continue
imglist_all = [f for f in listdir(os.path.join(root_dir, person_id)) if f[-4:] in [".jpg", ".png"]]
# imglist_all_int = [int(x[:-4]) for x in imglist_all]
# imglist_all_int.sort()
# imglist_all = [(str(x).zfill(6) + ".jpg") for x in imglist_all_int]
# imglist_all = sorted(imglist_all)
self.im_label += [int(person_id)] * len(imglist_all)
self.im_paths += imglist_all
self.im_index += [it_j] * len(imglist_all)
it_j += 1
print("Dataset initialized")
def __len__(self):
return len(self.im_label)
def get_landmark_img(self, img, landmark):
canvas = np.ones_like(img) * 255
landmark = landmark[:, :2]
chin = landmark[0:17]
left_brow = landmark[17:22]
right_brow = landmark[22:27]
left_eye = landmark[36:42]
right_eye = landmark[42:48]
nose1 = landmark[27:31]
nose2 = landmark[31:36]
mouth = landmark[48:60]
mouth_internal = landmark[60:68]
lines = [
chin,
mouth_internal,
nose1
]
color = (0, 0, 50)
for line in lines:
cv2.polylines(
canvas,
np.int32([line]), False,
color, thickness=1, lineType=cv2.LINE_AA
)
return canvas
def get_mask_img(self, img, landmark):
landmark = landmark[:, :2]
canvas = np.ones_like(img) * 255
chin = landmark[0:17]
cv2.fillPoly(
canvas, np.int32([chin]), (0, 0, 0),
)
return canvas
def denorm(self, landmarks, img):
landmarks = landmarks.copy()
landmarks[:, 0] = landmarks[:, 0] * img.shape[1]
landmarks[:, 1] = landmarks[:, 1] * img.shape[0]
return landmarks
def load_img(self, im_path, im_array=None):
if im_array is not None:
im = Image.fromarray(im_array)
else:
im = Image.open(im_path)
w, h = im.size
if self.flag_augment:
offset_h = 0.1
center_h = h / 2 + offset_h * h
center_w = w / 2
min_sz, max_sz = w / 2, (w - center_w) * 1.5
diff_sz, crop_sz = (max_sz - min_sz) / 2, min_sz / 2
img_res = im.crop(
(int(center_w - crop_sz - diff_sz * self.crop_rnd[0]), int(center_h - crop_sz - diff_sz * self.crop_rnd[1]),
int(center_w + crop_sz + diff_sz * self.crop_rnd[2]), int(center_h + crop_sz + diff_sz * self.crop_rnd[3])))
else:
offset_h = 0.1
center_h = h / 2 + offset_h * h
center_w = w / 2
min_sz, max_sz = w / 2, (w - center_w) * 1.5
diff_sz, crop_sz = (max_sz - min_sz) / 2, min_sz / 2
img_res = im.crop(
(int(center_w - crop_sz - diff_sz),
int(center_h - crop_sz - diff_sz),
int(center_w + crop_sz + diff_sz),
int(center_h + crop_sz + diff_sz)))
img_res = img_res.resize(self.img_shape, resample=Image.LANCZOS)
return self.transform_fnc(img_res)
def __getitem__(self, idx):
im_clr, im_lndm, im_msk, im_ind = [], [], [], []
if self.flag_sample == 1:
idx = [idx]
for k_iter in range(self.flag_sample):
self.crop_rnd = [random.random(), random.random(), random.random(), random.random()]
im_clr_path = os.path.join(self.root_dir, str(self.im_label[idx[k_iter]]), self.im_paths[idx[k_iter]])
img = cv2.cvtColor(cv2.imread(im_clr_path), cv2.COLOR_RGB2BGR)
clr_img = self.load_img(im_clr_path, im_array=img)
im_clr.append(clr_img)
key_path = os.path.join(im_clr_path.split('/')[-2], os.path.basename(im_clr_path))
landmark = self.landmarks[key_path]
landmark = self.denorm(landmark, img)
lndm_img = self.load_img(None, im_array=self.get_landmark_img(img, landmark))
im_lndm.append(lndm_img)
msk = ((1 - self.load_img(None, im_array=self.get_mask_img(img, landmark))) > 0.2)
im_msk.append(msk)
im_ind.append(self.im_index[idx[k_iter]])
return im_clr, im_lndm, im_msk, im_ind
def load_data(DATA_PATH, WORKERS_NUM, BATCH_SIZE, IMG_SIZE, FLAG_DATA_AUGM, LABEL_NUM, mode_train=True):
##### Data loaders
data_dir = DATA_PATH
if mode_train:
dataset_train = ImageDataset(root_dir=data_dir, label_num=LABEL_NUM, transform_fnc=transforms.Compose([transforms.ToTensor()]),
img_size=IMG_SIZE, flag_augment=FLAG_DATA_AUGM)
total_steps = int(len(dataset_train) / BATCH_SIZE)
ddict = defaultdict(list)
for idx, label in enumerate(dataset_train.im_label):
ddict[label].append(idx)
list_of_indices_for_each_class = []
for key in ddict:
list_of_indices_for_each_class.append(ddict[key])
loader_train = torch.utils.data.DataLoader(dataset=dataset_train, num_workers=WORKERS_NUM, batch_size=BATCH_SIZE, shuffle=False, sampler=SiameseSampler(list_of_indices_for_each_class, BATCH_SIZE, total_steps))
print("Total number of steps per epoch:", total_steps)
print("Total number of training samples:", len(dataset_train))
return loader_train, total_steps, LABEL_NUM
else:
label_num = 363
dataset_test = ImageDataset(root_dir=data_dir, label_num=label_num,transform_fnc=transforms.Compose([transforms.ToTensor()]), img_size = IMG_SIZE)
loader_test = torch.utils.data.DataLoader(dataset=dataset_test, num_workers=1, batch_size=1, shuffle=False)
print("Total number of test samples:", len(dataset_test))
return loader_test, len(dataset_test), label_num
class SiameseSampler(Sampler):
def __init__(self, l_inds, batch_size, iterations_per_epoch):
self.l_inds = l_inds
self.max = -1
self.batch_size = batch_size
self.flat_list = []
self.iterations_per_epoch = iterations_per_epoch
def __iter__(self):
self.flat_list = []
for ii in range(int(self.iterations_per_epoch)):
# get half of the images randomly
sep = int(self.batch_size / 2)
for i in range(sep):
first_class = random.choice(self.l_inds)
second_class = random.choice(self.l_inds)
first_element = random.choice(first_class)
second_element = random.choice(second_class)
self.flat_list.append([first_element, second_element])
# get the last half as images from the same class
for i in range(sep, self.batch_size):
c_class = random.choice(self.l_inds)
first_element = random.choice(c_class)
second_element = random.choice(c_class)
self.flat_list.append([first_element, second_element])
random.shuffle(self.flat_list)
return iter(self.flat_list)
def __len__(self):
return len(self.flat_list)
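
# --- Usage sketch (not part of the original module) ---
# Minimal, hedged example of wiring the loader into a training loop. The
# dataset path and hyper-parameters are placeholders; the directory is assumed
# to follow the layout expected by ImageDataset above
# (<root>/<person_id>/*.jpg plus a landmarks.pkl file).
if __name__ == '__main__':
    loader_train, total_steps, label_num = load_data(
        DATA_PATH='./data/train_crops',  # hypothetical location
        WORKERS_NUM=4, BATCH_SIZE=8, IMG_SIZE=128,
        FLAG_DATA_AUGM=True, LABEL_NUM=1200, mode_train=True)
    for im_clr, im_lndm, im_msk, im_ind in loader_train:
        # each value is a list of length flag_sample (2 by default), batched by the DataLoader
        print(im_clr[0].shape, im_lndm[0].shape, im_ind)
        break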
| 36.661017
| 217
| 0.606449
|
c426a5009047b5fc07c2bf74565247185923f1b3
| 1,595
|
py
|
Python
|
md2d.py
|
jfaubertin/KerasCuriosity
|
72b6b0763c3a83ecba8c054620bf5062b225fe40
|
[
"Apache-2.0"
] | 2
|
2019-05-15T05:33:34.000Z
|
2020-10-12T19:36:41.000Z
|
md2d.py
|
jfaubertin/KerasCuriosity
|
72b6b0763c3a83ecba8c054620bf5062b225fe40
|
[
"Apache-2.0"
] | 1
|
2020-11-30T21:17:21.000Z
|
2020-12-03T17:59:29.000Z
|
md2d.py
|
jfaubertin/KerasCuriosity
|
72b6b0763c3a83ecba8c054620bf5062b225fe40
|
[
"Apache-2.0"
] | 1
|
2020-11-25T13:28:52.000Z
|
2020-11-25T13:28:52.000Z
|
import gym
from gym import spaces
# Convert MultiDiscrete to Discrete so Keras-RL Agents will work
class MD2D_ActionWrapper(gym.ActionWrapper):
def __init__(self, env, buttons):
super(MD2D_ActionWrapper, self).__init__(env)
self.action_space = spaces.Discrete(len(buttons))
self.buttons = buttons
def action(self, action):
return self.buttons.get(action)
def reverse_action(self, action):
for k in self.buttons.keys():
if(self.buttons[k] == action):
                return k  # map the env-space button list back to its Discrete index
return 0
"""
Buttons must be passed to MD2D_ActionWrapper as a dictionary!
EXAMPLE:
buttons = {
0: [0, 0, 0, 0, 0, 0], # Do Nothing
1: [1, 0, 0, 0, 0, 0], # Up
2: [0, 1, 0, 0, 0, 0], # Left
3: [0, 0, 1, 0, 0, 0], # Down
4: [0, 0, 0, 1, 0, 0], # Right
5: [0, 0, 0, 0, 1, 0], # A
6: [0, 0, 0, 0, 0, 1], # B
7: [0, 0, 0, 0, 1, 1], # A + B
8: [1, 0, 0, 1, 0, 0], # Up + Right
9: [1, 1, 0, 0, 0, 0], # Up + Left
10: [0, 0, 1, 1, 0, 0], # Down + Right
11: [0, 1, 1, 0, 0, 0], # Down + Left
12: [1, 0, 0, 0, 1, 0], # Up + A
13: [0, 1, 0, 0, 1, 0], # Left + A
14: [0, 0, 1, 0, 1, 0], # Down + A
15: [0, 0, 0, 1, 1, 0], # Right + A
16: [1, 0, 0, 0, 0, 1], # Up + B
17: [0, 1, 0, 0, 0, 1], # Left + B
18: [0, 0, 1, 0, 0, 1], # Down + B
19: [0, 0, 0, 1, 0, 1], # Right + B
20: [1, 0, 0, 0, 1, 1], # Up + A+B
21: [0, 1, 0, 0, 1, 1], # Left + A+B
22: [0, 0, 1, 0, 1, 1], # Down + A+B
23: [0, 0, 0, 1, 1, 1], # Right + A+B
}
"""
| 30.673077
| 64
| 0.467712
|
839820f1f2b89f18b9cfb80770542aae7d5a4694
| 7,626
|
py
|
Python
|
parser.py
|
mbyra/awww
|
b8107f72e3b2ca49e30c81caeb962538d2867b8a
|
[
"MIT"
] | null | null | null |
parser.py
|
mbyra/awww
|
b8107f72e3b2ca49e30c81caeb962538d2867b8a
|
[
"MIT"
] | null | null | null |
parser.py
|
mbyra/awww
|
b8107f72e3b2ca49e30c81caeb962538d2867b8a
|
[
"MIT"
] | null | null | null |
import sqlite3
import jinja2
conn = sqlite3.connect('dane/db.sqlite3')
c = conn.cursor()
# function that generates the county (powiat) pages for a given constituency (okręg)
def generuj_powiaty(wojewodztwo, okreg):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "/home/marcin/PycharmProjects/aplikacjewww1/powiaty/powiat.html"
template = templateEnv.get_template(TEMPLATE_FILE)
# print("jestem w funkcji generuj okregi dla wojewodztwa", okreg)
lista_powiatow= []
for row in c.execute("SELECT subareas FROM main_commune WHERE area='" + str(okreg).upper() + "'"):
lista_powiatow.append(row)
for powiat in lista_powiatow:
# print(" jestem w powiecie nr", powiat[0])
# wyniki kazdego kandydata dla danego powiatu:
wyniki = [] # wyniki poszczególnych kandydatów w danym okregu, alfabetycznie
procenty = [] # procent głosów oddanych na kandydata w danym okregu, alfabetycznie
for row in c.execute("select "
"sum(grabowski), "
"sum(ikonowicz), "
"sum(kalinowski),"
"sum(korwin),"
"sum(krzaklewski),"
"sum(kwasniewski),"
"sum(lepper),"
"sum(lopuszanski),"
"sum(olechowski),"
"sum(pawlowski),"
"sum(walesa),"
"sum(wilecki)"
" from main_commune where subareas = " + str(powiat[0]) + " group by subareas"):
suma = sum(row)
for result in row:
wyniki.append(result)
                procenty.append(str(round(result/suma, 4) * 100)[:4]) # magic so it renders as e.g. 23.45% in the html
        # build the list of communes (gminy) belonging to the given county
gminy = []
for row in c.execute("SELECT DISTINCT name FROM main_commune WHERE subareas='" + str(powiat[0]) + "' AND area='" + str(okreg).upper() + "' AND county='" + str(wojewodztwo).upper() + "'"):
gminy.append(row[0])
print("wygenerowalem liste gmin dla powiatu",powiat, ":", gminy)
templateVars = {"wojewodztwo": wojewodztwo, "okreg": okreg, "powiat" : powiat[0], "wyniki" : wyniki, "procenty" : procenty, "gminy" : gminy }
with open("powiaty/powiat" + str(powiat[0]) + ".html", "w") as fh:
outputText = template.render( templateVars )
fh.write(outputText)
# function that generates the constituency (okręg) pages for a given voivodeship
def generuj_okregi(wojewodztwo):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "/home/marcin/PycharmProjects/aplikacjewww1/okregi/okreg.html"
template = templateEnv.get_template(TEMPLATE_FILE)
print("jestem w funkcji generuj okregi dla wojewodztwa", wojewodztwo)
lista_okregow = []
for row in c.execute("SELECT DISTINCT area FROM main_commune WHERE county='" + str(wojewodztwo).upper() + "'"):
lista_okregow.append(row)
for okreg in lista_okregow:
print(" jestem w okregu nr", okreg[0])
# wyniki kazdego kandydata dla danego okregu:
wyniki = [] # wyniki poszczególnych kandydatów w danym okregu, alfabetycznie
procenty = [] # procent głosów oddanych na kandydata w danym okregu, alfabetycznie
for row in c.execute("select "
"sum(grabowski), "
"sum(ikonowicz), "
"sum(kalinowski),"
"sum(korwin),"
"sum(krzaklewski),"
"sum(kwasniewski),"
"sum(lepper),"
"sum(lopuszanski),"
"sum(olechowski),"
"sum(pawlowski),"
"sum(walesa),"
"sum(wilecki)"
" from main_commune where county='" + str(wojewodztwo).upper() + "'and area = " + str(okreg[0]) + " group by area"):
suma = sum(row)
for result in row:
wyniki.append(result)
                procenty.append(str(round(result/suma, 4) * 100)[:4]) # magic so it renders as e.g. 23.45% in the html
        # build the list of counties (powiaty) belonging to the given constituency
powiaty = []
for row in c.execute("SELECT DISTINCT subareas FROM main_commune WHERE county='" + str(wojewodztwo).upper() + "' and area='" + str(okreg[0]) + "'"):
powiaty.append(row[0])
print("wygenerowalem liste powiatów dla okregu",okreg[0], ":", powiaty)
templateVars = {"wojewodztwo": wojewodztwo, "okreg": okreg[0], "wyniki" : wyniki, "procenty" : procenty, "powiaty" : powiaty }
with open("okregi/okreg" + str(okreg[0]) + ".html", "w") as fh:
outputText = template.render( templateVars )
fh.write(outputText)
generuj_powiaty(wojewodztwo, okreg[0])
def generuj_wojewodztwa():
wojewodztwa = []
for row in c.execute("SELECT DISTINCT county FROM main_commune"):
wojewodztwa.append(str(row[0]).lower())
print(wojewodztwa)
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "/home/marcin/PycharmProjects/aplikacjewww1/wojewodztwa/wojewodztwo.html"
template = templateEnv.get_template(TEMPLATE_FILE)
    # generate files from the wojewodztwo.html template for each voivodeship
for wojewodztwo in wojewodztwa:
        # results of each candidate for the given voivodeship:
        wyniki = [] # results of the individual candidates in the given voivodeship, alphabetically
        procenty = [] # percentage of votes cast for each candidate in the given voivodeship, alphabetically
for row in c.execute("select "
"sum(grabowski), "
"sum(ikonowicz), "
"sum(kalinowski),"
"sum(korwin),"
"sum(krzaklewski),"
"sum(kwasniewski),"
"sum(lepper),"
"sum(lopuszanski),"
"sum(olechowski),"
"sum(pawlowski),"
"sum(walesa),"
"sum(wilecki)"
" from main_commune where county='" + str(wojewodztwo).upper() + "' group by county"):
suma = sum(row)
for result in row:
wyniki.append(result)
                procenty.append(str(round(result/suma, 4) * 100)[:4]) # magic so it renders as e.g. 23.45% in the html
        # build the list of constituencies (okręgi) belonging to the given voivodeship
okregi = []
for row in c.execute("SELECT DISTINCT area FROM main_commune WHERE county='" + str(wojewodztwo).upper() + "'"):
okregi.append(row[0])
templateVars = {"wojewodztwo": wojewodztwo, "wyniki": wyniki, "procenty": procenty, "okregi": okregi}
with open("wojewodztwa/" + str(wojewodztwo) + ".html", "w") as fh:
outputText = template.render( templateVars )
fh.write(outputText)
generuj_okregi(wojewodztwo)
generuj_wojewodztwa()
| 46.218182
| 199
| 0.556255
|
1c9c8f989aa852dc22f8a0d7f2df8814e88cfadd
| 14,947
|
py
|
Python
|
tests/test_web_middleware.py
|
sivakov512/aiohttp
|
9bce9c2d462666ad83111c0966358c2e274bc89b
|
[
"Apache-2.0"
] | 1
|
2021-03-26T11:06:21.000Z
|
2021-03-26T11:06:21.000Z
|
tests/test_web_middleware.py
|
sivakov512/aiohttp
|
9bce9c2d462666ad83111c0966358c2e274bc89b
|
[
"Apache-2.0"
] | 224
|
2020-10-25T12:21:48.000Z
|
2022-03-31T08:05:18.000Z
|
tests/test_web_middleware.py
|
sivakov512/aiohttp
|
9bce9c2d462666ad83111c0966358c2e274bc89b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from yarl import URL
from aiohttp import web
async def test_middleware_modifies_response(loop, aiohttp_client) -> None:
async def handler(request):
return web.Response(body=b"OK")
async def middleware(request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[MIDDLEWARE]"
return resp
app = web.Application()
app.middlewares.append(middleware)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[MIDDLEWARE]" == txt
async def test_middleware_handles_exception(loop, aiohttp_client) -> None:
async def handler(request):
raise RuntimeError("Error text")
async def middleware(request, handler):
with pytest.raises(RuntimeError) as ctx:
await handler(request)
return web.Response(status=501, text=str(ctx.value) + "[MIDDLEWARE]")
app = web.Application()
app.middlewares.append(middleware)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 501 == resp.status
txt = await resp.text()
assert "Error text[MIDDLEWARE]" == txt
async def test_middleware_chain(loop, aiohttp_client) -> None:
async def handler(request):
return web.Response(text="OK")
handler.annotation = "annotation_value"
async def handler2(request):
return web.Response(text="OK")
middleware_annotation_seen_values = []
def make_middleware(num):
async def middleware(request, handler):
middleware_annotation_seen_values.append(
getattr(handler, "annotation", None)
)
resp = await handler(request)
resp.text = resp.text + f"[{num}]"
return resp
return middleware
app = web.Application()
app.middlewares.append(make_middleware(1))
app.middlewares.append(make_middleware(2))
app.router.add_route("GET", "/", handler)
app.router.add_route("GET", "/r2", handler2)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 200 == resp.status
txt = await resp.text()
assert "OK[2][1]" == txt
assert middleware_annotation_seen_values == ["annotation_value", "annotation_value"]
# check that attributes from handler are not applied to handler2
resp = await client.get("/r2")
assert 200 == resp.status
assert middleware_annotation_seen_values == [
"annotation_value",
"annotation_value",
None,
None,
]
async def test_middleware_subapp(loop, aiohttp_client) -> None:
async def sub_handler(request):
return web.Response(text="OK")
sub_handler.annotation = "annotation_value"
async def handler(request):
return web.Response(text="OK")
middleware_annotation_seen_values = []
def make_middleware(num):
async def middleware(request, handler):
annotation = getattr(handler, "annotation", None)
if annotation is not None:
middleware_annotation_seen_values.append(f"{annotation}/{num}")
return await handler(request)
return middleware
app = web.Application()
app.middlewares.append(make_middleware(1))
app.router.add_route("GET", "/r2", handler)
subapp = web.Application()
subapp.middlewares.append(make_middleware(2))
subapp.router.add_route("GET", "/", sub_handler)
app.add_subapp("/sub", subapp)
client = await aiohttp_client(app)
resp = await client.get("/sub/")
assert 200 == resp.status
await resp.text()
assert middleware_annotation_seen_values == [
"annotation_value/1",
"annotation_value/2",
]
# check that attributes from sub_handler are not applied to handler
del middleware_annotation_seen_values[:]
resp = await client.get("/r2")
assert 200 == resp.status
assert middleware_annotation_seen_values == []
@pytest.fixture
def cli(loop, aiohttp_client):
async def handler(request):
return web.Response(text="OK")
def wrapper(extra_middlewares):
app = web.Application()
app.router.add_route("GET", "/resource1", handler)
app.router.add_route("GET", "/resource2/", handler)
app.router.add_route("GET", "/resource1/a/b", handler)
app.router.add_route("GET", "/resource2/a/b/", handler)
app.router.add_route("GET", "/resource2/a/b%2Fc/", handler)
app.middlewares.extend(extra_middlewares)
return aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
return wrapper
class TestNormalizePathMiddleware:
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 200),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 200),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 200),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_add_trailing_when_necessary(self, path, status, cli):
extra_middlewares = [web.normalize_path_middleware(merge_slashes=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 200),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 200),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
("/resource12", 404),
("/resource12345", 404),
],
)
async def test_remove_trailing_when_necessary(self, path, status, cli) -> None:
extra_middlewares = [
web.normalize_path_middleware(
append_slash=False, remove_slash=True, merge_slashes=False
)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_no_trailing_slash_when_disabled(self, path, status, cli):
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, merge_slashes=False)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("//resource1//a//b", 200),
("//resource1//a//b/", 404),
("///resource1//a//b", 200),
("/////resource1/a///b", 200),
("/////resource1/a//b/", 404),
("/resource1/a/b?p=1", 200),
("//resource1//a//b?p=1", 200),
("//resource1//a//b/?p=1", 404),
("///resource1//a//b?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a//b/?p=1", 404),
],
)
async def test_merge_slash(self, path, status, cli) -> None:
extra_middlewares = [web.normalize_path_middleware(append_slash=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 404),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 404),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 404),
("/resource2/a/b", 200),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource2//a//b", 200),
("///resource2//a//b/", 200),
("/////resource2/a///b", 200),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 404),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 404),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 404),
("/resource2/a/b?p=1", 200),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 200),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 200),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_append_and_merge_slash(self, path, status, cli) -> None:
extra_middlewares = [web.normalize_path_middleware()]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 200),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 200),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 200),
("/////resource1/a///b///", 200),
("/resource2/a/b", 404),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource2//a//b", 404),
("///resource2//a//b/", 200),
("/////resource2/a///b", 404),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 200),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 200),
("/resource2/a/b?p=1", 404),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 404),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 404),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_remove_and_merge_slash(self, path, status, cli) -> None:
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, remove_slash=True)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
async def test_cannot_remove_and_add_slash(self) -> None:
with pytest.raises(AssertionError):
web.normalize_path_middleware(append_slash=True, remove_slash=True)
async def test_bug_3669(aiohttp_client):
async def paymethod(request):
return web.Response(text="OK")
app = web.Application()
app.router.add_route("GET", "/paymethod", paymethod)
app.middlewares.append(
web.normalize_path_middleware(append_slash=False, remove_slash=True)
)
client = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
resp = await client.get("/paymethods")
assert resp.status == 404
assert resp.url.path != "/paymethod"
async def test_old_style_middleware(loop, aiohttp_client) -> None:
async def view_handler(request):
return web.Response(body=b"OK")
with pytest.warns(DeprecationWarning, match="Middleware decorator is deprecated"):
@web.middleware
async def middleware(request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[old style middleware]"
return resp
app = web.Application(middlewares=[middleware])
app.router.add_route("GET", "/", view_handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[old style middleware]" == txt
async def test_new_style_middleware_class(loop, aiohttp_client) -> None:
async def handler(request):
return web.Response(body=b"OK")
class Middleware:
async def __call__(self, request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[new style middleware]"
return resp
with pytest.warns(None) as warning_checker:
app = web.Application()
app.middlewares.append(Middleware())
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
assert len(warning_checker) == 0
async def test_new_style_middleware_method(loop, aiohttp_client) -> None:
async def handler(request):
return web.Response(body=b"OK")
class Middleware:
async def call(self, request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[new style middleware]"
return resp
with pytest.warns(None) as warning_checker:
app = web.Application()
app.middlewares.append(Middleware().call)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
assert len(warning_checker) == 0
| 33.970455
| 88
| 0.558373
|
825441c47397ce115a5d86f0ec31b816d6e2bfbb
| 3,781
|
py
|
Python
|
tests/run_tests.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 5
|
2015-04-17T11:27:00.000Z
|
2017-11-29T11:31:51.000Z
|
tests/run_tests.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 2
|
2020-06-25T19:02:43.000Z
|
2020-06-30T19:33:27.000Z
|
tests/run_tests.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 3
|
2016-07-04T18:22:56.000Z
|
2017-03-03T22:50:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import utool as ut
def run_tests():
# Build module list and run tests
import sys
exclude_doctests_fnames = set([
])
exclude_dirs = [
'_broken',
'old',
'tests',
'timeits',
'_scripts',
'_timeits',
'_doc',
'notebook',
]
import vtool_ibeis as vt
from os.path import dirname
#dpath_list = ['vtool_ibeis']
if ut.in_pyinstaller_package():
# HACK, find_doctestable_modnames does not work in pyinstaller
"""
import utool as ut
import vtool_ibeis as vt
dpath_list = [dirname(vt.__file__)]
doctest_modname_list = ut.find_doctestable_modnames(
dpath_list, exclude_doctests_fnames, exclude_dirs)
print(ut.indent('doctest_modname_list = ' + ub.repr2(doctest_modname_list), ' ' * 8))
"""
doctest_modname_list = [
'vtool_ibeis.spatial_verification',
'vtool_ibeis.constrained_matching',
'vtool_ibeis.coverage_kpts',
'vtool_ibeis.image',
'vtool_ibeis.histogram',
'vtool_ibeis.chip',
'vtool_ibeis.distance',
'vtool_ibeis.coverage_grid',
'vtool_ibeis.linalg',
'vtool_ibeis.geometry',
'vtool_ibeis.other',
'vtool_ibeis.util_math',
'vtool_ibeis.score_normalization',
'vtool_ibeis.test_constrained_matching',
'vtool_ibeis.keypoint',
'vtool_ibeis.sver_c_wrapper',
'vtool_ibeis.quality_classifier',
'vtool_ibeis.features',
'vtool_ibeis.nearest_neighbors',
'vtool_ibeis.segmentation',
'vtool_ibeis.exif',
'vtool_ibeis.patch',
'vtool_ibeis.confusion',
'vtool_ibeis.blend',
'vtool_ibeis.clustering2',
'vtool_ibeis.matching',
]
else:
dpath_list = [dirname(vt.__file__)]
doctest_modname_list = ut.find_doctestable_modnames(
dpath_list, exclude_doctests_fnames, exclude_dirs)
coverage = ut.get_argflag(('--coverage', '--cov',))
if coverage:
import coverage
cov = coverage.Coverage(source=doctest_modname_list)
cov.start()
print('Starting coverage')
exclude_lines = [
'pragma: no cover',
'def __repr__',
'if self.debug:',
'if settings.DEBUG',
'raise AssertionError',
'raise NotImplementedError',
'if 0:',
'if ut.VERBOSE',
'if _debug:',
'if __name__ == .__main__.:',
'print(.*)',
]
for line in exclude_lines:
cov.exclude(line)
modname_list2 = []
for modname in doctest_modname_list:
try:
exec('import ' + modname, globals(), locals())
except ImportError as ex:
ut.printex(ex)
if not ut.in_pyinstaller_package():
raise
else:
modname_list2.append(modname)
if coverage:
        print('Stopping coverage')
cov.stop()
print('Saving coverage')
cov.save()
print('Generating coverage html report')
cov.html_report()
module_list = [sys.modules[name] for name in modname_list2]
nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)
if nPass != nTotal:
return 1
else:
return 0
if __name__ == '__main__':
import multiprocessing
ut.change_term_title('RUN VTOOL TESTS')
multiprocessing.freeze_support()
retcode = run_tests()
sys.exit(retcode)
| 30.248
| 93
| 0.576567
|
bb3e6f0aedd0dbda4ba0f6295a7abfb2248b3b7c
| 2,309
|
py
|
Python
|
blades_helper/mission.py
|
eriksalt/blades_helper_proj
|
96e9d856b0e7f7a9cfc26c9f1bdc89c574eebdbc
|
[
"MIT"
] | null | null | null |
blades_helper/mission.py
|
eriksalt/blades_helper_proj
|
96e9d856b0e7f7a9cfc26c9f1bdc89c574eebdbc
|
[
"MIT"
] | null | null | null |
blades_helper/mission.py
|
eriksalt/blades_helper_proj
|
96e9d856b0e7f7a9cfc26c9f1bdc89c574eebdbc
|
[
"MIT"
] | null | null | null |
from .mission_generator_constants import MissionGeneratorConstants as con
class Mission:
def __init__(self):
        self.mission_type=con.NOTHING
self.target=con.NOTHING
self.title=con.NOTHING
self.rewards=[]
self.penalties=[]
self.notes=[]
self.requirements=[]
def set_mission_type(self, mission_type):
self.mission_type=mission_type
def get_mission_type(self):
return self.mission_type
def set_mission_title(self, title):
self.title=title
def set_favor_type(self, favor_type):
the_note = con.FAVOR_NOTE.format(favor_type)
self.add_note(the_note)
def set_danger(self):
self.add_note(con.HAS_DANGER_NOTE)
def set_additional_specialist(self, specialist):
self.add_note(con.ADDITIONAL_SPECIALIST_NOTE)
self.add_requirement(specialist)
def add_requirement(self, specialist):
self.requirements.append(specialist)
def set_target(self, target):
self.target=target
def set_rewards(self, rewards):
self.rewards=rewards
def set_penalties(self, penalties):
self.penalties=penalties
def add_objective(self, objective):
self.notes.append(f'Military Objective: {objective["key"]}. {objective["description"]} Examples: {objective["example"]}')
def add_note(self, note):
self.notes.append(note)
def __repr__(self):
strings= []
strings.append(f'Mission: {self.title}.')
strings.append(f'\tType:{self.mission_type}.')
strings.append(f'\tTarget: {self.target}.')
strings.append('\tRewards:')
for reward_count, reward_type in self.rewards:
strings.append(f'\t\t Type: {reward_type}. Count: {reward_count}.')
strings.append('\tPenalties:')
for penalty_count, penalty_type in self.penalties:
strings.append(f'\t\t Type: {penalty_type}. Count: {penalty_count}.')
strings.append('\tRequirements:')
for requirement in self.requirements:
strings.append(f'\t\t {requirement}.')
if len(self.notes) > 0:
strings.append('\tNotes:')
for note in self.notes:
strings.append(f'\t\t{note}.')
return '\r'.join(strings)
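
# --- Usage sketch (not part of the original module) ---
# Hedged example of assembling a mission by hand from outside the package; the
# literal strings below are placeholders for values normally produced by the
# mission generator / MissionGeneratorConstants.
#
#   from blades_helper.mission import Mission
#   m = Mission()
#   m.set_mission_title('Raid the warehouse')
#   m.set_mission_type('Assault')
#   m.set_target('Smuggler crew')
#   m.set_rewards([(2, 'Coin'), (1, 'Rep')])   # (count, type) pairs, as __repr__ expects
#   m.set_penalties([(1, 'Heat')])
#   m.set_danger()
#   print(m)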
| 33.463768
| 129
| 0.639671
|
b18c1fa7767e15531dce79ef591f8dde649d2ee4
| 2,714
|
py
|
Python
|
sshagentmux/util.py
|
ulyssesbbg/sshagentmux
|
3eb6f9eeecf743750d7b1432eb274af20930ba5d
|
[
"Apache-2.0"
] | 13
|
2016-02-25T18:30:16.000Z
|
2019-09-04T21:51:14.000Z
|
sshagentmux/util.py
|
ulyssesbbg/sshagentmux
|
3eb6f9eeecf743750d7b1432eb274af20930ba5d
|
[
"Apache-2.0"
] | 3
|
2016-09-13T16:51:58.000Z
|
2017-03-03T22:32:32.000Z
|
sshagentmux/util.py
|
ulyssesbbg/sshagentmux
|
3eb6f9eeecf743750d7b1432eb274af20930ba5d
|
[
"Apache-2.0"
] | 9
|
2016-01-27T00:40:33.000Z
|
2018-03-25T04:36:52.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2015, IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os
import logging
import logging.handlers
import signal
import sys
LOG = logging.getLogger(__name__)
def setup_logging(name, level=logging.DEBUG):
log = logging.getLogger()
log.setLevel(level)
handler = logging.handlers.SysLogHandler(address='/dev/log')
FORMAT = name + "[%(process)d]:%(module)s %(levelname)s %(message)s"
DATE_FORMAT = '%b %d %H:%M:%S'
formatter = logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT)
handler.setFormatter(formatter)
log.addHandler(handler)
def daemonize(target=None, pidfile=None, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null', args=(), kwargs={}):
if pidfile and os.path.exists(pidfile):
raise RuntimeError('Already running')
# First fork (detaches from parent)
try:
if os.fork() > 0:
# Parent returns
return
except OSError as e:
raise RuntimeError('fork #1 failed.')
os.chdir('/')
os.umask(077)
os.setsid()
# Second fork (relinquish session leadership)
try:
if os.fork() > 0:
raise SystemExit(0)
except OSError as e:
raise RuntimeError('fork #2 failed with error %s' % e)
# Flush I/O buffers
sys.stdout.flush()
sys.stderr.flush()
# Replace file descriptors for stdin, stdout, and stderr
with open(stdin, 'rb', 0) as f:
os.dup2(f.fileno(), sys.stdin.fileno())
with open(stdout, 'ab', 0) as f:
os.dup2(f.fileno(), sys.stdout.fileno())
with open(stderr, 'ab', 0) as f:
os.dup2(f.fileno(), sys.stderr.fileno())
if pidfile:
# Write the PID file
with open(pidfile, 'w') as f:
print >>f, os.getpid()
# Arrange to have the PID file removed on exit/signal
atexit.register(lambda: os.remove(pidfile))
# Signal handler for termination (required)
def sigterm_handler(signo, frame):
LOG.error("SIGTERM received, exiting")
sys.exit(1)
signal.signal(signal.SIGTERM, sigterm_handler)
target(*args, **kwargs)
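
# --- Usage sketch (not part of the original module) ---
# Hedged example of the intended call pattern: daemonize() double-forks,
# detaches from the controlling terminal and then runs `target` inside the
# daemon process. The worker function and pidfile path are illustrative only.
#
#   import time
#
#   def worker(interval=60):
#       while True:
#           LOG.info("heartbeat")
#           time.sleep(interval)
#
#   setup_logging('sshagentmux')
#   daemonize(target=worker, pidfile='/var/run/sshagentmux.pid',
#             kwargs={'interval': 30})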
| 29.182796
| 79
| 0.648121
|
a45fb82197f878d3cb112fa443522bc37c868df3
| 393,282
|
py
|
Python
|
ironic/tests/unit/conductor/test_manager.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/conductor/test_manager.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/conductor/test_manager.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic ManagerService."""
from collections import namedtuple
import datetime
import queue
import re
from unittest import mock
import eventlet
from futurist import waiters
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields
from ironic.common import boot_devices
from ironic.common import components
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import images
from ironic.common import indicator_states
from ironic.common import nova
from ironic.common import states
from ironic.conductor import cleaning
from ironic.conductor import deployments
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic.db import api as dbapi
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import fake
from ironic.drivers.modules.network import flat as n_flat
from ironic import objects
from ironic.objects import base as obj_base
from ironic.objects import fields as obj_fields
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
@mgr_utils.mock_record_keepalive
class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_change_node_power_state_power_on(self, get_power_mock):
# Test change_node_power_state including integration with
# conductor.utils.node_power_action and lower.
get_power_mock.return_value = states.POWER_OFF
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
self.service.change_node_power_state(self.context,
node.uuid,
states.POWER_ON)
self._stop_service()
get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
node.refresh()
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
# Verify the reservation has been cleared by
# background task's link callback.
self.assertIsNone(node.reservation)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_change_node_power_state_soft_power_off_timeout(self,
get_power_mock):
# Test change_node_power_state with timeout optional parameter
# including integration with conductor.utils.node_power_action and
# lower.
get_power_mock.return_value = states.POWER_ON
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_ON)
self._start_service()
self.service.change_node_power_state(self.context,
node.uuid,
states.SOFT_POWER_OFF,
timeout=2)
self._stop_service()
get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
node.refresh()
self.assertEqual(states.POWER_OFF, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
# Verify the reservation has been cleared by
# background task's link callback.
self.assertIsNone(node.reservation)
@mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_change_node_power_state_node_already_locked(self,
pwr_act_mock):
# Test change_node_power_state with mocked
# conductor.utils.node_power_action.
fake_reservation = 'fake-reserv'
pwr_state = states.POWER_ON
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=pwr_state,
reservation=fake_reservation)
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
self.context,
node.uuid,
states.POWER_ON)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
        # In this test the worker should not be spawned, but wait to make sure
        # the pwr_act_mock assertion below is valid.
self._stop_service()
self.assertFalse(pwr_act_mock.called, 'node_power_action has been '
'unexpectedly called.')
# Verify existing reservation wasn't broken.
node.refresh()
self.assertEqual(fake_reservation, node.reservation)
def test_change_node_power_state_worker_pool_full(self):
# Test change_node_power_state including integration with
# conductor.utils.node_power_action and lower.
initial_state = states.POWER_OFF
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=initial_state)
self._start_service()
with mock.patch.object(self.service,
'_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
self.context,
node.uuid,
states.POWER_ON)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
spawn_mock.assert_called_once_with(mock.ANY, mock.ANY,
mock.ANY, timeout=mock.ANY)
node.refresh()
self.assertEqual(initial_state, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNotNone(node.last_error)
# Verify the picked reservation has been cleared due to full pool.
self.assertIsNone(node.reservation)
@mock.patch.object(fake.FakePower, 'set_power_state', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_change_node_power_state_exception_in_background_task(
self, get_power_mock, set_power_mock):
# Test change_node_power_state including integration with
# conductor.utils.node_power_action and lower.
initial_state = states.POWER_OFF
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=initial_state)
self._start_service()
get_power_mock.return_value = states.POWER_OFF
new_state = states.POWER_ON
set_power_mock.side_effect = exception.PowerStateFailure(
pstate=new_state
)
self.service.change_node_power_state(self.context,
node.uuid,
new_state)
self._stop_service()
get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
set_power_mock.assert_called_once_with(mock.ANY, mock.ANY,
new_state, timeout=None)
node.refresh()
self.assertEqual(initial_state, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNotNone(node.last_error)
# Verify the reservation has been cleared by background task's
# link callback despite exception in background task.
self.assertIsNone(node.reservation)
@mock.patch.object(fake.FakePower, 'validate', autospec=True)
def test_change_node_power_state_validate_fail(self, validate_mock):
# Test change_node_power_state where task.driver.power.validate
# fails and raises an exception
initial_state = states.POWER_ON
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=initial_state)
self._start_service()
validate_mock.side_effect = exception.InvalidParameterValue(
'wrong power driver info')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
self.context,
node.uuid,
states.POWER_ON)
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
node.refresh()
validate_mock.assert_called_once_with(mock.ANY, mock.ANY)
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
def test_node_set_power_state_notif_success(self, mock_notif):
# Test that successfully changing a node's power state sends the
# correct .start and .end notifications
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
self.service.change_node_power_state(self.context,
node.uuid,
states.POWER_ON)
# Give async worker a chance to finish
self._stop_service()
# 2 notifications should be sent: 1 .start and 1 .end
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.end',
obj_fields.NotificationLevel.INFO)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
def test_node_set_power_state_notif_get_power_fail(self, mock_notif,
get_power_mock):
# Test that correct notifications are sent when changing node power
# state and retrieving the node's current power state fails
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
get_power_mock.side_effect = Exception('I have failed')
self.service.change_node_power_state(self.context,
node.uuid,
states.POWER_ON)
# Give async worker a chance to finish
self._stop_service()
get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
# 2 notifications should be sent: 1 .start and 1 .error
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
@mock.patch.object(fake.FakePower, 'set_power_state', autospec=True)
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
def test_node_set_power_state_notif_set_power_fail(self, mock_notif,
set_power_mock):
# Test that correct notifications are sent when changing node power
# state and setting the node's power state fails
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
set_power_mock.side_effect = Exception('I have failed')
self.service.change_node_power_state(self.context,
node.uuid,
states.POWER_ON)
# Give async worker a chance to finish
self._stop_service()
set_power_mock.assert_called_once_with(mock.ANY, mock.ANY,
states.POWER_ON, timeout=None)
# 2 notifications should be sent: 1 .start and 1 .error
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
def test_node_set_power_state_notif_spawn_fail(self, mock_notif):
# Test that failure notification is not sent when spawning the
# background conductor worker fails
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
with mock.patch.object(self.service,
'_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
self.context,
node.uuid,
states.POWER_ON)
spawn_mock.assert_called_once_with(
conductor_utils.node_power_action, mock.ANY, states.POWER_ON,
timeout=None)
self.assertFalse(mock_notif.called)
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
def test_node_set_power_state_notif_no_state_change(self, mock_notif):
# Test that correct notifications are sent when changing node power
# state and no state change is necessary
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
power_state=states.POWER_OFF)
self._start_service()
self.service.change_node_power_state(self.context,
node.uuid,
states.POWER_OFF)
# Give async worker a chance to finish
self._stop_service()
# 2 notifications should be sent: 1 .start and 1 .end
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.end',
obj_fields.NotificationLevel.INFO)
@mock.patch.object(fake.FakePower, 'get_supported_power_states',
autospec=True)
def test_change_node_power_state_unsupported_state(self, supported_mock):
# Test change_node_power_state where unsupported power state raises
# an exception
initial_state = states.POWER_ON
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=initial_state)
self._start_service()
supported_mock.return_value = [
states.POWER_ON, states.POWER_OFF, states.REBOOT]
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
self.context,
node.uuid,
states.SOFT_POWER_OFF)
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
node.refresh()
supported_mock.assert_called_once_with(mock.ANY, mock.ANY)
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
@mgr_utils.mock_record_keepalive
class CreateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_create_node(self):
node = obj_utils.get_test_node(self.context, driver='fake-hardware',
extra={'test': 'one'})
res = self.service.create_node(self.context, node)
self.assertEqual({'test': 'one'}, res['extra'])
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual({'test': 'one'}, res['extra'])
@mock.patch.object(driver_factory, 'check_and_update_node_interfaces',
autospec=True)
def test_create_node_validation_fails(self, mock_validate):
node = obj_utils.get_test_node(self.context, driver='fake-hardware',
extra={'test': 'one'})
mock_validate.side_effect = exception.InterfaceNotFoundInEntrypoint(
'boom')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.create_node,
self.context, node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InterfaceNotFoundInEntrypoint,
exc.exc_info[0])
self.assertRaises(exception.NotFound,
objects.Node.get_by_uuid, self.context, node['uuid'])
@mgr_utils.mock_record_keepalive
class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_update_node(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
extra={'test': 'one'})
# check that ManagerService.update_node actually updates the node
node.extra = {'test': 'two'}
res = self.service.update_node(self.context, node)
self.assertEqual({'test': 'two'}, res['extra'])
def test_update_node_maintenance_set_false(self):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
maintenance=True,
fault='clean failure',
maintenance_reason='reason')
# check that ManagerService.update_node actually updates the node
node.maintenance = False
res = self.service.update_node(self.context, node)
self.assertFalse(res['maintenance'])
self.assertIsNone(res['maintenance_reason'])
self.assertIsNone(res['fault'])
def test_update_node_protected_set(self):
for state in ('active', 'rescue'):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
provision_state=state)
node.protected = True
res = self.service.update_node(self.context, node)
self.assertTrue(res['protected'])
self.assertIsNone(res['protected_reason'])
def test_update_node_protected_unset(self):
# NOTE(dtantsur): we allow unsetting protected in any state to make
# sure a node cannot get stuck in it.
for state in ('active', 'rescue', 'rescue failed'):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
provision_state=state,
protected=True,
protected_reason='reason')
# check that ManagerService.update_node actually updates the node
node.protected = False
res = self.service.update_node(self.context, node)
self.assertFalse(res['protected'])
self.assertIsNone(res['protected_reason'])
def test_update_node_protected_invalid_state(self):
node = obj_utils.create_test_node(self.context,
provision_state='available')
node.protected = True
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertFalse(res['protected'])
self.assertIsNone(res['protected_reason'])
def test_update_node_protected_reason_without_protected(self):
node = obj_utils.create_test_node(self.context,
provision_state='active')
node.protected_reason = 'reason!'
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertFalse(res['protected'])
self.assertIsNone(res['protected_reason'])
def test_update_node_retired_set(self):
for state in ('active', 'rescue', 'manageable'):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
provision_state=state)
node.retired = True
res = self.service.update_node(self.context, node)
self.assertTrue(res['retired'])
self.assertIsNone(res['retired_reason'])
def test_update_node_retired_invalid_state(self):
# NOTE(arne_wiebalck): nodes in available cannot be 'retired'.
        # This is to ensure backwards compatibility.
node = obj_utils.create_test_node(self.context,
provision_state='available')
node.retired = True
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertFalse(res['retired'])
self.assertIsNone(res['retired_reason'])
def test_update_node_retired_unset(self):
for state in ('active', 'manageable', 'rescue', 'rescue failed'):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
provision_state=state,
retired=True,
retired_reason='EOL')
# check that ManagerService.update_node actually updates the node
node.retired = False
res = self.service.update_node(self.context, node)
self.assertFalse(res['retired'])
self.assertIsNone(res['retired_reason'])
def test_update_node_retired_reason_without_retired(self):
node = obj_utils.create_test_node(self.context,
provision_state='active')
node.retired_reason = 'warranty expired'
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertFalse(res['retired'])
self.assertIsNone(res['retired_reason'])
def test_update_node_already_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
extra={'test': 'one'})
# check that it fails if something else has locked it already
with task_manager.acquire(self.context, node['id'], shared=False):
node.extra = {'test': 'two'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
# verify change did not happen
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual({'test': 'one'}, res['extra'])
def test_update_node_already_associated(self):
old_instance = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=old_instance)
node.instance_uuid = uuidutils.generate_uuid()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeAssociated, exc.exc_info[0])
# verify change did not happen
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual(old_instance, res['instance_uuid'])
@mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
autospec=True)
def _test_associate_node(self, power_state, mock_get_power_state):
mock_get_power_state.return_value = power_state
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=None,
power_state=states.NOSTATE)
uuid1 = uuidutils.generate_uuid()
uuid2 = uuidutils.generate_uuid()
node.instance_uuid = uuid1
self.service.update_node(self.context, node)
        # Check that the association stuck: the unsaved local change to uuid2
        # is discarded by refresh(), leaving the stored uuid1 in place.
node.instance_uuid = uuid2
node.refresh()
self.assertEqual(uuid1, node.instance_uuid)
def test_associate_node_powered_off(self):
self._test_associate_node(states.POWER_OFF)
def test_associate_node_powered_on(self):
self._test_associate_node(states.POWER_ON)
def test_update_node_invalid_driver(self):
existing_driver = 'fake-hardware'
wrong_driver = 'wrong-driver'
node = obj_utils.create_test_node(self.context,
driver=existing_driver,
extra={'test': 'one'},
instance_uuid=None)
# check that it fails because driver not found
node.driver = wrong_driver
node.driver_info = {}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)
self.assertEqual(exception.DriverNotFound, exc.exc_info[0])
# verify change did not happen
node.refresh()
self.assertEqual(existing_driver, node.driver)
def test_update_node_from_invalid_driver(self):
existing_driver = 'fake-hardware'
wrong_driver = 'wrong-driver'
node = obj_utils.create_test_node(self.context, driver=wrong_driver)
node.driver = existing_driver
result = self.service.update_node(self.context, node)
self.assertEqual(existing_driver, result.driver)
node.refresh()
self.assertEqual(existing_driver, node.driver)
UpdateInterfaces = namedtuple('UpdateInterfaces', ('old', 'new'))
# NOTE(dtantsur): "old" interfaces here do not match the defaults, so that
# we can test resetting them.
IFACE_UPDATE_DICT = {
'boot_interface': UpdateInterfaces('pxe', 'fake'),
'console_interface': UpdateInterfaces('no-console', 'fake'),
'deploy_interface': UpdateInterfaces('iscsi', 'fake'),
'inspect_interface': UpdateInterfaces('no-inspect', 'fake'),
'management_interface': UpdateInterfaces(None, 'fake'),
'network_interface': UpdateInterfaces('noop', 'flat'),
'power_interface': UpdateInterfaces(None, 'fake'),
'raid_interface': UpdateInterfaces('no-raid', 'fake'),
'rescue_interface': UpdateInterfaces('no-rescue', 'fake'),
'storage_interface': UpdateInterfaces('fake', 'noop'),
}
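    # The helpers below use this mapping to exercise ManagerService.update_node:
    # a node is created with every "old" interface value, then each interface is
    # flipped to its "new" value one at a time and the update is expected to be
    # accepted or rejected depending on the node's provision state.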
def _create_node_with_interfaces(self, prov_state, maintenance=False):
old_ifaces = {}
for iface_name, ifaces in self.IFACE_UPDATE_DICT.items():
old_ifaces[iface_name] = ifaces.old
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
uuid=uuidutils.generate_uuid(),
provision_state=prov_state,
maintenance=maintenance,
**old_ifaces)
return node
def _test_update_node_interface_allowed(self, node, iface_name, new_iface):
setattr(node, iface_name, new_iface)
self.service.update_node(self.context, node)
node.refresh()
self.assertEqual(new_iface, getattr(node, iface_name))
def _test_update_node_interface_in_allowed_state(self, prov_state,
maintenance=False):
node = self._create_node_with_interfaces(prov_state,
maintenance=maintenance)
for iface_name, ifaces in self.IFACE_UPDATE_DICT.items():
self._test_update_node_interface_allowed(node, iface_name,
ifaces.new)
node.destroy()
def test_update_node_interface_in_allowed_state(self):
for state in [states.ENROLL, states.MANAGEABLE, states.INSPECTING,
states.INSPECTWAIT, states.AVAILABLE]:
self._test_update_node_interface_in_allowed_state(state)
def test_update_node_interface_in_maintenance(self):
self._test_update_node_interface_in_allowed_state(states.ACTIVE,
maintenance=True)
def _test_update_node_interface_not_allowed(self, node, iface_name,
new_iface):
old_iface = getattr(node, iface_name)
setattr(node, iface_name, new_iface)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
node.refresh()
self.assertEqual(old_iface, getattr(node, iface_name))
def _test_update_node_interface_in_not_allowed_state(self, prov_state):
node = self._create_node_with_interfaces(prov_state)
for iface_name, ifaces in self.IFACE_UPDATE_DICT.items():
self._test_update_node_interface_not_allowed(node, iface_name,
ifaces.new)
node.destroy()
def test_update_node_interface_in_not_allowed_state(self):
for state in [states.ACTIVE, states.DELETING]:
self._test_update_node_interface_in_not_allowed_state(state)
def _test_update_node_interface_invalid(self, node, iface_name):
old_iface = getattr(node, iface_name)
setattr(node, iface_name, 'invalid')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)
self.assertEqual(exception.InterfaceNotFoundInEntrypoint,
exc.exc_info[0])
node.refresh()
self.assertEqual(old_iface, getattr(node, iface_name))
def test_update_node_interface_invalid(self):
node = self._create_node_with_interfaces(states.MANAGEABLE)
for iface_name in self.IFACE_UPDATE_DICT:
self._test_update_node_interface_invalid(node, iface_name)
def test_update_node_with_reset_interfaces(self):
# Modify only one interface at a time
for iface_name, ifaces in self.IFACE_UPDATE_DICT.items():
node = self._create_node_with_interfaces(states.AVAILABLE)
setattr(node, iface_name, ifaces.new)
# Updating a driver is mandatory for reset_interfaces to work
node.driver = 'fake-hardware'
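            # With reset_interfaces=True, update_node resets every interface
            # that is not part of this update back to its calculated default,
            # which the inner loop below verifies.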
self.service.update_node(self.context, node,
reset_interfaces=True)
node.refresh()
self.assertEqual(ifaces.new, getattr(node, iface_name))
# Other interfaces must be reset to their defaults
for other_iface_name, ifaces in self.IFACE_UPDATE_DICT.items():
if other_iface_name == iface_name:
continue
# For this to work, the "old" interfaces in IFACE_UPDATE_DICT
# must not match the defaults.
self.assertNotEqual(ifaces.old,
getattr(node, other_iface_name),
"%s does not match the default after "
"reset with setting %s: %s" %
(other_iface_name, iface_name,
getattr(node, other_iface_name)))
def _test_update_node_change_resource_class(self, state,
resource_class=None,
new_resource_class='new',
expect_error=False,
maintenance=False):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
uuid=uuidutils.generate_uuid(),
provision_state=state,
resource_class=resource_class,
maintenance=maintenance)
self.addCleanup(node.destroy)
node.resource_class = new_resource_class
if expect_error:
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context,
node)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
expected_msg_regex = \
(r'^Node {} can not have resource_class updated unless it is '
r'in one of allowed \(.*\) states.$').format(
re.escape(node.uuid))
self.assertRegex(str(exc.exc_info[1]), expected_msg_regex)
# verify change did not happen
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual(resource_class, res['resource_class'])
else:
self.service.update_node(self.context, node)
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual('new', res['resource_class'])
def test_update_resource_class_allowed_state(self):
for state in [states.ENROLL, states.MANAGEABLE, states.INSPECTING,
states.AVAILABLE]:
self._test_update_node_change_resource_class(
state, resource_class='old', expect_error=False)
def test_update_resource_class_no_previous_value(self):
for state in [states.ENROLL, states.MANAGEABLE, states.INSPECTING,
states.AVAILABLE, states.ACTIVE]:
self._test_update_node_change_resource_class(
state, resource_class=None, expect_error=False)
def test_update_resource_class_not_allowed(self):
self._test_update_node_change_resource_class(
states.ACTIVE, resource_class='old', new_resource_class='new',
expect_error=True)
self._test_update_node_change_resource_class(
states.ACTIVE, resource_class='old', new_resource_class=None,
expect_error=True)
self._test_update_node_change_resource_class(
states.ACTIVE, resource_class='old', new_resource_class=None,
expect_error=True, maintenance=True)
def test_update_node_hardware_type(self):
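        # Changing the hardware type together with an interface value that the
        # new type supports should be accepted in a single update_node call.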
existing_hardware = 'fake-hardware'
existing_interface = 'fake'
new_hardware = 'manual-management'
new_interface = 'pxe'
node = obj_utils.create_test_node(self.context,
driver=existing_hardware,
boot_interface=existing_interface)
node.driver = new_hardware
node.boot_interface = new_interface
self.service.update_node(self.context, node)
node.refresh()
self.assertEqual(new_hardware, node.driver)
self.assertEqual(new_interface, node.boot_interface)
def test_update_node_deleting_allocation(self):
node = obj_utils.create_test_node(self.context)
alloc = obj_utils.create_test_allocation(self.context)
# Establish cross-linking between the node and the allocation
alloc.node_id = node.id
alloc.save()
node.refresh()
self.assertEqual(alloc.id, node.allocation_id)
self.assertEqual(alloc.uuid, node.instance_uuid)
node.instance_uuid = None
res = self.service.update_node(self.context, node)
self.assertRaises(exception.AllocationNotFound,
objects.Allocation.get_by_id,
self.context, alloc.id)
self.assertIsNone(res['instance_uuid'])
self.assertIsNone(res['allocation_id'])
node.refresh()
self.assertIsNone(node.instance_uuid)
self.assertIsNone(node.allocation_id)
def test_update_node_deleting_allocation_forbidden(self):
node = obj_utils.create_test_node(self.context,
provision_state='active',
maintenance=False)
alloc = obj_utils.create_test_allocation(self.context)
# Establish cross-linking between the node and the allocation
alloc.node_id = node.id
alloc.save()
node.refresh()
self.assertEqual(alloc.id, node.allocation_id)
self.assertEqual(alloc.uuid, node.instance_uuid)
node.instance_uuid = None
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
node.refresh()
self.assertEqual(alloc.id, node.allocation_id)
self.assertEqual(alloc.uuid, node.instance_uuid)
def test_update_node_deleting_allocation_in_maintenance(self):
node = obj_utils.create_test_node(self.context,
provision_state='active',
maintenance=True)
alloc = obj_utils.create_test_allocation(self.context)
# Establish cross-linking between the node and the allocation
alloc.node_id = node.id
alloc.save()
node.refresh()
self.assertEqual(alloc.id, node.allocation_id)
self.assertEqual(alloc.uuid, node.instance_uuid)
node.instance_uuid = None
res = self.service.update_node(self.context, node)
self.assertRaises(exception.AllocationNotFound,
objects.Allocation.get_by_id,
self.context, alloc.id)
self.assertIsNone(res['instance_uuid'])
self.assertIsNone(res['allocation_id'])
node.refresh()
self.assertIsNone(node.instance_uuid)
self.assertIsNone(node.allocation_id)
def test_update_node_maintenance_with_broken_interface(self):
# Updates of non-driver fields are possible with a broken driver
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_interface='foobar',
extra={'test': 'one'})
node.maintenance = True
res = self.service.update_node(self.context, node)
self.assertTrue(res.maintenance)
node.refresh()
self.assertTrue(node.maintenance)
self.assertEqual('foobar', node.power_interface)
def test_update_node_interface_field_with_broken_interface(self):
# Updates of driver fields are NOT possible with a broken driver,
# unless they're fixing the breakage.
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_interface='foobar',
deploy_interface='fake',
extra={'test': 'one'})
node.deploy_interface = 'iscsi'
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)
self.assertEqual(exception.InterfaceNotFoundInEntrypoint,
exc.exc_info[0])
node.refresh()
self.assertEqual('foobar', node.power_interface)
self.assertEqual('fake', node.deploy_interface)
def test_update_node_fix_broken_interface(self):
        # Fixing a broken driver field (here power_interface) is allowed even
        # though the currently stored value does not load.
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_interface='foobar',
extra={'test': 'one'})
node.power_interface = 'fake'
self.service.update_node(self.context, node)
node.refresh()
self.assertEqual('fake', node.power_interface)
@mgr_utils.mock_record_keepalive
class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
@mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_async(self, mock_spawn,
mock_upgrade):
node = obj_utils.create_test_node(self.context,
vendor_interface='fake')
info = {'bar': 'baz'}
self._start_service()
response = self.service.vendor_passthru(self.context, node.uuid,
'second_method', 'POST',
info)
# Waiting to make sure the below assertions are valid.
self._stop_service()
# Assert spawn_after was called
self.assertTrue(mock_spawn.called)
self.assertIsNone(response['return'])
self.assertTrue(response['async'])
# Assert lock was upgraded to an exclusive one
self.assertEqual(1, mock_upgrade.call_count)
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
@mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_sync(self, mock_spawn, mock_upgrade):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'meow'}
self._start_service()
response = self.service.vendor_passthru(self.context, node.uuid,
'third_method_sync',
'POST', info)
# Waiting to make sure the below assertions are valid.
self._stop_service()
# Assert no workers were used
self.assertFalse(mock_spawn.called)
self.assertTrue(response['return'])
self.assertFalse(response['async'])
# Assert lock was upgraded to an exclusive one
self.assertEqual(1, mock_upgrade.call_count)
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
@mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_shared_lock(self, mock_spawn, mock_upgrade):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'woof'}
self._start_service()
response = self.service.vendor_passthru(self.context, node.uuid,
'fourth_method_shared_lock',
'POST', info)
# Waiting to make sure the below assertions are valid.
self._stop_service()
# Assert spawn_after was called
self.assertTrue(mock_spawn.called)
self.assertIsNone(response['return'])
self.assertTrue(response['async'])
# Assert lock was never upgraded to an exclusive one
self.assertFalse(mock_upgrade.called)
node.refresh()
self.assertIsNone(node.last_error)
# Verify there's no reservation on the node
self.assertIsNone(node.reservation)
def test_vendor_passthru_http_method_not_supported(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
        # GET is not supported by second_method
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vendor_passthru,
self.context, node.uuid,
'second_method', 'GET', {})
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
def test_vendor_passthru_node_already_locked(self):
fake_reservation = 'test_reserv'
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation=fake_reservation)
info = {'bar': 'baz'}
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vendor_passthru,
self.context, node.uuid, 'second_method',
'POST', info)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
node.refresh()
self.assertIsNone(node.last_error)
# Verify the existing reservation is not broken.
self.assertEqual(fake_reservation, node.reservation)
def test_vendor_passthru_unsupported_method(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'baz'}
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vendor_passthru,
self.context, node.uuid,
'unsupported_method', 'POST', info)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue,
exc.exc_info[0])
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
def test_vendor_passthru_missing_method_parameters(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'invalid_param': 'whatever'}
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vendor_passthru,
self.context, node.uuid,
'second_method', 'POST', info)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.MissingParameterValue, exc.exc_info[0])
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
def test_vendor_passthru_worker_pool_full(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'baz'}
self._start_service()
with mock.patch.object(self.service,
'_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vendor_passthru,
self.context, node.uuid,
'second_method', 'POST', info)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
# Waiting to make sure the below assertions are valid.
self._stop_service()
node.refresh()
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_get_node_vendor_passthru_methods(self, mock_iface):
fake_routes = {'test_method': {'async': True,
'description': 'foo',
'http_methods': ['POST'],
'func': None}}
mock_iface.return_value.vendor_routes = fake_routes
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
data = self.service.get_node_vendor_passthru_methods(self.context,
node.uuid)
# The function reference should not be returned
del fake_routes['test_method']['func']
self.assertEqual(fake_routes, data)
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
autospec=True)
def test_driver_vendor_passthru_sync(self, mock_spawn, mock_get_if):
expected = {'foo': 'bar'}
vendor_mock = mock.Mock(spec=drivers_base.VendorInterface)
mock_get_if.return_value = vendor_mock
driver_name = 'fake-hardware'
test_method = mock.MagicMock(return_value=expected)
vendor_mock.driver_routes = {
'test_method': {'func': test_method,
'async': False,
'attach': False,
'http_methods': ['POST']}}
self.service.init_host()
# init_host() called _spawn_worker because of the heartbeat
mock_spawn.reset_mock()
# init_host() called get_interface during driver loading
mock_get_if.reset_mock()
vendor_args = {'test': 'arg'}
response = self.service.driver_vendor_passthru(
self.context, driver_name, 'test_method', 'POST', vendor_args)
# Assert that the vendor interface has no custom
# driver_vendor_passthru()
self.assertFalse(hasattr(vendor_mock, 'driver_vendor_passthru'))
self.assertEqual(expected, response['return'])
self.assertFalse(response['async'])
test_method.assert_called_once_with(self.context, **vendor_args)
# No worker was spawned
self.assertFalse(mock_spawn.called)
mock_get_if.assert_called_once_with(mock.ANY, 'vendor', 'fake')
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
autospec=True)
def test_driver_vendor_passthru_async(self, mock_spawn, mock_iface):
test_method = mock.MagicMock()
mock_iface.return_value.driver_routes = {
'test_sync_method': {'func': test_method,
'async': True,
'attach': False,
'http_methods': ['POST']}}
self.service.init_host()
# init_host() called _spawn_worker because of the heartbeat
mock_spawn.reset_mock()
vendor_args = {'test': 'arg'}
response = self.service.driver_vendor_passthru(
self.context, 'fake-hardware', 'test_sync_method', 'POST',
vendor_args)
self.assertIsNone(response['return'])
self.assertTrue(response['async'])
mock_spawn.assert_called_once_with(self.service, test_method,
self.context, **vendor_args)
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_driver_vendor_passthru_http_method_not_supported(self,
mock_iface):
mock_iface.return_value.driver_routes = {
'test_method': {'func': mock.MagicMock(),
'async': True,
'http_methods': ['POST']}}
self.service.init_host()
# GET not supported by test_method
exc = self.assertRaises(messaging.ExpectedException,
self.service.driver_vendor_passthru,
self.context, 'fake-hardware', 'test_method',
'GET', {})
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue,
exc.exc_info[0])
def test_driver_vendor_passthru_method_not_supported(self):
# Test for when the vendor interface is set, but hasn't passed a
# driver_passthru_mapping to MixinVendorInterface
self.service.init_host()
exc = self.assertRaises(messaging.ExpectedException,
self.service.driver_vendor_passthru,
self.context, 'fake-hardware', 'test_method',
'POST', {})
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue,
exc.exc_info[0])
def test_driver_vendor_passthru_driver_not_found(self):
self.service.init_host()
self.assertRaises(messaging.ExpectedException,
self.service.driver_vendor_passthru,
self.context, 'does_not_exist', 'test_method',
'POST', {})
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_driver_vendor_passthru_no_default_interface(self,
mock_def_iface):
self.service.init_host()
# NOTE(rloo): service.init_host() will call
# driver_factory.default_interface() and we want these to
# succeed, so we set the side effect *after* that call.
mock_def_iface.reset_mock()
mock_def_iface.side_effect = exception.NoValidDefaultForInterface('no')
exc = self.assertRaises(messaging.ExpectedException,
self.service.driver_vendor_passthru,
self.context, 'fake-hardware', 'test_method',
'POST', {})
mock_def_iface.assert_called_once_with(mock.ANY, 'vendor',
driver_name='fake-hardware')
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoValidDefaultForInterface,
exc.exc_info[0])
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_get_driver_vendor_passthru_methods(self, mock_get_if):
vendor_mock = mock.Mock(spec=drivers_base.VendorInterface)
mock_get_if.return_value = vendor_mock
driver_name = 'fake-hardware'
fake_routes = {'test_method': {'async': True,
'description': 'foo',
'http_methods': ['POST'],
'func': None}}
vendor_mock.driver_routes = fake_routes
self.service.init_host()
# init_host() will call get_interface
mock_get_if.reset_mock()
data = self.service.get_driver_vendor_passthru_methods(self.context,
driver_name)
# The function reference should not be returned
del fake_routes['test_method']['func']
self.assertEqual(fake_routes, data)
mock_get_if.assert_called_once_with(mock.ANY, 'vendor', 'fake')
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_get_driver_vendor_passthru_methods_no_default_interface(
self, mock_def_iface):
self.service.init_host()
# NOTE(rloo): service.init_host() will call
# driver_factory.default_interface() and we want these to
# succeed, so we set the side effect *after* that call.
mock_def_iface.reset_mock()
mock_def_iface.side_effect = exception.NoValidDefaultForInterface('no')
exc = self.assertRaises(
messaging.rpc.ExpectedException,
self.service.get_driver_vendor_passthru_methods,
self.context, 'fake-hardware')
mock_def_iface.assert_called_once_with(mock.ANY, 'vendor',
driver_name='fake-hardware')
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoValidDefaultForInterface,
exc.exc_info[0])
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_driver_vendor_passthru_validation_failed(self, mock_iface):
mock_iface.return_value.driver_validate.side_effect = (
exception.MissingParameterValue('error'))
test_method = mock.Mock()
mock_iface.return_value.driver_routes = {
'test_method': {'func': test_method,
'async': False,
'http_methods': ['POST']}}
self.service.init_host()
exc = self.assertRaises(messaging.ExpectedException,
self.service.driver_vendor_passthru,
self.context, 'fake-hardware', 'test_method',
'POST', {})
self.assertEqual(exception.MissingParameterValue,
exc.exc_info[0])
self.assertFalse(test_method.called)
@mgr_utils.mock_record_keepalive
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_do_node_deploy_invalid_state(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# test that node deploy fails if the node is already provisioned
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node['uuid'])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
self.assertFalse(mock_iwdi.called)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
def test_do_node_deploy_maintenance(self, mock_iwdi):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
maintenance=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node['uuid'])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeInMaintenance, exc.exc_info[0])
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
self.assertFalse(mock_iwdi.called)
def _test_do_node_deploy_validate_fail(self, mock_validate, mock_iwdi):
mock_iwdi.return_value = False
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceDeployFailure, exc.exc_info[0])
        self.assertEqual(400, exc.exc_info[1].code)
# Check the message of InstanceDeployFailure. In a
# messaging.rpc.ExpectedException sys.exc_info() is stored in exc_info
# in the exception object. So InstanceDeployFailure will be in
# exc_info[1]
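        # i.e. exc.exc_info is the (type, value, traceback) triple from
        # sys.exc_info(), so exc_info[1] is the raised InstanceDeployFailure
        # instance whose message is checked below.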
self.assertIn(r'node 1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
str(exc.exc_info[1]))
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate',
autospec=True)
def test_do_node_deploy_validate_fail(self, mock_validate, mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_deploy_power_validate_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
@mock.patch.object(conductor_utils, 'validate_instance_info_traits',
autospec=True)
def test_do_node_deploy_traits_validate_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
@mock.patch.object(conductor_steps, 'validate_deploy_templates',
autospec=True)
def test_do_node_deploy_validate_template_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
def test_do_node_deploy_partial_ok(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
thread = self.service._spawn_worker(lambda: None)
with mock.patch.object(self.service, '_spawn_worker',
autospec=True) as mock_spawn:
mock_spawn.return_value = thread
node = obj_utils.create_test_node(
self.context,
driver='fake-hardware',
provision_state=states.AVAILABLE,
driver_internal_info={'agent_url': 'url'})
self.service.do_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_spawn.assert_called_once_with(mock.ANY, mock.ANY,
mock.ANY, None)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
def test_do_node_deploy_rebuild_active_state_error(self, mock_iwdi):
# Tests manager.do_node_deploy() & deployments.do_next_deploy_step(),
# when getting an unexpected state returned from a deploy_step.
mock_iwdi.return_value = True
self._start_service()
# NOTE(rloo): We have to mock this here as opposed to using a
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step. So we defer
# mock'ing until after the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
mock_deploy.return_value = states.DEPLOYING
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE,
instance_info={'image_source': uuidutils.generate_uuid(),
'kernel': 'aaaa', 'ramdisk': 'bbbb'},
driver_internal_info={'is_whole_disk_image': False})
self.service.do_node_deploy(self.context, node.uuid, rebuild=True)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
# Verify instance_info values have been cleared.
self.assertNotIn('kernel', node.instance_info)
self.assertNotIn('ramdisk', node.instance_info)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
# Verify is_whole_disk_image reflects correct value on rebuild.
self.assertTrue(node.driver_internal_info['is_whole_disk_image'])
self.assertIsNone(node.driver_internal_info['deploy_steps'])
def test_do_node_deploy_rebuild_active_state_waiting(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# NOTE(rloo): We have to mock this here as opposed to using a
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step. So we defer
# mock'ing until after the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
mock_deploy.return_value = states.DEPLOYWAIT
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE,
instance_info={'image_source': uuidutils.generate_uuid()})
self.service.do_node_deploy(self.context, node.uuid, rebuild=True)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYWAIT, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
# last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
self.assertEqual(1, len(node.driver_internal_info['deploy_steps']))
def test_do_node_deploy_rebuild_active_state_done(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# NOTE(rloo): We have to mock this here as opposed to using a
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step. So we defer
# mock'ing until after the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
mock_deploy.return_value = None
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE)
self.service.do_node_deploy(self.context, node.uuid, rebuild=True)
self._stop_service()
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
# last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
self.assertIsNone(node.driver_internal_info['deploy_steps'])
def test_do_node_deploy_rebuild_deployfail_state(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# NOTE(rloo): We have to mock this here as opposed to using a
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step. So we defer
# mock'ing until after the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
mock_deploy.return_value = None
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYFAIL,
target_provision_state=states.NOSTATE)
self.service.do_node_deploy(self.context, node.uuid, rebuild=True)
self._stop_service()
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
# last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
self.assertIsNone(node.driver_internal_info['deploy_steps'])
def test_do_node_deploy_rebuild_error_state(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# NOTE(rloo): We have to mock this here as opposed to using a
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step. So we defer
# mock'ing until after the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
mock_deploy.return_value = None
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ERROR,
target_provision_state=states.NOSTATE)
self.service.do_node_deploy(self.context, node.uuid, rebuild=True)
self._stop_service()
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
# last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
self.assertIsNone(node.driver_internal_info['deploy_steps'])
def test_do_node_deploy_rebuild_from_available_state(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
# test node will not rebuild if state is AVAILABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.AVAILABLE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node['uuid'], rebuild=True)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
        # last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
self.assertFalse(mock_iwdi.called)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
def test_do_node_deploy_rebuild_protected(self, mock_iwdi):
mock_iwdi.return_value = False
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
protected=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node['uuid'], rebuild=True)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeProtected, exc.exc_info[0])
        # last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
self.assertFalse(mock_iwdi.called)
def test_do_node_deploy_worker_pool_full(self, mock_iwdi):
mock_iwdi.return_value = False
prv_state = states.AVAILABLE
tgt_prv_state = states.NOSTATE
node = obj_utils.create_test_node(self.context,
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver='fake-hardware')
self._start_service()
with mock.patch.object(self.service, '_spawn_worker',
autospec=True) as mock_spawn:
mock_spawn.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_deploy,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
node.refresh()
# Make sure things were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
@mgr_utils.mock_record_keepalive
class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def setUp(self):
super(ContinueNodeDeployTestCase, self).setUp()
self.deploy_start = {
'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'}
self.deploy_end = {
'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'}
self.in_band_step = {
'step': 'deploy_middle', 'priority': 30, 'interface': 'deploy'}
self.deploy_steps = [self.deploy_start, self.deploy_end]
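        # Deploy steps run in decreasing priority order, so deploy_start (50)
        # precedes deploy_end (20), and the in-band step (30) slots in between
        # once the agent reports it.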
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_worker_pool_full(self, mock_spawn):
# Test the appropriate exception is raised if the worker pool is full
prv_state = states.DEPLOYWAIT
tgt_prv_state = states.ACTIVE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None)
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
self.assertRaises(exception.NoFreeConductorWorker,
self.service.continue_node_deploy,
self.context, node.uuid)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_wrong_state(self, mock_spawn):
# Test the appropriate exception is raised if node isn't already
# in DEPLOYWAIT state
prv_state = states.DEPLOYFAIL
tgt_prv_state = states.ACTIVE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None)
self._start_service()
self.assertRaises(exception.InvalidStateRequested,
self.service.continue_node_deploy,
self.context, node.uuid)
self._stop_service()
node.refresh()
# Make sure node wasn't modified
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy(self, mock_spawn):
# test a node can continue deploying via RPC
prv_state = states.DEPLOYWAIT
tgt_prv_state = states.ACTIVE
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0,
'steps_validated': True}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver_internal_info=driver_info,
deploy_step=self.deploy_steps[0])
self._start_service()
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
mock_spawn.assert_called_with(mock.ANY,
deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_first_agent_boot(self, mock_spawn,
mock_get_steps):
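        # Simulates the first agent heartbeat of a deployment: the driver now
        # reports an extra in-band step, so continue_node_deploy has to merge
        # it into deploy_steps and mark the steps as validated.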
new_steps = [self.deploy_start, self.in_band_step, self.deploy_end]
mock_get_steps.return_value = new_steps
prv_state = states.DEPLOYWAIT
tgt_prv_state = states.ACTIVE
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver_internal_info=driver_info,
deploy_step=self.deploy_steps[0])
self._start_service()
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertTrue(node.driver_internal_info['steps_validated'])
self.assertEqual(new_steps, node.driver_internal_info['deploy_steps'])
mock_spawn.assert_called_with(mock.ANY,
deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
@mock.patch.object(task_manager.TaskManager, 'process_event',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_deprecated(self, mock_spawn, mock_event):
# TODO(rloo): delete this when we remove support for handling
# deploy steps; node will always be in DEPLOYWAIT then.
# test a node can continue deploying via RPC
prv_state = states.DEPLOYING
tgt_prv_state = states.ACTIVE
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0,
'steps_validated': True}
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver_internal_info=driver_info,
deploy_step=self.deploy_steps[0])
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
mock_spawn.assert_called_with(mock.ANY,
deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
self.assertFalse(mock_event.called)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def _continue_node_deploy_skip_step(self, mock_spawn, skip=True):
        # Test the skip_current_deploy_step mechanism: by default the current
        # step is skipped (the step index advances), while an explicit False
        # makes the conductor repeat the current step and clears the flag.
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0,
'steps_validated': True}
if not skip:
driver_info['skip_current_deploy_step'] = skip
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYWAIT,
target_provision_state=states.MANAGEABLE,
driver_internal_info=driver_info, deploy_step=self.deploy_steps[0])
self._start_service()
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
if skip:
expected_step_index = 1
else:
self.assertNotIn(
'skip_current_deploy_step', node.driver_internal_info)
expected_step_index = 0
mock_spawn.assert_called_with(mock.ANY,
deployments.do_next_deploy_step,
mock.ANY, expected_step_index, mock.ANY)
def test_continue_node_deploy_skip_step(self):
self._continue_node_deploy_skip_step()
def test_continue_node_deploy_no_skip_step(self):
self._continue_node_deploy_skip_step(skip=False)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_polling(self, mock_spawn):
# test that deployment_polling flag is cleared
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0,
'deployment_polling': True,
'steps_validated': True}
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYWAIT,
target_provision_state=states.MANAGEABLE,
driver_internal_info=driver_info, deploy_step=self.deploy_steps[0])
self._start_service()
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertNotIn('deployment_polling', node.driver_internal_info)
mock_spawn.assert_called_with(mock.ANY,
deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
@mock.patch.object(conductor_steps, 'validate_deploy_templates',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_steps_validation(self, mock_spawn, mock_validate):
prv_state = states.DEPLOYWAIT
tgt_prv_state = states.ACTIVE
mock_validate.side_effect = exception.InvalidParameterValue('boom')
driver_info = {'deploy_steps': self.deploy_steps,
'deploy_step_index': 0,
'steps_validated': False}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver_internal_info=driver_info,
deploy_step=self.deploy_steps[0])
self._start_service()
mock_spawn.reset_mock()
self.service.continue_node_deploy(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
self.assertIn('Failed to validate the final deploy steps',
node.last_error)
self.assertIn('boom', node.last_error)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertFalse(mock_spawn.called)
@mgr_utils.mock_record_keepalive
class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.clean_up',
autospec=True)
def test__check_deploy_timeouts(self, mock_cleanup):
self._start_service()
CONF.set_override('deploy_callback_timeout', 1, group='conductor')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE,
provision_updated_at=datetime.datetime(2000, 1, 1, 0, 0))
self.service._check_deploy_timeouts(self.context)
self._stop_service()
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY)
def _check_cleanwait_timeouts(self, manual=False):
self._start_service()
CONF.set_override('clean_callback_timeout', 1, group='conductor')
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=tgt_prov_state,
provision_updated_at=datetime.datetime(2000, 1, 1, 0, 0),
clean_step={
'interface': 'deploy',
'step': 'erase_devices'},
driver_internal_info={
'cleaning_reboot': manual,
'clean_step_index': 0})
self.service._check_cleanwait_timeouts(self.context)
self._stop_service()
node.refresh()
self.assertEqual(states.CLEANFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Test that cleaning parameters have been purged in order
# to prevent looping of the cleaning sequence
self.assertEqual({}, node.clean_step)
self.assertNotIn('clean_step_index', node.driver_internal_info)
self.assertNotIn('cleaning_reboot', node.driver_internal_info)
def test__check_cleanwait_timeouts_automated_clean(self):
self._check_cleanwait_timeouts()
def test__check_cleanwait_timeouts_manual_clean(self):
self._check_cleanwait_timeouts(manual=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up',
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_check_rescuewait_timeouts(self, node_power_mock,
mock_clean_up):
self._start_service()
CONF.set_override('rescue_callback_timeout', 1, group='conductor')
tgt_prov_state = states.RESCUE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
rescue_interface='fake',
network_interface='flat',
provision_state=states.RESCUEWAIT,
target_provision_state=tgt_prov_state,
provision_updated_at=datetime.datetime(2000, 1, 1, 0, 0))
self.service._check_rescuewait_timeouts(self.context)
self._stop_service()
node.refresh()
self.assertEqual(states.RESCUEFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
self.assertIn('Timeout reached while waiting for rescue ramdisk',
node.last_error)
mock_clean_up.assert_called_once_with(mock.ANY, mock.ANY)
node_power_mock.assert_called_once_with(mock.ANY, states.POWER_OFF)
@mgr_utils.mock_record_keepalive
class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_do_node_tear_down_invalid_state(self):
self._start_service()
# test node.provision_state is incorrect for tear_down
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.AVAILABLE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_tear_down,
self.context, node['uuid'])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
def test_do_node_tear_down_protected(self):
self._start_service()
# test node.provision_state is incorrect for tear_down
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
protected=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_tear_down,
self.context, node['uuid'])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeProtected, exc.exc_info[0])
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_tear_down_validate_fail(self, mock_validate):
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_tear_down,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceDeployFailure, exc.exc_info[0])
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
autospec=True)
def test_do_node_tear_down_driver_raises_error(self, mock_tear_down):
# test when driver.deploy.tear_down raises exception
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DELETING,
target_provision_state=states.AVAILABLE,
instance_info={'foo': 'bar'},
driver_internal_info={'is_whole_disk_image': False})
task = task_manager.TaskManager(self.context, node.uuid)
self._start_service()
mock_tear_down.side_effect = exception.InstanceDeployFailure('test')
self.assertRaises(exception.InstanceDeployFailure,
self.service._do_node_tear_down, task,
node.provision_state)
node.refresh()
self.assertEqual(states.ERROR, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Assert instance_info was erased
self.assertEqual({}, node.instance_info)
mock_tear_down.assert_called_once_with(mock.ANY, task)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console',
autospec=True)
def test_do_node_tear_down_console_raises_error(self, mock_console):
# test when _set_console_mode raises exception
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DELETING,
target_provision_state=states.AVAILABLE,
instance_info={'foo': 'bar'},
console_enabled=True,
driver_internal_info={'is_whole_disk_image': False})
task = task_manager.TaskManager(self.context, node.uuid)
self._start_service()
mock_console.side_effect = exception.ConsoleError('test')
self.assertRaises(exception.ConsoleError,
self.service._do_node_tear_down, task,
node.provision_state)
node.refresh()
self.assertEqual(states.ERROR, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Assert instance_info was erased
self.assertEqual({}, node.instance_info)
mock_console.assert_called_once_with(mock.ANY, task)
# TODO(TheJulia): Since we're functionally bound to neutron support
# by default, the fake drivers still invoke neutron.
@mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console',
autospec=True)
@mock.patch('ironic.common.neutron.unbind_neutron_port', autospec=True)
@mock.patch('ironic.conductor.cleaning.do_node_clean', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
autospec=True)
def _test__do_node_tear_down_ok(self, mock_tear_down, mock_clean,
mock_unbind, mock_console,
enabled_console=False,
with_allocation=False):
# test when driver.deploy.tear_down succeeds
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DELETING,
target_provision_state=states.AVAILABLE,
instance_uuid=(uuidutils.generate_uuid()
if not with_allocation else None),
instance_info={'foo': 'bar'},
console_enabled=enabled_console,
driver_internal_info={'is_whole_disk_image': False,
'deploy_steps': {},
'root_uuid_or_disk_id': 'foo',
'instance': {'ephemeral_gb': 10}})
port = obj_utils.create_test_port(
self.context, node_id=node.id,
internal_info={'tenant_vif_port_id': 'foo'})
if with_allocation:
alloc = obj_utils.create_test_allocation(self.context)
# Establish cross-linking between the node and the allocation
alloc.node_id = node.id
alloc.save()
node.refresh()
task = task_manager.TaskManager(self.context, node.uuid)
self._start_service()
self.service._do_node_tear_down(task, node.provision_state)
node.refresh()
port.refresh()
# Node will be moved to AVAILABLE after cleaning, not tested here
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(states.AVAILABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertIsNone(node.instance_uuid)
self.assertIsNone(node.allocation_id)
self.assertEqual({}, node.instance_info)
self.assertNotIn('instance', node.driver_internal_info)
self.assertIsNone(node.driver_internal_info['deploy_steps'])
self.assertNotIn('root_uuid_or_disk_id', node.driver_internal_info)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
mock_tear_down.assert_called_once_with(task.driver.deploy, task)
mock_clean.assert_called_once_with(task)
self.assertEqual({}, port.internal_info)
mock_unbind.assert_called_once_with('foo', context=mock.ANY)
if enabled_console:
mock_console.assert_called_once_with(task.driver.console, task)
else:
self.assertFalse(mock_console.called)
if with_allocation:
self.assertRaises(exception.AllocationNotFound,
objects.Allocation.get_by_id,
self.context, alloc.id)
def test__do_node_tear_down_ok_without_console(self):
self._test__do_node_tear_down_ok(enabled_console=False)
def test__do_node_tear_down_ok_with_console(self):
self._test__do_node_tear_down_ok(enabled_console=True)
def test__do_node_tear_down_with_allocation(self):
self._test__do_node_tear_down_ok(with_allocation=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up',
autospec=True)
@mock.patch('ironic.conductor.cleaning.do_node_clean', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
autospec=True)
def _test_do_node_tear_down_from_state(self, init_state, is_rescue_state,
mock_tear_down, mock_clean,
mock_rescue_clean):
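        # Tear down should succeed from the given starting state; rescue
        # clean-up is only expected for rescue-related states.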
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
uuid=uuidutils.generate_uuid(),
provision_state=init_state,
target_provision_state=states.AVAILABLE,
driver_internal_info={'is_whole_disk_image': False})
self._start_service()
self.service.do_node_tear_down(self.context, node.uuid)
self._stop_service()
node.refresh()
# Node will be moved to AVAILABLE after cleaning, not tested here
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(states.AVAILABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertEqual({}, node.instance_info)
mock_tear_down.assert_called_once_with(mock.ANY, mock.ANY)
mock_clean.assert_called_once_with(mock.ANY)
if is_rescue_state:
mock_rescue_clean.assert_called_once_with(mock.ANY, mock.ANY)
else:
self.assertFalse(mock_rescue_clean.called)
def test__do_node_tear_down_from_valid_states(self):
valid_states = [states.ACTIVE, states.DEPLOYWAIT, states.DEPLOYFAIL,
states.ERROR]
for state in valid_states:
self._test_do_node_tear_down_from_state(state, False)
valid_rescue_states = [states.RESCUEWAIT, states.RESCUE,
states.UNRESCUEFAIL, states.RESCUEFAIL]
for state in valid_rescue_states:
self._test_do_node_tear_down_from_state(state, True)
# NOTE(tenbrae): partial tear-down was broken. A node left in a state of
# DELETING could not have tear_down called on it a second
    #             time. Thus, I have removed the unit test, which faultily
# asserted only that a node could be left in a state of
# incomplete deletion -- not that such a node's deletion
# could later be completed.
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_node_tear_down_worker_pool_full(self, mock_spawn):
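        # When no conductor workers are free the request fails and the node
        # is rolled back to its previous state.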
prv_state = states.ACTIVE
tgt_prv_state = states.NOSTATE
fake_instance_info = {'foo': 'bar'}
driver_internal_info = {'is_whole_disk_image': False}
node = obj_utils.create_test_node(
self.context, driver='fake-hardware', provision_state=prv_state,
target_provision_state=tgt_prv_state,
instance_info=fake_instance_info,
driver_internal_info=driver_internal_info, last_error=None)
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_tear_down,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
node.refresh()
# Assert instance_info/driver_internal_info was not touched
self.assertEqual(fake_instance_info, node.instance_info)
self.assertEqual(driver_internal_info, node.driver_internal_info)
# Make sure things were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mgr_utils.mock_record_keepalive
class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provisioning_action_worker_pool_full(self, mock_spawn):
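        # The 'provide' action is rolled back when the worker pool is
        # exhausted.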
prv_state = states.MANAGEABLE
tgt_prv_state = states.NOSTATE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None)
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_provisioning_action,
self.context, node.uuid, 'provide')
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
node.refresh()
# Make sure things were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provision_action_provide(self, mock_spawn):
# test when a node is cleaned going from manageable to available
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=states.AVAILABLE)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'provide')
node.refresh()
# Node will be moved to AVAILABLE after cleaning, not tested here
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(states.AVAILABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(self.service,
cleaning.do_node_clean, mock.ANY)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provision_action_provide_in_maintenance(self, mock_spawn):
CONF.set_override('allow_provisioning_in_maintenance', False,
group='conductor')
        # cleaning must not start when the node is in maintenance
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=None,
maintenance=True)
self._start_service()
mock_spawn.reset_mock()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_provisioning_action,
self.context, node.uuid, 'provide')
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeInMaintenance, exc.exc_info[0])
node.refresh()
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertFalse(mock_spawn.called)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provision_action_manage(self, mock_spawn):
# test when a node is verified going from enroll to manageable
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ENROLL,
target_provision_state=states.MANAGEABLE)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'manage')
node.refresh()
# Node will be moved to MANAGEABLE after verification, not tested here
self.assertEqual(states.VERIFYING, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(self.service,
self.service._do_node_verify, mock.ANY)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def _do_provision_action_abort(self, mock_spawn, manual=False):
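        # Aborting a node in CLEANWAIT moves it to CLEANFAIL and spawns the
        # clean-abort worker.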
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=tgt_prov_state)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'abort')
node.refresh()
# Node will be moved to tgt_prov_state after cleaning, not tested here
self.assertEqual(states.CLEANFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean_abort, mock.ANY)
def test_do_provision_action_abort_automated_clean(self):
self._do_provision_action_abort()
def test_do_provision_action_abort_manual_clean(self):
self._do_provision_action_abort(manual=True)
def test_do_provision_action_abort_clean_step_not_abortable(self):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=states.AVAILABLE,
clean_step={'step': 'foo', 'abortable': False})
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'abort')
node.refresh()
# Assert the current clean step was marked to be aborted later
self.assertIn('abort_after', node.clean_step)
self.assertTrue(node.clean_step['abort_after'])
        # Make sure things stay as they were before
self.assertEqual(states.CLEANWAIT, node.provision_state)
self.assertEqual(states.AVAILABLE, node.target_provision_state)
@mgr_utils.mock_record_keepalive
class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(DoNodeCleanTestCase, self).setUp()
self.config(automated_clean=True, group='conductor')
self.power_update = {
'step': 'update_firmware', 'priority': 10, 'interface': 'power'}
self.deploy_update = {
'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'}
self.deploy_erase = {
'step': 'erase_disks', 'priority': 20, 'interface': 'deploy'}
# Automated cleaning should be executed in this order
self.clean_steps = [self.deploy_erase, self.power_update,
self.deploy_update]
self.next_clean_step_index = 1
# Manual clean step
self.deploy_raid = {
'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_maintenance(self, mock_validate):
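        # Manual cleaning is rejected while the node is in maintenance.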
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=states.NOSTATE,
maintenance=True, maintenance_reason='reason')
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_clean,
self.context, node.uuid, [])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeInMaintenance, exc.exc_info[0])
self.assertFalse(mock_validate.called)
@mock.patch('ironic.conductor.task_manager.TaskManager.process_event',
autospec=True)
def _test_do_node_clean_validate_fail(self, mock_validate, mock_process):
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=states.NOSTATE)
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_clean,
self.context, node.uuid, [])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_process.called)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_power_validate_fail(self, mock_validate):
self._test_do_node_clean_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test_do_node_clean_network_validate_fail(self, mock_validate):
self._test_do_node_clean_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_invalid_state(self, mock_power_valid,
mock_network_valid):
# test node.provision_state is incorrect for clean
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ENROLL,
target_provision_state=states.NOSTATE)
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_clean,
self.context, node.uuid, [])
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
node.refresh()
self.assertNotIn('clean_steps', node.driver_internal_info)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_ok(self, mock_power_valid, mock_network_valid,
mock_spawn):
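        # A manual clean on a manageable node spawns cleaning and clears the
        # previous last_error.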
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=states.NOSTATE, last_error='old error')
self._start_service()
clean_steps = [self.deploy_raid]
self.service.do_node_clean(self.context, node.uuid, clean_steps)
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Node will be moved to CLEANING
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertNotIn('clean_steps', node.driver_internal_info)
self.assertIsNone(node.last_error)
@mock.patch('ironic.conductor.utils.remove_agent_url', autospec=True)
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_ok_fast_track(
self, mock_power_valid, mock_network_valid, mock_spawn,
mock_is_fast_track, mock_remove_agent_url):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
driver_internal_info={'agent_url': 'meow'})
mock_is_fast_track.return_value = True
self._start_service()
clean_steps = [self.deploy_raid]
self.service.do_node_clean(self.context, node.uuid, clean_steps)
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Node will be moved to CLEANING
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertNotIn('clean_steps', node.driver_internal_info)
mock_is_fast_track.assert_called_once_with(mock.ANY)
mock_remove_agent_url.assert_not_called()
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_clean_worker_pool_full(self, mock_power_valid,
mock_network_valid, mock_spawn):
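        # The clean request is rolled back when no conductor workers are
        # free.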
prv_state = states.MANAGEABLE
tgt_prv_state = states.NOSTATE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware', provision_state=prv_state,
target_provision_state=tgt_prv_state)
self._start_service()
clean_steps = [self.deploy_raid]
mock_spawn.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_clean,
self.context, node.uuid, clean_steps)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Make sure states were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
self.assertIsNone(node.reservation)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_clean_worker_pool_full(self, mock_spawn):
# Test the appropriate exception is raised if the worker pool is full
prv_state = states.CLEANWAIT
tgt_prv_state = states.AVAILABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None)
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
self.assertRaises(exception.NoFreeConductorWorker,
self.service.continue_node_clean,
self.context, node.uuid)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_clean_wrong_state(self, mock_spawn):
        # Test the appropriate exception is raised if the node isn't already
        # in CLEANWAIT state
prv_state = states.ACTIVE
tgt_prv_state = states.AVAILABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None)
self._start_service()
self.assertRaises(exception.InvalidStateRequested,
self.service.continue_node_clean,
self.context, node.uuid)
self._stop_service()
node.refresh()
# Make sure things were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def _continue_node_clean(self, return_state, mock_spawn, manual=False):
# test a node can continue cleaning via RPC
prv_state = return_state
tgt_prv_state = states.MANAGEABLE if manual else states.AVAILABLE
driver_info = {'clean_steps': self.clean_steps,
'clean_step_index': 0}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver_internal_info=driver_info,
clean_step=self.clean_steps[0])
self._start_service()
self.service.continue_node_clean(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
mock_spawn.assert_called_with(self.service,
cleaning.do_next_clean_step,
mock.ANY, self.next_clean_step_index)
def test_continue_node_clean_automated(self):
self._continue_node_clean(states.CLEANWAIT)
def test_continue_node_clean_manual(self):
self._continue_node_clean(states.CLEANWAIT, manual=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def _continue_node_clean_skip_step(self, mock_spawn, skip=True):
        # test that the skip-current-clean-step mechanism works
driver_info = {'clean_steps': self.clean_steps,
'clean_step_index': 0}
if not skip:
driver_info['skip_current_clean_step'] = skip
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=states.MANAGEABLE,
driver_internal_info=driver_info, clean_step=self.clean_steps[0])
self._start_service()
self.service.continue_node_clean(self.context, node.uuid)
self._stop_service()
node.refresh()
if skip:
expected_step_index = 1
else:
self.assertNotIn(
'skip_current_clean_step', node.driver_internal_info)
expected_step_index = 0
mock_spawn.assert_called_with(self.service,
cleaning.do_next_clean_step,
mock.ANY, expected_step_index)
def test_continue_node_clean_skip_step(self):
self._continue_node_clean_skip_step()
def test_continue_node_clean_no_skip_step(self):
self._continue_node_clean_skip_step(skip=False)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_clean_polling(self, mock_spawn):
        # test that the cleaning_polling flag is cleared
driver_info = {'clean_steps': self.clean_steps,
'clean_step_index': 0,
'cleaning_polling': True}
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=states.MANAGEABLE,
driver_internal_info=driver_info, clean_step=self.clean_steps[0])
self._start_service()
self.service.continue_node_clean(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertNotIn('cleaning_polling', node.driver_internal_info)
mock_spawn.assert_called_with(self.service,
cleaning.do_next_clean_step,
mock.ANY, 1)
def _continue_node_clean_abort(self, manual=False):
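        # A non-abortable step flagged with abort_after fails cleaning once
        # the step finishes, recording the step name in last_error.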
last_clean_step = self.clean_steps[0]
last_clean_step['abortable'] = False
last_clean_step['abort_after'] = True
driver_info = {'clean_steps': self.clean_steps,
'clean_step_index': 0}
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=tgt_prov_state, last_error=None,
driver_internal_info=driver_info, clean_step=self.clean_steps[0])
self._start_service()
self.service.continue_node_clean(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(states.CLEANFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# assert the clean step name is in the last error message
self.assertIn(self.clean_steps[0]['step'], node.last_error)
def test_continue_node_clean_automated_abort(self):
self._continue_node_clean_abort()
def test_continue_node_clean_manual_abort(self):
self._continue_node_clean_abort(manual=True)
def _continue_node_clean_abort_last_clean_step(self, manual=False):
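        # If the aborted step was the last one, cleaning completes and the
        # node reaches the target provision state without an error.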
last_clean_step = self.clean_steps[0]
last_clean_step['abortable'] = False
last_clean_step['abort_after'] = True
driver_info = {'clean_steps': [self.clean_steps[0]],
'clean_step_index': 0}
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.CLEANWAIT,
target_provision_state=tgt_prov_state, last_error=None,
driver_internal_info=driver_info, clean_step=self.clean_steps[0])
self._start_service()
self.service.continue_node_clean(self.context, node.uuid)
self._stop_service()
node.refresh()
self.assertEqual(tgt_prov_state, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertIsNone(node.last_error)
def test_continue_node_clean_automated_abort_last_clean_step(self):
self._continue_node_clean_abort_last_clean_step()
def test_continue_node_clean_manual_abort_last_clean_step(self):
self._continue_node_clean_abort_last_clean_step(manual=True)
class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_node_rescue(self, mock_acquire):
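        # A rescue request stores the (hashed) rescue password in
        # instance_info and strips stale agent fields from
        # driver_internal_info.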
self._start_service()
dii = {'agent_secret_token': 'token',
'agent_url': 'http://url',
'other field': 'value'}
task = self._create_task(
node_attrs=dict(driver='fake-hardware',
provision_state=states.ACTIVE,
instance_info={},
driver_internal_info=dii))
mock_acquire.side_effect = self._get_acquire_side_effect(task)
self.service.do_node_rescue(self.context, task.node.uuid,
"password")
task.process_event.assert_called_once_with(
'rescue',
callback=self.service._spawn_worker,
call_args=(self.service._do_node_rescue, task),
err_handler=conductor_utils.spawn_rescue_error_handler)
self.assertIn('rescue_password', task.node.instance_info)
self.assertIn('hashed_rescue_password', task.node.instance_info)
self.assertEqual({'other field': 'value'},
task.node.driver_internal_info)
def test_do_node_rescue_invalid_state(self):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop',
provision_state=states.AVAILABLE,
instance_info={})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_rescue,
self.context, node.uuid, "password")
node.refresh()
self.assertNotIn('rescue_password', node.instance_info)
self.assertNotIn('hashed_rescue_password', node.instance_info)
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
def _test_do_node_rescue_when_validate_fail(self, mock_validate):
# InvalidParameterValue should be re-raised as InstanceRescueFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE,
instance_info={})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_rescue,
self.context, node.uuid, "password")
node.refresh()
self.assertNotIn('hashed_rescue_password', node.instance_info)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceRescueFailure, exc.exc_info[0])
@mock.patch('ironic.drivers.modules.fake.FakeRescue.validate',
autospec=True)
def test_do_node_rescue_when_rescue_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_rescue_when_power_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test_do_node_rescue_when_network_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
def test_do_node_rescue_maintenance(self):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
network_interface='noop',
provision_state=states.ACTIVE,
maintenance=True,
target_provision_state=states.NOSTATE,
instance_info={})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_rescue,
self.context, node['uuid'], "password")
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeInMaintenance, exc.exc_info[0])
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_returns_rescuewait(self, mock_rescue):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUING,
instance_info={'rescue_password': 'password',
'hashed_rescue_password': '1234'})
with task_manager.TaskManager(self.context, node.uuid) as task:
mock_rescue.return_value = states.RESCUEWAIT
self.service._do_node_rescue(task)
node.refresh()
self.assertEqual(states.RESCUEWAIT, node.provision_state)
self.assertEqual(states.RESCUE, node.target_provision_state)
self.assertIn('rescue_password', node.instance_info)
self.assertIn('hashed_rescue_password', node.instance_info)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_returns_rescue(self, mock_rescue):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUING,
instance_info={
'rescue_password': 'password',
'hashed_rescue_password': '1234'})
with task_manager.TaskManager(self.context, node.uuid) as task:
mock_rescue.return_value = states.RESCUE
self.service._do_node_rescue(task)
node.refresh()
self.assertEqual(states.RESCUE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIn('rescue_password', node.instance_info)
self.assertIn('hashed_rescue_password', node.instance_info)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_errors(self, mock_rescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUING,
instance_info={
'rescue_password': 'password',
'hashed_rescue_password': '1234'})
mock_rescue.side_effect = exception.InstanceRescueFailure(
'failed to rescue')
with task_manager.TaskManager(self.context, node.uuid) as task:
self.assertRaises(exception.InstanceRescueFailure,
self.service._do_node_rescue, task)
node.refresh()
self.assertEqual(states.RESCUEFAIL, node.provision_state)
self.assertEqual(states.RESCUE, node.target_provision_state)
self.assertNotIn('rescue_password', node.instance_info)
self.assertNotIn('hashed_rescue_password', node.instance_info)
self.assertTrue(node.last_error.startswith('Failed to rescue'))
self.assertTrue(mock_log.error.called)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_bad_state(self, mock_rescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUING,
instance_info={
'rescue_password': 'password',
'hashed_rescue_password': '1234'})
mock_rescue.return_value = states.ACTIVE
with task_manager.TaskManager(self.context, node.uuid) as task:
self.service._do_node_rescue(task)
node.refresh()
self.assertEqual(states.RESCUEFAIL, node.provision_state)
self.assertEqual(states.RESCUE, node.target_provision_state)
self.assertNotIn('rescue_password', node.instance_info)
self.assertNotIn('hashed_rescue_password', node.instance_info)
self.assertTrue(node.last_error.startswith('Failed to rescue'))
self.assertTrue(mock_log.error.called)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_node_unrescue(self, mock_acquire):
self._start_service()
task = self._create_task(
node_attrs=dict(driver='fake-hardware',
provision_state=states.RESCUE,
driver_internal_info={'agent_url': 'url'}))
mock_acquire.side_effect = self._get_acquire_side_effect(task)
self.service.do_node_unrescue(self.context, task.node.uuid)
task.node.refresh()
self.assertNotIn('agent_url', task.node.driver_internal_info)
task.process_event.assert_called_once_with(
'unrescue',
callback=self.service._spawn_worker,
call_args=(self.service._do_node_unrescue, task),
err_handler=conductor_utils.provisioning_error_handler)
def test_do_node_unrescue_invalid_state(self):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.AVAILABLE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_unrescue,
self.context, node.uuid)
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_unrescue_validate_fail(self, mock_validate):
# InvalidParameterValue should be re-raised as InstanceUnrescueFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUE,
target_provision_state=states.NOSTATE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_unrescue,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceUnrescueFailure, exc.exc_info[0])
def test_do_node_unrescue_maintenance(self):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUE,
maintenance=True,
target_provision_state=states.NOSTATE,
instance_info={})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_node_unrescue,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeInMaintenance, exc.exc_info[0])
        # This is a sync operation; last_error should be None.
node.refresh()
self.assertIsNone(node.last_error)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
autospec=True)
def test__do_node_unrescue(self, mock_unrescue):
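        # A successful unrescue returns the node to ACTIVE and strips the
        # agent fields from driver_internal_info.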
self._start_service()
dii = {'agent_url': 'http://url',
'agent_secret_token': 'token',
'other field': 'value'}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.UNRESCUING,
target_provision_state=states.ACTIVE,
instance_info={},
driver_internal_info=dii)
with task_manager.TaskManager(self.context, node.uuid) as task:
mock_unrescue.return_value = states.ACTIVE
self.service._do_node_unrescue(task)
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertEqual({'other field': 'value'},
node.driver_internal_info)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
autospec=True)
def test__do_node_unrescue_ironic_error(self, mock_unrescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.UNRESCUING,
target_provision_state=states.ACTIVE,
instance_info={})
mock_unrescue.side_effect = exception.InstanceUnrescueFailure(
'Unable to unrescue')
with task_manager.TaskManager(self.context, node.uuid) as task:
self.assertRaises(exception.InstanceUnrescueFailure,
self.service._do_node_unrescue, task)
node.refresh()
self.assertEqual(states.UNRESCUEFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
            self.assertIn('Unable to unrescue', node.last_error)
self.assertTrue(mock_log.error.called)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
autospec=True)
def test__do_node_unrescue_other_error(self, mock_unrescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.UNRESCUING,
target_provision_state=states.ACTIVE,
instance_info={})
mock_unrescue.side_effect = RuntimeError('Some failure')
with task_manager.TaskManager(self.context, node.uuid) as task:
self.assertRaises(RuntimeError,
self.service._do_node_unrescue, task)
node.refresh()
self.assertEqual(states.UNRESCUEFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
            self.assertIn('Some failure', node.last_error)
self.assertTrue(mock_log.exception.called)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
autospec=True)
def test__do_node_unrescue_bad_state(self, mock_unrescue):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.UNRESCUING,
instance_info={})
mock_unrescue.return_value = states.RESCUEWAIT
with task_manager.TaskManager(self.context, node.uuid) as task:
self.service._do_node_unrescue(task)
node.refresh()
self.assertEqual(states.UNRESCUEFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
            self.assertIn('Driver returned unexpected state',
                          node.last_error)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_provision_rescue_abort(self, mock_spawn):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUEWAIT,
target_provision_state=states.RESCUE,
instance_info={'rescue_password': 'password'})
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'abort')
node.refresh()
self.assertEqual(states.RESCUEFAIL, node.provision_state)
self.assertIsNone(node.last_error)
self.assertNotIn('rescue_password', node.instance_info)
mock_spawn.assert_called_with(
self.service, self.service._do_node_rescue_abort, mock.ANY)
@mock.patch.object(fake.FakeRescue, 'clean_up', autospec=True)
def test__do_node_rescue_abort(self, clean_up_mock):
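        # Aborting a failed rescue runs the rescue clean-up, records an
        # error and leaves the node out of maintenance.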
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUEFAIL,
target_provision_state=states.RESCUE,
driver_internal_info={'agent_url': 'url'})
with task_manager.acquire(self.context, node.uuid) as task:
self.service._do_node_rescue_abort(task)
clean_up_mock.assert_called_once_with(task.driver.rescue, task)
self.assertIsNotNone(task.node.last_error)
self.assertFalse(task.node.maintenance)
self.assertNotIn('agent_url', task.node.driver_internal_info)
@mock.patch.object(fake.FakeRescue, 'clean_up', autospec=True)
def test__do_node_rescue_abort_clean_up_fail(self, clean_up_mock):
clean_up_mock.side_effect = Exception('Surprise')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.RESCUEFAIL)
with task_manager.acquire(self.context, node.uuid) as task:
self.service._do_node_rescue_abort(task)
clean_up_mock.assert_called_once_with(task.driver.rescue, task)
self.assertIsNotNone(task.node.last_error)
self.assertIsNotNone(task.node.maintenance_reason)
self.assertTrue(task.node.maintenance)
self.assertEqual('rescue abort failure',
task.node.fault)
@mgr_utils.mock_record_keepalive
class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_verify(self, mock_validate, mock_get_power_state,
mock_notif):
self._start_service()
mock_get_power_state.return_value = states.POWER_OFF
# Required for exception handling
mock_notif.__name__ = 'NodeCorrectedPowerStateNotification'
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.VERIFYING,
target_provision_state=states.MANAGEABLE,
last_error=None,
power_state=states.NOSTATE)
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
self.service._do_node_verify(task)
self._stop_service()
# 1 notification should be sent -
# baremetal.node.power_state_corrected.success
mock_notif.assert_called_once_with(publisher=mock.ANY,
event_type=mock.ANY,
level=mock.ANY,
payload=mock.ANY)
mock_notif.return_value.emit.assert_called_once_with(mock.ANY)
node.refresh()
mock_validate.assert_called_once_with(mock.ANY, task)
mock_get_power_state.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertEqual(states.POWER_OFF, node.power_state)
@mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_verify_validation_fails(self, mock_validate,
mock_get_power_state):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.VERIFYING,
target_provision_state=states.MANAGEABLE,
last_error=None,
power_state=states.NOSTATE)
mock_validate.side_effect = RuntimeError("boom")
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
self.service._do_node_verify(task)
self._stop_service()
node.refresh()
mock_validate.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ENROLL, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertTrue(node.last_error)
self.assertFalse(mock_get_power_state.called)
@mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_verify_get_state_fails(self, mock_validate,
mock_get_power_state):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.VERIFYING,
target_provision_state=states.MANAGEABLE,
last_error=None,
power_state=states.NOSTATE)
mock_get_power_state.side_effect = RuntimeError("boom")
with task_manager.acquire(
self.context, node['id'], shared=False) as task:
self.service._do_node_verify(task)
self._stop_service()
node.refresh()
mock_get_power_state.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ENROLL, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertTrue(node.last_error)
@mgr_utils.mock_record_keepalive
class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
db_base.DbTestCase):
def test__mapped_to_this_conductor(self):
self._start_service()
n = db_utils.get_test_node()
self.assertTrue(self.service._mapped_to_this_conductor(
n['uuid'], 'fake-hardware', ''))
self.assertFalse(self.service._mapped_to_this_conductor(
n['uuid'], 'fake-hardware', 'foogroup'))
self.assertFalse(self.service._mapped_to_this_conductor(n['uuid'],
'otherdriver',
''))
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_dynamic_driver_interfaces(self, mock_iwdi):
mock_iwdi.return_value = False
target_raid_config = {'logical_disks': [{'size_gb': 1,
'raid_level': '1'}]}
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
target_raid_config=target_raid_config,
network_interface='noop')
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
expected = {'console': {'result': True},
'power': {'result': True},
'inspect': {'result': True},
'management': {'result': True},
'boot': {'result': True},
'raid': {'result': True},
'deploy': {'result': True},
'network': {'result': True},
'storage': {'result': True},
'rescue': {'result': True},
'bios': {'result': True}}
self.assertEqual(expected, ret)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail(self, mock_iwdi,
mock_val):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
reason = 'fake reason'
mock_val.side_effect = exception.InvalidParameterValue(reason)
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_unexpected(
self, mock_iwdi, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = Exception('boom')
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
reason = ('Unexpected exception, traceback saved '
'into log by ironic conductor service '
'that is running on test-host: boom')
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_instance_traits(
self, mock_iwdi):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
with mock.patch(
'ironic.conductor.utils.validate_instance_info_traits',
autospec=True) as ii_traits:
reason = 'fake reason'
ii_traits.side_effect = exception.InvalidParameterValue(reason)
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_deploy_templates(
self, mock_iwdi):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
with mock.patch(
'ironic.conductor.steps.validate_deploy_templates',
autospec=True) as mock_validate:
reason = 'fake reason'
mock_validate.side_effect = exception.InvalidParameterValue(reason)
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(manager.ConductorManager, '_fail_if_in_state',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test_iter_nodes(self, mock_nodeinfo_list, mock_mapped,
mock_fail_if_state):
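        # iter_nodes yields only nodes mapped to this conductor and first
        # fails nodes stuck in deploying/cleaning states.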
self._start_service()
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
nodes = [self._create_node(id=i, driver='fake-hardware',
conductor_group='')
for i in range(2)]
mock_nodeinfo_list.return_value = self._get_nodeinfo_list_response(
nodes)
mock_mapped.side_effect = [True, False]
result = list(self.service.iter_nodes(fields=['id'],
filters=mock.sentinel.filters))
self.assertEqual([(nodes[0].uuid, 'fake-hardware', '', 0)], result)
mock_nodeinfo_list.assert_called_once_with(
columns=self.columns, filters=mock.sentinel.filters)
expected_calls = [mock.call(mock.ANY, mock.ANY,
{'provision_state': 'deploying',
'reserved': False},
'deploying',
'provision_updated_at',
last_error=mock.ANY),
mock.call(mock.ANY, mock.ANY,
{'provision_state': 'cleaning',
'reserved': False},
'cleaning',
'provision_updated_at',
last_error=mock.ANY)]
mock_fail_if_state.assert_has_calls(expected_calls)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test_iter_nodes_shutdown(self, mock_nodeinfo_list):
self._start_service()
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
nodes = [self._create_node(driver='fake-hardware')]
mock_nodeinfo_list.return_value = self._get_nodeinfo_list_response(
nodes)
self.service._shutdown = True
result = list(self.service.iter_nodes(fields=['id'],
filters=mock.sentinel.filters))
self.assertEqual([], result)
@mgr_utils.mock_record_keepalive
class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_set_console_mode_worker_pool_full(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
with mock.patch.object(self.service,
'_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.set_console_mode,
self.context, node.uuid, True)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
spawn_mock.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_set_console_mode_enabled(self, mock_notify):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
self.service.set_console_mode(self.context, node.uuid, True)
self._stop_service()
node.refresh()
self.assertTrue(node.console_enabled)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.END)])
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_set_console_mode_disabled(self, mock_notify):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
self._start_service()
self.service.set_console_mode(self.context, node.uuid, False)
self._stop_service()
node.refresh()
self.assertFalse(node.console_enabled)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.END)])
@mock.patch.object(fake.FakeConsole, 'validate', autospec=True)
def test_set_console_mode_validation_fail(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
last_error=None)
self._start_service()
mock_val.side_effect = exception.InvalidParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.set_console_mode,
self.context, node.uuid, True)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_set_console_mode_start_fail(self, mock_notify, mock_sc):
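        # A console start failure records last_error and emits an ERROR
        # notification.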
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
last_error=None,
console_enabled=False)
self._start_service()
mock_sc.side_effect = exception.IronicException('test-error')
self.service.set_console_mode(self.context, node.uuid, True)
self._stop_service()
mock_sc.assert_called_once_with(mock.ANY, mock.ANY)
node.refresh()
self.assertIsNotNone(node.last_error)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_set_console_mode_stop_fail(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
last_error=None,
console_enabled=True)
self._start_service()
mock_sc.side_effect = exception.IronicException('test-error')
self.service.set_console_mode(self.context, node.uuid, False)
self._stop_service()
mock_sc.assert_called_once_with(mock.ANY, mock.ANY)
node.refresh()
self.assertIsNotNone(node.last_error)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_enable_console_already_enabled(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
self._start_service()
self.service.set_console_mode(self.context, node.uuid, True)
self._stop_service()
self.assertFalse(mock_sc.called)
self.assertFalse(mock_notify.called)
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_disable_console_already_disabled(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=False)
self._start_service()
self.service.set_console_mode(self.context, node.uuid, False)
self._stop_service()
self.assertFalse(mock_sc.called)
self.assertFalse(mock_notify.called)
@mock.patch.object(fake.FakeConsole, 'get_console', autospec=True)
def test_get_console(self, mock_gc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
console_info = {'test': 'test info'}
mock_gc.return_value = console_info
data = self.service.get_console_information(self.context,
node.uuid)
self.assertEqual(console_info, data)
def test_get_console_disabled(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=False)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.get_console_information,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeConsoleNotEnabled, exc.exc_info[0])
@mock.patch.object(fake.FakeConsole, 'validate', autospec=True)
def test_get_console_validate_fail(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
mock_val.side_effect = exception.InvalidParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.get_console_information,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_destroy_node(self):
self._start_service()
for state in states.DELETE_ALLOWED_STATES:
node = obj_utils.create_test_node(self.context,
provision_state=state)
self.service.destroy_node(self.context, node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
def test_destroy_node_reserved(self):
self._start_service()
fake_reservation = 'fake-reserv'
node = obj_utils.create_test_node(self.context,
reservation=fake_reservation)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_node,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
# Verify existing reservation wasn't broken.
node.refresh()
self.assertEqual(fake_reservation, node.reservation)
def test_destroy_node_associated(self):
self._start_service()
node = obj_utils.create_test_node(
self.context, instance_uuid=uuidutils.generate_uuid())
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_node,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeAssociated, exc.exc_info[0])
# Verify reservation was released.
node.refresh()
self.assertIsNone(node.reservation)
def test_destroy_node_with_allocation(self):
# Nodes with allocations can be deleted in maintenance
node = obj_utils.create_test_node(self.context,
provision_state=states.ACTIVE,
maintenance=True)
alloc = obj_utils.create_test_allocation(self.context)
# Establish cross-linking between the node and the allocation
alloc.node_id = node.id
alloc.save()
node.refresh()
self.service.destroy_node(self.context, node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
self.assertRaises(exception.AllocationNotFound,
self.dbapi.get_allocation_by_id,
alloc.id)
def test_destroy_node_invalid_provision_state(self):
self._start_service()
node = obj_utils.create_test_node(self.context,
provision_state=states.ACTIVE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_node,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
# Verify reservation was released.
node.refresh()
self.assertIsNone(node.reservation)
def test_destroy_node_protected_provision_state_available(self):
CONF.set_override('allow_deleting_available_nodes',
False, group='conductor')
self._start_service()
node = obj_utils.create_test_node(self.context,
provision_state=states.AVAILABLE)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_node,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
# Verify reservation was released.
node.refresh()
self.assertIsNone(node.reservation)
def test_destroy_node_protected(self):
self._start_service()
node = obj_utils.create_test_node(self.context,
provision_state=states.ACTIVE,
protected=True,
# Even in maintenance the protected
# nodes are not deleted
maintenance=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_node,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeProtected, exc.exc_info[0])
# Verify reservation was released.
node.refresh()
self.assertIsNone(node.reservation)
def test_destroy_node_allowed_in_maintenance(self):
self._start_service()
node = obj_utils.create_test_node(
self.context, instance_uuid=uuidutils.generate_uuid(),
provision_state=states.ACTIVE, maintenance=True)
self.service.destroy_node(self.context, node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
def test_destroy_node_power_off(self):
self._start_service()
node = obj_utils.create_test_node(self.context,
power_state=states.POWER_OFF)
self.service.destroy_node(self.context, node.uuid)
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_destroy_node_console_enabled(self, mock_notify, mock_sc):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
self.service.destroy_node(self.context, node.uuid)
mock_sc.assert_called_once_with(mock.ANY, mock.ANY)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.END)])
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
def test_destroy_node_console_disable_fail(self, mock_notify, mock_sc):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
mock_sc.side_effect = Exception()
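        # A failure while stopping the console must not prevent the node
        # from being destroyed; an ERROR console notification is emitted.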
self.service.destroy_node(self.context, node.uuid)
mock_sc.assert_called_once_with(mock.ANY, mock.ANY)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(fake.FakePower, 'set_power_state', autospec=True)
def test_destroy_node_adopt_failed_no_power_change(self, mock_power):
self._start_service()
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.ADOPTFAIL)
self.service.destroy_node(self.context, node.uuid)
self.assertFalse(mock_power.called)
def test_destroy_node_broken_driver(self):
node = obj_utils.create_test_node(self.context,
power_interface='broken')
self._start_service()
self.service.destroy_node(self.context, node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
node.uuid)
@mgr_utils.mock_record_keepalive
class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port(self, mock_validate):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.get_test_port(self.context, node_id=node.id,
extra={'foo': 'bar'})
res = self.service.create_port(self.context, port)
self.assertEqual({'foo': 'bar'}, res.extra)
res = objects.Port.get_by_uuid(self.context, port['uuid'])
self.assertEqual({'foo': 'bar'}, res.extra)
mock_validate.assert_called_once_with(mock.ANY, port)
def test_create_port_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
port = obj_utils.get_test_port(self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.create_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_mac_exists(self, mock_validate):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context, node_id=node.id)
port = obj_utils.get_test_port(self.context, node_id=node.id,
uuid=uuidutils.generate_uuid())
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.create_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.MACAlreadyExists, exc.exc_info[0])
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_physnet_validation_failure_conflict(self,
mock_validate):
mock_validate.side_effect = exception.Conflict
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.get_test_port(self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.create_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.Conflict, exc.exc_info[0])
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_physnet_validation_failure_inconsistent(
self, mock_validate):
mock_validate.side_effect = exception.PortgroupPhysnetInconsistent(
portgroup='pg1', physical_networks='physnet1, physnet2')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.get_test_port(self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.create_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.PortgroupPhysnetInconsistent,
exc.exc_info[0])
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
@mgr_utils.mock_record_keepalive
class UpdatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port(self, mock_val, mock_pc, mock_vpp):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
new_extra = {'foo': 'baz'}
port.extra = new_extra
res = self.service.update_port(self.context, port)
self.assertEqual(new_extra, res.extra)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
mock_vpp.assert_called_once_with(mock.ANY, port)
def test_update_port_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
port = obj_utils.create_test_port(self.context, node_id=node.id)
port.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_port_changed_failure(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context,
node_id=node.id)
old_address = port.address
port.address = '11:22:33:44:55:bb'
mock_pc.side_effect = (exception.FailedToUpdateMacOnPort('boom'))
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
self.assertEqual(exception.FailedToUpdateMacOnPort, exc.exc_info[0])
port.refresh()
self.assertEqual(old_address, port.address)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_address_active_node(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=None,
provision_state='active')
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'vif_port_id': 'fake-id'})
old_address = port.address
new_address = '11:22:33:44:55:bb'
port.address = new_address
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
port.refresh()
self.assertEqual(old_address, port.address)
self.assertFalse(mock_pc.called)
self.assertFalse(mock_val.called)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_address_maintenance(self, mock_val, mock_pc):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware', maintenance=True,
instance_uuid=uuidutils.generate_uuid(), provision_state='active')
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'vif_port_id': 'fake-id'})
new_address = '11:22:33:44:55:bb'
port.address = new_address
res = self.service.update_port(self.context, port)
self.assertEqual(new_address, res.address)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_portgroup_active_node(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=None,
provision_state='active')
pg1 = obj_utils.create_test_portgroup(self.context, node_id=node.id)
pg2 = obj_utils.create_test_portgroup(
self.context, node_id=node.id, name='bar',
address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid())
port = obj_utils.create_test_port(self.context,
node_id=node.id,
portgroup_id=pg1.id)
port.portgroup_id = pg2.id
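        # Moving a port to a different portgroup is rejected while the node
        # is in the ACTIVE provision state.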
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidState, exc.exc_info[0])
port.refresh()
self.assertEqual(pg1.id, port.portgroup_id)
self.assertFalse(mock_pc.called)
self.assertFalse(mock_val.called)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_portgroup_enroll_node(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=None,
provision_state='enroll')
pg1 = obj_utils.create_test_portgroup(self.context, node_id=node.id)
pg2 = obj_utils.create_test_portgroup(
self.context, node_id=node.id, name='bar',
address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid())
port = obj_utils.create_test_port(self.context,
node_id=node.id,
portgroup_id=pg1.id)
port.portgroup_id = pg2.id
self.service.update_port(self.context, port)
port.refresh()
self.assertEqual(pg2.id, port.portgroup_id)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
def test_update_port_node_deleting_state(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.DELETING)
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
old_pxe = port.pxe_enabled
port.pxe_enabled = True
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
port.refresh()
self.assertEqual(old_pxe, port.pxe_enabled)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_node_manageable_state(self, mock_val,
mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE)
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
port.pxe_enabled = True
self.service.update_port(self.context, port)
port.refresh()
self.assertEqual(True, port.pxe_enabled)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_to_node_in_inspect_wait_state(self, mock_val,
mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTWAIT)
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
port.pxe_enabled = True
self.service.update_port(self.context, port)
port.refresh()
self.assertEqual(True, port.pxe_enabled)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_node_active_state_and_maintenance(self, mock_val,
mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
maintenance=True)
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
port.pxe_enabled = True
self.service.update_port(self.context, port)
port.refresh()
self.assertEqual(True, port.pxe_enabled)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port_physnet_maintenance(self, mock_val, mock_pc):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware', maintenance=True,
instance_uuid=uuidutils.generate_uuid(), provision_state='active')
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'vif_port_id': 'fake-id'})
new_physnet = 'physnet1'
port.physical_network = new_physnet
res = self.service.update_port(self.context, port)
self.assertEqual(new_physnet, res.physical_network)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, port)
def test_update_port_physnet_node_deleting_state(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.DELETING)
port = obj_utils.create_test_port(self.context,
node_id=node.id,
extra={'foo': 'bar'})
old_physnet = port.physical_network
port.physical_network = 'physnet1'
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
port.refresh()
self.assertEqual(old_physnet, port.physical_network)
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_update_port_physnet_validation_failure_conflict(self,
mock_validate):
mock_validate.side_effect = exception.Conflict
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context, node_id=node.id,
uuid=uuidutils.generate_uuid())
port.extra = {'foo': 'bar'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.Conflict, exc.exc_info[0])
mock_validate.assert_called_once_with(mock.ANY, port)
@mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_update_port_physnet_validation_failure_inconsistent(
self, mock_validate):
mock_validate.side_effect = exception.PortgroupPhysnetInconsistent(
portgroup='pg1', physical_networks='physnet1, physnet2')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context, node_id=node.id,
uuid=uuidutils.generate_uuid())
port.extra = {'foo': 'bar'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.PortgroupPhysnetInconsistent,
exc.exc_info[0])
mock_validate.assert_called_once_with(mock.ANY, port)
@mgr_utils.mock_record_keepalive
class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_all(self):
self._start_service()
CONF.set_override('send_sensor_data_types', ['All'], group='conductor')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
expected_result = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
self.assertEqual(expected_result, actual_result)
def test__filter_out_unsupported_types_part(self):
self._start_service()
CONF.set_override('send_sensor_data_types', ['t1'], group='conductor')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
expected_result = {"t1": {'f1': 'v1'}}
self.assertEqual(expected_result, actual_result)
def test__filter_out_unsupported_types_non(self):
self._start_service()
CONF.set_override('send_sensor_data_types', ['t3'], group='conductor')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
expected_result = {}
self.assertEqual(expected_result, actual_result)
@mock.patch.object(messaging.Notifier, 'info', autospec=True)
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task(self, acquire_mock, notifier_mock):
nodes = queue.Queue()
for i in range(5):
nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', '', None))
self._start_service()
CONF.set_override('send_sensor_data', True, group='conductor')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = False
task.node.driver = 'fake'
task.node.name = 'fake_node'
get_sensors_data_mock = task.driver.management.get_sensors_data
validate_mock = task.driver.management.validate
get_sensors_data_mock.return_value = 'fake-sensor-data'
self.service._sensors_nodes_task(self.context, nodes)
self.assertEqual(5, acquire_mock.call_count)
self.assertEqual(5, validate_mock.call_count)
self.assertEqual(5, get_sensors_data_mock.call_count)
self.assertEqual(5, notifier_mock.call_count)
n_call = mock.call(mock.ANY, mock.ANY, 'hardware.fake.metrics',
{'event_type': 'hardware.fake.metrics.update',
'node_name': 'fake_node', 'timestamp': mock.ANY,
'message_id': mock.ANY,
'payload': 'fake-sensor-data',
'node_uuid': mock.ANY, 'instance_uuid': None})
notifier_mock.assert_has_calls([n_call, n_call, n_call,
n_call, n_call])
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_shutdown(self, acquire_mock):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
self.service._shutdown = True
CONF.set_override('send_sensor_data', True, group='conductor')
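        # With the conductor marked as shutting down, the task should exit
        # before acquiring any node.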
self.service._sensors_nodes_task(self.context, nodes)
acquire_mock.return_value.__enter__.assert_not_called()
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_no_management(self, acquire_mock):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
CONF.set_override('send_sensor_data', True, group='conductor')
self._start_service()
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = False
task.driver.management = None
self.service._sensors_nodes_task(self.context, nodes)
self.assertTrue(acquire_mock.called)
@mock.patch.object(manager.LOG, 'debug', autospec=True)
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_maintenance(self, acquire_mock, debug_log):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
CONF.set_override('send_sensor_data', True, group='conductor')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = True
get_sensors_data_mock = task.driver.management.get_sensors_data
validate_mock = task.driver.management.validate
self.service._sensors_nodes_task(self.context, nodes)
self.assertTrue(acquire_mock.called)
self.assertFalse(validate_mock.called)
self.assertFalse(get_sensors_data_mock.called)
self.assertTrue(debug_log.called)
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test___send_sensor_data(self, get_nodeinfo_list_mock,
_mapped_to_this_conductor_mock,
mock_spawn):
self._start_service()
CONF.set_override('send_sensor_data', True, group='conductor')
# NOTE(galyna): do not wait for threads to be finished in unittests
CONF.set_override('send_sensor_data_wait_timeout', 0,
group='conductor')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake', None)]
self.service._send_sensor_data(self.context)
mock_spawn.assert_called_with(self.service,
self.service._sensors_nodes_task,
self.context, mock.ANY)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test___send_sensor_data_multiple_workers(
self, get_nodeinfo_list_mock, _mapped_to_this_conductor_mock,
mock_spawn):
self._start_service()
mock_spawn.reset_mock()
number_of_workers = 8
CONF.set_override('send_sensor_data', True, group='conductor')
CONF.set_override('send_sensor_data_workers', number_of_workers,
group='conductor')
# NOTE(galyna): do not wait for threads to be finished in unittests
CONF.set_override('send_sensor_data_wait_timeout', 0,
group='conductor')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake',
None)] * 20
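        # 20 mapped nodes are spread across the 8 configured workers, so
        # _spawn_worker is expected to be called once per worker.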
self.service._send_sensor_data(self.context)
self.assertEqual(number_of_workers,
mock_spawn.call_count)
# TODO(TheJulia): At some point, we should add a test to validate that
# a modified filter to return all nodes actually works, although
# the way the sensor tests are written, the list is all mocked.
@mgr_utils.mock_record_keepalive
class BootDeviceTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(fake.FakeManagement, 'set_boot_device', autospec=True)
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_set_boot_device(self, mock_val, mock_sbd):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.set_boot_device(self.context, node.uuid,
boot_devices.PXE)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_sbd.assert_called_once_with(mock.ANY, mock.ANY, boot_devices.PXE,
persistent=False)
def test_set_boot_device_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.set_boot_device,
self.context, node.uuid, boot_devices.DISK)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_set_boot_device_validate_fail(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = exception.InvalidParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.set_boot_device,
self.context, node.uuid, boot_devices.DISK)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
def test_get_boot_device(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
bootdev = self.service.get_boot_device(self.context, node.uuid)
expected = {'boot_device': boot_devices.PXE, 'persistent': False}
self.assertEqual(expected, bootdev)
def test_get_boot_device_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.get_boot_device,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_get_boot_device_validate_fail(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = exception.InvalidParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.get_boot_device,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
def test_get_supported_boot_devices(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
bootdevs = self.service.get_supported_boot_devices(self.context,
node.uuid)
self.assertEqual([boot_devices.PXE], bootdevs)
@mgr_utils.mock_record_keepalive
class IndicatorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(fake.FakeManagement, 'set_indicator_state',
autospec=True)
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_set_indicator_state(self, mock_val, mock_sbd):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.set_indicator_state(
self.context, node.uuid, components.CHASSIS,
'led', indicator_states.ON)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_sbd.assert_called_once_with(
mock.ANY, mock.ANY, components.CHASSIS, 'led', indicator_states.ON)
def test_get_indicator_state(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
state = self.service.get_indicator_state(
self.context, node.uuid, components.CHASSIS, 'led-0')
expected = indicator_states.ON
self.assertEqual(expected, state)
def test_get_supported_indicators(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
indicators = self.service.get_supported_indicators(
self.context, node.uuid)
expected = {
'chassis': {
'led-0': {
'readonly': True,
'states': [
indicator_states.OFF,
indicator_states.ON
]
}
},
'system': {
'led': {
'readonly': False,
'states': [
indicator_states.BLINKING,
indicator_states.OFF,
indicator_states.ON
]
}
}
}
self.assertEqual(expected, indicators)
@mgr_utils.mock_record_keepalive
class NmiTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(fake.FakeManagement, 'inject_nmi', autospec=True)
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_inject_nmi(self, mock_val, mock_nmi):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.inject_nmi(self.context, node.uuid)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_nmi.assert_called_once_with(mock.ANY, mock.ANY)
def test_inject_nmi_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inject_nmi,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_inject_nmi_validate_invalid_param(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = exception.InvalidParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inject_nmi,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
@mock.patch.object(fake.FakeManagement, 'validate', autospec=True)
def test_inject_nmi_validate_missing_param(self, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = exception.MissingParameterValue('error')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inject_nmi,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.MissingParameterValue, exc.exc_info[0])
def test_inject_nmi_not_implemented(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inject_nmi,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.UnsupportedDriverExtension,
exc.exc_info[0])
@mgr_utils.mock_record_keepalive
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(VifTestCase, self).setUp()
self.vif = {'id': 'fake'}
@mock.patch.object(n_flat.FlatNetwork, 'vif_list', autospec=True)
def test_vif_list(self, mock_list, mock_valid):
mock_list.return_value = ['VIF_ID']
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
data = self.service.vif_list(self.context, node.uuid)
mock_list.assert_called_once_with(mock.ANY, mock.ANY)
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertEqual(mock_list.return_value, data)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach(self, mock_attach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.vif_attach(self.context, node.uuid, self.vif)
mock_attach.assert_called_once_with(mock.ANY, mock.ANY, self.vif)
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_node_locked(self, mock_attach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_attach,
self.context, node.uuid, self.vif)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
self.assertFalse(mock_attach.called)
self.assertFalse(mock_valid.called)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_raises_network_error(self, mock_attach,
mock_valid):
mock_attach.side_effect = exception.NetworkError("BOOM")
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_attach,
self.context, node.uuid, self.vif)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NetworkError, exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_attach.assert_called_once_with(mock.ANY, mock.ANY, self.vif)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_raises_portgroup_physnet_inconsistent(
self, mock_attach, mock_valid):
mock_valid.side_effect = exception.PortgroupPhysnetInconsistent(
portgroup='fake-pg', physical_networks='fake-physnet')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_attach,
self.context, node.uuid, self.vif)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.PortgroupPhysnetInconsistent,
exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_raises_vif_invalid_for_attach(
self, mock_attach, mock_valid):
mock_valid.side_effect = exception.VifInvalidForAttach(
node='fake-node', vif='fake-vif', reason='fake-reason')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_attach,
self.context, node.uuid, self.vif)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.VifInvalidForAttach,
exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
@mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_validate_error(self, mock_attach,
mock_valid):
mock_valid.side_effect = exception.MissingParameterValue("BOOM")
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_attach,
self.context, node.uuid, self.vif)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.MissingParameterValue, exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
@mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach(self, mock_detach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.vif_detach(self.context, node.uuid, "interface")
mock_detach.assert_called_once_with(mock.ANY, mock.ANY, "interface")
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_node_locked(self, mock_detach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_detach,
self.context, node.uuid, "interface")
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
self.assertFalse(mock_detach.called)
self.assertFalse(mock_valid.called)
@mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_raises_network_error(self, mock_detach,
mock_valid):
mock_detach.side_effect = exception.NetworkError("BOOM")
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_detach,
self.context, node.uuid, "interface")
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NetworkError, exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_detach.assert_called_once_with(mock.ANY, mock.ANY, "interface")
@mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_validate_error(self, mock_detach,
mock_valid):
mock_valid.side_effect = exception.MissingParameterValue("BOOM")
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.vif_detach,
self.context, node.uuid, "interface")
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.MissingParameterValue, exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_detach.called)
@mgr_utils.mock_record_keepalive
class UpdatePortgroupTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
new_extra = {'foo': 'baz'}
portgroup.extra = new_extra
self.service.update_portgroup(self.context, portgroup)
portgroup.refresh()
self.assertEqual(new_extra, portgroup.extra)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_failure(self, mock_val, mock_pc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
old_extra = portgroup.extra
new_extra = {'foo': 'baz'}
portgroup.extra = new_extra
mock_pc.side_effect = (exception.FailedToUpdateMacOnPort('boom'))
self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_portgroup,
self.context, portgroup)
portgroup.refresh()
self.assertEqual(old_extra, portgroup.extra)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
def test_update_portgroup_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id)
old_extra = portgroup.extra
portgroup.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_portgroup,
self.context, portgroup)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
portgroup.refresh()
self.assertEqual(old_extra, portgroup.extra)
def test_update_portgroup_to_node_in_deleting_state(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
update_node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DELETING,
uuid=uuidutils.generate_uuid())
old_node_id = portgroup.node_id
portgroup.node_id = update_node.id
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_portgroup,
self.context, portgroup)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
portgroup.refresh()
self.assertEqual(old_node_id, portgroup.node_id)
@mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_manageable_state(self, mock_val,
mock_pgc,
mock_get_ports):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
update_node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
uuid=uuidutils.generate_uuid())
mock_get_ports.return_value = []
self._start_service()
portgroup.node_id = update_node.id
self.service.update_portgroup(self.context, portgroup)
portgroup.refresh()
self.assertEqual(update_node.id, portgroup.node_id)
mock_get_ports.assert_called_once_with(portgroup.uuid)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
@mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_inspect_wait_state(self, mock_val,
mock_pgc,
mock_get_ports):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
update_node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.INSPECTWAIT,
uuid=uuidutils.generate_uuid())
mock_get_ports.return_value = []
self._start_service()
portgroup.node_id = update_node.id
self.service.update_portgroup(self.context, portgroup)
portgroup.refresh()
self.assertEqual(update_node.id, portgroup.node_id)
mock_get_ports.assert_called_once_with(portgroup.uuid)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
@mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_active_state_and_maintenance(
self, mock_val, mock_pgc, mock_get_ports):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
update_node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ACTIVE,
maintenance=True,
uuid=uuidutils.generate_uuid())
mock_get_ports.return_value = []
self._start_service()
portgroup.node_id = update_node.id
self.service.update_portgroup(self.context, portgroup)
portgroup.refresh()
self.assertEqual(update_node.id, portgroup.node_id)
mock_get_ports.assert_called_once_with(portgroup.uuid)
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
@mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_association_with_ports(self, mock_val,
mock_pgc, mock_get_ports):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id,
extra={'foo': 'bar'})
update_node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
maintenance=True,
uuid=uuidutils.generate_uuid())
mock_get_ports.return_value = ['test_port']
self._start_service()
old_node_id = portgroup.node_id
portgroup.node_id = update_node.id
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_portgroup,
self.context, portgroup)
self.assertEqual(exception.PortgroupNotEmpty, exc.exc_info[0])
portgroup.refresh()
self.assertEqual(old_node_id, portgroup.node_id)
mock_get_ports.assert_called_once_with(portgroup.uuid)
self.assertFalse(mock_val.called)
self.assertFalse(mock_pgc.called)
@mgr_utils.mock_record_keepalive
class RaidTestCases(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
driver_name = 'fake-hardware'
raid_interface = None
def setUp(self):
super(RaidTestCases, self).setUp()
self.node = obj_utils.create_test_node(
self.context, driver=self.driver_name,
raid_interface=self.raid_interface,
provision_state=states.MANAGEABLE)
def test_get_raid_logical_disk_properties(self):
self._start_service()
properties = self.service.get_raid_logical_disk_properties(
self.context, self.driver_name)
self.assertIn('raid_level', properties)
self.assertIn('size_gb', properties)
def test_set_target_raid_config(self):
raid_config = {'logical_disks': [{'size_gb': 100, 'raid_level': '1'}]}
self.service.set_target_raid_config(
self.context, self.node.uuid, raid_config)
self.node.refresh()
self.assertEqual(raid_config, self.node.target_raid_config)
def test_set_target_raid_config_empty(self):
self.node.target_raid_config = {'foo': 'bar'}
self.node.save()
raid_config = {}
self.service.set_target_raid_config(
self.context, self.node.uuid, raid_config)
self.node.refresh()
self.assertEqual({}, self.node.target_raid_config)
def test_set_target_raid_config_invalid_parameter_value(self):
# Missing raid_level in the below raid config.
raid_config = {'logical_disks': [{'size_gb': 100}]}
self.node.target_raid_config = {'foo': 'bar'}
self.node.save()
exc = self.assertRaises(
messaging.rpc.ExpectedException,
self.service.set_target_raid_config,
self.context, self.node.uuid, raid_config)
self.node.refresh()
self.assertEqual({'foo': 'bar'}, self.node.target_raid_config)
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class RaidHardwareTypeTestCases(RaidTestCases):
driver_name = 'fake-hardware'
raid_interface = 'fake'
def test_get_raid_logical_disk_properties_iface_not_supported(self):
# NOTE(jroll) we don't run this test as get_logical_disk_properties
# is supported on all RAID implementations, and we cannot have a
# null interface for a hardware type
pass
def test_set_target_raid_config_iface_not_supported(self):
# NOTE(jroll): it's impossible for a dynamic driver to have a null
# interface (e.g. node.driver.raid), so this instead tests that
# if validation fails, we blow up properly.
# need a different raid interface and a hardware type that supports it
self.node = obj_utils.create_test_node(
self.context, driver='manual-management',
raid_interface='no-raid',
uuid=uuidutils.generate_uuid(),
provision_state=states.MANAGEABLE)
raid_config = {'logical_disks': [{'size_gb': 100, 'raid_level': '1'}]}
exc = self.assertRaises(
messaging.rpc.ExpectedException,
self.service.set_target_raid_config,
self.context, self.node.uuid, raid_config)
self.node.refresh()
self.assertEqual({}, self.node.target_raid_config)
self.assertEqual(exception.UnsupportedDriverExtension, exc.exc_info[0])
self.assertIn('manual-management', str(exc.exc_info[1]))
@mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
def setUp(self):
super(ManagerDoSyncPowerStateTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.driver = mock.Mock(spec_set=drivers_base.BareDriver)
self.power = self.driver.power
self.node = obj_utils.create_test_node(
self.context, driver='fake-hardware', maintenance=False,
            provision_state=states.AVAILABLE,
            instance_uuid=uuidutils.generate_uuid())
self.task = mock.Mock(spec_set=['context', 'driver', 'node',
'upgrade_lock', 'shared'])
self.task.context = self.context
self.task.driver = self.driver
self.task.node = self.node
self.task.shared = False
self.config(force_power_state_during_sync=False, group='conductor')
def _do_sync_power_state(self, old_power_state, new_power_states,
fail_validate=False):
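        # Helper: run manager.do_sync_power_state once per entry in
        # new_power_states. Each entry is either the power state reported by
        # the driver or an exception to raise from get_power_state; the
        # per-node retry counter is carried over between iterations.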
self.node.power_state = old_power_state
if not isinstance(new_power_states, (list, tuple)):
new_power_states = [new_power_states]
if fail_validate:
exc = exception.InvalidParameterValue('error')
self.power.validate.side_effect = exc
for new_power_state in new_power_states:
self.node.power_state = old_power_state
if isinstance(new_power_state, Exception):
self.power.get_power_state.side_effect = new_power_state
else:
self.power.get_power_state.return_value = new_power_state
count = manager.do_sync_power_state(
self.task, self.service.power_state_sync_count[self.node.uuid])
self.service.power_state_sync_count[self.node.uuid] = count
def test_state_unchanged(self, node_power_action):
self._do_sync_power_state('fake-power', 'fake-power')
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertEqual('fake-power', self.node.power_state)
self.assertFalse(node_power_action.called)
self.assertFalse(self.task.upgrade_lock.called)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_state_not_set(self, mock_power_update, node_power_action):
self._do_sync_power_state(None, states.POWER_ON)
self.power.validate.assert_called_once_with(self.task)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.task.upgrade_lock.assert_called_once_with()
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_ON)
def test_validate_fail(self, node_power_action):
self._do_sync_power_state(None, states.POWER_ON,
fail_validate=True)
self.power.validate.assert_called_once_with(self.task)
self.assertFalse(self.power.get_power_state.called)
self.assertFalse(node_power_action.called)
self.assertIsNone(self.node.power_state)
def test_get_power_state_fail(self, node_power_action):
self._do_sync_power_state('fake',
exception.IronicException('foo'))
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual('fake', self.node.power_state)
self.assertEqual(1,
self.service.power_state_sync_count[self.node.uuid])
def test_get_power_state_error(self, node_power_action):
self._do_sync_power_state('fake', states.ERROR)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual('fake', self.node.power_state)
self.assertEqual(1,
self.service.power_state_sync_count[self.node.uuid])
@mock.patch.object(nova, 'power_update', autospec=True)
def test_state_changed_no_sync(self, mock_power_update, node_power_action):
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.task.upgrade_lock.assert_called_once_with()
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
@mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_state_changed_no_sync_notify(self, mock_power_update, mock_notif,
node_power_action):
# Required for exception handling
mock_notif.__name__ = 'NodeCorrectedPowerStateNotification'
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.task.upgrade_lock.assert_called_once_with()
# 1 notification should be sent:
        # baremetal.node.power_state_corrected.success, indicating the DB was
# updated to reflect the actual node power state
mock_notif.assert_called_once_with(publisher=mock.ANY,
event_type=mock.ANY,
level=mock.ANY,
payload=mock.ANY)
mock_notif.return_value.emit.assert_called_once_with(mock.ANY)
notif_args = mock_notif.call_args[1]
self.assertNotificationEqual(
notif_args, 'ironic-conductor', CONF.host,
'baremetal.node.power_state_corrected.success',
obj_fields.NotificationLevel.INFO)
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
def test_state_changed_sync(self, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=1, group='conductor')
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
node_power_action.assert_called_once_with(self.task, states.POWER_ON)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.task.upgrade_lock.assert_called_once_with()
def test_state_changed_sync_failed(self, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
node_power_action.side_effect = exception.IronicException('test')
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
# Just testing that this test doesn't raise.
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
node_power_action.assert_called_once_with(self.task, states.POWER_ON)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertEqual(1,
self.service.power_state_sync_count[self.node.uuid])
@mock.patch.object(nova, 'power_update', autospec=True)
def test_no_power_sync_support(self, mock_power_update, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.power.supports_power_sync.return_value = False
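        # When the power interface does not support power sync, the DB is
        # simply updated to the reported state and no corrective power
        # action is taken, even with force_power_state_during_sync=True.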
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.task.upgrade_lock.assert_called_once_with()
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded(self, mock_power_update, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=1, group='conductor')
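        # Two consecutive mismatches exceed the single allowed retry, so the
        # node is put into maintenance with a 'power failure' fault.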
self._do_sync_power_state(states.POWER_ON, [states.POWER_OFF,
states.POWER_OFF])
self.assertFalse(self.power.validate.called)
power_exp_calls = [mock.call(self.task)] * 2
self.assertEqual(power_exp_calls,
self.power.get_power_state.call_args_list)
node_power_action.assert_called_once_with(self.task, states.POWER_ON)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.assertEqual(2,
self.service.power_state_sync_count[self.node.uuid])
self.assertTrue(self.node.maintenance)
self.assertIsNotNone(self.node.maintenance_reason)
self.assertEqual('power failure', self.node.fault)
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded2(self, mock_power_update, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=2, group='conductor')
self._do_sync_power_state(states.POWER_ON, [states.POWER_OFF,
states.POWER_OFF,
states.POWER_OFF])
self.assertFalse(self.power.validate.called)
power_exp_calls = [mock.call(self.task)] * 3
self.assertEqual(power_exp_calls,
self.power.get_power_state.call_args_list)
npa_exp_calls = [mock.call(self.task, states.POWER_ON)] * 2
self.assertEqual(npa_exp_calls, node_power_action.call_args_list)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.assertEqual(3,
self.service.power_state_sync_count[self.node.uuid])
self.assertTrue(self.node.maintenance)
self.assertEqual('power failure', self.node.fault)
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
@mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded_notify(self, mock_power_update,
mock_notif, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=1, group='conductor')
# Required for exception handling
mock_notif.__name__ = 'NodeCorrectedPowerStateNotification'
self._do_sync_power_state(states.POWER_ON, [states.POWER_OFF,
states.POWER_OFF])
# 1 notification should be sent:
# baremetal.node.power_state_corrected.success, indicating
# the DB was updated to reflect the actual node power state
mock_notif.assert_called_once_with(publisher=mock.ANY,
event_type=mock.ANY,
level=mock.ANY,
payload=mock.ANY)
mock_notif.return_value.emit.assert_called_once_with(mock.ANY)
notif_args = mock_notif.call_args[1]
self.assertNotificationEqual(
notif_args, 'ironic-conductor', CONF.host,
'baremetal.node.power_state_corrected.success',
obj_fields.NotificationLevel.INFO)
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
def test_retry_then_success(self, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=2, group='conductor')
self._do_sync_power_state(states.POWER_ON, [states.POWER_OFF,
states.POWER_OFF,
states.POWER_ON])
self.assertFalse(self.power.validate.called)
power_exp_calls = [mock.call(self.task)] * 3
self.assertEqual(power_exp_calls,
self.power.get_power_state.call_args_list)
npa_exp_calls = [mock.call(self.task, states.POWER_ON)] * 2
self.assertEqual(npa_exp_calls, node_power_action.call_args_list)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertEqual(0,
self.service.power_state_sync_count[self.node.uuid])
def test_power_state_sync_max_retries_gps_exception(self,
node_power_action):
self.config(power_state_sync_max_retries=2, group='conductor')
self.service.power_state_sync_count[self.node.uuid] = 2
node_power_action.side_effect = exception.IronicException('test')
self._do_sync_power_state('fake',
exception.IronicException('SpongeBob'))
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertIsNone(self.node.power_state)
self.assertTrue(self.node.maintenance)
self.assertFalse(node_power_action.called)
# make sure the actual error is in the last_error attribute
self.assertIn('SpongeBob', self.node.last_error)
def test_maintenance_on_upgrade_lock(self, node_power_action):
self.node.maintenance = True
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertFalse(node_power_action.called)
self.task.upgrade_lock.assert_called_once_with()
def test_wrong_provision_state_on_upgrade_lock(self, node_power_action):
self.node.provision_state = states.DEPLOYWAIT
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertFalse(node_power_action.called)
self.task.upgrade_lock.assert_called_once_with()
def test_correct_power_state_on_upgrade_lock(self, node_power_action):
def _fake_upgrade():
self.node.power_state = states.POWER_OFF
self.task.upgrade_lock.side_effect = _fake_upgrade
self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
self.assertFalse(self.power.validate.called)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(node_power_action.called)
self.task.upgrade_lock.assert_called_once_with()
@mock.patch.object(waiters, 'wait_for_all',
new=mock.MagicMock(return_value=(0, 0)))
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
new=lambda self, fun, *args: fun(*args))
@mock.patch.object(manager, 'do_sync_power_state', autospec=True)
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
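    """Tests for the _sync_power_states periodic task: node listing,
    conductor mapping, task acquisition and do_sync_power_state are all
    mocked out, so each test only checks which nodes reach the sync step
    and which are skipped.
    """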
def setUp(self):
super(ManagerSyncPowerStatesTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.service.dbapi = self.dbapi
self.node = self._create_node()
self.filters = {'maintenance': False}
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
def test_node_not_mapped(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = False
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
self.assertFalse(sync_mock.called)
def test_node_locked_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(reservation='host1', uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_node_in_deploywait_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock,
sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE,
uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_node_in_enroll_on_acquire(self, get_nodeinfo_mock, mapped_mock,
acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(provision_state=states.ENROLL,
target_provision_state=states.NOSTATE,
uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_node_in_power_transition_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock,
sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(target_power_state=states.POWER_ON,
uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_node_in_maintenance_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock,
sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(maintenance=True, uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_node_disappears_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeNotFound(node=self.node.uuid,
host='fake')
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(sync_mock.called)
def test_single_node(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(node_attrs=dict(uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
sync_mock.assert_called_once_with(task, mock.ANY)
def test_single_node_adopt_failed(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(
node_attrs=dict(uuid=self.node.uuid,
provision_state=states.ADOPTFAIL))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
sync_mock.assert_not_called()
def test__sync_power_state_multiple_nodes(self, get_nodeinfo_mock,
mapped_mock, acquire_mock,
sync_mock):
        # Create 7 nodes:
        # 1st node: Should acquire and try to sync
        # 2nd node: Not mapped to this conductor
        # 3rd node: In DEPLOYWAIT provision_state
        # 4th node: In maintenance mode
        # 5th node: Is in power transition
        # 6th node: Disappears after getting nodeinfo list
        # 7th node: Should acquire and try to sync
nodes = []
node_attrs = {}
mapped_map = {}
for i in range(1, 8):
attrs = {'id': i,
'uuid': uuidutils.generate_uuid()}
if i == 3:
attrs['provision_state'] = states.DEPLOYWAIT
attrs['target_provision_state'] = states.ACTIVE
elif i == 4:
attrs['maintenance'] = True
elif i == 5:
attrs['target_power_state'] = states.POWER_ON
n = self._create_node(**attrs)
nodes.append(n)
node_attrs[n.uuid] = attrs
mapped_map[n.uuid] = False if i == 2 else True
tasks = [self._create_task(node_attrs=node_attrs[x.uuid])
for x in nodes if x.id != 2]
# not found during acquire (4 = index of Node6 after removing Node2)
tasks[4] = exception.NodeNotFound(node=6)
sync_results = [0] * 7 + [exception.NodeLocked(node=8, host='')]
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response(nodes))
mapped_mock.side_effect = lambda q, x, y, z: mapped_map[x]
acquire_mock.side_effect = self._get_acquire_side_effect(tasks)
sync_mock.side_effect = sync_results
with mock.patch.object(eventlet, 'sleep', autospec=True) as sleep_mock:
self.service._sync_power_states(self.context)
            # Ensure we've yielded on every iteration, except for the node
# not mapped to this conductor
self.assertEqual(len(nodes) - 1, sleep_mock.call_count)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_calls = [mock.call(self.service, x.uuid, x.driver,
x.conductor_group) for x in nodes]
self.assertEqual(mapped_calls, mapped_mock.call_args_list)
acquire_calls = [mock.call(self.context, x.uuid,
purpose=mock.ANY,
shared=True)
for x in nodes if x.id != 2]
self.assertEqual(acquire_calls, acquire_mock.call_args_list)
# Nodes 1 and 7 (5 = index of Node7 after removing Node2)
sync_calls = [mock.call(tasks[0], mock.ANY),
mock.call(tasks[5], mock.ANY)]
self.assertEqual(sync_calls, sync_mock.call_args_list)
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
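    """Tests for the _power_failure_recovery periodic task, which looks
    for maintenance nodes with fault 'power failure' and tries to bring
    them back once their power state can be read again.
    """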
def setUp(self):
super(ManagerPowerRecoveryTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.service.dbapi = self.dbapi
self.driver = mock.Mock(spec_set=drivers_base.BareDriver)
self.power = self.driver.power
self.task = mock.Mock(spec_set=['context', 'driver', 'node',
'upgrade_lock', 'shared'])
self.node = self._create_node(maintenance=True,
fault='power failure',
maintenance_reason='Unreachable BMC')
self.task.node = self.node
self.task.driver = self.driver
self.filters = {'maintenance': True,
'fault': 'power failure'}
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
def test_node_not_mapped(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = False
self.service._power_failure_recovery(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
self.assertFalse(self.power.validate.called)
def _power_failure_recovery(self, node_dict, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
task = self._create_task(node_attrs=node_dict)
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._power_failure_recovery(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(self.power.validate.called)
def test_node_locked_on_acquire(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
node_dict = dict(reservation='host1', uuid=self.node.uuid)
self._power_failure_recovery(node_dict, get_nodeinfo_mock,
mapped_mock, acquire_mock)
def test_node_in_enroll_on_acquire(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
node_dict = dict(provision_state=states.ENROLL,
target_provision_state=states.NOSTATE,
maintenance=True, uuid=self.node.uuid)
self._power_failure_recovery(node_dict, get_nodeinfo_mock,
mapped_mock, acquire_mock)
def test_node_in_power_transition_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
node_dict = dict(target_power_state=states.POWER_ON,
maintenance=True, uuid=self.node.uuid)
self._power_failure_recovery(node_dict, get_nodeinfo_mock,
mapped_mock, acquire_mock)
def test_node_not_in_maintenance_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
node_dict = dict(maintenance=False, uuid=self.node.uuid)
self._power_failure_recovery(node_dict, get_nodeinfo_mock,
mapped_mock, acquire_mock)
def test_node_disappears_on_acquire(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeNotFound(node=self.node.uuid,
host='fake')
self.service._power_failure_recovery(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.assertFalse(self.power.validate.called)
@mock.patch.object(notification_utils,
'emit_power_state_corrected_notification',
autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_node_recovery_success(self, mock_power_update, notify_mock,
get_nodeinfo_mock, mapped_mock,
acquire_mock):
self.node.power_state = states.POWER_ON
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(self.task)
self.power.get_power_state.return_value = states.POWER_OFF
self.service._power_failure_recovery(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.power.validate.assert_called_once_with(self.task)
self.power.get_power_state.assert_called_once_with(self.task)
self.task.upgrade_lock.assert_called_once_with()
self.assertFalse(self.node.maintenance)
self.assertIsNone(self.node.fault)
self.assertIsNone(self.node.maintenance_reason)
self.assertEqual(states.POWER_OFF, self.node.power_state)
notify_mock.assert_called_once_with(self.task, states.POWER_ON)
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
def test_node_recovery_failed(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(self.task)
self.power.get_power_state.return_value = states.ERROR
self.service._power_failure_recovery(self.context)
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
self.power.validate.assert_called_once_with(self.task)
self.power.get_power_state.assert_called_once_with(self.task)
self.assertFalse(self.task.upgrade_lock.called)
self.assertTrue(self.node.maintenance)
self.assertEqual('power failure', self.node.fault)
self.assertEqual('Unreachable BMC', self.node.maintenance_reason)
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
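    """Tests for the _check_deploy_timeouts periodic task, which fails
    nodes stuck in DEPLOYWAIT longer than deploy_callback_timeout.
    """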
def setUp(self):
super(ManagerCheckDeployTimeoutsTestCase, self).setUp()
self.config(deploy_callback_timeout=300, group='conductor')
self.service = manager.ConductorManager('hostname', 'test-topic')
self.service.dbapi = self.dbapi
self.node = self._create_node(provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE)
self.task = self._create_task(node=self.node)
self.node2 = self._create_node(provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE)
self.task2 = self._create_task(node=self.node2)
self.filters = {'reserved': False, 'maintenance': False,
'provisioned_before': 300,
'provision_state': states.DEPLOYWAIT}
self.columns = ['uuid', 'driver', 'conductor_group']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters,
sort_key='provision_updated_at', sort_dir='asc')
def test_not_mapped(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = False
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_timeout(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(self.task)
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=conductor_utils.provisioning_error_handler,
target_state=None)
def test_acquire_node_disappears(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeNotFound(node='fake')
# Exception eaten
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.spawn_after.called)
def test_acquire_node_locked(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeLocked(node='fake',
host='fake')
# Exception eaten
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.spawn_after.called)
def test_no_deploywait_after_lock(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
task = self._create_task(
node_attrs=dict(provision_state=states.AVAILABLE,
uuid=self.node.uuid))
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(task.spawn_after.called)
def test_maintenance_after_lock(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
task = self._create_task(
node_attrs=dict(provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE,
maintenance=True,
uuid=self.node.uuid))
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([task.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([task, self.task2]))
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
self.assertEqual([mock.call(self.service,
self.node.uuid, task.node.driver,
task.node.conductor_group),
mock.call(self.service,
self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
mock.call(self.context, self.node2.uuid,
purpose=mock.ANY)],
acquire_mock.call_args_list)
# First node skipped
self.assertFalse(task.spawn_after.called)
# Second node spawned
self.task2.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task2),
err_handler=conductor_utils.provisioning_error_handler,
target_state=None)
def test_exiting_no_worker_avail(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(
[(self.task, exception.NoFreeConductorWorker()), self.task2])
# Exception should be nuked
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
# have exited the loop early due to NoFreeConductorWorker
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=conductor_utils.provisioning_error_handler,
target_state=None)
def test_exiting_with_other_exception(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(
[(self.task, exception.IronicException('foo')), self.task2])
# Should re-raise
self.assertRaises(exception.IronicException,
self.service._check_deploy_timeouts,
self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
# have exited the loop early due to unknown exception
mapped_mock.assert_called_once_with(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=conductor_utils.provisioning_error_handler,
target_state=None)
def test_worker_limit(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
self.config(periodic_max_workers=2, group='conductor')
# Use the same nodes/tasks to make life easier in the tests
# here
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node] * 3))
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([self.task] * 3))
self.service._check_deploy_timeouts(self.context)
        # Should only have run 2.
self.assertEqual([mock.call(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
acquire_mock.call_args_list)
process_event_call = mock.call(
'fail',
callback=self.service._spawn_worker,
call_args=(conductor_utils.cleanup_after_timeout, self.task),
err_handler=conductor_utils.provisioning_error_handler,
target_state=None)
self.assertEqual([process_event_call] * 2,
self.task.process_event.call_args_list)
@mgr_utils.mock_record_keepalive
class ManagerTestProperties(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
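    """Tests for get_driver_properties across the enabled hardware types."""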
def setUp(self):
super(ManagerTestProperties, self).setUp()
self.service = manager.ConductorManager('test-host', 'test-topic')
def _check_driver_properties(self, hw_type, expected, agent_common=True,
pxe_common=True):
self._start_service()
properties = self.service.get_driver_properties(self.context, hw_type)
if agent_common:
expected.extend(['agent_verify_ca',
'deploy_kernel', 'deploy_ramdisk',
'deploy_forces_oob_reboot',
'image_download_source',
'image_http_proxy', 'image_https_proxy',
'image_no_proxy'])
if pxe_common:
expected.extend(['force_persistent_boot_device',
'rescue_kernel', 'rescue_ramdisk'])
self.assertCountEqual(expected, properties)
def test_driver_properties_fake(self):
expected = ['B1', 'B2']
self._check_driver_properties("fake-hardware", expected,
agent_common=False, pxe_common=False)
def test_driver_properties_ipmi(self):
self.config(enabled_hardware_types='ipmi',
enabled_power_interfaces=['ipmitool'],
enabled_management_interfaces=['ipmitool'],
enabled_console_interfaces=['ipmitool-socat'])
expected = ['ipmi_address', 'ipmi_terminal_port',
'ipmi_password', 'ipmi_port', 'ipmi_priv_level',
'ipmi_username', 'ipmi_bridging', 'ipmi_transit_channel',
'ipmi_transit_address', 'ipmi_target_channel',
'ipmi_target_address', 'ipmi_local_address',
'ipmi_protocol_version', 'ipmi_force_boot_device',
'ipmi_disable_boot_timeout', 'ipmi_hex_kg_key',
'ipmi_cipher_suite']
self._check_driver_properties("ipmi", expected)
def test_driver_properties_snmp(self):
self.config(enabled_hardware_types='snmp',
enabled_power_interfaces=['snmp'])
expected = ['snmp_driver', 'snmp_address', 'snmp_port', 'snmp_version',
'snmp_community',
'snmp_community_read', 'snmp_community_write',
'snmp_security', 'snmp_outlet',
'snmp_user',
'snmp_context_engine_id', 'snmp_context_name',
'snmp_auth_key', 'snmp_auth_protocol',
'snmp_priv_key', 'snmp_priv_protocol']
self._check_driver_properties("snmp", expected)
def test_driver_properties_ilo(self):
self.config(enabled_hardware_types='ilo',
enabled_power_interfaces=['ilo'],
enabled_management_interfaces=['ilo'],
enabled_boot_interfaces=['ilo-virtual-media'],
enabled_inspect_interfaces=['ilo'],
enabled_console_interfaces=['ilo'])
expected = ['ilo_address', 'ilo_username',
'ilo_password', 'client_port', 'client_timeout',
'ilo_deploy_iso', 'console_port', 'ilo_change_password',
'ca_file', 'snmp_auth_user', 'snmp_auth_prot_password',
'snmp_auth_priv_password', 'snmp_auth_protocol',
'snmp_auth_priv_protocol', 'ilo_verify_ca']
self._check_driver_properties("ilo", expected, pxe_common=False)
def test_driver_properties_manual_management(self):
self.config(enabled_hardware_types=['manual-management'])
self._check_driver_properties('manual-management', [])
def test_driver_properties_fail(self):
self.service.init_host()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.get_driver_properties,
self.context, "bad-driver")
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.DriverNotFound, exc.exc_info[0])
@mock.patch.object(waiters, 'wait_for_all', autospec=True)
@mock.patch.object(manager.ConductorManager, '_spawn_worker', autospec=True)
@mock.patch.object(manager.ConductorManager, '_sync_power_state_nodes_task',
autospec=True)
class ParallelPowerSyncTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
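    """Tests for distributing _sync_power_states over multiple workers:
    only the spawning and queueing logic is exercised, the per-node sync
    task itself is mocked out.
    """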
def setUp(self):
super(ParallelPowerSyncTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
def test__sync_power_states_9_nodes_8_workers(
self, sync_mock, spawn_mock, waiter_mock):
CONF.set_override('sync_power_state_workers', 8, group='conductor')
with mock.patch.object(self.service, 'iter_nodes',
new=mock.MagicMock(return_value=[[0]] * 9)):
self.service._sync_power_states(self.context)
self.assertEqual(7, spawn_mock.call_count)
self.assertEqual(1, sync_mock.call_count)
self.assertEqual(1, waiter_mock.call_count)
def test__sync_power_states_6_nodes_8_workers(
self, sync_mock, spawn_mock, waiter_mock):
CONF.set_override('sync_power_state_workers', 8, group='conductor')
with mock.patch.object(self.service, 'iter_nodes',
new=mock.MagicMock(return_value=[[0]] * 6)):
self.service._sync_power_states(self.context)
self.assertEqual(5, spawn_mock.call_count)
self.assertEqual(1, sync_mock.call_count)
self.assertEqual(1, waiter_mock.call_count)
def test__sync_power_states_1_nodes_8_workers(
self, sync_mock, spawn_mock, waiter_mock):
CONF.set_override('sync_power_state_workers', 8, group='conductor')
with mock.patch.object(self.service, 'iter_nodes',
new=mock.MagicMock(return_value=[[0]])):
self.service._sync_power_states(self.context)
self.assertEqual(0, spawn_mock.call_count)
self.assertEqual(1, sync_mock.call_count)
self.assertEqual(1, waiter_mock.call_count)
def test__sync_power_states_9_nodes_1_worker(
self, sync_mock, spawn_mock, waiter_mock):
CONF.set_override('sync_power_state_workers', 1, group='conductor')
with mock.patch.object(self.service, 'iter_nodes',
new=mock.MagicMock(return_value=[[0]] * 9)):
self.service._sync_power_states(self.context)
self.assertEqual(0, spawn_mock.call_count)
self.assertEqual(1, sync_mock.call_count)
self.assertEqual(1, waiter_mock.call_count)
@mock.patch.object(queue, 'Queue', autospec=True)
def test__sync_power_states_node_prioritization(
self, queue_mock, sync_mock, spawn_mock, waiter_mock):
CONF.set_override('sync_power_state_workers', 1, group='conductor')
with mock.patch.object(
self.service, 'iter_nodes',
new=mock.MagicMock(return_value=[[0], [1], [2]])
), mock.patch.dict(
self.service.power_state_sync_count,
{0: 1, 1: 0, 2: 2}, clear=True):
queue_mock.return_value.qsize.return_value = 0
self.service._sync_power_states(self.context)
expected_calls = [mock.call([2]), mock.call([0]), mock.call([1])]
queue_mock.return_value.put.assert_has_calls(expected_calls)
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
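    """Tests for the _sync_local_state periodic task, which takes over
    nodes mapped to this conductor whose conductor_affinity still points
    at another conductor.
    """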
def setUp(self):
super(ManagerSyncLocalStateTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.service.conductor = mock.Mock()
self.service.dbapi = self.dbapi
self.service.ring_manager = mock.Mock()
self.node = self._create_node(provision_state=states.ACTIVE,
target_provision_state=states.NOSTATE)
self.task = self._create_task(node=self.node)
self.filters = {'reserved': False,
'maintenance': False,
'provision_state': states.ACTIVE}
self.columns = ['uuid', 'driver', 'conductor_group', 'id',
'conductor_affinity']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
def test_not_mapped(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = False
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_already_mapped(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
# Node is already mapped to the conductor running the periodic task
self.node.conductor_affinity = 123
self.service.conductor.id = 123
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_good(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(self.task)
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
self.task.spawn_after.assert_called_once_with(
self.service._spawn_worker,
self.service._do_takeover, self.task)
def test_no_free_worker(self, get_nodeinfo_mock, mapped_mock,
acquire_mock):
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([self.task] * 3))
self.task.spawn_after.side_effect = [
None,
exception.NoFreeConductorWorker('error')
]
# 3 nodes to be checked
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node] * 3))
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# assert _mapped_to_this_conductor() gets called 2 times only
# instead of 3. When NoFreeConductorWorker is raised the loop
# should be broken
expected = [mock.call(self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 2 times only instead of 3. When
# NoFreeConductorWorker is raised the loop should be broken
expected = [mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2
self.assertEqual(expected, acquire_mock.call_args_list)
# assert spawn_after has been called twice
expected = [mock.call(self.service._spawn_worker,
self.service._do_takeover, self.task)] * 2
self.assertEqual(expected, self.task.spawn_after.call_args_list)
    def test_node_locked(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(
[self.task, exception.NodeLocked('error'), self.task])
self.task.spawn_after.side_effect = [None, None]
# 3 nodes to be checked
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node] * 3))
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# assert _mapped_to_this_conductor() gets called 3 times
expected = [mock.call(
self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)] * 3
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 3 times
expected = [mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 3
self.assertEqual(expected, acquire_mock.call_args_list)
# assert spawn_after has been called only 2 times
expected = [mock.call(self.service._spawn_worker,
self.service._do_takeover, self.task)] * 2
self.assertEqual(expected, self.task.spawn_after.call_args_list)
def test_worker_limit(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
# Limit to only 1 worker
self.config(periodic_max_workers=1, group='conductor')
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([self.task] * 3))
self.task.spawn_after.side_effect = [None] * 3
# 3 nodes to be checked
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node] * 3))
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# assert _mapped_to_this_conductor() gets called only once
# because of the worker limit
mapped_mock.assert_called_once_with(
self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)
# assert acquire() gets called only once because of the worker limit
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
self.task.spawn_after.assert_called_once_with(
self.service._spawn_worker,
self.service._do_takeover, self.task)
@mgr_utils.mock_record_keepalive
class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
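    """Tests for _do_inspect_hardware and the inspect_hardware RPC."""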
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_ok(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.INSPECTING,
driver_internal_info={'agent_url': 'url'})
task = task_manager.TaskManager(self.context, node.uuid)
mock_inspect.return_value = states.MANAGEABLE
manager._do_inspect_hardware(task)
node.refresh()
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_inspect.assert_called_once_with(task.driver.inspect, task)
task.node.refresh()
self.assertNotIn('agent_url', task.node.driver_internal_info)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_return_inspecting(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTING)
task = task_manager.TaskManager(self.context, node.uuid)
mock_inspect.return_value = states.INSPECTING
self.assertRaises(exception.HardwareInspectionFailure,
manager._do_inspect_hardware, task)
node.refresh()
self.assertIn('driver returned unexpected state', node.last_error)
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
mock_inspect.assert_called_once_with(task.driver.inspect, task)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTING)
task = task_manager.TaskManager(self.context, node.uuid)
mock_inspect.return_value = states.INSPECTWAIT
manager._do_inspect_hardware(task)
node.refresh()
self.assertEqual(states.INSPECTWAIT, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_inspect.assert_called_once_with(task.driver.inspect, task)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_return_other_state(self, mock_inspect, log_mock):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTING)
task = task_manager.TaskManager(self.context, node.uuid)
mock_inspect.return_value = None
self.assertRaises(exception.HardwareInspectionFailure,
manager._do_inspect_hardware, task)
node.refresh()
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
mock_inspect.assert_called_once_with(task.driver.inspect, task)
self.assertTrue(log_mock.error.called)
def test__check_inspect_wait_timeouts(self):
self._start_service()
CONF.set_override('inspect_wait_timeout', 1, group='conductor')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.INSPECTWAIT,
target_provision_state=states.MANAGEABLE,
provision_updated_at=datetime.datetime(2000, 1, 1, 0, 0),
inspection_started_at=datetime.datetime(2000, 1, 1, 0, 0))
self.service._check_inspect_wait_timeouts(self.context)
self._stop_service()
node.refresh()
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_inspect_hardware_worker_pool_full(self, mock_spawn):
prv_state = states.MANAGEABLE
tgt_prv_state = states.NOSTATE
node = obj_utils.create_test_node(self.context,
provision_state=prv_state,
target_provision_state=tgt_prv_state,
last_error=None,
driver='fake-hardware')
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inspect_hardware,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self._stop_service()
node.refresh()
# Make sure things were rolled back
self.assertEqual(prv_state, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
def _test_inspect_hardware_validate_fail(self, mock_validate):
mock_validate.side_effect = exception.InvalidParameterValue(
'Fake error message')
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inspect_hardware,
self.context, node.uuid)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
mock_validate.side_effect = exception.MissingParameterValue(
'Fake error message')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.inspect_hardware,
self.context, node.uuid)
self.assertEqual(exception.MissingParameterValue, exc.exc_info[0])
        # This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.validate',
autospec=True)
def test_inspect_hardware_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_inspect_hardware_power_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_raises_error(self, mock_inspect):
self._start_service()
mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
state = states.MANAGEABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTING,
target_provision_state=state)
task = task_manager.TaskManager(self.context, node.uuid)
self.assertRaisesRegex(exception.HardwareInspectionFailure, '^test$',
manager._do_inspect_hardware, task)
node.refresh()
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertEqual('test', node.last_error)
self.assertTrue(mock_inspect.called)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
def test_inspect_hardware_unexpected_error(self, mock_inspect):
self._start_service()
mock_inspect.side_effect = RuntimeError('x')
state = states.MANAGEABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.INSPECTING,
target_provision_state=state)
task = task_manager.TaskManager(self.context, node.uuid)
self.assertRaisesRegex(exception.HardwareInspectionFailure,
'Unexpected exception of type RuntimeError: x',
manager._do_inspect_hardware, task)
node.refresh()
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertEqual('Unexpected exception of type RuntimeError: x',
node.last_error)
self.assertTrue(mock_inspect.called)
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
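    """Tests for the _check_inspect_wait_timeouts periodic task, which
    fails nodes left in INSPECTWAIT longer than inspect_wait_timeout.
    """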
def setUp(self):
super(ManagerCheckInspectWaitTimeoutsTestCase, self).setUp()
self.config(inspect_wait_timeout=300, group='conductor')
self.service = manager.ConductorManager('hostname', 'test-topic')
self.service.dbapi = self.dbapi
self.node = self._create_node(provision_state=states.INSPECTWAIT,
target_provision_state=states.MANAGEABLE)
self.task = self._create_task(node=self.node)
self.node2 = self._create_node(
provision_state=states.INSPECTWAIT,
target_provision_state=states.MANAGEABLE)
self.task2 = self._create_task(node=self.node2)
self.filters = {'reserved': False,
'maintenance': False,
'inspection_started_before': 300,
'provision_state': states.INSPECTWAIT}
self.columns = ['uuid', 'driver', 'conductor_group']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
sort_dir='asc', columns=self.columns, filters=self.filters,
sort_key='inspection_started_at')
def test__check_inspect_timeouts_not_mapped(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = False
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test__check_inspect_timeout(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(self.task)
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail', target_state=None)
def test__check_inspect_timeouts_acquire_node_disappears(self,
get_nodeinfo_mock,
mapped_mock,
acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeNotFound(node='fake')
# Exception eaten
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.process_event.called)
def test__check_inspect_timeouts_acquire_node_locked(self,
get_nodeinfo_mock,
mapped_mock,
acquire_mock):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = exception.NodeLocked(node='fake',
host='fake')
# Exception eaten
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.process_event.called)
def test__check_inspect_timeouts_no_acquire_after_lock(self,
get_nodeinfo_mock,
mapped_mock,
acquire_mock):
task = self._create_task(
node_attrs=dict(provision_state=states.AVAILABLE,
uuid=self.node.uuid))
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(task.process_event.called)
def test__check_inspect_timeouts_to_maintenance_after_lock(
self, get_nodeinfo_mock, mapped_mock, acquire_mock):
task = self._create_task(
node_attrs=dict(provision_state=states.INSPECTWAIT,
target_provision_state=states.MANAGEABLE,
maintenance=True,
uuid=self.node.uuid))
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([task.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([task, self.task2]))
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
self.assertEqual([mock.call(self.service,
self.node.uuid, task.node.driver,
task.node.conductor_group),
mock.call(self.service,
self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
mock.call(self.context, self.node2.uuid,
purpose=mock.ANY)],
acquire_mock.call_args_list)
# First node skipped
self.assertFalse(task.process_event.called)
# Second node spawned
self.task2.process_event.assert_called_with('fail', target_state=None)
def test__check_inspect_timeouts_exiting_no_worker_avail(
self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(
[(self.task, exception.NoFreeConductorWorker()), self.task2])
# Exception should be nuked
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
# have exited the loop early due to NoFreeConductorWorker
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail', target_state=None)
def test__check_inspect_timeouts_exit_with_other_exception(
self, get_nodeinfo_mock, mapped_mock, acquire_mock):
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node, self.node2]))
mapped_mock.return_value = True
acquire_mock.side_effect = self._get_acquire_side_effect(
[(self.task, exception.IronicException('foo')), self.task2])
# Should re-raise
self.assertRaises(exception.IronicException,
self.service._check_inspect_wait_timeouts,
self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
# have exited the loop early due to unknown exception
mapped_mock.assert_called_once_with(self.service,
self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail', target_state=None)
def test__check_inspect_timeouts_worker_limit(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
self.config(periodic_max_workers=2, group='conductor')
# Use the same nodes/tasks to make life easier in the tests
# here
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response([self.node] * 3))
mapped_mock.return_value = True
acquire_mock.side_effect = (
self._get_acquire_side_effect([self.task] * 3))
self.service._check_inspect_wait_timeouts(self.context)
        # Should only have run 2.
self.assertEqual([mock.call(self.service,
self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
acquire_mock.call_args_list)
process_event_call = mock.call('fail', target_state=None)
self.assertEqual([process_event_call] * 2,
self.task.process_event.call_args_list)
@mgr_utils.mock_record_keepalive
class DestroyPortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
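    """Tests for the destroy_port RPC, including ports on active nodes."""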
def test_destroy_port(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context,
node_id=node.id)
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound, port.refresh)
def test_destroy_port_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
port = obj_utils.create_test_port(self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_port,
self.context, port)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
def test_destroy_port_node_active_state(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='active')
port = obj_utils.create_test_port(
self.context,
node_id=node.id,
internal_info={'tenant_vif_port_id': 'foo'})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_port,
self.context, port)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
def test_destroy_port_node_active_and_maintenance_vif_present(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='active',
maintenance=True)
port = obj_utils.create_test_port(
self.context,
node_id=node.id,
internal_info={'tenant_vif_port_id': 'fake-id'})
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound, port.refresh)
def test_destroy_port_node_active_and_maintenance_no_vif(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='active',
maintenance=True)
port = obj_utils.create_test_port(self.context,
node_id=node.id)
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_uuid,
port.uuid)
def test_destroy_port_with_instance_not_in_active_port_unbound(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='deploy failed')
port = obj_utils.create_test_port(self.context,
node_id=node.id)
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_uuid,
port.uuid)
def test_destroy_port_with_instance_not_in_active_port_bound(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='deploy failed')
port = obj_utils.create_test_port(
self.context,
node_id=node.id,
internal_info={'tenant_vif_port_id': 'foo'})
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_port,
self.context, port)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
def test_destroy_port_node_active_port_unbound(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='active')
port = obj_utils.create_test_port(self.context,
node_id=node.id)
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_uuid,
port.uuid)
@mgr_utils.mock_record_keepalive
class DestroyPortgroupTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_destroy_portgroup(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id)
self.service.destroy_portgroup(self.context, portgroup)
self.assertRaises(exception.PortgroupNotFound, portgroup.refresh)
def test_destroy_portgroup_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
portgroup = obj_utils.create_test_portgroup(self.context,
node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_portgroup,
self.context, portgroup)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
@mock.patch.object(manager.ConductorManager, '_fail_if_in_state',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@mock.patch.object(dbapi.IMPL, 'get_offline_conductors', autospec=True)
class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def setUp(self):
super(ManagerCheckOrphanNodesTestCase, self).setUp()
self._start_service()
self.node = obj_utils.create_test_node(
self.context, id=1, uuid=uuidutils.generate_uuid(),
driver='fake-hardware', provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
target_power_state=states.POWER_ON,
reservation='fake-conductor')
# create a second node in a different state to test the
        # filtering of nodes in the DEPLOYING state
obj_utils.create_test_node(
self.context, id=10, uuid=uuidutils.generate_uuid(),
driver='fake-hardware', provision_state=states.AVAILABLE,
target_provision_state=states.NOSTATE)
def test__check_orphan_nodes(self, mock_off_cond, mock_mapped,
mock_fail_if):
mock_off_cond.return_value = ['fake-conductor']
self.service._check_orphan_nodes(self.context)
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(
self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
callback_method=conductor_utils.abort_on_conductor_take_over,
err_handler=conductor_utils.provisioning_error_handler)
# assert node was released
self.assertIsNone(self.node.reservation)
self.assertIsNone(self.node.target_power_state)
self.assertIsNotNone(self.node.last_error)
def test__check_orphan_nodes_cleaning(self, mock_off_cond, mock_mapped,
mock_fail_if):
self.node.provision_state = states.CLEANING
self.node.save()
mock_off_cond.return_value = ['fake-conductor']
self.service._check_orphan_nodes(self.context)
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(
self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
callback_method=conductor_utils.abort_on_conductor_take_over,
err_handler=conductor_utils.provisioning_error_handler)
# assert node was released
self.assertIsNone(self.node.reservation)
self.assertIsNone(self.node.target_power_state)
self.assertIsNotNone(self.node.last_error)
def test__check_orphan_nodes_alive(self, mock_off_cond,
mock_mapped, mock_fail_if):
mock_off_cond.return_value = []
self.service._check_orphan_nodes(self.context)
self.node.refresh()
mock_off_cond.assert_called_once_with()
self.assertFalse(mock_mapped.called)
self.assertFalse(mock_fail_if.called)
# assert node still locked
self.assertIsNotNone(self.node.reservation)
@mock.patch.object(objects.Node, 'release', autospec=True)
def test__check_orphan_nodes_release_exceptions_skipping(
self, mock_release, mock_off_cond, mock_mapped, mock_fail_if):
mock_off_cond.return_value = ['fake-conductor']
# Add another node so we can check both exceptions
node2 = obj_utils.create_test_node(
self.context, id=2, uuid=uuidutils.generate_uuid(),
driver='fake-hardware', provision_state=states.DEPLOYING,
target_provision_state=states.DEPLOYDONE,
reservation='fake-conductor')
mock_mapped.return_value = True
mock_release.side_effect = [exception.NodeNotFound('not found'),
exception.NodeLocked('locked')]
self.service._check_orphan_nodes(self.context)
self.node.refresh()
mock_off_cond.assert_called_once_with()
expected_calls = [
mock.call(self.service, self.node.uuid, 'fake-hardware', ''),
mock.call(self.service, node2.uuid, 'fake-hardware', '')
]
mock_mapped.assert_has_calls(expected_calls)
# Assert we skipped and didn't try to call _fail_if_in_state
self.assertFalse(mock_fail_if.called)
def test__check_orphan_nodes_release_node_not_locked(
self, mock_off_cond, mock_mapped, mock_fail_if):
# this simulates releasing the node elsewhere
count = [0]
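        # a one-element list lets the nested function mutate the counter
        # without needing ``nonlocal``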
def _fake_release(*args, **kwargs):
self.node.reservation = None
self.node.save()
# raise an exception only the first time release is called
count[0] += 1
if count[0] == 1:
raise exception.NodeNotLocked('not locked')
mock_off_cond.return_value = ['fake-conductor']
mock_mapped.return_value = True
with mock.patch.object(objects.Node, 'release',
side_effect=_fake_release,
autospec=True) as mock_release:
self.service._check_orphan_nodes(self.context)
mock_release.assert_called_with(self.context, mock.ANY,
self.node.id)
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(
self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
callback_method=conductor_utils.abort_on_conductor_take_over,
err_handler=conductor_utils.provisioning_error_handler)
def test__check_orphan_nodes_maintenance(self, mock_off_cond, mock_mapped,
mock_fail_if):
self.node.maintenance = True
self.node.save()
mock_off_cond.return_value = ['fake-conductor']
self.service._check_orphan_nodes(self.context)
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(
self.service, self.node.uuid, 'fake-hardware', '')
# assert node was released
self.assertIsNone(self.node.reservation)
# not changing states in maintenance
self.assertFalse(mock_fail_if.called)
self.assertIsNotNone(self.node.target_power_state)
class TestIndirectionApiConductor(db_base.DbTestCase):
def setUp(self):
super(TestIndirectionApiConductor, self).setUp()
self.conductor = manager.ConductorManager('test-host', 'test-topic')
def _test_object_action(self, is_classmethod, raise_exception,
return_object=False):
@obj_base.IronicObjectRegistry.register
class TestObject(obj_base.IronicObject):
context = self.context
def foo(self, context, raise_exception=False, return_object=False):
if raise_exception:
raise Exception('test')
elif return_object:
return obj
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False, return_object=False):
if raise_exception:
raise Exception('test')
elif return_object:
return obj
else:
return 'test'
obj = TestObject(self.context)
if is_classmethod:
versions = ovo_base.obj_tree_get_versions(TestObject.obj_name())
result = self.conductor.object_class_action_versions(
self.context, TestObject.obj_name(), 'bar', versions,
tuple(), {'raise_exception': raise_exception,
'return_object': return_object})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception,
'return_object': return_object})
if return_object:
self.assertEqual(obj, result)
else:
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_action_on_object(self):
self._test_object_action(False, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
def test_object_class_action_on_object(self):
self._test_object_action(True, False, False)
def test_object_action_copies_object(self):
@obj_base.IronicObjectRegistry.register
class TestObject(obj_base.IronicObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject(self.context)
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def test_object_backport_versions(self):
fake_backported_obj = 'fake-backported-obj'
obj_name = 'fake-obj'
test_obj = mock.Mock()
test_obj.obj_name.return_value = obj_name
test_obj.obj_to_primitive.return_value = fake_backported_obj
fake_version_manifest = {obj_name: '1.0'}
result = self.conductor.object_backport_versions(
self.context, test_obj, fake_version_manifest)
self.assertEqual(result, fake_backported_obj)
test_obj.obj_to_primitive.assert_called_once_with(
target_version='1.0', version_manifest=fake_version_manifest)
@mgr_utils.mock_record_keepalive
class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_takeover(self, mock_prepare, mock_take_over,
mock_start_console):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_takeover(task)
node.refresh()
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_takeover_with_console_enabled(self, mock_prepare,
mock_take_over,
mock_start_console,
mock_notify):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_takeover(task)
node.refresh()
self.assertIsNone(node.last_error)
self.assertTrue(node.console_enabled)
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.END)])
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_takeover_with_console_exception(self, mock_prepare,
mock_take_over,
mock_start_console,
mock_notify):
self._start_service()
mock_start_console.side_effect = Exception()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_takeover(task)
node.refresh()
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_takeover_with_console_port_cleaned(self, mock_prepare,
mock_take_over,
mock_start_console,
mock_notify):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
di_info = node.driver_internal_info
di_info['allocated_ipmi_terminal_port'] = 12345
node.driver_internal_info = di_info
node.save()
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_takeover(task)
node.refresh()
self.assertIsNone(node.last_error)
self.assertTrue(node.console_enabled)
self.assertIsNone(
node.driver_internal_info.get('allocated_ipmi_terminal_port',
None))
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.END)])
@mgr_utils.mock_record_keepalive
class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def _fake_spawn(self, conductor_obj, func, *args, **kwargs):
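        """Call the worker function synchronously and return a mock.

        Used as a side effect for the mocked _spawn_worker so tests can
        assert on the worker's effects without real threading.
        """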
func(*args, **kwargs)
return mock.MagicMock()
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_adoption_with_takeover(self,
mock_prepare,
mock_take_over,
mock_start_console,
mock_boot_validate,
mock_power_validate):
"""Test a successful node adoption"""
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ADOPTING)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_adoption_take_over_failure(self,
mock_prepare,
mock_take_over,
mock_start_console,
mock_boot_validate):
"""Test that adoption failed if an exception is raised"""
# Note(TheJulia): Use of an actual possible exception that
# can be raised due to a misconfiguration.
mock_take_over.side_effect = exception.IPMIFailure(
"something went wrong")
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ADOPTING,
power_state=states.POWER_ON)
# NOTE(TheJulia): When nodes are created for adoption, they
# would have no power state. Under normal circumstances
# during validate the node object is updated with power state
# however we need to make sure that we wipe preserved state
# as part of failure handling.
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
node.refresh()
self.assertEqual(states.ADOPTFAIL, node.provision_state)
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
mock_prepare.assert_called_once_with(task.driver.deploy, task)
mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
self.assertEqual(states.NOSTATE, node.power_state)
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
autospec=True)
def test__do_adoption_boot_validate_failure(self,
mock_prepare,
mock_take_over,
mock_start_console,
mock_boot_validate):
"""Test that adoption fails if the boot validation fails"""
        # Note(TheJulia): Use an actual exception that can be raised
        # due to a misconfiguration.
mock_boot_validate.side_effect = exception.MissingParameterValue(
"something is missing")
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ADOPTING)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
node.refresh()
self.assertEqual(states.ADOPTFAIL, node.provision_state)
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
self.assertFalse(mock_prepare.called)
self.assertFalse(mock_take_over.called)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provisioning_action_adopt_node(self, mock_spawn):
"""Test an adoption request results in the node in ADOPTING"""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.MANAGEABLE,
target_provision_state=states.NOSTATE)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'adopt')
node.refresh()
self.assertEqual(states.ADOPTING, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(self.service,
self.service._do_adoption, mock.ANY)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_do_provisioning_action_adopt_node_retry(self, mock_spawn):
"""Test a retried adoption from ADOPTFAIL results in ADOPTING state"""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ADOPTFAIL,
target_provision_state=states.ACTIVE)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'adopt')
node.refresh()
self.assertEqual(states.ADOPTING, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(self.service,
self.service._do_adoption, mock.ANY)
def test_do_provisioning_action_manage_of_failed_adoption(self):
"""Test a node in ADOPTFAIL can be taken to MANAGEABLE"""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.ADOPTFAIL,
target_provision_state=states.ACTIVE)
self._start_service()
self.service.do_provisioning_action(self.context, node.uuid, 'manage')
node.refresh()
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNone(node.last_error)
# TODO(TheJulia): We should double check if these heartbeat tests need
# to move. I have this strange feeling we were lacking rpc testing of
# heartbeat until we did adoption testing....
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_without_version(self, mock_spawn, mock_heartbeat):
"""Test heartbeating."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'magic'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
self.service.heartbeat(self.context, node.uuid, 'http://callback',
agent_token='magic')
mock_heartbeat.assert_called_with(mock.ANY, mock.ANY,
'http://callback', '3.0.0', None)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_with_agent_version(self, mock_spawn, mock_heartbeat):
"""Test heartbeating."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'magic'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
self.service.heartbeat(self.context, node.uuid, 'http://callback',
'1.4.1', agent_token='magic')
mock_heartbeat.assert_called_with(mock.ANY, mock.ANY,
'http://callback', '1.4.1', None)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_with_no_required_agent_token(self, mock_spawn,
mock_heartbeat):
"""Tests that we kill the heartbeat attempt very early on."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE)
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
exc = self.assertRaises(
messaging.rpc.ExpectedException, self.service.heartbeat,
self.context, node.uuid, 'http://callback', agent_token=None)
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_with_required_agent_token(self, mock_spawn,
mock_heartbeat):
"""Test heartbeat works when token matches."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
self.service.heartbeat(self.context, node.uuid, 'http://callback',
agent_token='a secret')
mock_heartbeat.assert_called_with(mock.ANY, mock.ANY,
'http://callback', '3.0.0', None)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_with_agent_token(self, mock_spawn,
mock_heartbeat):
"""Test heartbeat works when token matches."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
self.service.heartbeat(self.context, node.uuid, 'http://callback',
agent_token='a secret')
mock_heartbeat.assert_called_with(mock.ANY, mock.ANY,
'http://callback', '3.0.0', None)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_invalid_agent_token(self, mock_spawn,
mock_heartbeat):
"""Heartbeat fails when it does not match."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.heartbeat, self.context,
node.uuid, 'http://callback',
agent_token='evil', agent_version='5.0.0b23')
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_invalid_agent_token_older_version(
self, mock_spawn, mock_heartbeat):
"""Heartbeat is rejected if token is received that is invalid."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
# Intentionally sending an older client in case something fishy
# occurs.
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.heartbeat, self.context,
node.uuid, 'http://callback',
agent_token='evil', agent_version='4.0.0')
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_invalid_newer_version(
self, mock_spawn, mock_heartbeat):
"""Heartbeat rejected if client should be sending a token."""
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE)
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.heartbeat, self.context,
node.uuid, 'http://callback',
agent_token=None, agent_version='6.1.5')
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_tls_required(self, mock_spawn, mock_heartbeat):
"""Heartbeat fails when it does not match."""
self.config(require_tls=True, group='agent')
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.heartbeat, self.context,
node.uuid, 'http://callback',
agent_token='a secret')
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertIn('TLS is required', str(exc.exc_info[1]))
self.assertFalse(mock_heartbeat.called)
@mock.patch.object(conductor_utils, 'store_agent_certificate',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_heartbeat_with_agent_verify_ca(self, mock_spawn,
mock_heartbeat,
mock_store_cert):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
provision_state=states.DEPLOYING,
target_provision_state=states.ACTIVE,
driver_internal_info={'agent_secret_token': 'a secret'})
mock_store_cert.return_value = '/path/to/crt'
self._start_service()
mock_spawn.reset_mock()
mock_spawn.side_effect = self._fake_spawn
self.service.heartbeat(self.context, node.uuid, 'http://callback',
agent_token='a secret', agent_verify_ca='abcd')
mock_heartbeat.assert_called_with(
mock.ANY, mock.ANY, 'http://callback', '3.0.0',
'/path/to/crt')
@mgr_utils.mock_record_keepalive
class DestroyVolumeConnectorTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_destroy_volume_connector(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
self.service.destroy_volume_connector(self.context, volume_connector)
self.assertRaises(exception.VolumeConnectorNotFound,
volume_connector.refresh)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_uuid,
volume_connector.uuid)
def test_destroy_volume_connector_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_connector,
self.context, volume_connector)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
def test_destroy_volume_connector_node_power_on(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_ON)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_connector,
self.context, volume_connector)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class UpdateVolumeConnectorTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_update_volume_connector(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id, extra={'foo': 'bar'})
new_extra = {'foo': 'baz'}
volume_connector.extra = new_extra
res = self.service.update_volume_connector(self.context,
volume_connector)
self.assertEqual(new_extra, res.extra)
def test_update_volume_connector_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
volume_connector.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_connector,
self.context, volume_connector)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
def test_update_volume_connector_type(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id, extra={'vol_id': 'fake-id'})
new_type = 'wwnn'
volume_connector.type = new_type
res = self.service.update_volume_connector(self.context,
volume_connector)
self.assertEqual(new_type, res.type)
def test_update_volume_connector_uuid(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
volume_connector.uuid = uuidutils.generate_uuid()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_connector,
self.context, volume_connector)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
def test_update_volume_connector_duplicate(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_connector1 = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
volume_connector2 = obj_utils.create_test_volume_connector(
self.context, node_id=node.id, uuid=uuidutils.generate_uuid(),
type='diff_type')
volume_connector2.type = volume_connector1.type
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_connector,
self.context, volume_connector2)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.VolumeConnectorTypeAndIdAlreadyExists,
exc.exc_info[0])
def test_update_volume_connector_node_power_on(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_ON)
volume_connector = obj_utils.create_test_volume_connector(
self.context, node_id=node.id)
volume_connector.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_connector,
self.context, volume_connector)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class DestroyVolumeTargetTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_destroy_volume_target(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
self.service.destroy_volume_target(self.context, volume_target)
self.assertRaises(exception.VolumeTargetNotFound,
volume_target.refresh)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_uuid,
volume_target.uuid)
def test_destroy_volume_target_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
def test_destroy_volume_target_node_gone(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
self.service.destroy_node(self.context, node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeNotFound, exc.exc_info[0])
def test_destroy_volume_target_already_destroyed(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
self.service.destroy_volume_target(self.context, volume_target)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.VolumeTargetNotFound, exc.exc_info[0])
def test_destroy_volume_target_node_power_on(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_ON)
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.destroy_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class UpdateVolumeTargetTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_update_volume_target(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(
self.context, node_id=node.id, extra={'foo': 'bar'})
new_extra = {'foo': 'baz'}
volume_target.extra = new_extra
res = self.service.update_volume_target(self.context, volume_target)
self.assertEqual(new_extra, res.extra)
def test_update_volume_target_node_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
volume_target.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeLocked, exc.exc_info[0])
def test_update_volume_target_volume_type(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(
self.context, node_id=node.id, extra={'vol_id': 'fake-id'})
new_volume_type = 'fibre_channel'
volume_target.volume_type = new_volume_type
res = self.service.update_volume_target(self.context,
volume_target)
self.assertEqual(new_volume_type, res.volume_type)
def test_update_volume_target_uuid(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(
self.context, node_id=node.id)
volume_target.uuid = uuidutils.generate_uuid()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
def test_update_volume_target_duplicate(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target1 = obj_utils.create_test_volume_target(
self.context, node_id=node.id)
volume_target2 = obj_utils.create_test_volume_target(
self.context, node_id=node.id, uuid=uuidutils.generate_uuid(),
boot_index=volume_target1.boot_index + 1)
volume_target2.boot_index = volume_target1.boot_index
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
self.context, volume_target2)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.VolumeTargetBootIndexAlreadyExists,
exc.exc_info[0])
def _test_update_volume_target_exception(self, expected_exc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_OFF)
volume_target = obj_utils.create_test_volume_target(
self.context, node_id=node.id, extra={'vol_id': 'fake-id'})
new_volume_type = 'fibre_channel'
volume_target.volume_type = new_volume_type
with mock.patch.object(objects.VolumeTarget, 'save',
autospec=True) as mock_save:
mock_save.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(expected_exc, exc.exc_info[0])
def test_update_volume_target_node_not_found(self):
self._test_update_volume_target_exception(exception.NodeNotFound)
def test_update_volume_target_not_found(self):
self._test_update_volume_target_exception(
exception.VolumeTargetNotFound)
def test_update_volume_target_node_power_on(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
power_state=states.POWER_ON)
volume_target = obj_utils.create_test_volume_target(self.context,
node_id=node.id)
volume_target.extra = {'foo': 'baz'}
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
self.context, volume_target)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
@mgr_utils.mock_record_keepalive
class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(NodeTraitsTestCase, self).setUp()
self.traits = ['trait1', 'trait2']
self.node = obj_utils.create_test_node(self.context,
driver='fake-hardware')
def test_add_node_traits(self):
self.service.add_node_traits(self.context, self.node.id,
self.traits[:1])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits[:1], [trait.trait for trait in traits])
self.service.add_node_traits(self.context, self.node.id,
self.traits[1:])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits, [trait.trait for trait in traits])
def test_add_node_traits_replace(self):
self.service.add_node_traits(self.context, self.node.id,
self.traits[:1], replace=True)
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits[:1], [trait.trait for trait in traits])
self.service.add_node_traits(self.context, self.node.id,
self.traits[1:], replace=True)
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits[1:], [trait.trait for trait in traits])
def _test_add_node_traits_exception(self, expected_exc):
with mock.patch.object(objects.Trait, 'create',
autospec=True) as mock_create:
mock_create.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.add_node_traits, self.context,
self.node.id, self.traits)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(expected_exc, exc.exc_info[0])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual([], traits.objects)
def test_add_node_traits_invalid_parameter_value(self):
self._test_add_node_traits_exception(exception.InvalidParameterValue)
def test_add_node_traits_node_locked(self):
self._test_add_node_traits_exception(exception.NodeLocked)
def test_add_node_traits_node_not_found(self):
self._test_add_node_traits_exception(exception.NodeNotFound)
def test_remove_node_traits(self):
objects.TraitList.create(self.context, self.node.id, self.traits)
self.service.remove_node_traits(self.context, self.node.id,
self.traits[:1])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits[1:], [trait.trait for trait in traits])
self.service.remove_node_traits(self.context, self.node.id,
self.traits[1:])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual([], traits.objects)
def test_remove_node_traits_all(self):
objects.TraitList.create(self.context, self.node.id, self.traits)
self.service.remove_node_traits(self.context, self.node.id, None)
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual([], traits.objects)
def test_remove_node_traits_empty(self):
objects.TraitList.create(self.context, self.node.id, self.traits)
self.service.remove_node_traits(self.context, self.node.id, [])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits, [trait.trait for trait in traits])
def _test_remove_node_traits_exception(self, expected_exc):
objects.TraitList.create(self.context, self.node.id, self.traits)
with mock.patch.object(objects.Trait, 'destroy',
autospec=True) as mock_destroy:
mock_destroy.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.remove_node_traits,
self.context, self.node.id, self.traits)
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(expected_exc, exc.exc_info[0])
traits = objects.TraitList.get_by_node_id(self.context, self.node.id)
self.assertEqual(self.traits, [trait.trait for trait in traits])
def test_remove_node_traits_node_locked(self):
self._test_remove_node_traits_exception(exception.NodeLocked)
def test_remove_node_traits_node_not_found(self):
self._test_remove_node_traits_exception(exception.NodeNotFound)
def test_remove_node_traits_node_trait_not_found(self):
self._test_remove_node_traits_exception(exception.NodeTraitNotFound)
@mgr_utils.mock_record_keepalive
class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_not_support(self, mock_acquire,
mock_abort, mock_log):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.INSPECTWAIT)
task = task_manager.TaskManager(self.context, node.uuid)
mock_acquire.side_effect = self._get_acquire_side_effect(task)
mock_abort.side_effect = exception.UnsupportedDriverExtension(
driver='fake-hardware', extension='inspect')
self._start_service()
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.do_provisioning_action,
self.context, task.node.uuid,
"abort")
self.assertEqual(exception.UnsupportedDriverExtension,
exc.exc_info[0])
self.assertTrue(mock_log.error.called)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_return_failed(self, mock_acquire,
mock_abort, mock_log):
mock_abort.side_effect = exception.IronicException('Oops')
self._start_service()
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.INSPECTWAIT)
task = task_manager.TaskManager(self.context, node.uuid)
mock_acquire.side_effect = self._get_acquire_side_effect(task)
self.assertRaises(exception.IronicException,
self.service.do_provisioning_action,
self.context, task.node.uuid,
"abort")
node.refresh()
self.assertTrue(mock_log.exception.called)
self.assertIn('Failed to abort inspection.', node.last_error)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_succeeded(self, mock_acquire, mock_abort):
self._start_service()
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.INSPECTWAIT)
task = task_manager.TaskManager(self.context, node.uuid)
mock_acquire.side_effect = self._get_acquire_side_effect(task)
self.service.do_provisioning_action(self.context, task.node.uuid,
"abort")
node.refresh()
self.assertEqual('inspect failed', node.provision_state)
self.assertIn('Inspection was aborted', node.last_error)
| 49.631752
| 79
| 0.624198
|
6fcd069611509b5db5f192819ff45008a88547c8
| 5,039
|
py
|
Python
|
application/__init__.py
|
By-Lucas/Sistema-de-tickets-flask
|
20f8062f16bc264d6784cfd0498c717861a02f37
|
[
"MIT"
] | 95
|
2017-10-31T21:25:11.000Z
|
2022-03-07T05:46:12.000Z
|
application/__init__.py
|
By-Lucas/Sistema-de-tickets-flask
|
20f8062f16bc264d6784cfd0498c717861a02f37
|
[
"MIT"
] | 47
|
2017-05-15T10:52:23.000Z
|
2021-12-13T09:30:52.000Z
|
application/__init__.py
|
By-Lucas/Sistema-de-tickets-flask
|
20f8062f16bc264d6784cfd0498c717861a02f37
|
[
"MIT"
] | 54
|
2017-07-13T03:38:47.000Z
|
2022-02-12T20:10:02.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
"""
Flicket
=======
A simple ticket system using Python and the Flask microframework.
This probably wouldn't have been created without the excellent tutorials written by Miguel Grinberg:
https://blog.miguelgrinberg.com. Many thanks kind sir.
"""
from flask import abort
from flask import Flask
from flask import g
from flask import request
from flask_login import LoginManager
from flask_mail import Mail
from flask_pagedown import PageDown
from flask_sqlalchemy import SQLAlchemy
from flask_babel import Babel
from flaskext.markdown import Markdown
from application.flicket_admin.views import admin_bp
from application.flicket_api.views import bp_api
from application.flicket_errors import bp_errors
from application.flicket.views import flicket_bp
from application.flicket.scripts.jinja2_functions import now_year
__version__ = '0.2.7'
app = Flask(__name__)
app.config.from_object('config.BaseConfiguration')
app.config.update(TEMPLATES_AUTO_RELOAD=True)
db = SQLAlchemy(app)
mail = Mail(app)
pagedown = PageDown(app)
babel = Babel(app)
Markdown(app)
# import jinja function
app.jinja_env.globals.update(now_year=now_year)
# import models so alembic can see them
# noinspection PyPep8
from application.flicket.models import flicket_user, flicket_models
# noinspection PyPep8
from application.flicket_admin.models import flicket_config
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'flicket_bp.login'
# noinspection PyPep8
from .flicket_admin.views import view_admin
# noinspection PyPep8
from .flicket_admin.views import view_config
# noinspection PyPep8
from .flicket_admin.views import view_email_test
# noinspection PyPep8
from .flicket.views import assign
# noinspection PyPep8
from .flicket.views import categories
# noinspection PyPep8
from .flicket.views import edit_status
# noinspection PyPep8
from .flicket.views import claim
# noinspection PyPep8
from .flicket.views import create
# noinspection PyPep8
from .flicket.views import delete
# noinspection PyPep8
from .flicket.views import departments
# noinspection PyPep8
from .flicket.views import edit
# noinspection PyPep8
from .flicket.views import history
# noinspection PyPep8
from .flicket.views import index
# noinspection PyPep8
from .flicket.views import login
# noinspection PyPep8
from .flicket.views import help
# noinspection PyPep8
from .flicket.views import tickets
# noinspection PyPep8
from .flicket.views import release
# noinspection PyPep8
from .flicket.views import render_uploads
# noinspection PyPep8
from .flicket.views import subscribe
# noinspection PyPep8
from .flicket.views import user_edit
# noinspection PyPep8
from .flicket.views import users
# noinspection PyPep8
from .flicket.views import view_ticket
# noinspection PyPep8
from .flicket.views import department_category
# noinspection PyPep8
from .flicket_api.views import actions
# noinspection PyPep8
from .flicket_api.views import categories
# noinspection PyPep8
from .flicket_api.views import departments
# noinspection PyPep8
from .flicket_api.views import histories
# noinspection PyPep8
from .flicket_api.views import posts
# noinspection PyPep8
from .flicket_api.views import priorities
# noinspection PyPep8
from .flicket_api.views import status
# noinspection PyPep8
from .flicket_api.views import subscriptions
# noinspection PyPep8
from .flicket_api.views import tickets
# noinspection PyPep8
from .flicket_api.views import tokens
# noinspection PyPep8
from .flicket_api.views import uploads
# noinspection PyPep8
from .flicket_api.views import users
# noinspection PyPep8
from .flicket_api.views import department_categories
# noinspection PyPep8
from .flicket_errors import handlers
app.register_blueprint(admin_bp)
app.register_blueprint(flicket_bp)
app.register_blueprint(bp_api)
app.register_blueprint(bp_errors)
# prints url routes for debugging
# for rule in app.url_map.iter_rules():
# print(rule)
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
user = getattr(g, 'user', None)
if hasattr(user, 'locale'):
return user.locale
# otherwise try to guess the language from the user accept
# header the browser transmits. We support de/fr/en in this
# example. The best match wins.
return request.accept_languages.best_match(app.config['SUPPORTED_LANGUAGES'].keys())
@app.url_defaults
def set_language_code(endpoint, values):
if 'lang_code' in values or not g.get('lang_code', None):
return
if app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values['lang_code'] = g.lang_code
@app.url_value_preprocessor
def get_lang_code(endpoint, values):
if values is not None:
g.lang_code = values.pop('lang_code', None)
@app.before_request
def ensure_lang_support():
lang_code = g.get('lang_code', None)
if lang_code and lang_code not in app.config['SUPPORTED_LANGUAGES'].keys():
return abort(404)
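# A minimal sketch of how a view could use the lang_code hooks above; the
# blueprint name, URL prefix and template are hypothetical and are shown
# commented out (like the debugging loop above) purely for illustration:
#
# from flask import Blueprint, render_template
#
# example_bp = Blueprint('example_bp', __name__, url_prefix='/<lang_code>')
#
#
# @example_bp.route('/hello')
# def hello():
#     # get_lang_code() has already moved the URL's lang_code into g, and
#     # ensure_lang_support() has rejected unsupported values with a 404.
#     return render_template('hello.html')
#
#
# app.register_blueprint(example_bp)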
| 27.839779
| 104
| 0.794602
|
1bd21a19fe2ab7c730bf485f2028489ad76c87a9
| 3,667
|
py
|
Python
|
pymeasure/instruments/thorlabs/thorlabspro8000.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | 4
|
2020-11-13T08:57:16.000Z
|
2021-09-16T12:45:33.000Z
|
pymeasure/instruments/thorlabs/thorlabspro8000.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | 2
|
2021-12-16T16:15:47.000Z
|
2022-03-27T10:47:13.000Z
|
pymeasure/instruments/thorlabs/thorlabspro8000.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | null | null | null |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument, discreteTruncate
from pymeasure.instruments.validators import strict_discrete_set, \
truncated_discrete_set, truncated_range
import numpy as np
import time
import re
class ThorlabsPro8000(Instrument):
"""Represents Thorlabs Pro 8000 modular laser driver"""
SLOTS = range(1,9)
LDC_POLARITIES = ['AG', 'CG']
STATUS = ['ON','OFF']
def __init__(self, resourceName, **kwargs):
super(ThorlabsPro8000, self).__init__(
resourceName,
"Thorlabs Pro 8000",
**kwargs
)
self.write(':SYST:ANSW VALUE')
# Code for general purpose commands (mother board related)
slot = Instrument.control(":SLOT?", ":SLOT %d",
"Slot selection. Allowed values are: {}""".format(SLOTS),
validator=strict_discrete_set,
values=SLOTS,
map_values=False)
# Code for LDC-xxxx daughter boards (laser driver)
LDCCurrent = Instrument.control(":ILD:SET?", ":ILD:SET %g",
"""Laser current.""")
LDCCurrentLimit = Instrument.control(":LIMC:SET?", ":LIMC:SET %g",
"""Set Software current Limit (value must be lower than hardware current limit).""")
LDCPolarity = Instrument.control(":LIMC:SET?", ":LIMC:SET %s",
"""Set laser diode polarity. Allowed values are: {}""".format(LDC_POLARITIES),
validator=strict_discrete_set,
values=LDC_POLARITIES,
map_values=False)
LDCStatus = Instrument.control(":LASER?", ":LASER %s",
"""Set laser diode status. Allowed values are: {}""".format(STATUS),
validator=strict_discrete_set,
values=STATUS,
map_values=False)
# Code for TED-xxxx daughter boards (TEC driver)
TEDStatus = Instrument.control(":TEC?", ":TEC %s",
"""Set TEC status. Allowed values are: {}""".format(STATUS),
validator=strict_discrete_set,
values=STATUS,
map_values=False)
TEDSetTemperature = Instrument.control(":TEMP:SET?", ":TEMP:SET %g",
"""Set TEC temperature""")
| 45.8375
| 116
| 0.605945
|
6d3946ff4a5b477dd6c3a5bb2c4964cde52f2a18
| 560
|
py
|
Python
|
tests/test_mapper.py
|
frictionlessdata/datapackage-pipelines-bigquery-driver
|
7ed9d002620619a819f73d97e03257dcc715c7a4
|
[
"MIT"
] | 10
|
2017-10-19T00:45:29.000Z
|
2021-11-04T20:19:49.000Z
|
tests/test_mapper.py
|
frictionlessdata/datapackage-pipelines-bigquery-driver
|
7ed9d002620619a819f73d97e03257dcc715c7a4
|
[
"MIT"
] | 22
|
2016-03-26T08:03:09.000Z
|
2017-03-21T17:45:11.000Z
|
tests/test_mapper.py
|
frictionlessdata/tableschema-bigquery-py
|
7ed9d002620619a819f73d97e03257dcc715c7a4
|
[
"MIT"
] | 2
|
2016-09-19T07:45:07.000Z
|
2017-04-08T00:24:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from tableschema_bigquery.mapper import Mapper
# Tests
def test_mapper_convert_bucket():
mapper = Mapper('prefix_')
assert mapper.convert_bucket('bucket') == 'prefix_bucket'
def test_mapper_restore_bucket():
mapper = Mapper('prefix_')
assert mapper.restore_bucket('prefix_bucket') == 'bucket'
assert mapper.restore_bucket('xxxxxx_bucket') == None
| 25.454545
| 61
| 0.769643
|
fe6e83ffc8ab415c085fdc73aa4f00c9a87a7097
| 35
|
py
|
Python
|
btd6_memory_info/generated/SteamNative/HTML_BrowserRestarted_t/html_browser_restarted_t.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/SteamNative/HTML_BrowserRestarted_t/html_browser_restarted_t.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/SteamNative/HTML_BrowserRestarted_t/html_browser_restarted_t.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class HTML_BrowserRestarted_t: pass
| 35
| 35
| 0.914286
|
cf8c07a6ce1fb80959d34328e5848a2b3a8b0fee
| 1,834
|
py
|
Python
|
ml_service/util/attach_compute.py
|
anilkumar-kanasani-diconium/MLOpsAzure
|
a3dfb0571b48c2d2d8742d6bf321a2ff2a6ae971
|
[
"MIT"
] | 1
|
2022-02-23T12:08:22.000Z
|
2022-02-23T12:08:22.000Z
|
ml_service/util/attach_compute.py
|
anilkumar-kanasani-diconium/MLOpsAzure
|
a3dfb0571b48c2d2d8742d6bf321a2ff2a6ae971
|
[
"MIT"
] | null | null | null |
ml_service/util/attach_compute.py
|
anilkumar-kanasani-diconium/MLOpsAzure
|
a3dfb0571b48c2d2d8742d6bf321a2ff2a6ae971
|
[
"MIT"
] | null | null | null |
import traceback
from azureml.core import Workspace
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.exceptions import ComputeTargetException
from ml_service.util.env_variables import Env
def get_compute(workspace: Workspace, compute_name: str, vm_size: str, for_batch_scoring: bool = False): # NOQA E501
try:
if compute_name in workspace.compute_targets:
compute_target = workspace.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found existing compute target " + compute_name + " so using it.") # NOQA
else:
e = Env()
compute_config = AmlCompute.provisioning_configuration(
vm_size=vm_size,
vm_priority=e.vm_priority if not for_batch_scoring else e.vm_priority_scoring, # NOQA E501
min_nodes=e.min_nodes if not for_batch_scoring else e.min_nodes_scoring, # NOQA E501
max_nodes=e.max_nodes if not for_batch_scoring else e.max_nodes_scoring, # NOQA E501
                idle_seconds_before_scaledown="300",
                # Uncomment the below lines for VNet support
# vnet_resourcegroup_name=vnet_resourcegroup_name,
# vnet_name=vnet_name,
# subnet_name=subnet_name
)
compute_target = ComputeTarget.create(
workspace, compute_name, compute_config
)
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=10
)
return compute_target
except ComputeTargetException:
traceback.print_exc()
print("An error occurred trying to provision compute.")
exit(1)
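# --- Editor's note: minimal usage sketch (assumption, not part of the original module). ---
# Workspace.from_config() reads the standard Azure ML config.json; the compute name and
# VM size below are hypothetical.
if __name__ == "__main__":
    ws = Workspace.from_config()
    target = get_compute(ws, compute_name="cpu-cluster", vm_size="STANDARD_DS2_V2")
    if target is not None:
        print("Using compute target: " + target.name)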
| 45.85
| 117
| 0.663032
|
f12a3f82af20d1a20966fcf6e7e558b033d66e1c
| 566
|
py
|
Python
|
hackerearth/Algorithms/Cyclic Permutations/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Cyclic Permutations/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Cyclic Permutations/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('sys.stdin.readline', side_effect=[
'2',
'101',
'101',
'111',
'111',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'1\n' +
'3\n')
if __name__ == '__main__':
unittest.main()
| 21.769231
| 46
| 0.55477
|
c649c7f7584165df7226f854fd4148048e5703fc
| 26,342
|
py
|
Python
|
gender_novels/analysis/gender_pronoun_freq_analysis.py
|
elenaboal/gender_novels
|
e14411e5ccc1fcead98169af02e6eb77a4a199e9
|
[
"BSD-3-Clause"
] | 19
|
2018-09-07T19:26:48.000Z
|
2022-02-10T14:10:19.000Z
|
gender_novels/analysis/gender_pronoun_freq_analysis.py
|
elenaboal/gender_novels
|
e14411e5ccc1fcead98169af02e6eb77a4a199e9
|
[
"BSD-3-Clause"
] | 111
|
2018-09-09T13:09:06.000Z
|
2020-12-04T00:26:50.000Z
|
gender_novels/analysis/gender_pronoun_freq_analysis.py
|
elenaboal/gender_novels
|
e14411e5ccc1fcead98169af02e6eb77a4a199e9
|
[
"BSD-3-Clause"
] | 37
|
2018-09-05T21:44:41.000Z
|
2020-12-01T17:19:02.000Z
|
from gender_novels.corpus import Corpus
from gender_novels.analysis.analysis import get_comparative_word_freq
from gender_novels.analysis.analysis import get_p_and_ttest_value  # used by stat_analysis() below
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pprint
palette = "colorblind"
style_name = "white"
style_list = {'axes.edgecolor': '.6', 'grid.color': '.9', 'axes.grid': 'True',
'font.family': 'serif'}
sns.set_color_codes(palette)
sns.set_style(style_name, style_list)
from gender_novels import common
def books_pronoun_freq(corp):
'''
Counts male and female pronouns for every book and finds their relative frequencies per book
Outputs dictionary mapping novel object to the relative frequency
of female pronouns in that book
:param: Corpus object
:return: dictionary with data organized by groups
>>> books_pronoun_freq(Corpus('test_corpus'))
{<Novel (aanrud_longfrock)>: 0.7623169107856191, <Novel (abbott_flatlandromance)>: 0.14321608040201003, <Novel (abbott_indiscreetletter)>: 0.4166666666666667, <Novel (adams_fighting)>: 0.1898395721925134, <Novel (alcott_josboys)>: 0.42152086422368146, <Novel (alcott_littlemen)>: 0.3111248200699157, <Novel (alcott_littlewomen)>: 0.6196978175713487, <Novel (alden_chautauqua)>: 0.7518623169791935, <Novel (austen_emma)>: 0.5662100456621004, <Novel (austen_persuasion)>: 0.5305111461382571}
'''
try:
if (not corp.load_test_corpus):
relative_freq_male = common.load_pickle(f'{corp.corpus_name}_pronoun_freq_male')
relative_freq_female = common.load_pickle(f'{corp.corpus_name}_pronoun_freq_female')
return relative_freq_female
except IOError:
pass
relative_freq_male = {}
relative_freq_female = {}
for book in corp.novels:
he = book.get_word_freq('he')
him = book.get_word_freq('him')
his = book.get_word_freq('his')
male = he + him + his
she = book.get_word_freq('she')
her = book.get_word_freq('her')
hers = book.get_word_freq('hers')
female = she + her + hers
temp_dict = {'male': male, 'female': female}
temp_dict = get_comparative_word_freq(temp_dict)
relative_freq_male[book] = temp_dict['male']
relative_freq_female[book] = temp_dict['female']
book.text = ''
book._word_counts_counter = None
if (not corp.load_test_corpus):
common.store_pickle(relative_freq_male, f'{corp.corpus_name}_pronoun_freq_male')
common.store_pickle(relative_freq_female, f'{corp.corpus_name}_pronoun_freq_female')
return (relative_freq_female)
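# --- Editor's note: illustrative sketch (assumption), showing how the returned mapping is
# typically consumed by the helpers defined further down in this module:
#
#     corpus = Corpus('test_corpus')
#     freq = books_pronoun_freq(corpus)         # {Novel: relative female-pronoun frequency}
#     by_gender = freq_by_author_gender(freq)   # bin frequencies by author gender
#     print(get_mean(by_gender))                # e.g. {'Male Author': ..., 'Female Author': ...}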
def subject_vs_object_pronoun_freqs(corp):
'''
Takes in a Corpus of novels
Returns a tuple of two dictionaries, one male and female
Each dictionary maps each Novel in the corpus to the proportion of the pronouns
of the specified gender in that novel that are subject pronouns
#TODO: add doctests
:param corp: Corpus
:return: tuple of two dictionaries (male, female)
>>> subject_vs_object_pronoun_freqs(Corpus('test_corpus'))
({<Novel (aanrud_longfrock)>: 0.793233082706767, <Novel (abbott_flatlandromance)>: 0.6741573033707865, <Novel (abbott_indiscreetletter)>: 0.7906976744186047, <Novel (adams_fighting)>: 0.7184527584020292, <Novel (alcott_josboys)>: 0.6330049261083744, <Novel (alcott_littlemen)>: 0.6451612903225807, <Novel (alcott_littlewomen)>: 0.6577563540753725, <Novel (alden_chautauqua)>: 0.7577030812324931, <Novel (austen_emma)>: 0.7086120401337792, <Novel (austen_persuasion)>: 0.6739130434782609}, {<Novel (aanrud_longfrock)>: 0.5376532399299474, <Novel (abbott_flatlandromance)>: 0.17543859649122806, <Novel (abbott_indiscreetletter)>: 0.4424242424242424, <Novel (adams_fighting)>: 0.43485915492957744, <Novel (alcott_josboys)>: 0.3862487360970678, <Novel (alcott_littlemen)>: 0.4343501326259947, <Novel (alcott_littlewomen)>: 0.4124569980083288, <Novel (alden_chautauqua)>: 0.5461432506887053, <Novel (austen_emma)>: 0.4836730221345606, <Novel (austen_persuasion)>: 0.4872013651877133})
'''
try:
if (not corp.load_test_corpus):
relative_freq_male_sub_v_ob = common.load_pickle(
f'{corp.corpus_name}_sub_v_ob_pronoun_freq_male')
relative_freq_female_sub_v_ob = common.load_pickle(
f'{corp.corpus_name}_sub_v_ob_pronoun_freq_female')
return (relative_freq_male_sub_v_ob, relative_freq_female_sub_v_ob)
except IOError:
pass
relative_freq_male_subject = {}
relative_freq_female_subject = {}
relative_freq_male_object = {}
relative_freq_female_object = {}
for book in corp.novels:
he = book.get_word_freq('he')
him = book.get_word_freq('him')
she = book.get_word_freq('she')
her = book.get_word_freq('her')
temp_dict_male = {'subject': he, 'object': him}
temp_dict_female = {'subject': she, 'object': her}
temp_dict_male = get_comparative_word_freq(temp_dict_male)
temp_dict_female = get_comparative_word_freq(temp_dict_female)
relative_freq_male_subject[book] = temp_dict_male['subject']
relative_freq_female_subject[book] = temp_dict_female['subject']
relative_freq_male_object[book] = temp_dict_male['object']
relative_freq_female_object[book] = temp_dict_female['object']
book.text = ''
book._word_counts_counter = None
if (not corp.load_test_corpus):
common.store_pickle(relative_freq_male_subject,
f'{corp.corpus_name}_sub_v_ob_pronoun_freq_male')
common.store_pickle(relative_freq_female_subject,
f'{corp.corpus_name}_sub_v_ob_pronoun_freq_female')
result_tuple = (relative_freq_male_subject, relative_freq_female_subject)
return result_tuple
def subject_pronouns_gender_comparison(corp, subject_gender):
'''
Takes in a Corpus of novels and a gender.
The gender determines whether the male frequency or female frequency will
be returned
Returns a dictionary of each novel in the Corpus mapped to the portion of
the subject pronouns in the book that are of the specified gender
:param corp: Corpus
:param subject_gender: string 'male' or string 'female'
:return: dictionary
>>> subject_pronouns_gender_comparison(Corpus('test_corpus'), 'male')
{<Novel (aanrud_longfrock)>: 0.2557575757575758, <Novel (abbott_flatlandromance)>: 0.923076923076923, <Novel (abbott_indiscreetletter)>: 0.582857142857143, <Novel (adams_fighting)>: 0.8210144927536231, <Novel (alcott_josboys)>: 0.5736607142857142, <Novel (alcott_littlemen)>: 0.6812652068126521, <Novel (alcott_littlewomen)>: 0.39719502513892563, <Novel (alden_chautauqua)>: 0.2543488481429243, <Novel (austen_emma)>: 0.4343926191696566, <Novel (austen_persuasion)>: 0.45696623870660963}
>>> subject_pronouns_gender_comparison(Corpus('test_corpus'), 'female')
{<Novel (aanrud_longfrock)>: 0.7442424242424243, <Novel (abbott_flatlandromance)>: 0.07692307692307691, <Novel (abbott_indiscreetletter)>: 0.4171428571428572, <Novel (adams_fighting)>: 0.17898550724637682, <Novel (alcott_josboys)>: 0.4263392857142857, <Novel (alcott_littlemen)>: 0.31873479318734793, <Novel (alcott_littlewomen)>: 0.6028049748610743, <Novel (alden_chautauqua)>: 0.7456511518570758, <Novel (austen_emma)>: 0.5656073808303435, <Novel (austen_persuasion)>: 0.5430337612933904}
'''
if not(subject_gender == 'male' or subject_gender == 'female'):
raise ValueError('subject_gender must be \'male\' or \'female\'')
try:
if (not corp.load_test_corpus):
relative_freq_male_subject = common.load_pickle(
f'{corp.corpus_name}_subject_pronoun_freq_male')
relative_freq_female_subject = common.load_pickle(
f'{corp.corpus_name}_subject_pronoun_freq_female')
if subject_gender == 'male':
return relative_freq_male_subject
else:
return relative_freq_female_subject
except IOError:
pass
relative_freq_female_sub = {}
relative_freq_male_sub = {}
for book in corp.novels:
he = book.get_word_freq('he')
she = book.get_word_freq('she')
relative_freq_female_sub[book] = (she)/(he+she)
relative_freq_male_sub[book] = (he)/(he+she)
book.text = ''
book._word_counts_counter = None
if (not corp.load_test_corpus):
common.store_pickle(relative_freq_female_sub,
f'{corp.corpus_name}_subject_pronoun_freq_female')
common.store_pickle(relative_freq_male_sub, f'{corp.corpus_name}_subject_pronoun_freq_male')
if subject_gender == 'male':
return relative_freq_male_sub
elif subject_gender == 'female':
return relative_freq_female_sub
else:
raise ValueError('subject_gender must be \'male\' or \'female\'')
def dict_to_list(d):
'''
Takes in a dictionary and returns a list of the values in the dictionary
If there are repeats in the values, there will be repeats in the list
:param d: dictionary
:return: list of values in the dictionary
>>> d = {'a': 1, 'b': 'bee', 'c': 65}
>>> dict_to_list(d)
[1, 'bee', 65]
>>> d2 = {}
>>> dict_to_list(d2)
[]
'''
L = []
for key, value in d.items():
L.append(value)
return L
def freq_by_author_gender(d):
'''
Takes in a dictionary of novel objects mapped to relative frequencies
(output of above function)
Returns a dictionary with frequencies binned by author gender into lists
List name is mapped to the list of frequencies
list names key:
male_author - male authors
female_author- female authors
:param d: dictionary
:return: dictionary
>>> from gender_novels import novel
>>> novel_metadata = {'author': 'Brontë, Anne', 'title': 'The Tenant of Wildfell Hall',
... 'corpus_name': 'sample_novels', 'date': '1848', 'author_gender':'female',
... 'filename': 'bronte_wildfell.txt'}
>>> bronte = novel.Novel(novel_metadata)
>>> novel_metadata = {'author': 'Adams, William Taylor', 'title': 'Fighting for the Right',
... 'corpus_name': 'sample_novels', 'date': '1892', 'author_gender':'male',
... 'filename': 'adams_fighting.txt'}
>>> fighting = novel.Novel(novel_metadata)
>>> d = {}
>>> d[fighting] = 0.3
>>> d[bronte] = 0.6
>>> freq_by_author_gender(d)
{'Male Author': [0.3], 'Female Author': [0.6]}
'''
male_author = []
female_author = []
data = {}
for k, v in d.items():
if k.author_gender == 'male':
male_author.append(v)
if k.author_gender == 'female':
female_author.append(v)
data['Male Author'] = male_author
data['Female Author'] = female_author
return data
def freq_by_date(d):
'''
Takes in a dictionary of novel objects mapped to relative frequencies
(output of above function)
Returns a dictionary with frequencies binned by decades into lists
List name is mapped to the list of frequencies
list names key:
date_to_1810 - publication dates before and not including 1810
date_x_to_y (by decade) - publication dates from x to y
Example: date_1810_to_1819 - publication dates from 1810 to 1819
date_1900_on - publication dates in 1900 and onward
:param d: dictionary
:return: dictionary
>>> from gender_novels import novel
>>> novel_metadata = {'author': 'Austen, Jane', 'title': 'Persuasion',
... 'corpus_name': 'sample_novels', 'date': '1818',
... 'filename': 'austen_persuasion.txt'}
>>> austen = novel.Novel(novel_metadata)
>>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
... 'corpus_name': 'sample_novels', 'date': '1900',
... 'filename': 'hawthorne_scarlet.txt'}
>>> scarlet = novel.Novel(novel_metadata)
>>> d = {}
>>> d[scarlet] = 0.5
>>> d[austen] = 0.3
>>> freq_by_date(d)
{'1770 to 1810': [], '1810 to 1819': [0.3], '1820 to 1829': [], '1830 to 1839':
[], '1840 to 1849': [], '1850 to 1859': [], '1860 to 1869': [], '1870 to 1879':
[], '1880 to 1889': [], '1890 to 1899': [], '1900 to 1922': [0.5]}
'''
date_to_1810 = []
date_1810_to_1819 = []
date_1820_to_1829 = []
date_1830_to_1839 = []
date_1840_to_1849 = []
date_1850_to_1859 = []
date_1860_to_1869 = []
date_1870_to_1879 = []
date_1880_to_1889 = []
date_1890_to_1899 = []
date_1900_on = []
data = {}
for k, v in d.items():
if k.date < 1810:
date_to_1810.append(v)
elif k.date < 1820:
date_1810_to_1819.append(v)
elif k.date < 1830:
date_1820_to_1829.append(v)
elif k.date < 1840:
date_1830_to_1839.append(v)
elif k.date < 1850:
date_1840_to_1849.append(v)
elif k.date < 1860:
date_1850_to_1859.append(v)
elif k.date < 1870:
date_1860_to_1869.append(v)
elif k.date < 1880:
date_1870_to_1879.append(v)
elif k.date < 1890:
date_1880_to_1889.append(v)
elif k.date < 1900:
            date_1890_to_1899.append(v)
else:
date_1900_on.append(v)
data['1770 to 1810'] = date_to_1810
data['1810 to 1819'] = date_1810_to_1819
data['1820 to 1829'] = date_1820_to_1829
data['1830 to 1839'] = date_1830_to_1839
data['1840 to 1849'] = date_1840_to_1849
data['1850 to 1859'] = date_1850_to_1859
data['1860 to 1869'] = date_1860_to_1869
data['1870 to 1879'] = date_1870_to_1879
data['1880 to 1889'] = date_1880_to_1889
data['1890 to 1899'] = date_1890_to_1899
data['1900 to 1922'] = date_1900_on
return data
def freq_by_location(d):
'''
Takes in a dictionary of novel objects mapped to relative frequencies
(output of above function)
Returns a dictionary with frequencies binned by publication location into lists
List name is mapped to the list of frequencies
list names key:
location_UK - published in the United Kingdom
location_US - published in the US
location_other - published somewhere other than the US and England
:param d: dictionary
:return: dictionary
>>> from gender_novels import novel
>>> novel_metadata = {'author': 'Austen, Jane', 'title': 'Persuasion',
... 'corpus_name': 'sample_novels', 'date': '1818',
... 'country_publication': 'United Kingdom', 'filename': 'austen_persuasion.txt'}
>>> austen = novel.Novel(novel_metadata)
>>> novel_metadata2 = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
... 'corpus_name': 'sample_novels', 'date': '1900',
... 'country_publication': 'United States', 'filename':'hawthorne_scarlet.txt'}
>>> scarlet = novel.Novel(novel_metadata2)
>>> d = {}
>>> d[scarlet] = 0.5
>>> d[austen] = 0.3
>>> freq_by_location(d)
{'UK': [0.3], 'US': [0.5], 'Other': []}
'''
location_UK = []
location_US = []
location_other = []
for k, v in d.items():
if k.country_publication == 'United Kingdom':
location_UK.append(v)
elif k.country_publication == 'United States':
location_US.append(v)
else:
location_other.append(v)
data = {}
data['UK'] = location_UK
data['US'] = location_US
data['Other'] = location_other
return data
def get_mean(data_dict):
'''
Takes in a dictionary matching some object to lists and returns a dictionary of the
original keys mapped to the mean of the lists
:param data_dict: dictionary matching some object to lists
:return: dictionary with original key mapped to an average of the input list
>>> d = {}
>>> d['fives'] = [5,5,5]
>>> d['halfway'] = [0,1]
>>> d['nothing'] = [0]
>>> get_mean(d)
{'fives': 5.0, 'halfway': 0.5, 'nothing': 0.0}
'''
mean_dict = {}
for k, v in data_dict.items():
try:
mean_dict[k] = np.mean(v)
except:
mean_dict[k + "*"] = 0.5
return mean_dict
def sort_every_year(frequency_dict):
'''
    Takes in a dictionary of novels mapped to pronoun frequencies and returns a dictionary of
years mapped to lists of pronoun frequencies
>>> from gender_novels import novel
>>> novel_metadata = {'author': 'Austen, Jane', 'title': 'Persuasion',
... 'corpus_name': 'sample_novels', 'date': '1818',
... 'filename': 'austen_persuasion.txt'}
>>> austen = novel.Novel(novel_metadata)
>>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
... 'corpus_name': 'sample_novels', 'date': '1900',
... 'filename': 'hawthorne_scarlet.txt'}
>>> scarlet = novel.Novel(novel_metadata)
>>> d = {}
>>> d[scarlet] = 0.5
>>> d[austen] = 0.3
>>> sorted_years = sort_every_year(d)
>>> print(sorted_years)
{1900: [0.5], 1818: [0.3]}
:param frequency_dict: dictionary of novels mapped to pronoun frequencies
:return: dictionary of years mapped to lists of pronoun frequencies
'''
every_year_dict = {}
for key,value in frequency_dict.items():
frequency_list = [frequency_dict[key]]
if key.date not in every_year_dict.keys():
every_year_dict[key.date] = frequency_list
elif key.date in every_year_dict.keys():
every_year_dict[key.date].append(frequency_dict[key])
return every_year_dict
def box_gender_pronoun_freq(freq_dict, my_pal, title, x="N/A"):
"""
    Takes in a frequency dictionary and exports its values as a box-and-whisker graph
:param freq_dict: dictionary of frequencies grouped up
:param my_pal: palette to be used
:param title: title of exported graph
:param x: name of x-vars
:return:
"""
plt.clf()
groups = []
val = []
for k, v in freq_dict.items():
temp = [k]*len(v)
groups.extend(temp)
val.extend(v)
df = pd.DataFrame({x: groups, 'Frequency': val})
df = df[[x, 'Frequency']]
sns.boxplot(x=df[x], y=df['Frequency'],
palette=my_pal).set_title("Relative Frequency of Female Pronouns to Total Pronouns")
plt.xticks(rotation=90)
# plt.show()
filepng = "visualizations/" + title + ".png"
filepdf = "visualizations/" + title + ".pdf"
plt.savefig(filepng, bbox_inches='tight')
plt.savefig(filepdf, bbox_inches='tight')
def bar_sub_obj_freq(she_freq_dict, he_freq_dict, title, x="N/A"):
"""
    Creates a bar graph given male/female subject/object frequencies. Meant to be run with data
sorted by 'freq_by_author_gender', 'freq_by_date', or 'freq_by_location'
:param she_freq_dict:
:param he_freq_dict:
:param title: name of the exported file
:param x: value of x axis
:return:
"""
fig, ax = plt.subplots()
plt.ylim(0, 1)
key = []
for k, v in she_freq_dict.items():
key.append(k)
m_freq = dict_to_list(he_freq_dict)
f_freq = dict_to_list(she_freq_dict)
index = np.arange(len(she_freq_dict.keys()))
bar_width = 0.35
opacity = 0.4
ax.bar(index, [1]*len(m_freq), bar_width, alpha=opacity, color='c', label="Male Object")
ax.bar(index, m_freq, bar_width, alpha=opacity, color='b', label='Male Subject')
ax.bar(index + bar_width, [1]*len(f_freq), bar_width, alpha=opacity, color='#DE8F05',
label="Female Object")
ax.bar(index + bar_width, f_freq, bar_width, alpha=opacity, color='r', label='Female Subject')
ax.set_xlabel(x)
ax.set_ylabel('Frequency')
ax.set_title('Relative Frequencies of Subject to Object Pronouns')
ax.set_xticks(index + bar_width / 2)
plt.xticks(fontsize=8, rotation=90)
ax.set_xticklabels(key)
ax.legend()
fig.tight_layout()
filepng = "visualizations/" + title + ".png"
filepdf = "visualizations/" + title + ".pdf"
plt.savefig(filepng, bbox_inches='tight')
plt.savefig(filepdf, bbox_inches='tight')
def run_pronoun_freq(corpus):
"""
    Runs the pronoun frequency analysis on all novels in a given
    corpus and outputs the data as graphs
:return:
"""
all_data = books_pronoun_freq(corpus)
gender = freq_by_author_gender(all_data)
box_gender_pronoun_freq(gender, my_pal={"Male Author": "b", "Female Author": "r"},
title="she_freq_by_author_gender_sample", x="Author Gender")
# date = freq_by_date(all_data)
# box_gender_pronoun_freq(date, my_pal="Greens", title="she_freq_by_date_sample", x="Years")
# location = freq_by_location(all_data)
# box_gender_pronoun_freq(location, my_pal="Blues", title="she_freq_by_location_sample",
# x="Location")
sub_v_ob = subject_vs_object_pronoun_freqs(corpus)
female_gender_sub_v_ob = get_mean(freq_by_author_gender(sub_v_ob[1]))
male_gender_sub_v_ob = get_mean(freq_by_author_gender(sub_v_ob[0]))
bar_sub_obj_freq(female_gender_sub_v_ob, male_gender_sub_v_ob, "obj_sub_by_auth_gender_sample",
"Author Gender")
'''
female_date_sub_v_ob = get_mean(freq_by_date(sub_v_ob[1]))
male_date_sub_v_ob = get_mean(freq_by_date(sub_v_ob[0]))
bar_sub_obj_freq(female_date_sub_v_ob, male_date_sub_v_ob, "obj_sub_by_year_sample",
"Years")
female_loc_sub_v_ob = get_mean(freq_by_location(sub_v_ob[1]))
male_loc_sub_v_ob = get_mean(freq_by_location(sub_v_ob[0]))
bar_sub_obj_freq(female_loc_sub_v_ob, male_loc_sub_v_ob, "obk_sub_by_location_sample",
"Location")
'''
def overall_mean(d):
'''
Returns the average of all the values in a dictionary
    :param d: dictionary with numbers as values
:return: float: average of all the values
>>> c = Corpus('test_corpus')
>>> freq_dict = books_pronoun_freq(c)
>>> overall_mean(freq_dict)
0.4712966240691306
'''
l = dict_to_list(d)
mean = np.mean(l)
return mean
def stat_analysis(corpus_name='sample_novels'):
corpus = Corpus(corpus_name)
tot_female_dict = books_pronoun_freq(corpus)
author_to_freq_dict = freq_by_author_gender(tot_female_dict)
    author_gender_pronoun_analysis = get_p_and_ttest_value(
        author_to_freq_dict['Male Author'], author_to_freq_dict['Female Author'])
print("values for gender pronoun stats: ", author_gender_pronoun_analysis)
sub_v_ob_tuple = subject_vs_object_pronoun_freqs(corpus)
sub_v_ob_male_dict = sub_v_ob_tuple[0]
sub_v_ob_male_list = dict_to_list(sub_v_ob_male_dict)
sub_v_ob_female_dict = sub_v_ob_tuple[1]
sub_v__ob_female_list = dict_to_list(sub_v_ob_female_dict)
author_gender_sub_v_ob_correlation = get_p_and_ttest_value(sub_v_ob_male_list, sub_v__ob_female_list)
print("values for subject vs object pronouns between male and female authors: ", author_gender_sub_v_ob_correlation)
#subject_pronouns_gender_comparison(Corpus('gutenberg'),'female')
def run_all_analyses():
'''
Runs analyses for:
Female and Male pronoun frequency for:
author gender, publication date, publication, publication location
Female and Male Subject Object frequency Comparison for:
author gender, publication date, publication, publication location
Prints results nicely
:return: None
'''
all_data = books_pronoun_freq(Corpus('gutenberg'))
gender = freq_by_author_gender(all_data)
date = freq_by_date(all_data)
location = freq_by_location(all_data)
print('Male/Female pronoun comparison: ')
print('By author gender: ')
print(get_mean(gender))
print('\n By date: ')
print(get_mean(date))
print('\n By location: ')
print(get_mean(location))
sub_v_ob = subject_vs_object_pronoun_freqs(Corpus('gutenberg'))
female_gender_sub_v_ob = freq_by_author_gender(sub_v_ob[1])
female_date_sub_v_ob = freq_by_date(sub_v_ob[1])
female_loc_sub_v_ob = freq_by_location(sub_v_ob[1])
male_gender_sub_v_ob = freq_by_author_gender(sub_v_ob[0])
male_date_sub_v_ob = freq_by_date(sub_v_ob[0])
male_loc_sub_v_ob = freq_by_location(sub_v_ob[0])
male_tot = dict_to_list(sub_v_ob[0])
female_tot = dict_to_list(sub_v_ob[1])
print('Subject/Object comparisons: ')
print('Male vs Female in the subject: ')
print('Male: ')
pprint.pprint(np.mean(male_tot))
print('Female: ')
pprint.pprint(np.mean(female_tot))
print('\n Female pronouns: ')
print('By author gender: ')
pprint.pprint(get_mean(female_gender_sub_v_ob))
print('By date: ')
pprint.pprint(get_mean(female_date_sub_v_ob))
print('By location: ')
pprint.pprint(get_mean(female_loc_sub_v_ob))
print('\n Male pronouns: ')
print('By author gender: ')
pprint.pprint(get_mean(male_gender_sub_v_ob))
print('By date:')
pprint.pprint(get_mean(male_date_sub_v_ob))
print('By location: ')
pprint.pprint(get_mean(male_loc_sub_v_ob))
sub_comp_gender = subject_pronouns_gender_comparison(Corpus('gutenberg'), 'female')
sub_comp_gender_list = dict_to_list(sub_comp_gender)
print('Overall comparative female freq:')
pprint.pprint(np.mean(sub_comp_gender_list))
print('By author gender:')
pprint.pprint(get_mean(freq_by_author_gender(sub_comp_gender)))
print('By date: ')
pprint.pprint(get_mean(freq_by_date(sub_comp_gender)))
print('By location: ')
pprint.pprint(get_mean(freq_by_location(sub_comp_gender)))
if __name__ == '__main__':
# from dh_testers.testRunner import main_test
# main_test()
# TODO: change 'sample_novels' to 'gutenberg' and graph titles from 'sample' to 'gutenberg'
# from dh_testers.testRunner import main_test
# main_test()
# print("mean relative female freq across corpus:")
# print(relative_frequency_overall(Corpus('sample_novels')))
| 37.956772
| 983
| 0.666844
|
f0cea6a86e078a5f98a3179859b0151a34cb4e6f
| 17,544
|
py
|
Python
|
tests/integration/test_server.py
|
skytotwo/asynction
|
e060d0e488b4adb67b8d0bc89b65d905dea9c841
|
[
"MIT"
] | 1
|
2022-03-29T12:37:43.000Z
|
2022-03-29T12:37:43.000Z
|
tests/integration/test_server.py
|
skytotwo/asynction
|
e060d0e488b4adb67b8d0bc89b65d905dea9c841
|
[
"MIT"
] | null | null | null |
tests/integration/test_server.py
|
skytotwo/asynction
|
e060d0e488b4adb67b8d0bc89b65d905dea9c841
|
[
"MIT"
] | null | null | null |
import base64
from enum import Enum
import pytest
import yaml
from faker import Faker
from flask import Flask
import asynction
from asynction.exceptions import BindingsValidationException
from asynction.exceptions import MessageAckValidationException
from asynction.exceptions import PayloadValidationException
from asynction.server import resolve_references
from tests.fixtures import FixturePaths
from tests.utils import AsynctionFactory
class FactoryFixture(Enum):
ASYNCTION_SOCKET_IO = "asynction_socketio_server_factory"
MOCK_ASYNCTION_SOCKET_IO = "mock_asynction_socketio_server_factory"
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_can_successfully_connect(
factory_fixture: FactoryFixture,
flask_app: Flask,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory()
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client
)
assert socketio_test_client.is_connected()
def test_client_emits_and_receives_message_successfully(
asynction_socketio_server_factory: AsynctionFactory,
flask_app: Flask,
faker: Faker,
fixture_paths: FixturePaths,
):
socketio_server = asynction_socketio_server_factory(spec_path=fixture_paths.echo)
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client
)
socketio_test_client.get_received()
message_to_echo = faker.pystr()
socketio_test_client.emit("echo", message_to_echo)
received = socketio_test_client.get_received()
assert len(received) == 1
received_args = received[0]["args"]
assert len(received_args) == 1
assert received_args[0] == message_to_echo
@pytest.mark.xfail(reason="https://github.com/dedoussis/asynction/issues/205")
def test_client_emitting_tuple_vs_array(
asynction_socketio_server_factory: AsynctionFactory,
flask_app: Flask,
fixture_paths: FixturePaths,
):
socketio_server = asynction_socketio_server_factory(
spec_path=fixture_paths.array_vs_tuple
)
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client
)
socketio_test_client.get_received()
# array validation
socketio_test_client.emit("array", [1, 2, 3])
received = socketio_test_client.get_received()
assert len(received) == 1
received_args = received[0]["args"]
assert len(received_args) == 1
assert received_args[0] == [1, 2, 3]
with pytest.raises(PayloadValidationException):
socketio_test_client.emit("array", 1, 2, 3)
# tuple validation
socketio_test_client.emit("tuple", 1, "foo")
received = socketio_test_client.get_received()
assert len(received) == 1
received_args = received[0]["args"]
print(received[0])
assert len(received_args) == 2
assert received_args[0] == 1
assert received_args[1] == "foo"
with pytest.raises(PayloadValidationException):
socketio_test_client.emit("tuple", [1, "foo"])
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_emitting_invalid_message(
factory_fixture: FactoryFixture,
flask_app: Flask,
faker: Faker,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(spec_path=fixture_paths.echo)
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client
)
socketio_test_client.get_received()
with pytest.raises(PayloadValidationException):
socketio_test_client.emit("echo", faker.pyint())
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_server_emitting_invalid_message(
factory_fixture: FactoryFixture,
fixture_paths: FixturePaths,
faker: Faker,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(spec_path=fixture_paths.echo)
with pytest.raises(PayloadValidationException):
socketio_server.emit("echo", faker.pyint())
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connecting_with_valid_bindings(
factory_fixture: FactoryFixture,
fixture_paths: FixturePaths,
flask_app: Flask,
faker: Faker,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(spec_path=fixture_paths.echo)
flask_test_client = flask_app.test_client()
restricted_namespace = "/admin"
socketio_test_client = socketio_server.test_client(
flask_app,
namespace=restricted_namespace,
query_string=f"?token={faker.pystr()}",
flask_test_client=flask_test_client,
)
socketio_test_client.get_received(restricted_namespace)
assert True
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connecting_with_invalid_bindings(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(spec_path=fixture_paths.echo)
flask_test_client = flask_app.test_client()
with pytest.raises(BindingsValidationException):
socketio_server.test_client(
flask_app,
namespace="/admin",
query_string="",
flask_test_client=flask_test_client,
)
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_can_connect_to_server_that_uses_server_name_and_render_docs(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.simple_with_servers, server_name="production"
)
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app,
flask_test_client=flask_test_client,
)
socketio_test_client.get_received()
assert True
resp = flask_test_client.get("/api/docs")
assert resp.status_code == 200
assert resp.mimetype == "text/html"
assert "AsyncApiStandalone.hydrate" in resp.data.decode()
def test_client_emits_invalid_msg_and_server_emits_back_via_validation_error_handler(
asynction_socketio_server_factory: AsynctionFactory,
flask_app: Flask,
fixture_paths: FixturePaths,
faker: Faker,
):
socketio_server = asynction_socketio_server_factory(
spec_path=fixture_paths.echo,
)
flask_test_client = flask_app.test_client()
namespace_with_error_feedback = "/echo_with_error_feedback"
socketio_test_client = socketio_server.test_client(
flask_app,
namespace=namespace_with_error_feedback,
flask_test_client=flask_test_client,
)
socketio_test_client.get_received(namespace_with_error_feedback)
message_to_echo = faker.pyint()
socketio_test_client.emit(
"echo", message_to_echo, namespace=namespace_with_error_feedback
)
received = socketio_test_client.get_received(namespace_with_error_feedback)
assert len(received) == 1
assert received[0]["name"] == "echo errors"
def test_client_emits_valid_msg_and_server_returns_invalid_ack(
asynction_socketio_server_factory: AsynctionFactory,
flask_app: Flask,
fixture_paths: FixturePaths,
faker: Faker,
):
socketio_server = asynction_socketio_server_factory(
spec_path=fixture_paths.echo,
)
flask_test_client = flask_app.test_client()
socketio_test_client = socketio_server.test_client(
flask_app,
flask_test_client=flask_test_client,
)
def cb(ack_data: bool):
assert isinstance(ack_data, bool)
with pytest.raises(MessageAckValidationException):
socketio_test_client.emit("echo with invalid ack", faker.pystr(), callback=cb)
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_docs_rendered_html_endpoint(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
_ = server_factory(
spec_path=fixture_paths.simple,
)
flask_test_client = flask_app.test_client()
resp = flask_test_client.get("/docs")
assert resp.status_code == 200
assert resp.mimetype == "text/html"
assert "AsyncApiStandalone.hydrate" in resp.data.decode()
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_docs_raw_specification_endpoint(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
_ = server_factory(
spec_path=fixture_paths.simple,
)
flask_test_client = flask_app.test_client()
resp = flask_test_client.get("/docs/asyncapi.json")
with fixture_paths.simple.open() as f:
assert resolve_references(yaml.safe_load(f.read())) == resp.json
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_fails_to_connect_with_no_auth(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.security, server_name="test"
)
flask_test_client = flask_app.test_client()
with pytest.raises(ConnectionRefusedError):
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client
)
assert socketio_test_client.is_connected() is False
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connects_with_http_basic_auth(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.security, server_name="test"
)
flask_test_client = flask_app.test_client()
basic_auth = base64.b64encode("username:password".encode()).decode()
headers = {"Authorization": f"basic {basic_auth}"}
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client, headers=headers
)
assert socketio_test_client.is_connected() is True
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connects_with_http_bearer_auth(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.security, server_name="test"
)
flask_test_client = flask_app.test_client()
basic_auth = base64.b64encode("username:password".encode()).decode()
headers = {"Authorization": f"bearer {basic_auth}"}
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client, headers=headers
)
assert socketio_test_client.is_connected() is True
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connects_with_http_api_key_auth(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.security, server_name="test"
)
flask_test_client = flask_app.test_client()
basic_auth = base64.b64encode("username:password".encode()).decode()
query = f"api_key={basic_auth}"
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client, query_string=query
)
assert socketio_test_client.is_connected() is True
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connects_with_oauth2(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.security_oauth2, server_name="test"
)
flask_test_client = flask_app.test_client()
basic_auth = base64.b64encode("username:password".encode()).decode()
headers = {"Authorization": f"bearer {basic_auth}"}
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client, headers=headers
)
assert socketio_test_client.is_connected() is True
@pytest.mark.parametrize(
argnames="factory_fixture",
argvalues=[
FactoryFixture.ASYNCTION_SOCKET_IO,
FactoryFixture.MOCK_ASYNCTION_SOCKET_IO,
],
ids=["server", "mock_server"],
)
def test_client_connects_with_namespace_security(
factory_fixture: FactoryFixture,
flask_app: Flask,
fixture_paths: FixturePaths,
request: pytest.FixtureRequest,
):
server_factory: AsynctionFactory = request.getfixturevalue(factory_fixture.value)
socketio_server = server_factory(
spec_path=fixture_paths.namespace_security, server_name="test"
)
flask_test_client = flask_app.test_client()
# connect to default namespace which is secured with basic auth
basic_auth = base64.b64encode("username:password".encode()).decode()
headers = {"Authorization": f"basic {basic_auth}"}
socketio_test_client = socketio_server.test_client(
flask_app, flask_test_client=flask_test_client, headers=headers
)
assert socketio_test_client.is_connected() is True
socketio_test_client.disconnect()
secure_namespace = "/bearer_secured"
# now try to use basic auth on the bearer_secured namespace
# this should fail because the namespace security should have
# overwritten the server security scheme
with pytest.raises(asynction.SecurityException):
socketio_server.test_client(
flask_app,
namespace=secure_namespace,
flask_test_client=flask_test_client,
headers=headers,
)
# now try to connect to the bearer_secured namespace with bearer auth
headers = {"Authorization": f"bearer {basic_auth}"}
socketio_test_client = socketio_server.test_client(
flask_app,
namespace=secure_namespace,
flask_test_client=flask_test_client,
headers=headers,
)
assert socketio_test_client.is_connected(secure_namespace) is True
| 31.272727
| 86
| 0.740196
|
a9a3e07dbda92fedbdab62b5ceff8fd46771360a
| 261
|
py
|
Python
|
bookmanager02/book02/views.py
|
lichenguang1205/django_base
|
29a3144369fe38dcc842fab85839e767cc75015a
|
[
"MIT"
] | null | null | null |
bookmanager02/book02/views.py
|
lichenguang1205/django_base
|
29a3144369fe38dcc842fab85839e767cc75015a
|
[
"MIT"
] | null | null | null |
bookmanager02/book02/views.py
|
lichenguang1205/django_base
|
29a3144369fe38dcc842fab85839e767cc75015a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from django.http import HttpRequest
from django.http import HttpResponse
def index(request):
context = {
        'name': '大聪明'  # Chinese nickname, roughly "smart aleck"
}
return render(request, 'book02/index.html', context)
| 17.4
| 56
| 0.701149
|
5ccf93a09331a26c6827b55c3b0f144c2bc652b5
| 3,075
|
py
|
Python
|
tests/test_traverse_invoke.py
|
danlkv/traverse-invoke
|
a65236c19852229ad422c707759466750e7c6139
|
[
"MIT"
] | null | null | null |
tests/test_traverse_invoke.py
|
danlkv/traverse-invoke
|
a65236c19852229ad422c707759466750e7c6139
|
[
"MIT"
] | null | null | null |
tests/test_traverse_invoke.py
|
danlkv/traverse-invoke
|
a65236c19852229ad422c707759466750e7c6139
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Tests for `traverse_invoke` package."""
import pytest
from click.testing import CliRunner
import traverse_invoke
from traverse_invoke import cli
import pprint as pp
pprint = pp.pprint
entry = traverse_invoke.entry_traverse
traverse_invoke.core.enable_logging()
invocations = []
def _genfunc(fname):
def x(**args):
invocations.append((fname, args))
print(f">>In {fname} have {pp.pformat(args)}")
return x
def test_liblike():
_modules = {
'basemod':_genfunc('basemod'),
'installed':{
'testfoo1':_genfunc('testfoo1'),
'testfoo2':_genfunc('testfoo2'),
},
'lib':{
'sys':_genfunc('sys')
}
}
funcs = {
'log_config':_genfunc('log_config'),
'entry':{
'pre':_genfunc('pre'),
'modules':_modules,
'post':_genfunc('post')
},
'infolib':_genfunc('infolib')
}
modspec = 'log_config.entry.pre.modules.basemod.{}.post'
path1 = modspec.format('installed.testfoo1')
path2 = modspec.format('installed.testfoo2')
libspec = 'log_config.entry.pre.modules.basemod.lib.{}.post'
syspath = libspec.format('sys')
def gen_config():
config = {
'everywhere':'happiness',
'modules':{
'clash':'post-spec',
},
'testfoo1':{
'foospec':12,
'clash':'foo-spec'
# in Descent case, this will overwrite var in modules
# and we'll have it in post
},
'log_config':{
'param': 'log nicely, please',
'clash': 'log-spec'
}
}
return config
entry(gen_config(), syspath.split('.'), funcs)
fnames = [i[0] for i in invocations]
assert fnames == ['log_config', 'pre', 'basemod', 'sys', 'post']
invocations.clear()
entry(gen_config(), path1.split('.'), funcs)
param = {k:v for k,v in invocations}
assert param['testfoo1']['clash']=='foo-spec'
assert param['post']['clash']=='log-spec'
invocations.clear()
path1 += '.modules.installed.testfoo2.post.log_config.entry.pre'
entry(gen_config(), path1.split('.'), funcs)
def test_silly():
funcs = {
'print':lambda **x: print(x),
'foo':lambda **x: print('>>foo',x),
'bar':lambda **x: print('>>Bar',x)
}
path = ['print', 'foo', 'ba']
config = {
'print':{
'foo':{
'bar':{
'x':1,
'y':2
},
'default':7
}
}
}
entry(config, path, funcs)
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ['sys'])
assert result.exit_code == 0
assert 'sys' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 25.204918
| 70
| 0.533008
|
a6e31d59ac1081460d081b71f97bee5e7355f2c2
| 527
|
py
|
Python
|
Chapter04_code/ch04_r12_abstract/models/library_book.py
|
PacktPublishing/Odoo-Development-Cookbook
|
5553110c0bc352c4541f11904e236cad3c443b8b
|
[
"MIT"
] | 55
|
2016-05-23T16:05:50.000Z
|
2021-07-19T00:16:46.000Z
|
Chapter04_code/ch04_r12_abstract/models/library_book.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 1
|
2016-12-09T02:14:21.000Z
|
2018-07-02T09:02:20.000Z
|
Chapter04_code/ch04_r12_abstract/models/library_book.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 52
|
2016-06-01T20:03:59.000Z
|
2020-10-31T23:58:25.000Z
|
# -*- coding: utf-8 -*-
from openerp import models, fields
class BaseArchive(models.AbstractModel):
_name = 'base.archive'
active = fields.Boolean(default=True)
def do_archive(self):
for record in self:
record.active = not record.active
class LibraryBook(models.Model):
_name = 'library.book'
_inherit = ['base.archive']
name = fields.Char('Title', required=True)
date_release = fields.Date('Release Date')
author_ids = fields.Many2many('res.partner', string='Authors')
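# --- Editor's note: illustrative sketch (assumption, not part of the original module). ---
# Because library.book inherits base.archive, any recordset gains do_archive(), e.g.:
#
#     books = env['library.book'].search([('name', 'like', 'Cookbook')])
#     books.do_archive()   # toggles the 'active' flag on every selected book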
| 26.35
| 66
| 0.669829
|
43e44f104a5fcb22a807800cc9c481f33bff43f4
| 3,044
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_pragma727.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma727.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma727.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=21
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=19
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=20
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma727.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
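# --- Editor's note (assumption, added for clarity): fold_func=bitstring above converts each
# sampled measurement into a string such as '0000' or '1010', so `frequencies` maps each
# observed bit pattern to how often it occurred across the 2820 repetitions.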
| 34.988506
| 92
| 0.653088
|
b500d9b5caf47f652935a14109b5b1f35aef01ed
| 21,604
|
py
|
Python
|
deepchem/data/data_loader.py
|
zealseeker/deepchem
|
a44decc033c727e2da681b1461c3d57fdd53aca0
|
[
"MIT"
] | 1
|
2020-05-17T10:26:52.000Z
|
2020-05-17T10:26:52.000Z
|
deepchem/data/data_loader.py
|
xli15/deepchem
|
51f426b3b6acb876703e49d24ef4f16cb0b44b43
|
[
"MIT"
] | null | null | null |
deepchem/data/data_loader.py
|
xli15/deepchem
|
51f426b3b6acb876703e49d24ef4f16cb0b44b43
|
[
"MIT"
] | null | null | null |
"""
Process an input dataset into a format suitable for machine learning.
"""
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
import time
import sys
import logging
import warnings
from deepchem.utils.save import load_csv_files
from deepchem.utils.save import load_sdf_files
from deepchem.utils.genomics import encode_fasta_sequence
from deepchem.feat import UserDefinedFeaturizer
from deepchem.data import DiskDataset, NumpyDataset, ImageDataset
import zipfile
from PIL import Image
logger = logging.getLogger(__name__)
def _convert_df_to_numpy(df, tasks):
"""Transforms a dataframe containing deepchem input into numpy arrays
This is a private helper method intended to help parse labels and
weights arrays from a pandas dataframe. Here `df` is a dataframe
which has columns for each task in `tasks`. These labels are
extracted into a labels array `y`. Weights `w` are initialized to
all ones, but weights for any missing labels are set to 0.
Parameters
----------
df: pd.DataFrame
Pandas dataframe with columns for all tasks
tasks: list
List of tasks
"""
n_samples = df.shape[0]
n_tasks = len(tasks)
time1 = time.time()
y = np.hstack(
[np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
time2 = time.time()
w = np.ones((n_samples, n_tasks))
missing = np.zeros_like(y).astype(int)
feature_shape = None
for ind in range(n_samples):
for task in range(n_tasks):
if y[ind, task] == "":
missing[ind, task] = 1
# ids = df[id_field].values
# Set missing data to have weight zero
for ind in range(n_samples):
for task in range(n_tasks):
if missing[ind, task]:
y[ind, task] = 0.
w[ind, task] = 0.
return y.astype(float), w.astype(float)
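# --- Editor's note: illustrative sketch (assumption, not part of the original module). ---
# For a two-task dataframe with one missing label, the helper behaves as follows:
#
#     df = pd.DataFrame({'task1': [1.0, 0.0], 'task2': [1.0, '']})
#     y, w = _convert_df_to_numpy(df, ['task1', 'task2'])
#     # y -> [[1., 1.], [0., 0.]]   (the missing label is zero-filled)
#     # w -> [[1., 1.], [1., 0.]]   (weight 0 marks the missing entry)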
def _featurize_smiles_df(df, featurizer, field, log_every_n=1000):
"""Featurize individual compounds in dataframe.
Private helper that given a featurizer that operates on individual
chemical compounds or macromolecules, compute & add features for
that compound to the features dataframe
Parameters
----------
df: pd.DataFrame
DataFrame that holds SMILES strings
featurizer: Featurizer
A featurizer object
field: str
The name of a column in `df` that holds SMILES strings
log_every_n: int, optional (default 1000)
Emit a logging statement every `log_every_n` rows.
"""
sample_elems = df[field].tolist()
features = []
from rdkit import Chem
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
for ind, elem in enumerate(sample_elems):
mol = Chem.MolFromSmiles(elem)
# TODO (ytz) this is a bandage solution to reorder the atoms
# so that they're always in the same canonical order.
# Presumably this should be correctly implemented in the
# future for graph mols.
if mol:
new_order = rdmolfiles.CanonicalRankAtoms(mol)
mol = rdmolops.RenumberAtoms(mol, new_order)
if ind % log_every_n == 0:
logger.info("Featurizing sample %d" % ind)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features), axis=1), valid_inds
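# Illustrative sketch (not part of the original module) of the canonical
# reordering performed above, applied to a single hypothetical SMILES string:
#
#   from rdkit import Chem
#   from rdkit.Chem import rdmolfiles, rdmolops
#   mol = Chem.MolFromSmiles("OCC")               # ethanol
#   new_order = rdmolfiles.CanonicalRankAtoms(mol)
#   mol = rdmolops.RenumberAtoms(mol, new_order)  # same molecule, canonical atom order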
def _get_user_specified_features(df, featurizer):
"""Extract and merge user specified features.
Private helper methods that merges features included in dataset
provided by user into final features dataframe
Three types of featurization here:
1) Molecule featurization
-) Smiles string featurization
-) Rdkit MOL featurization
2) Complex featurization
-) PDB files for interacting molecules.
3) User specified featurizations.
Parameters
----------
df: pd.DataFrame
DataFrame that holds SMILES strings
featurizer: Featurizer
A featurizer object
"""
time1 = time.time()
df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(
pd.to_numeric)
X_shard = df[featurizer.feature_fields].to_numpy()
time2 = time.time()
logger.info(
"TIMING: user specified processing took %0.3f s" % (time2 - time1))
return X_shard
def _featurize_mol_df(df, featurizer, field, log_every_n=1000):
"""Featurize individual compounds in dataframe.
Used when processing .sdf files, so the 3-D structure should be
preserved. We use the rdkit "mol" object created from .sdf
instead of smiles string. Some featurizers such as
CoulombMatrix also require a 3-D structure. Featurizing from
  .sdf is currently the only way to perform CM featurization.
Parameters
----------
df: Pandas Dataframe
Should be created by dc.utils.save.load_sdf_files.
featurizer: dc.feat.MolecularFeaturizer
Featurizer for molecules.
log_every_n: int, optional
Controls how often logging statements are emitted.
"""
sample_elems = df[field].tolist()
features = []
for ind, mol in enumerate(sample_elems):
if ind % log_every_n == 0:
logger.info("Featurizing sample %d" % ind)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
"""Handles loading/featurizing of data from disk.
The main use of `DataLoader` and its child classes is to make it
  easier to load large datasets into `Dataset` objects.
`DataLoader` is an abstract superclass that provides a
general framework for loading data into DeepChem. This class should
never be instantiated directly. To load your own type of data, make
a subclass of `DataLoader` and provide your own implementation for
the `create_dataset()` method.
To construct a `Dataset` from input data, first instantiate a
concrete data loader (that is, an object which is an instance of a
subclass of `DataLoader`) with a given `Featurizer` object. Then
call the data loader's `create_dataset()` method on a list of input
files that hold the source data to process. Note that each subclass
of `DataLoader` is specialized to handle one type of input data so
you will have to pick the loader class suitable for your input data
type.
Note that it isn't necessary to use a data loader to process input
data. You can directly use `Featurizer` objects to featurize
provided input into numpy arrays, but note that this calculation
will be performed in memory, so you will have to write generators
that walk the source files and write featurized data to disk
yourself. `DataLoader` and its subclasses make this process easier
for you by performing this work under the hood.
"""
def __init__(self, tasks, id_field=None, featurizer=None, log_every_n=1000):
"""Construct a DataLoader object.
This constructor is provided as a template mainly. You
shouldn't ever call this constructor directly as a user.
Parameters
----------
tasks: list[str]
List of task names
id_field: str, optional
Name of field that holds sample identifier. Note that the
meaning of "field" depends on the input data type and can have a
different meaning in different subclasses. For example, a CSV
file could have a field as a column, and an SDF file could have
a field as molecular property.
featurizer: dc.feat.Featurizer, optional
Featurizer to use to process data
log_every_n: int, optional
Writes a logging statement this often.
"""
if self.__class__ is DataLoader:
raise ValueError(
"DataLoader should never be instantiated directly. Use a subclass instead."
)
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.tasks = tasks
self.id_field = id_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def featurize(self, input_files, data_dir=None, shard_size=8192):
"""Featurize provided files and write to specified location.
DEPRECATED: This method is now a wrapper for `create_dataset()`
and calls that method under the hood.
For large datasets, automatically shards into smaller chunks
for convenience. This implementation assumes that the helper
methods `_get_shards` and `_featurize_shard` are implemented and
that each shard returned by `_get_shards` is a pandas dataframe.
You may choose to reuse or override this method in your subclass
implementations.
Parameters
----------
input_files: list
List of input filenames.
data_dir: str, optional
Directory to store featurized dataset.
shard_size: int, optional
Number of examples stored in each shard.
Returns
-------
A `Dataset` object containing a featurized representation of data
from `input_files`.
"""
warnings.warn(
"featurize() is deprecated and has been renamed to create_dataset(). featurize() will be removed in DeepChem 3.0",
FutureWarning)
return self.create_dataset(input_files, data_dir, shard_size)
def create_dataset(self, input_files, data_dir=None, shard_size=8192):
"""Creates and returns a `Dataset` object by featurizing provided files.
Reads in `input_files` and uses `self.featurizer` to featurize the
data in these input files. For large files, automatically shards
into smaller chunks of `shard_size` datapoints for convenience.
Returns a `Dataset` object that contains the featurized dataset.
This implementation assumes that the helper methods `_get_shards`
and `_featurize_shard` are implemented and that each shard
returned by `_get_shards` is a pandas dataframe. You may choose
to reuse or override this method in your subclass implementations.
Parameters
----------
input_files: list
List of input filenames.
data_dir: str, optional
Directory to store featurized dataset.
shard_size: int, optional
Number of examples stored in each shard.
Returns
-------
A `Dataset` object containing a featurized representation of data
from `input_files`.
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %d" % shard_size)
if not isinstance(input_files, list):
input_files = [input_files]
def shard_generator():
for shard_num, shard in enumerate(
self._get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self._featurize_shard(shard)
ids = shard[self.id_field].values
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results iff they exist.
y, w = _convert_df_to_numpy(shard, self.tasks)
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it
# makes no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)
def _get_shards(self, input_files, shard_size):
"""Stub for children classes.
Should implement a generator that walks over the source data in
`input_files` and returns a "shard" at a time. Here a shard is a
chunk of input data that can reasonably be handled in memory. For
example, this may be a set of rows from a CSV file or a set of
molecules from a SDF file. To re-use the
`DataLoader.create_dataset()` method, each shard must be a pandas
dataframe.
If you chose to override `create_dataset()` directly you don't
need to override this helper method.
Parameters
----------
input_files: list
List of input filenames.
shard_size: int, optional
Number of examples stored in each shard.
"""
raise NotImplementedError
def _featurize_shard(self, shard):
"""Featurizes a shard of input data.
Recall a shard is a chunk of input data that can reasonably be
handled in memory. For example, this may be a set of rows from a
CSV file or a set of molecules from a SDF file. Featurize this
shard in memory and return the results.
"""
raise NotImplementedError
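# Illustrative usage sketch of the workflow described in the DataLoader
# docstring (assumptions: the CSV file name, task name and featurizer choice
# are hypothetical; CSVLoader is defined below in this module):
#
#   import deepchem as dc
#   featurizer = dc.feat.CircularFingerprint(size=1024)
#   loader = CSVLoader(tasks=["activity"], smiles_field="smiles",
#                      featurizer=featurizer)
#   dataset = loader.create_dataset("assay_data.csv", shard_size=8192)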
class CSVLoader(DataLoader):
"""
  Creates `Dataset` objects from input CSV files.
This class provides conveniences to load data from CSV files.
It's possible to directly featurize data from CSV files using
pandas, but this class may prove useful if you're processing
large CSV files that you don't want to manipulate directly in
memory.
"""
def __init__(self,
tasks,
smiles_field=None,
id_field=None,
featurizer=None,
log_every_n=1000):
"""Initializes CSVLoader.
Parameters
----------
tasks: list[str]
List of task names
smiles_field: str, optional
Name of field that holds smiles string
id_field: str, optional
Name of field that holds sample identifier
featurizer: dc.feat.Featurizer, optional
Featurizer to use to process data
log_every_n: int, optional
Writes a logging statement this often.
"""
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.tasks = tasks
self.smiles_field = smiles_field
if id_field is None:
self.id_field = smiles_field
else:
self.id_field = id_field
#self.mol_field = mol_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def _get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size)
def _featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
return _featurize_smiles_df(
shard,
self.featurizer,
field=self.smiles_field,
log_every_n=self.log_every_n)
class UserCSVLoader(CSVLoader):
"""
Handles loading of CSV files with user-defined featurizers.
"""
def _get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size)
def _featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
assert isinstance(self.featurizer, UserDefinedFeaturizer)
X = _get_user_specified_features(shard, self.featurizer)
return (X, np.ones(len(X), dtype=bool))
class SDFLoader(DataLoader):
"""
Creates `Dataset` from SDF input files.
This class provides conveniences to load data from SDF files.
"""
def __init__(self, tasks, sanitize=False, featurizer=None, log_every_n=1000):
"""Initialize SDF Loader
Parameters
----------
tasks: list[str]
List of tasknames. These will be loaded from the SDF file.
sanitize: bool, optional
Whether to sanitize molecules.
featurizer: dc.feat.Featurizer, optional
Featurizer to use to process data
log_every_n: int, optional
Writes a logging statement this often.
"""
self.featurizer = featurizer
self.sanitize = sanitize
self.tasks = tasks
# The field in which dc.utils.save.load_sdf_files stores
# RDKit mol objects
self.mol_field = "mol"
# The field in which load_sdf_files return value stores
# smiles
self.id_field = "smiles"
self.log_every_n = log_every_n
def _get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_sdf_files(input_files, self.sanitize, tasks=self.tasks)
def _featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
logger.info("Currently featurizing feature_type: %s" %
self.featurizer.__class__.__name__)
return _featurize_mol_df(
shard,
self.featurizer,
field=self.mol_field,
log_every_n=self.log_every_n)
class FASTALoader(DataLoader):
"""Handles loading of FASTA files.
FASTA files are commonly used to hold sequence data. This
  class provides convenience methods to load FASTA data and
one-hot encode the genomic sequences for use in downstream
learning tasks.
"""
def __init__(self):
"""Initialize loader."""
pass
def create_dataset(self, input_files, data_dir=None, shard_size=None):
"""Creates a `Dataset` from input FASTA files.
At present, FASTA support is limited and only allows for one-hot
featurization, and doesn't allow for sharding.
Parameters
----------
input_files: list
List of fasta files.
data_dir: str, optional
Name of directory where featurized data is stored.
shard_size: int, optional
For now, this argument is ignored and each FASTA file gets its
own shard.
Returns
-------
A `Dataset` object containing a featurized representation of data
from `input_files`.
"""
if not isinstance(input_files, list):
input_files = [input_files]
def shard_generator():
for input_file in input_files:
X = encode_fasta_sequence(input_file)
ids = np.ones(len(X))
# (X, y, w, ids)
yield X, None, None, ids
return DiskDataset.create_dataset(shard_generator(), data_dir)
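# Illustrative usage sketch (assumption: the FASTA file name is hypothetical):
#
#   loader = FASTALoader()
#   dataset = loader.create_dataset("sequences.fasta")
#   # dataset.X holds the one-hot encoded sequences, one shard per input file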
class ImageLoader(DataLoader):
"""Handles loading of image files.
This class allows for loading of images in various formats.
For user convenience, also accepts zip-files and directories
of images and uses some limited intelligence to attempt to
traverse subdirectories which contain images.
"""
def __init__(self, tasks=None):
"""Initialize image loader.
At present, custom image featurizers aren't supported by this
loader class.
Parameters
----------
tasks: list[str]
List of task names for image labels.
"""
if tasks is None:
tasks = []
self.tasks = tasks
def create_dataset(self,
input_files,
labels=None,
weights=None,
in_memory=False):
"""Creates and returns a `Dataset` object by featurizing provided image files and labels/weights.
Parameters
----------
input_files: list
Each file in this list should either be of a supported
image format (.png, .tif only for now) or of a compressed
folder of image files (only .zip for now).
labels: optional
If provided, a numpy ndarray of image labels
weights: optional
If provided, a numpy ndarray of image weights
in_memory: bool
If true, return in-memory NumpyDataset. Else return ImageDataset.
Returns
-------
A `Dataset` object containing a featurized representation of data
from `input_files`, `labels`, and `weights`.
"""
if not isinstance(input_files, list):
input_files = [input_files]
image_files = []
# Sometimes zip files contain directories within. Traverse directories
while len(input_files) > 0:
remainder = []
for input_file in input_files:
filename, extension = os.path.splitext(input_file)
extension = extension.lower()
# TODO(rbharath): Add support for more extensions
if os.path.isdir(input_file):
dirfiles = [
os.path.join(input_file, subfile)
for subfile in os.listdir(input_file)
]
remainder += dirfiles
elif extension == ".zip":
zip_dir = tempfile.mkdtemp()
zip_ref = zipfile.ZipFile(input_file, 'r')
zip_ref.extractall(path=zip_dir)
zip_ref.close()
zip_files = [
os.path.join(zip_dir, name) for name in zip_ref.namelist()
]
for zip_file in zip_files:
_, extension = os.path.splitext(zip_file)
extension = extension.lower()
if extension in [".png", ".tif"]:
image_files.append(zip_file)
elif extension in [".png", ".tif"]:
image_files.append(input_file)
else:
raise ValueError("Unsupported file format")
input_files = remainder
if in_memory:
return NumpyDataset(
self.load_img(image_files), y=labels, w=weights, ids=image_files)
else:
return ImageDataset(image_files, y=labels, w=weights, ids=image_files)
@staticmethod
def load_img(image_files):
images = []
for image_file in image_files:
_, extension = os.path.splitext(image_file)
extension = extension.lower()
if extension == ".png":
image = np.array(Image.open(image_file))
images.append(image)
elif extension == ".tif":
im = Image.open(image_file)
imarray = np.array(im)
images.append(imarray)
else:
raise ValueError("Unsupported image filetype for %s" % image_file)
return np.array(images)
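# Illustrative usage sketch (assumptions: the image file names and labels are
# hypothetical):
#
#   loader = ImageLoader(tasks=["label"])
#   dataset = loader.create_dataset(["img_0.png", "img_1.png"],
#                                   labels=np.array([0, 1]), in_memory=False)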
| 33.65109
| 122
| 0.68853
|
9db11e80ef1f9f5d10abc289680335fb825638ce
| 8,367
|
py
|
Python
|
test/functional/feature_proxy.py
|
Flight-myPPL/Flight
|
9537d128ffcbc886a8c502131b17047067d70386
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
Flight-myPPL/Flight
|
9537d128ffcbc886a8c502131b17047067d70386
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
Flight-myPPL/Flight
|
9537d128ffcbc886a8c502131b17047067d70386
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Flightd with different proxy configuration.
Test plan:
- Start Flightd nodes with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on Flightd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create Flightds that connect to them
- Manipulate the Flightd nodes using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
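    # Illustrative expansion of the argument templates above (port value is
    # hypothetical): with conf1.addr == ('127.0.0.1', 15000),
    # '-proxy=%s:%i' % (self.conf1.addr) becomes '-proxy=127.0.0.1:15000';
    # the IPv6 variant wraps the host in brackets, e.g. '-proxy=[::1]:17000'.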
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9456", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9456)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9456", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9456)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.420792
| 121
| 0.625672
|
6026b2c67c418ab803401e83bb12390c10e9bda5
| 442
|
py
|
Python
|
sources/stats/wavs_duration.py
|
ciro97sa/LessonAble_Speech_Dataset_Generator
|
bf85a4bcb13f854b2ed22082c61c3cbf62becf03
|
[
"MIT"
] | null | null | null |
sources/stats/wavs_duration.py
|
ciro97sa/LessonAble_Speech_Dataset_Generator
|
bf85a4bcb13f854b2ed22082c61c3cbf62becf03
|
[
"MIT"
] | null | null | null |
sources/stats/wavs_duration.py
|
ciro97sa/LessonAble_Speech_Dataset_Generator
|
bf85a4bcb13f854b2ed22082c61c3cbf62becf03
|
[
"MIT"
] | null | null | null |
import os
from pydub import AudioSegment
def wavsDuration(folderPath) -> float:
total_duration = 0
for file in os.listdir(folderPath):
if file.endswith('.wav'):
current_duration = len(AudioSegment.from_wav(folderPath + '/' + file))
total_duration += current_duration
total_duration = total_duration / 1000 # to seconds
total_duration = total_duration / 60 # to minutes
return total_duration
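# Example usage (hypothetical folder name):
#   print(wavsDuration('recordings'))  # total length of all .wav files, in minutes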
| 34
| 82
| 0.692308
|
0fb21940f9c4b53e03157de12bbd0b17e356b8bd
| 395
|
py
|
Python
|
Python-3.5/printing_out.py
|
GenericError/Personal-Interest-Project
|
9307c7c815fc5c59f30bacccd3d03a839757c904
|
[
"MIT"
] | null | null | null |
Python-3.5/printing_out.py
|
GenericError/Personal-Interest-Project
|
9307c7c815fc5c59f30bacccd3d03a839757c904
|
[
"MIT"
] | null | null | null |
Python-3.5/printing_out.py
|
GenericError/Personal-Interest-Project
|
9307c7c815fc5c59f30bacccd3d03a839757c904
|
[
"MIT"
] | null | null | null |
# Python 3.5 Printing to the command line
print("Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
print("Donec purus ligula, gravida nec dui sit amet, pretium iaculis erat.")
print("Sed porta turpis sit amet velit ornare sollicitudin.")
print("Curabitur mollis elit orci, eu eleifend libero sodales vestibulum.")
print("Nam urna mi, consectetur eu purus et, imperdiet luctus lacus.")
| 49.375
| 76
| 0.777215
|
c9f6841022e5331a832b055cbdfc8a22076bf01e
| 3,414
|
py
|
Python
|
growlee/src/GrowleeConnection.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | 2
|
2020-09-02T18:25:39.000Z
|
2020-09-02T18:39:07.000Z
|
growlee/src/GrowleeConnection.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | null | null | null |
growlee/src/GrowleeConnection.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | 4
|
2015-02-15T16:58:18.000Z
|
2016-07-09T11:09:05.000Z
|
# -*- coding: utf-8 -*-
from Components.config import config
from Tools import Notifications
from Screens.MessageBox import MessageBox
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from . import NOTIFICATIONID
def emergencyDisable(*args, **kwargs):
if args:
try: args[0].printTraceback()
except Exception: pass
global growleeConnection
if growleeConnection:
growleeConnection.stop()
if hasattr(Notifications, 'notificationQueue'):
addedList = Notifications.notificationQueue.addedCB
else:
addedList = Notifications.notificationAdded
if gotNotification in addedList:
addedList.remove(gotNotification)
Notifications.AddPopup(
_("Network error.\nDisabling Growlee until next restart!"),
MessageBox.TYPE_ERROR,
10
)
def gotNotification():
if hasattr(Notifications, 'notificationQueue'):
notifications = Notifications.notificationQueue.queue
def handler(note):
return note.fnc, note.screen, note.args, note.kwargs, note.id
else:
notifications = Notifications.notifications
handler = lambda note: note
if notifications:
_, screen, args, kwargs, id = handler(notifications[-1])
if screen is MessageBox and id != NOTIFICATIONID:
# NOTE: priority is in [-2; 2] but type is [0; 3] so map it
# XXX: maybe priority==type-2 would be more appropriate
priority = kwargs.get("type", 0) - 1
timeout = kwargs.get("timeout", -1)
if "text" in kwargs:
description = kwargs["text"]
else:
description = args[0]
description = description
growleeConnection.sendNotification(title="Dreambox", description=description, priority=priority, timeout=timeout, id=id)
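# Worked example of the mapping above (illustrative): a popup created with
# type=3 is forwarded with priority 3 - 1 = 2, while type=0 maps to -1, so the
# [0; 3] message-box range lands inside the [-2; 2] notification scale.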
class GrowleeConnection:
connections = []
pending = 0
def sendNotification(self, title="Dreambox", description='', priority=-1, timeout=-1, id=""):
for connection, host in self.connections:
try:
level = int(host.level.value)
except ValueError:
level = -1
if connection and id not in host.blacklist.value and not priority < level:
connection.sendNotification(title=title, description=description, priority=priority, timeout=timeout)
def listen(self):
if self.connections:
return
for host in config.plugins.growlee.hosts:
if not (host.enable_outgoing.value or host.enable_incoming.value):
continue
proto = host.protocol.value
if proto == "prowl":
from Prowl import ProwlAPI
connection = ProwlAPI(host)
elif proto == "growl":
from GrowlTalk import GrowlTalkAbstraction
connection = GrowlTalkAbstraction(host)
elif proto == "gntp":
from GNTP import GNTPAbstraction
connection = GNTPAbstraction(host)
elif proto == "snarl":
from SNP import SnarlNetworkProtocolAbstraction
connection = SnarlNetworkProtocolAbstraction(host)
else: # proto == "syslog":
from Syslog import SyslogAbstraction
connection = SyslogAbstraction(host)
self.connections.append((connection, host))
def maybeClose(self, resOrFail, defer = None):
self.pending -= 1
if self.pending == 0:
if defer: defer.callback(True)
def stop(self):
defer = Deferred()
self.pending = 0
for connection, host in self.connections:
d = connection.stop()
if d is not None:
self.pending += 1
d.addBoth(self.maybeClose, defer = defer)
del self.connections[:]
if self.pending == 0:
			reactor.callLater(1, defer.callback, True)
return defer
growleeConnection = GrowleeConnection()
| 28.932203
| 123
| 0.730814
|
fec2436434386751bc549dcf8a56c1375c0868cf
| 24,736
|
py
|
Python
|
Packs/FireEyeEX/Integrations/FireEyeEX/FireEyeEX.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | null | null | null |
Packs/FireEyeEX/Integrations/FireEyeEX/FireEyeEX.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 60
|
2022-02-24T14:54:47.000Z
|
2022-03-31T10:38:41.000Z
|
Packs/FireEyeEX/Integrations/FireEyeEX/FireEyeEX.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | null | null | null |
from typing import Tuple
from CommonServerPython import *
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_NAME = 'FireEye Email Security'
INTEGRATION_COMMAND_NAME = 'fireeye-ex'
INTEGRATION_CONTEXT_NAME = 'FireEyeEX'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
class Client:
"""
The integration's client
"""
def __init__(self, base_url: str, username: str, password: str, verify: bool, proxy: bool):
self.fe_client: FireEyeClient = FireEyeClient(base_url=base_url, username=username, password=password,
verify=verify, proxy=proxy)
@logger
def run_test_module(client: Client) -> str:
"""
Test module by getting alerts from the last day.
"""
start_time = to_fe_datetime_converter('1 day')
client.fe_client.get_alerts_request({
'info_level': 'concise',
'start_time': start_time,
'duration': '24_hours',
})
return 'ok'
@logger
def get_alerts(client: Client, args: Dict[str, Any]) -> CommandResults:
def parse_request_params(args: Dict[str, Any]) -> Dict:
alert_id = args.get('alert_id', '')
start_time = args.get('start_time', '')
if start_time:
start_time = to_fe_datetime_converter(start_time)
end_time = args.get('end_time')
if end_time:
end_time = to_fe_datetime_converter(end_time)
duration = args.get('duration')
callback_domain = args.get('callback_domain', '')
dst_ip = args.get('dst_ip', '')
src_ip = args.get('src_ip', '')
file_name = args.get('file_name', '')
file_type = args.get('file_type', '')
malware_name = args.get('malware_name', '')
malware_type = args.get('malware_type', '')
recipient_email = args.get('recipient_email', '')
sender_email = args.get('sender_email', '')
url_ = args.get('url', '')
request_params = {
'info_level': args.get('info_level', 'concise')
}
if start_time:
request_params['start_time'] = start_time
if end_time:
request_params['end_time'] = end_time
if duration:
request_params['duration'] = duration
if alert_id:
request_params['alert_id'] = alert_id
if callback_domain:
request_params['callback_domain'] = callback_domain
if dst_ip:
request_params['dst_ip'] = dst_ip
if src_ip:
request_params['src_ip'] = src_ip
if file_name:
request_params['file_name'] = file_name
if file_type:
request_params['file_type'] = file_type
if malware_name:
request_params['malware_name'] = malware_name
if malware_type:
request_params['malware_type'] = malware_type
if recipient_email:
request_params['recipient_email'] = recipient_email
if sender_email:
request_params['sender_email'] = sender_email
if url_:
request_params['url'] = url_
return request_params
request_params = parse_request_params(args)
limit = int(args.get('limit', '20'))
raw_response = client.fe_client.get_alerts_request(request_params)
alerts = raw_response.get('alert')
if not alerts:
md_ = f'No alerts with the given arguments were found.\n Arguments {str(request_params)}'
else:
alerts = alerts[:limit]
headers = ['id', 'occurred', 'name', 'action', 'smtpMessage', 'src', 'dst', 'alertUrl']
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Alerts:', t=alerts, headers=headers, removeNull=True)
return CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
outputs_key_field='uuid',
outputs=alerts,
raw_response=raw_response
)
@logger
def get_alert_details(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
alert_ids = argToList(args.get('alert_id'))
timeout = int(args.get('timeout', '30'))
command_results: List[CommandResults] = []
headers = ['id', 'occurred', 'name', 'action', 'smtpMessage', 'src', 'dst', 'alertUrl']
for alert_id in alert_ids:
raw_response = client.fe_client.get_alert_details_request(alert_id, timeout)
alert_details = raw_response.get('alert')
if not alert_details:
md_ = f'Alert {alert_id} was not found.'
else:
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Alerts:', t=alert_details, headers=headers, removeNull=True)
command_results.append(CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
outputs_key_field='uuid',
outputs=alert_details,
raw_response=raw_response
))
return command_results
@logger
def get_artifacts_by_uuid(client: Client, args: Dict[str, Any]):
uuids = argToList(args.get('uuid'))
timeout = int(args.get('timeout', '120'))
for uuid in uuids:
artifact = client.fe_client.get_artifacts_by_uuid_request(uuid, timeout)
demisto.results(fileResult(f'artifacts_{uuid}.zip', data=artifact, file_type=EntryType.ENTRY_INFO_FILE))
@logger
def get_artifacts_metadata_by_uuid(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
uuids: List[str] = argToList(str(args.get('uuid')))
command_results: List[CommandResults] = []
for uuid in uuids:
raw_response = client.fe_client.get_artifacts_metadata_by_uuid_request(uuid)
outputs = raw_response
outputs['uuid'] = uuid # type: ignore
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} {uuid} Artifact metadata:',
t=raw_response.get('artifactsInfoList'), removeNull=True)
command_results.append(CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
outputs_key_field='uuid',
outputs=outputs,
raw_response=raw_response
))
return command_results
@logger
def get_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
start_time = to_fe_datetime_converter(args.get('start_time', '1 day'))
end_time = to_fe_datetime_converter(args.get('end_time', 'now'))
from_ = args.get('from', '')
subject = args.get('subject', '')
appliance_id = args.get('appliance_id', '')
limit = (args.get('limit', '10000'))
raw_response = client.fe_client.get_quarantined_emails_request(start_time, end_time, from_, subject, appliance_id,
limit)
if not raw_response:
md_ = 'No emails with the given query arguments were found.'
else:
headers = ['email_uuid', 'from', 'subject', 'message_id', 'completed_at']
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Quarantined emails:', t=raw_response,
headers=headers, removeNull=True)
return CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.QuarantinedEmail',
outputs_key_field='email_uuid',
outputs=raw_response,
raw_response=raw_response
)
@logger
def release_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
queue_ids = argToList(args.get('queue_ids', ''))
raw_response = client.fe_client.release_quarantined_emails_request(queue_ids)
    if raw_response.text:  # returns 200 either way. if the operation is successful then the response is empty
raise DemistoException(raw_response.json())
else:
md_ = f'{INTEGRATION_NAME} released emails successfully.'
return CommandResults(
readable_output=md_,
raw_response=raw_response
)
@logger
def delete_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
queue_ids = argToList(args.get('queue_ids', ''))
raw_response = client.fe_client.delete_quarantined_emails_request(queue_ids)
    if raw_response.text:  # returns 200 either way. if the operation is successful then the response is empty
raise DemistoException(raw_response.json())
else:
md_ = f'{INTEGRATION_NAME} deleted emails successfully.'
return CommandResults(
readable_output=md_,
raw_response=raw_response
)
@logger
def download_quarantined_emails(client: Client, args: Dict[str, Any]):
queue_id = args.get('queue_id', '')
timeout = int(args.get('timeout', '120'))
raw_response = client.fe_client.download_quarantined_emails_request(queue_id, timeout)
demisto.results(fileResult(f'quarantined_email_{queue_id}.eml', data=raw_response, file_type=EntryType.FILE))
@logger
def get_reports(client: Client, args: Dict[str, Any]):
report_type = args.get('report_type', '')
start_time = to_fe_datetime_converter(args.get('start_time', '1 week'))
end_time = to_fe_datetime_converter(args.get('end_time', 'now'))
limit = args.get('limit', '100')
interface = args.get('interface', '')
alert_id = args.get('alert_id', '')
infection_id = args.get('infection_id', '')
infection_type = args.get('infection_type', '')
timeout = int(args.get('timeout', '120'))
if report_type == 'alertDetailsReport': # validate arguments
# can use either alert_id, or infection_type and infection_id
err_str = 'The alertDetailsReport can be retrieved using alert_id argument alone, ' \
'or by infection_type and infection_id'
if alert_id:
if infection_id or infection_type:
raise DemistoException(err_str)
else:
if not infection_id and not infection_type:
raise DemistoException(err_str)
try:
raw_response = client.fe_client.get_reports_request(report_type, start_time, end_time, limit, interface,
alert_id, infection_type, infection_id, timeout)
csv_reports = {'empsEmailAVReport', 'empsEmailHourlyStat', 'mpsCallBackServer', 'mpsInfectedHostsTrend',
'mpsWebAVReport'}
prefix = 'csv' if report_type in csv_reports else 'pdf'
demisto.results(fileResult(f'report_{report_type}_{datetime.now().timestamp()}.{prefix}', data=raw_response,
file_type=EntryType.ENTRY_INFO_FILE))
except Exception as err:
if 'WSAPI_REPORT_ALERT_NOT_FOUND' in str(err):
return CommandResults(readable_output=f'Report {report_type} was not found with the given arguments.')
else:
raise
@logger
def list_allowedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
limit = int(args.get('limit', '20'))
raw_response = client.fe_client.list_allowedlist_request(type_)
allowed_list = []
if not raw_response:
md_ = f'No allowed lists with the given type {type_} were found.'
else:
allowed_list = raw_response[:limit]
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Allowed lists. showing {limit} of {len(raw_response)}:',
t=allowed_list, removeNull=True)
return CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Allowedlists',
outputs_key_field='name',
outputs=allowed_list,
raw_response=raw_response
)
@logger
def create_allowedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
matches = int(args.get('matches', '0'))
# check that the entry_value does not exist
current_allowed_list = client.fe_client.list_allowedlist_request(type_)
for entry in current_allowed_list:
if entry_value == entry.get('name'):
            raise DemistoException(str(f'Cannot create the entry_value {entry_value} as it already exists in the '
f'Allowedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.create_allowedlist_request(type_, entry_value, matches)
return CommandResults(
readable_output=f'Allowedlist entry {entry_value} of type {type_} was created.'
)
@logger
def update_allowedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
matches = int(args.get('matches', '0'))
# check that the entry_value does exist
exist = False
current_allowed_list = client.fe_client.list_allowedlist_request(type_)
for entry in current_allowed_list:
if entry_value == entry.get('name'):
exist = True
if not exist:
raise DemistoException(str(f'Cannot update the entry_value {entry_value} as it does not exist in the '
f'Allowedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.update_allowedlist_request(type_, entry_value, matches)
return CommandResults(
readable_output=f'Allowedlist entry {entry_value} of type {type_} was updated.'
)
@logger
def delete_allowedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
# check that the entry_value does exist
exist = False
current_allowed_list = client.fe_client.list_allowedlist_request(type_)
for entry in current_allowed_list:
if entry_value == entry.get('name'):
exist = True
if not exist:
raise DemistoException(str(f'Cannot delete the entry_value {entry_value} as it does not exist in the '
f'Allowedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.delete_allowedlist_request(type_, entry_value)
return CommandResults(
readable_output=f'Allowedlist entry {entry_value} of type {type_} was deleted.'
)
@logger
def list_blockedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
limit = int(args.get('limit', '20'))
raw_response = client.fe_client.list_blockedlist_request(type_)
blocked_list = []
if not raw_response:
md_ = f'No blocked lists with the given type {type_} were found.'
else:
blocked_list = raw_response[:limit]
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Blocked lists. showing {limit} of {len(raw_response)}:',
t=blocked_list, removeNull=True)
return CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Blockedlists',
outputs_key_field='name',
outputs=blocked_list,
raw_response=raw_response
)
@logger
def create_blockedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
matches = int(args.get('matches', '0'))
# check that the entry_value does not exist
current_blocked_list = client.fe_client.list_blockedlist_request(type_)
for entry in current_blocked_list:
if entry_value == entry.get('name'):
            raise DemistoException(str(f'Cannot create the entry_value {entry_value} as it already exists in the '
f'Blockedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.create_blockedlist_request(type_, entry_value, matches)
return CommandResults(
readable_output=f'Blockedlist entry {entry_value} of type {type_} was created.'
)
@logger
def update_blockedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
matches = int(args.get('matches', '0'))
# check that the entry_value does exist
exist = False
current_allowed_list = client.fe_client.list_blockedlist_request(type_)
for entry in current_allowed_list:
if entry_value == entry.get('name'):
exist = True
if not exist:
raise DemistoException(str(f'Cannot update the entry_value {entry_value} as it does not exist in the '
f'Blockedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.update_blockedlist_request(type_, entry_value, matches)
return CommandResults(
readable_output=f'Blockedlist entry {entry_value} of type {type_} was updated.'
)
@logger
def delete_blockedlist(client: Client, args: Dict[str, Any]) -> CommandResults:
type_ = args.get('type', '')
entry_value = args.get('entry_value', '')
# check that the entry_value does exist
exist = False
current_allowed_list = client.fe_client.list_blockedlist_request(type_)
for entry in current_allowed_list:
if entry_value == entry.get('name'):
exist = True
if not exist:
raise DemistoException(str(f'Cannot delete the entry_value {entry_value} as it does not exist in the '
f'Blockedlist of type {type_}.'))
# gets 200 back without content if successful
client.fe_client.delete_blockedlist_request(type_, entry_value)
return CommandResults(
readable_output=f'Blockedlist entry {entry_value} of type {type_} was deleted.'
)
@logger
def fetch_incidents(client: Client, last_run: dict, first_fetch: str, max_fetch: int = 50,
info_level: str = 'concise') -> Tuple[dict, list]:
if not last_run: # if first time fetching
next_run = {
'time': to_fe_datetime_converter(first_fetch),
'last_alert_ids': []
}
else:
next_run = last_run
demisto.info(f'{INTEGRATION_NAME} executing fetch with: {str(next_run.get("time"))}')
raw_response = client.fe_client.get_alerts_request(request_params={
'start_time': to_fe_datetime_converter(next_run['time']), # type: ignore
'info_level': info_level,
'duration': '48_hours'
})
all_alerts = raw_response.get('alert')
if not all_alerts:
demisto.info(f'{INTEGRATION_NAME} no alerts were fetched from FireEye server at: {str(next_run)}')
        # As no alerts occurred in the 48-hour window starting at the given start time, advance the last_run
        # window by 48 hours. If that is later than now - 10 minutes, use now - 10 minutes instead (to avoid missing events).
two_days_from_last_search = (dateparser.parse(next_run['time']) + timedelta(hours=48))
now_minus_ten_minutes = dateparser.parse('10 minutes').astimezone(two_days_from_last_search.tzinfo)
next_search = min(two_days_from_last_search, now_minus_ten_minutes)
next_run = {
'time': next_search.isoformat(),
'last_alert_ids': []
}
demisto.info(f'{INTEGRATION_NAME} setting next run to: {str(next_run)}')
return next_run, []
alerts = all_alerts[:max_fetch]
last_alert_ids = last_run.get('last_alert_ids', [])
incidents = []
for alert in alerts:
alert_id = str(alert.get('id'))
if alert_id not in last_alert_ids: # check that event was not fetched in the last fetch
incident = {
'name': f'{INTEGRATION_NAME} Alert: {alert_id}',
'occurred': dateparser.parse(alert.get('occurred')).strftime(DATE_FORMAT),
'severity': alert_severity_to_dbot_score(alert.get('severity')),
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
last_alert_ids.append(alert_id)
if not incidents:
demisto.info(f'{INTEGRATION_NAME} no new alerts were collected at: {str(next_run)}.')
        # As no incidents were collected, we know that all the fetched alerts for the 48 hours starting at 'start_time'
        # already exist in our system, so update the last_run time to look at the next 48 hours. If that is later than
        # now - 10 minutes, use now - 10 minutes instead (to avoid missing events).
two_days_from_last_incident = dateparser.parse(alerts[-1].get('occurred')) + timedelta(hours=48)
now_minus_ten_minutes = dateparser.parse('10 minutes').astimezone(two_days_from_last_incident.tzinfo)
next_search = min(two_days_from_last_incident, now_minus_ten_minutes)
next_run['time'] = next_search.isoformat()
demisto.info(f'{INTEGRATION_NAME} Setting next_run to: {next_run["time"]}')
return next_run, []
    # as alerts occurred till now, update the last_run time to that of the latest fetched alert
next_run = {
'time': alerts[-1].get('occurred'),
'last_alert_ids': last_alert_ids # save the alert IDs from the last fetch
}
demisto.info(f'{INTEGRATION_NAME} Fetched {len(incidents)}. last fetch at: {str(next_run)}')
return next_run, incidents
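# Worked example of the window advance above (illustrative, times are hypothetical):
# if a fetch starting at 2021-01-01T00:00:00 returns no new incidents, the next
# start time becomes min(2021-01-03T00:00:00, now - 10 minutes), so the window
# moves forward without ever skipping past the present.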
def main() -> None:
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
# there is also a v1.2.0 which holds different paths and params, we support only the newest API version
base_url = urljoin(params.get('url'), '/wsapis/v2.0.0/')
verify = not argToBoolean(params.get('insecure', 'false'))
proxy = argToBoolean(params.get('proxy'))
# # fetch params
max_fetch = int(params.get('max_fetch', '50'))
first_fetch = params.get('first_fetch', '3 days').strip()
info_level = params.get('info_level', 'concise')
command = demisto.command()
args = demisto.args()
LOG(f'Command being called is {command}')
try:
client = Client(base_url=base_url, username=username, password=password, verify=verify, proxy=proxy)
commands = {
f'{INTEGRATION_COMMAND_NAME}-get-alerts': get_alerts,
f'{INTEGRATION_COMMAND_NAME}-get-alert-details': get_alert_details,
f'{INTEGRATION_COMMAND_NAME}-get-artifacts-by-uuid': get_artifacts_by_uuid,
f'{INTEGRATION_COMMAND_NAME}-get-artifacts-metadata-by-uuid': get_artifacts_metadata_by_uuid,
f'{INTEGRATION_COMMAND_NAME}-get-quarantined-emails': get_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-release-quarantined-emails': release_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-delete-quarantined-emails': delete_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-download-quarantined-emails': download_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-list-allowedlist': list_allowedlist,
f'{INTEGRATION_COMMAND_NAME}-create-allowedlist': create_allowedlist,
f'{INTEGRATION_COMMAND_NAME}-update-allowedlist': update_allowedlist,
f'{INTEGRATION_COMMAND_NAME}-delete-allowedlist': delete_allowedlist,
f'{INTEGRATION_COMMAND_NAME}-list-blockedlist': list_blockedlist,
f'{INTEGRATION_COMMAND_NAME}-create-blockedlist': create_blockedlist,
f'{INTEGRATION_COMMAND_NAME}-update-blockedlist': update_blockedlist,
f'{INTEGRATION_COMMAND_NAME}-delete-blocklist': delete_blockedlist,
}
if command == 'test-module':
return_results(run_test_module(client))
elif command == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch=first_fetch,
max_fetch=max_fetch,
info_level=info_level
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command == f'{INTEGRATION_COMMAND_NAME}-get-artifacts-by-uuid':
get_artifacts_by_uuid(client, args)
elif command == f'{INTEGRATION_COMMAND_NAME}-get-reports':
get_reports(client, args)
elif command == f'{INTEGRATION_COMMAND_NAME}-download-quarantined-emails':
download_quarantined_emails(client, args)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(str(err), err)
from FireEyeApiModule import * # noqa: E402
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 40.818482
| 120
| 0.660293
|
829704e0b6a4fb4f19db28b92ac761a4a402df83
| 3,264
|
py
|
Python
|
test/utils/multi_objective/test_scalarization.py
|
SamuelMarks/botorch
|
7801e2f56dc447322b2b6c92cab683d8900e4c7f
|
[
"MIT"
] | 2
|
2021-01-11T18:16:27.000Z
|
2021-11-30T09:34:44.000Z
|
test/utils/multi_objective/test_scalarization.py
|
SamuelMarks/botorch
|
7801e2f56dc447322b2b6c92cab683d8900e4c7f
|
[
"MIT"
] | 17
|
2020-12-11T20:07:22.000Z
|
2022-03-27T16:46:42.000Z
|
test/utils/multi_objective/test_scalarization.py
|
SamuelMarks/botorch
|
7801e2f56dc447322b2b6c92cab683d8900e4c7f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.testing import BotorchTestCase
from botorch.utils.transforms import normalize
class TestGetChebyshevScalarization(BotorchTestCase):
def test_get_chebyshev_scalarization(self):
tkwargs = {"device": self.device}
Y_train = torch.rand(4, 2, **tkwargs)
Y_bounds = torch.stack(
[
Y_train.min(dim=-2, keepdim=True).values,
Y_train.max(dim=-2, keepdim=True).values,
],
dim=0,
)
for dtype in (torch.float, torch.double):
for batch_shape in (torch.Size([]), torch.Size([3])):
tkwargs["dtype"] = dtype
Y_test = torch.rand(batch_shape + torch.Size([5, 2]), **tkwargs)
Y_train = Y_train.to(**tkwargs)
Y_bounds = Y_bounds.to(**tkwargs)
normalized_Y_test = normalize(Y_test, Y_bounds)
# test wrong shape
with self.assertRaises(BotorchTensorDimensionError):
get_chebyshev_scalarization(
weights=torch.zeros(3, **tkwargs), Y=Y_train
)
weights = torch.ones(2, **tkwargs)
# test batch Y
with self.assertRaises(NotImplementedError):
get_chebyshev_scalarization(weights=weights, Y=Y_train.unsqueeze(0))
# basic test
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = normalized_Y_test.min(
dim=-1
).values + 0.05 * normalized_Y_test.sum(dim=-1)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# test different alpha
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train, alpha=1.0
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = normalized_Y_test.min(
dim=-1
).values + normalized_Y_test.sum(dim=-1)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
# Test different weights
weights = torch.tensor([0.3, 0.7], **tkwargs)
objective_transform = get_chebyshev_scalarization(
weights=weights, Y=Y_train
)
Y_transformed = objective_transform(Y_test)
expected_Y_transformed = (weights * normalized_Y_test).min(
dim=-1
).values + 0.05 * (weights * normalized_Y_test).sum(dim=-1)
self.assertTrue(torch.equal(Y_transformed, expected_Y_transformed))
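# Note: the expectation asserted above is the augmented Chebyshev scalarization
# s(y) = min_i(w_i * y_i) + alpha * sum_i(w_i * y_i), evaluated on Y normalized
# to the ranges observed in Y_train, with alpha defaulting to 0.05.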
| 45.971831
| 88
| 0.590686
|
636b2f32f98f6c1badb0ec0d3c4fa9d9d0f72fe2
| 1,608
|
py
|
Python
|
Moving-to-Melbourne/code.py
|
sukamal1928/Greyatom-Projects-Repository
|
1dca71032551b5f65abeb37ba76ca9c7184787f4
|
[
"MIT"
] | null | null | null |
Moving-to-Melbourne/code.py
|
sukamal1928/Greyatom-Projects-Repository
|
1dca71032551b5f65abeb37ba76ca9c7184787f4
|
[
"MIT"
] | null | null | null |
Moving-to-Melbourne/code.py
|
sukamal1928/Greyatom-Projects-Repository
|
1dca71032551b5f65abeb37ba76ca9c7184787f4
|
[
"MIT"
] | null | null | null |
# --------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# path- variable storing file path
#Code starts here
df = pd.read_csv(path)
y = df['Price']
X = df.drop('Price',axis=1)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=6)
corr = X_train.corr()
print(corr)
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
r2 = r2_score(y_test,y_pred)
# --------------
from sklearn.linear_model import Lasso
# Code starts here
lasso = Lasso()
lasso.fit(X_train,y_train)
lasso_pred = lasso.predict(X_test)
r2_lasso = r2_score(y_test,lasso_pred)
# --------------
from sklearn.linear_model import Ridge
# Code starts here
ridge = Ridge()
ridge.fit(X_train,y_train)
ridge_pred = ridge.predict(X_test)
r2_ridge = r2_score(y_test,ridge_pred)
# Code ends here
# --------------
from sklearn.model_selection import cross_val_score
#Code starts here
regressor = LinearRegression()
score = cross_val_score(regressor,X_train,y_train,cv=10)
mean_score = np.mean(score)
# --------------
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
#Code starts here
model = make_pipeline(PolynomialFeatures(2),LinearRegression())
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
r2_poly = r2_score(y_test,y_pred)
| 16.57732
| 86
| 0.694652
|
495049210f0c324e2bf3f150ab11bee023b1f38b
| 374
|
py
|
Python
|
scratch_10.py
|
ddsanchezc/PythonSourceCode
|
cc9c6d64bdf5ce5280fb7e0721e60642bb4d50b6
|
[
"Apache-2.0"
] | null | null | null |
scratch_10.py
|
ddsanchezc/PythonSourceCode
|
cc9c6d64bdf5ce5280fb7e0721e60642bb4d50b6
|
[
"Apache-2.0"
] | null | null | null |
scratch_10.py
|
ddsanchezc/PythonSourceCode
|
cc9c6d64bdf5ce5280fb7e0721e60642bb4d50b6
|
[
"Apache-2.0"
] | null | null | null |
numero = int(input("enter a number >> "))
contador = 0
verificar = False
for i in range(1, numero + 1):
    if numero % i == 0:
        contador = contador + 1
    if contador >= 3:
        verificar = True
        break
# prime iff exactly two divisors (1 and itself) were found and the early-exit flag never fired
if contador == 2 and not verificar:
    print("the number is prime")
else:
    print("the number is not prime")
| 24.933333
| 51
| 0.606952
|
9ef1cee4ab22e915ed46fcaf3554566304deb053
| 1,754
|
py
|
Python
|
display4D/image_resizer_grtensor.py
|
seVenVo1d/General-Relativity-Tensorial-Calculations
|
6c07823f74840352253c235af2e4dbe60044941a
|
[
"MIT"
] | 1
|
2021-06-16T07:29:30.000Z
|
2021-06-16T07:29:30.000Z
|
display4D/image_resizer_grtensor.py
|
seVenVo1d/General-Relativity-Tensorial-Calculations
|
6c07823f74840352253c235af2e4dbe60044941a
|
[
"MIT"
] | null | null | null |
display4D/image_resizer_grtensor.py
|
seVenVo1d/General-Relativity-Tensorial-Calculations
|
6c07823f74840352253c235af2e4dbe60044941a
|
[
"MIT"
] | 1
|
2021-12-02T15:11:06.000Z
|
2021-12-02T15:11:06.000Z
|
from PIL import Image
def resize_tensor_image(tensor_object, tensor_type=''):
"""
Re-sizing the image of a tensor
Args:
tensor_object [str]: The name of the grtensor object (metric tensor, riemann tensor, etc.)
tensor_type [str]: The type of the tensor. Given in terms of 'u': contravariant
and 'd': covariant
"""
im = Image.open(r'display4D\output images\tensor.png')
if tensor_object == 'Metric Tensor' and tensor_type == 'ud':
size = (400, 400)
elif tensor_object == 'Metric Tensor':
size = (500, 500)
elif tensor_object == 'Inverse Metric Tensor':
size = (500, 500)
elif tensor_object == 'Christoffel Symbol':
size = (1200, 650)
elif tensor_object == 'Riemann Tensor':
size = (900, 450)
elif tensor_object == 'Ricci Tensor':
size = (500, 500)
elif tensor_object == 'Ricci Scalar':
size = (500, 500)
elif tensor_object == 'Traceless Ricci Tensor':
size = (500, 500)
elif tensor_object == 'Weyl Tensor':
size = (900, 450)
elif tensor_object == 'Einstein Tensor':
size = (500, 500)
elif tensor_object == 'Kretschmann Scalar':
size = (500, 500)
im.thumbnail(size, Image.ANTIALIAS)
out_dim = im.size
out_name = r'display4D\output images\tensor.png'
im.save(out_name, "PNG")
im.close()
def resize_tensor_component_image():
"""
Re-sizing the image of a tensor component
"""
im = Image.open(r'display4D\output images\tensor_component.png')
size = (200, 200)
im.thumbnail(size, Image.ANTIALIAS)
out_dim = im.size
out_name = r'display4D\output images\tensor_component.png'
im.save(out_name, "PNG")
im.close()
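# Illustrative alternative (not part of the original module): the if/elif
# chain above can be expressed as a lookup table. The sizes mirror the
# branches already present; unknown tensor names fall back to (500, 500)
# here, whereas the original leaves `size` undefined for them.
_TENSOR_IMAGE_SIZES = {
    'Christoffel Symbol': (1200, 650),
    'Riemann Tensor': (900, 450),
    'Weyl Tensor': (900, 450),
}
def lookup_size(tensor_object, tensor_type=''):
    """Return the thumbnail size for a tensor image (sketch)."""
    if tensor_object == 'Metric Tensor' and tensor_type == 'ud':
        return (400, 400)
    return _TENSOR_IMAGE_SIZES.get(tensor_object, (500, 500))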
| 27.40625
| 98
| 0.623717
|
f93f045e79a26c2b24bdec5a6308f57b3dc69ec4
| 1,033
|
py
|
Python
|
myproject/myproject/CBIRtool/encoder/Faiss.py
|
Andyb1ance/iMED_search_engine_module
|
ef65e3ff1361e69896a0d0d3e23acab8d1066e46
|
[
"MIT"
] | null | null | null |
myproject/myproject/CBIRtool/encoder/Faiss.py
|
Andyb1ance/iMED_search_engine_module
|
ef65e3ff1361e69896a0d0d3e23acab8d1066e46
|
[
"MIT"
] | null | null | null |
myproject/myproject/CBIRtool/encoder/Faiss.py
|
Andyb1ance/iMED_search_engine_module
|
ef65e3ff1361e69896a0d0d3e23acab8d1066e46
|
[
"MIT"
] | null | null | null |
import faiss
import numpy as np
'''
The input to faiss must be a float32 numpy array, so incoming lists of
vectors are converted with `transform` before indexing or searching.
'''
def transform(vectors):
    # Convert a list of vectors into a float32 numpy array, as faiss requires.
return np.array(vectors,dtype=float).astype('float32')
class Encoder:
def __init__(self,device):
self.device = device
def construct(self,vectors,indexFile):
# if self.device == 'gpu':
# res = faiss.StandardGpuResources()
# index = faiss.index_cpu_to_gpu(res, 0, index)
# assert type(vectors)==list
vectors = transform(vectors)
vectors = np.squeeze(vectors)
index=faiss.IndexFlatL2(vectors.shape[1])
index.add(vectors)
with open(indexFile,'w') as file:
file.write('')
faiss.write_index(index, indexFile)
return index
def update(self,vectors,indexFile):
pass
def search(self,vector,indexFile,k):
vector = transform(vector)
index = faiss.read_index(indexFile)
D, I = index.search(vector, k)
return D,I
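# Illustrative usage sketch (not part of the original module). The file name,
# vector values and dimensionality below are made up for the example.
if __name__ == '__main__':
    enc = Encoder(device='cpu')
    data = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]  # 4 vectors, d=2
    enc.construct(data, 'example.index')  # build the index and persist it to disk
    D, I = enc.search([[1.1, 1.2]], 'example.index', 2)  # 2 nearest neighbours
    print(D, I)  # distances and indices of the closest stored vectors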
| 25.195122
| 62
| 0.613746
|
aa68dddb80b6149e18fb2c116f4caf8c1884fd96
| 117
|
py
|
Python
|
salesforce_streaming/rest_api.py
|
aliaksandr-d/salesforce-streaming
|
5f5bfe731d1323d457b6324dd0ee94c87b809461
|
[
"MIT"
] | null | null | null |
salesforce_streaming/rest_api.py
|
aliaksandr-d/salesforce-streaming
|
5f5bfe731d1323d457b6324dd0ee94c87b809461
|
[
"MIT"
] | null | null | null |
salesforce_streaming/rest_api.py
|
aliaksandr-d/salesforce-streaming
|
5f5bfe731d1323d457b6324dd0ee94c87b809461
|
[
"MIT"
] | 1
|
2018-04-11T13:39:12.000Z
|
2018-04-11T13:39:12.000Z
|
__author__ = 'David C. Dean'
import requests
class RestAPI:
def __init__(self):
print('Hello World')
| 11.7
| 28
| 0.649573
|
e37468d8dd287c61ebd4499c68506c861a46c162
| 95,972
|
py
|
Python
|
test/surface_test.py
|
hljjj/pygame_documentation
|
735a0823f71b28f3379d2d01c45cadd05d245e05
|
[
"Python-2.0",
"OLDAP-2.3"
] | 1
|
2022-01-09T16:07:48.000Z
|
2022-01-09T16:07:48.000Z
|
test/surface_test.py
|
hljjj/pygame_documentation
|
735a0823f71b28f3379d2d01c45cadd05d245e05
|
[
"Python-2.0",
"OLDAP-2.3"
] | null | null | null |
test/surface_test.py
|
hljjj/pygame_documentation
|
735a0823f71b28f3379d2d01c45cadd05d245e05
|
[
"Python-2.0",
"OLDAP-2.3"
] | null | null | null |
import os
import unittest
from pygame.tests import test_utils
from pygame.tests.test_utils import example_path
try:
from pygame.tests.test_utils.arrinter import *
except (ImportError, NameError):
pass
import pygame
from pygame.locals import *
from pygame.compat import xrange_, as_bytes, as_unicode
from pygame.bufferproxy import BufferProxy
import platform
import gc
import weakref
import ctypes
IS_PYPY = 'PyPy' == platform.python_implementation()
def intify(i):
"""If i is a long, cast to an int while preserving the bits"""
if 0x80000000 & i:
return int((0xFFFFFFFF & i))
return i
def longify(i):
"""If i is an int, cast to a long while preserving the bits"""
if i < 0:
return 0xFFFFFFFF & i
return long(i)
class SurfaceTypeTest(unittest.TestCase):
def test_set_clip( self ):
""" see if surface.set_clip(None) works correctly.
"""
s = pygame.Surface((800, 600))
r = pygame.Rect(10, 10, 10, 10)
s.set_clip(r)
r.move_ip(10, 0)
s.set_clip(None)
res = s.get_clip()
# this was garbled before.
self.assertEqual(res[0], 0)
self.assertEqual(res[2], 800)
def test_print(self):
surf = pygame.Surface((70,70), 0, 32)
self.assertEqual(repr(surf), '<Surface(70x70x32 SW)>')
def test_keyword_arguments(self):
surf = pygame.Surface((70,70), flags=SRCALPHA, depth=32)
self.assertEqual(surf.get_flags() & SRCALPHA, SRCALPHA)
self.assertEqual(surf.get_bitsize(), 32)
# sanity check to make sure the check below is valid
surf_16 = pygame.Surface((70,70), 0, 16)
self.assertEqual(surf_16.get_bytesize(), 2)
# try again with an argument list
surf_16 = pygame.Surface((70,70), depth=16)
self.assertEqual(surf_16.get_bytesize(), 2)
def test_set_at(self):
#24bit surfaces
s = pygame.Surface( (100, 100), 0, 24)
s.fill((0,0,0))
# set it with a tuple.
s.set_at((0,0), (10,10,10, 255))
r = s.get_at((0,0))
self.assertIsInstance(r, pygame.Color)
self.assertEqual(r, (10,10,10, 255))
# try setting a color with a single integer.
s.fill((0,0,0,255))
s.set_at ((10, 1), 0x0000FF)
r = s.get_at((10,1))
self.assertEqual(r, (0,0,255, 255))
def test_SRCALPHA(self):
# has the flag been passed in ok?
surf = pygame.Surface((70,70), SRCALPHA, 32)
self.assertEqual(surf.get_flags() & SRCALPHA, SRCALPHA)
#24bit surfaces can not have SRCALPHA.
self.assertRaises(ValueError, pygame.Surface, (100, 100), pygame.SRCALPHA, 24)
# if we have a 32 bit surface, the SRCALPHA should have worked too.
surf2 = pygame.Surface((70,70), SRCALPHA)
if surf2.get_bitsize() == 32:
self.assertEqual(surf2.get_flags() & SRCALPHA, SRCALPHA)
def test_masks(self):
def make_surf(bpp, flags, masks):
pygame.Surface((10, 10), flags, bpp, masks)
# With some masks SDL_CreateRGBSurface does not work properly.
masks = (0xFF000000, 0xFF0000, 0xFF00, 0)
self.assertEqual(make_surf(32, 0, masks), None)
# For 24 and 32 bit surfaces Pygame assumes no losses.
masks = (0x7F0000, 0xFF00, 0xFF, 0)
self.assertRaises(ValueError, make_surf, 24, 0, masks)
self.assertRaises(ValueError, make_surf, 32, 0, masks)
# Masks whose bits are not contiguous are rejected.
masks = (0x6F0000, 0xFF00, 0xFF, 0)
self.assertRaises(ValueError, make_surf, 32, 0, masks)
def test_get_bounding_rect (self):
surf = pygame.Surface ((70, 70), SRCALPHA, 32)
surf.fill((0,0,0,0))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.width, 0)
self.assertEqual(bound_rect.height, 0)
surf.set_at((30,30),(255,255,255,1))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.left, 30)
self.assertEqual(bound_rect.top, 30)
self.assertEqual(bound_rect.width, 1)
self.assertEqual(bound_rect.height, 1)
surf.set_at((29,29),(255,255,255,1))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.left, 29)
self.assertEqual(bound_rect.top, 29)
self.assertEqual(bound_rect.width, 2)
self.assertEqual(bound_rect.height, 2)
surf = pygame.Surface ((70, 70), 0, 24)
surf.fill((0,0,0))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.width, surf.get_width())
self.assertEqual(bound_rect.height, surf.get_height())
surf.set_colorkey((0,0,0))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.width, 0)
self.assertEqual(bound_rect.height, 0)
surf.set_at((30,30),(255,255,255))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.left, 30)
self.assertEqual(bound_rect.top, 30)
self.assertEqual(bound_rect.width, 1)
self.assertEqual(bound_rect.height, 1)
surf.set_at((60,60),(255,255,255))
bound_rect = surf.get_bounding_rect()
self.assertEqual(bound_rect.left, 30)
self.assertEqual(bound_rect.top, 30)
self.assertEqual(bound_rect.width, 31)
self.assertEqual(bound_rect.height, 31)
# Issue #180
pygame.display.init()
try:
surf = pygame.Surface((4, 1), 0, 8)
surf.fill((255, 255, 255))
surf.get_bounding_rect() # Segfault.
finally:
pygame.quit()
def test_copy(self):
"""Ensure a surface can be copied."""
color = (25, 25, 25, 25)
s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32)
s1.fill(color)
s2 = s1.copy()
s1rect = s1.get_rect()
s2rect = s2.get_rect()
self.assertEqual(s1rect.size, s2rect.size)
self.assertEqual(s2.get_at((10,10)), color)
def test_fill(self):
"""Ensure a surface can be filled."""
color = (25, 25, 25, 25)
fill_rect = pygame.Rect(0, 0, 16, 16)
s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32)
s1.fill(color, fill_rect)
for pt in test_utils.rect_area_pts(fill_rect):
self.assertEqual(s1.get_at(pt), color)
for pt in test_utils.rect_outer_bounds(fill_rect):
self.assertNotEqual(s1.get_at(pt), color)
def test_fill_negative_coordinates(self):
# negative coordinates should be clipped by fill, and not draw outside the surface.
color = (25, 25, 25, 25)
color2 = (20, 20, 20, 25)
fill_rect = pygame.Rect(-10, -10, 16, 16)
s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32)
r1 = s1.fill(color, fill_rect)
c = s1.get_at((0,0))
self.assertEqual(c, color)
# make a subsurface in the middle to test that it doesn't overwrite.
s2 = s1.subsurface((5, 5, 5, 5))
r2 = s2.fill(color2, (-3, -3, 5, 5))
c2 = s1.get_at((4,4))
self.assertEqual(c, color)
# rect returns the area we actually fill.
r3 = s2.fill(color2, (-30, -30, 5, 5))
# since we are using negative coords, it should be a zero sized rect.
self.assertEqual(tuple(r3), (0, 0, 0, 0))
def test_fill_keyword_args(self):
"""Ensure fill() accepts keyword arguments."""
color = (1, 2, 3, 255)
area = (1, 1, 2, 2)
s1 = pygame.Surface((4, 4), 0, 32)
s1.fill(special_flags=pygame.BLEND_ADD, color=color, rect=area)
self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255))
self.assertEqual(s1.get_at((1, 1)), color)
########################################################################
def test_get_alpha(self):
"""Ensure a surface's alpha value can be retrieved."""
s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32)
self.assertEqual(s1.get_alpha(), 255)
for alpha in (0, 32, 127, 255):
s1.set_alpha(alpha)
for t in range(4):
s1.set_alpha(s1.get_alpha())
self.assertEqual(s1.get_alpha(), alpha)
########################################################################
def test_get_bytesize(self):
"""Ensure a surface's bit and byte sizes can be retrieved."""
depth = 32
depth_bytes = 4
s1 = pygame.Surface((32, 32), pygame.SRCALPHA, depth)
self.assertEqual(s1.get_bytesize(), depth_bytes)
self.assertEqual(s1.get_bitsize(), depth)
########################################################################
def test_get_flags(self):
"""Ensure a surface's flags can be retrieved."""
s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32)
self.assertEqual(s1.get_flags(), pygame.SRCALPHA)
########################################################################
def test_get_parent(self):
"""Ensure a surface's parent can be retrieved."""
parent = pygame.Surface((16, 16))
child = parent.subsurface((0,0,5,5))
self.assertIs(child.get_parent(), parent)
########################################################################
def test_get_rect(self):
"""Ensure a surface's rect can be retrieved."""
size = (16, 16)
surf = pygame.Surface(size)
rect = surf.get_rect()
self.assertEqual(rect.size, size)
########################################################################
def test_get_width__size_and_height(self):
"""Ensure a surface's size, width and height can be retrieved."""
for w in xrange_(0, 255, 32):
for h in xrange_(0, 127, 15):
s = pygame.Surface((w, h))
self.assertEqual(s.get_width(), w)
self.assertEqual(s.get_height(), h)
self.assertEqual(s.get_size(), (w, h))
def test_get_view(self):
"""Ensure a buffer view of the surface's pixels can be retrieved."""
# Check that BufferProxys are returned when array depth is supported,
# ValueErrors returned otherwise.
Error = ValueError
s = pygame.Surface((5, 7), 0, 8)
v2 = s.get_view('2')
self.assertRaises(Error, s.get_view, '0')
self.assertRaises(Error, s.get_view, '1')
self.assertIsInstance(v2, BufferProxy)
self.assertRaises(Error, s.get_view, '3')
s = pygame.Surface((8, 7), 0, 8)
length = s.get_bytesize() * s.get_width() * s.get_height()
v0 = s.get_view('0')
v1 = s.get_view('1')
self.assertIsInstance(v0, BufferProxy)
self.assertEqual(v0.length, length)
self.assertIsInstance(v1, BufferProxy)
self.assertEqual(v1.length, length)
s = pygame.Surface((5, 7), 0, 16)
v2 = s.get_view('2')
self.assertRaises(Error, s.get_view, '0')
self.assertRaises(Error, s.get_view, '1')
self.assertIsInstance(v2, BufferProxy)
self.assertRaises(Error, s.get_view, '3')
s = pygame.Surface((8, 7), 0, 16)
length = s.get_bytesize() * s.get_width() * s.get_height()
v0 = s.get_view('0')
v1 = s.get_view('1')
self.assertIsInstance(v0, BufferProxy)
self.assertEqual(v0.length, length)
self.assertIsInstance(v1, BufferProxy)
self.assertEqual(v1.length, length)
s = pygame.Surface((5, 7), pygame.SRCALPHA, 16)
v2 = s.get_view('2')
self.assertIsInstance(v2, BufferProxy)
self.assertRaises(Error, s.get_view, '3')
s = pygame.Surface((5, 7), 0, 24)
v2 = s.get_view('2')
v3 = s.get_view('3')
self.assertRaises(Error, s.get_view, '0')
self.assertRaises(Error, s.get_view, '1')
self.assertIsInstance(v2, BufferProxy)
self.assertIsInstance(v3, BufferProxy)
s = pygame.Surface((8, 7), 0, 24)
length = s.get_bytesize() * s.get_width() * s.get_height()
v0 = s.get_view('0')
v1 = s.get_view('1')
self.assertIsInstance(v0, BufferProxy)
self.assertEqual(v0.length, length)
self.assertIsInstance(v1, BufferProxy)
self.assertEqual(v1.length, length)
s = pygame.Surface((5, 7), 0, 32)
length = s.get_bytesize() * s.get_width() * s.get_height()
v0 = s.get_view('0')
v1 = s.get_view('1')
v2 = s.get_view('2')
v3 = s.get_view('3')
self.assertIsInstance(v0, BufferProxy)
self.assertEqual(v0.length, length)
self.assertIsInstance(v1, BufferProxy)
self.assertEqual(v1.length, length)
self.assertIsInstance(v2, BufferProxy)
self.assertIsInstance(v3, BufferProxy)
s2 = s.subsurface((0, 0, 4, 7))
self.assertRaises(Error, s2.get_view, '0')
self.assertRaises(Error, s2.get_view, '1')
s2 = None
s = pygame.Surface((5, 7), pygame.SRCALPHA, 32)
for kind in ('2', '3', 'a', 'A', 'r', 'R', 'g', 'G', 'b', 'B'):
self.assertIsInstance(s.get_view(kind), BufferProxy)
# Check default argument value: '2'
s = pygame.Surface((2, 4), 0, 32)
v = s.get_view()
if not IS_PYPY:
ai = ArrayInterface(v)
self.assertEqual(ai.nd, 2)
# Check locking.
s = pygame.Surface((2, 4), 0, 32)
self.assertFalse(s.get_locked())
v = s.get_view('2')
self.assertFalse(s.get_locked())
c = v.__array_interface__
self.assertTrue(s.get_locked())
c = None
gc.collect()
self.assertTrue(s.get_locked())
v = None
gc.collect()
self.assertFalse(s.get_locked())
# Check invalid view kind values.
s = pygame.Surface((2, 4), pygame.SRCALPHA, 32)
self.assertRaises(TypeError, s.get_view, '')
self.assertRaises(TypeError, s.get_view, '9')
self.assertRaises(TypeError, s.get_view, 'RGBA')
self.assertRaises(TypeError, s.get_view, 2)
# Both unicode and bytes strings are allowed for kind.
s = pygame.Surface((2, 4), 0, 32)
s.get_view(as_unicode('2'))
s.get_view(as_bytes('2'))
# Garbage collection
s = pygame.Surface((2, 4), 0, 32)
weak_s = weakref.ref(s)
v = s.get_view('3')
weak_v = weakref.ref(v)
gc.collect()
self.assertTrue(weak_s() is s)
self.assertTrue(weak_v() is v)
del v
gc.collect()
self.assertTrue(weak_s() is s)
self.assertTrue(weak_v() is None)
del s
gc.collect()
self.assertTrue(weak_s() is None)
def test_get_buffer(self):
# Check that get_buffer works for all pixel sizes and for a subsurface.
# Check for all pixel sizes
for bitsize in [8, 16, 24, 32]:
s = pygame.Surface((5, 7), 0, bitsize)
length = s.get_pitch() * s.get_height()
v = s.get_buffer()
self.assertIsInstance(v, BufferProxy)
self.assertEqual(v.length, length)
self.assertEqual(repr(v), "<BufferProxy(" + str(length) + ")>")
# Check for a subsurface (not contiguous)
s = pygame.Surface((7, 10), 0, 32)
s2 = s.subsurface((1, 2, 5, 7))
length = s2.get_pitch() * s2.get_height()
v = s2.get_buffer()
self.assertIsInstance(v, BufferProxy)
self.assertEqual(v.length, length)
# Check locking.
s = pygame.Surface((2, 4), 0, 32)
v = s.get_buffer()
self.assertTrue(s.get_locked())
v = None
gc.collect()
self.assertFalse(s.get_locked())
OLDBUF = hasattr(pygame.bufferproxy, 'get_segcount')
@unittest.skipIf(not OLDBUF, 'old buffer not available')
def test_get_buffer_oldbuf(self):
from pygame.bufferproxy import get_segcount, get_write_buffer
s = pygame.Surface((2, 4), pygame.SRCALPHA, 32)
v = s.get_buffer()
segcount, buflen = get_segcount(v)
self.assertEqual(segcount, 1)
self.assertEqual(buflen, s.get_pitch() * s.get_height())
seglen, segaddr = get_write_buffer(v, 0)
self.assertEqual(segaddr, s._pixels_address)
self.assertEqual(seglen, buflen)
@unittest.skipIf(not OLDBUF, 'old buffer not available')
def test_get_view_oldbuf(self):
from pygame.bufferproxy import get_segcount, get_write_buffer
s = pygame.Surface((2, 4), pygame.SRCALPHA, 32)
v = s.get_view('1')
segcount, buflen = get_segcount(v)
self.assertEqual(segcount, 8)
self.assertEqual(buflen, s.get_pitch() * s.get_height())
seglen, segaddr = get_write_buffer(v, 7)
self.assertEqual(segaddr, s._pixels_address + s.get_bytesize() * 7)
self.assertEqual(seglen, s.get_bytesize())
def test_set_colorkey(self):
# __doc__ (as of 2008-06-25) for pygame.surface.Surface.set_colorkey:
# Surface.set_colorkey(Color, flags=0): return None
# Surface.set_colorkey(None): return None
# Set the transparent colorkey
s = pygame.Surface((16,16), pygame.SRCALPHA, 32)
colorkeys = ((20,189,20, 255),(128,50,50,255), (23, 21, 255,255))
for colorkey in colorkeys:
s.set_colorkey(colorkey)
for t in range(4):
s.set_colorkey(s.get_colorkey())
self.assertEqual(s.get_colorkey(), colorkey)
def test_set_masks(self):
s = pygame.Surface((32,32))
r,g,b,a = s.get_masks()
s.set_masks((b,g,r,a))
r2,g2,b2,a2 = s.get_masks()
self.assertEqual((r,g,b,a), (b2,g2,r2,a2))
def test_set_shifts(self):
s = pygame.Surface((32,32))
r,g,b,a = s.get_shifts()
s.set_shifts((b,g,r,a))
r2,g2,b2,a2 = s.get_shifts()
self.assertEqual((r,g,b,a), (b2,g2,r2,a2))
def test_blit_keyword_args(self):
color = (1, 2, 3, 255)
s1 = pygame.Surface((4, 4), 0, 32)
s2 = pygame.Surface((2, 2), 0, 32)
s2.fill((1, 2, 3))
s1.blit(special_flags=BLEND_ADD, source=s2,
dest=(1, 1), area=s2.get_rect())
self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255))
self.assertEqual(s1.get_at((1, 1)), color)
def todo_test_blit(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.blit:
# Surface.blit(source, dest, area=None, special_flags = 0): return Rect
# draw one image onto another
#
# Draws a source Surface onto this Surface. The draw can be positioned
# with the dest argument. Dest can either be pair of coordinates
# representing the upper left corner of the source. A Rect can also be
# passed as the destination and the topleft corner of the rectangle
# will be used as the position for the blit. The size of the
# destination rectangle does not affect the blit.
#
# An optional area rectangle can be passed as well. This represents a
# smaller portion of the source Surface to draw.
#
# An optional special flags is for passing in new in 1.8.0: BLEND_ADD,
# BLEND_SUB, BLEND_MULT, BLEND_MIN, BLEND_MAX new in 1.8.1:
# BLEND_RGBA_ADD, BLEND_RGBA_SUB, BLEND_RGBA_MULT, BLEND_RGBA_MIN,
# BLEND_RGBA_MAX BLEND_RGB_ADD, BLEND_RGB_SUB, BLEND_RGB_MULT,
# BLEND_RGB_MIN, BLEND_RGB_MAX With other special blitting flags
# perhaps added in the future.
#
# The return rectangle is the area of the affected pixels, excluding
# any pixels outside the destination Surface, or outside the clipping
# area.
#
# Pixel alphas will be ignored when blitting to an 8 bit Surface.
# special_flags new in pygame 1.8.
self.fail()
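# Illustrative sketch (not part of the original suite) of the blit signature
# described above; surface sizes and colors are arbitrary:
#   dest = pygame.Surface((100, 100))
#   src = pygame.Surface((20, 20))
#   src.fill((255, 0, 0))
#   rect = dest.blit(src, (10, 10))                      # dest given as a coordinate pair
#   rect = dest.blit(src, (10, 10), (0, 0, 5, 5))        # only a 5x5 area of src
#   rect = dest.blit(src, (10, 10), special_flags=BLEND_ADD)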
def test_blit__SRCALPHA_opaque_source(self):
src = pygame.Surface( (256,256), SRCALPHA ,32)
dst = src.copy()
for i, j in test_utils.rect_area_pts(src.get_rect()):
dst.set_at( (i,j), (i,0,0,j) )
src.set_at( (i,j), (0,i,0,255) )
dst.blit(src, (0,0))
for pt in test_utils.rect_area_pts(src.get_rect()):
self.assertEqual(dst.get_at(pt)[1], src.get_at(pt)[1])
def todo_test_blit__blit_to_self(self): #TODO
src = pygame.Surface( (256,256), SRCALPHA, 32)
rect = src.get_rect()
for pt, color in test_utils.gradient(rect.width, rect.height):
src.set_at(pt, color)
src.blit(src, (0, 0))
def todo_test_blit__SRCALPHA_to_SRCALPHA_non_zero(self): #TODO
# " There is no unit test for blitting a SRCALPHA source with non-zero
# alpha to a SRCALPHA destination with non-zero alpha " LL
w,h = size = 32,32
s = pygame.Surface(size, pygame.SRCALPHA, 32)
s2 = s.copy()
s.fill((32,32,32,111))
s2.fill((32,32,32,31))
s.blit(s2, (0,0))
# TODO:
# what is the correct behaviour ?? should it blend? what algorithm?
self.assertEqual(s.get_at((0,0)), (32,32,32,31))
def test_blit__SRCALPHA32_to_8(self):
# Bug: fatal
# SDL_DisplayConvert segfaults when video is uninitialized.
target = pygame.Surface((11, 8), 0, 8)
color = target.get_palette_at(2)
source = pygame.Surface((1, 1), pygame.SRCALPHA, 32)
source.set_at((0, 0), color)
target.blit(source, (0, 0))
def test_image_convert_bug_131(self):
# Bitbucket bug #131: Unable to Surface.convert(32) some 1-bit images.
# https://bitbucket.org/pygame/pygame/issue/131/unable-to-surfaceconvert-32-some-1-bit
# Skip test_image_convert_bug_131 for headless tests.
if os.environ.get('SDL_VIDEODRIVER') == 'dummy':
return
pygame.display.init()
pygame.display.set_mode((640,480))
im = pygame.image.load(example_path(os.path.join("data", "city.png")))
im2 = pygame.image.load(example_path(os.path.join("data", "brick.png")))
self.assertEqual(im.get_palette(),
((0, 0, 0, 255), (255, 255, 255, 255)))
self.assertEqual(im2.get_palette(), ((0, 0, 0, 255), (0, 0, 0, 255)))
self.assertEqual(repr(im.convert(32)), '<Surface(24x24x32 SW)>')
self.assertEqual(repr(im2.convert(32)), '<Surface(469x137x32 SW)>')
# Ensure a palette format to palette format works.
im3 = im.convert(8)
self.assertEqual(repr(im3), '<Surface(24x24x8 SW)>')
self.assertEqual(im3.get_palette(), im.get_palette())
# It is still an error when the target format really does have
# an empty palette (all the entries are black).
self.assertRaises(pygame.error, im2.convert, 8)
self.assertEqual(pygame.get_error(), "Empty destination palette")
def test_convert_init(self):
""" Ensure initialization exceptions are raised
for surf.convert()."""
surf = pygame.Surface((1, 1))
self.assertRaisesRegexp(pygame.error,
'display initialized', surf.convert)
pygame.display.init()
try:
if os.environ.get('SDL_VIDEODRIVER') != 'dummy':
try:
surf.convert(32)
surf.convert(pygame.Surface((1, 1)))
except pygame.error:
self.fail("convert() should not raise an exception here.")
self.assertRaisesRegexp(pygame.error, 'No video mode', surf.convert)
pygame.display.set_mode((640,480))
try:
surf.convert()
except pygame.error:
self.fail("convert() should not raise an exception here.")
finally:
pygame.display.quit()
def test_convert_alpha_init(self):
""" Ensure initialization exceptions are raised
for surf.convert_alpha()."""
surf = pygame.Surface((1, 1))
self.assertRaisesRegexp(pygame.error,
'display initialized', surf.convert_alpha)
pygame.display.init()
try:
self.assertRaisesRegexp(pygame.error, 'No video mode', surf.convert_alpha)
pygame.display.set_mode((640,480))
try:
surf.convert_alpha()
except pygame.error:
self.fail("convert_alpha() should not raise an exception here.")
finally:
pygame.display.quit()
def todo_test_convert(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.convert:
# Surface.convert(Surface): return Surface
# Surface.convert(depth, flags=0): return Surface
# Surface.convert(masks, flags=0): return Surface
# Surface.convert(): return Surface
# change the pixel format of an image
#
# Creates a new copy of the Surface with the pixel format changed. The
# new pixel format can be determined from another existing Surface.
# Otherwise depth, flags, and masks arguments can be used, similar to
# the pygame.Surface() call.
#
# If no arguments are passed the new Surface will have the same pixel
# format as the display Surface. This is always the fastest format for
# blitting. It is a good idea to convert all Surfaces before they are
# blitted many times.
#
# The converted Surface will have no pixel alphas. They will be
# stripped if the original had them. See Surface.convert_alpha() for
# preserving or creating per-pixel alphas.
#
self.fail()
def todo_test_convert_alpha(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.convert_alpha:
# Surface.convert_alpha(Surface): return Surface
# Surface.convert_alpha(): return Surface
# change the pixel format of an image including per pixel alphas
#
# Creates a new copy of the surface with the desired pixel format. The
# new surface will be in a format suited for quick blitting to the
# given format with per pixel alpha. If no surface is given, the new
# surface will be optimized for blitting to the current display.
#
# Unlike the Surface.convert() method, the pixel format for the new
# image will not be exactly the same as the requested source, but it
# will be optimized for fast alpha blitting to the destination.
#
self.fail()
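# Illustrative sketch (not part of the original suite) of typical convert()
# and convert_alpha() usage once a display mode exists; the asset name is
# hypothetical:
#   pygame.display.set_mode((640, 480))
#   img = pygame.image.load("sprite.png")   # hypothetical file
#   fast = img.convert()                    # match the display pixel format
#   fast_alpha = img.convert_alpha()        # keep per-pixel alpha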
def todo_test_get_abs_offset(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_abs_offset:
# Surface.get_abs_offset(): return (x, y)
# find the absolute position of a child subsurface inside its top level parent
#
# Get the offset position of a child subsurface inside of its top
# level parent Surface. If the Surface is not a subsurface this will
# return (0, 0).
#
self.fail()
def todo_test_get_abs_parent(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_abs_parent:
# Surface.get_abs_parent(): return Surface
# find the top level parent of a subsurface
#
# Returns the parent Surface of a subsurface. If this is not a
# subsurface then this surface will be returned.
#
self.fail()
def test_get_at(self):
surf = pygame.Surface((2, 2), 0, 24)
c00 = pygame.Color(1, 2, 3)
c01 = pygame.Color(5, 10, 15)
c10 = pygame.Color(100, 50, 0)
c11 = pygame.Color(4, 5, 6)
surf.set_at((0, 0), c00)
surf.set_at((0, 1), c01)
surf.set_at((1, 0), c10)
surf.set_at((1, 1), c11)
c = surf.get_at((0, 0))
self.assertIsInstance(c, pygame.Color)
self.assertEqual(c, c00)
self.assertEqual(surf.get_at((0, 1)), c01)
self.assertEqual(surf.get_at((1, 0)), c10)
self.assertEqual(surf.get_at((1, 1)), c11)
for p in [(-1, 0), (0, -1), (2, 0), (0, 2)]:
self.assertRaises(IndexError, surf.get_at, p)
def test_get_at_mapped(self):
color = pygame.Color(10, 20, 30)
for bitsize in [8, 16, 24, 32]:
surf = pygame.Surface((2, 2), 0, bitsize)
surf.fill(color)
pixel = surf.get_at_mapped((0, 0))
self.assertEqual(pixel, surf.map_rgb(color),
"%i != %i, bitsize: %i" %
(pixel, surf.map_rgb(color), bitsize))
def todo_test_get_bitsize(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_bitsize:
# Surface.get_bitsize(): return int
# get the bit depth of the Surface pixel format
#
# Returns the number of bits used to represent each pixel. This value
# may not exactly fill the number of bytes used per pixel. For example
# a 15 bit Surface still requires a full 2 bytes.
#
self.fail()
def todo_test_get_clip(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_clip:
# Surface.get_clip(): return Rect
# get the current clipping area of the Surface
#
# Return a rectangle of the current clipping area. The Surface will
# always return a valid rectangle that will never be outside the
# bounds of the image. If the Surface has had None set for the
# clipping area, the Surface will return a rectangle with the full
# area of the Surface.
#
self.fail()
def todo_test_get_colorkey(self):
surf = pygame.Surface((2, 2), 0, 24)
self.assertIsNone(surf.get_colorkey())
colorkey = pygame.Color(20, 40, 60)
surf.set_colorkey(colorkey)
ck = surf.get_colorkey()
self.assertIsInstance(ck, pygame.Color)
self.assertEqual(ck, colorkey)
def todo_test_get_height(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_height:
# Surface.get_height(): return height
# get the height of the Surface
#
# Return the height of the Surface in pixels.
self.fail()
def todo_test_get_locked(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_locked:
# Surface.get_locked(): return bool
# test if the Surface is currently locked
#
# Returns True when the Surface is locked. It doesn't matter how many
# times the Surface is locked.
#
self.fail()
def todo_test_get_locks(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_locks:
# Surface.get_locks(): return tuple
# Gets the locks for the Surface
#
# Returns the currently existing locks for the Surface.
self.fail()
def todo_test_get_losses(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_losses:
# Surface.get_losses(): return (R, G, B, A)
# the significant bits used to convert between a color and a mapped integer
#
# Return the least significant number of bits stripped from each color
# in a mapped integer.
#
# This value is not needed for normal Pygame usage.
self.fail()
def todo_test_get_masks(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_masks:
# Surface.get_masks(): return (R, G, B, A)
# the bitmasks needed to convert between a color and a mapped integer
#
# Returns the bitmasks used to isolate each color in a mapped integer.
# This value is not needed for normal Pygame usage.
self.fail()
def todo_test_get_offset(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_offset:
# Surface.get_offset(): return (x, y)
# find the position of a child subsurface inside a parent
#
# Get the offset position of a child subsurface inside of a parent. If
# the Surface is not a subsurface this will return (0, 0).
#
self.fail()
def test_get_palette(self):
pygame.init()
try:
palette = [Color(i, i, i) for i in range(256)]
pygame.display.set_mode((100, 50))
surf = pygame.Surface((2, 2), 0, 8)
surf.set_palette(palette)
palette2 = surf.get_palette()
r,g,b = palette2[0]
self.assertEqual(len(palette2), len(palette))
for c2, c in zip(palette2, palette):
self.assertEqual(c2, c)
for c in palette2:
self.assertIsInstance(c, pygame.Color)
finally:
pygame.quit()
def test_get_palette_at(self):
# See also test_get_palette
pygame.init()
try:
pygame.display.set_mode((100, 50))
surf = pygame.Surface((2, 2), 0, 8)
color = pygame.Color(1, 2, 3, 255)
surf.set_palette_at(0, color)
color2 = surf.get_palette_at(0)
self.assertIsInstance(color2, pygame.Color)
self.assertEqual(color2, color)
self.assertRaises(IndexError, surf.get_palette_at, -1)
self.assertRaises(IndexError, surf.get_palette_at, 256)
finally:
pygame.quit()
def todo_test_get_pitch(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_pitch:
# Surface.get_pitch(): return int
# get the number of bytes used per Surface row
#
# Return the number of bytes separating each row in the Surface.
# Surfaces in video memory are not always linearly packed. Subsurfaces
# will also have a larger pitch than their real width.
#
# This value is not needed for normal Pygame usage.
self.fail()
def todo_test_get_shifts(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_shifts:
# Surface.get_shifts(): return (R, G, B, A)
# the bit shifts needed to convert between a color and a mapped integer
#
# Returns the pixel shifts need to convert between each color and a
# mapped integer.
#
# This value is not needed for normal Pygame usage.
self.fail()
def todo_test_get_size(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_size:
# Surface.get_size(): return (width, height)
# get the dimensions of the Surface
#
# Return the width and height of the Surface in pixels.
self.fail()
def todo_test_lock(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.lock:
# Surface.lock(): return None
# lock the Surface memory for pixel access
#
# Lock the pixel data of a Surface for access. On accelerated
# Surfaces, the pixel data may be stored in volatile video memory or
# nonlinear compressed forms. When a Surface is locked the pixel
# memory becomes available to access by regular software. Code that
# reads or writes pixel values will need the Surface to be locked.
#
# Surfaces should not remain locked for more than necessary. A locked
# Surface can often not be displayed or managed by Pygame.
#
# Not all Surfaces require locking. The Surface.mustlock() method can
# determine if it is actually required. There is no performance
# penalty for locking and unlocking a Surface that does not need it.
#
# All pygame functions will automatically lock and unlock the Surface
# data as needed. If a section of code is going to make calls that
# will repeatedly lock and unlock the Surface many times, it can be
# helpful to wrap the block inside a lock and unlock pair.
#
# It is safe to nest locking and unlocking calls. The surface will
# only be unlocked after the final lock is released.
#
self.fail()
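# Illustrative sketch (not part of the original suite) of the lock/unlock
# pairing described above; the surface size and loop are arbitrary:
#   s = pygame.Surface((10, 10))
#   s.lock()
#   try:
#       for x in range(10):
#           s.set_at((x, 0), (255, 255, 255))
#   finally:
#       s.unlock()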
def test_map_rgb(self):
color = Color(0, 128, 255, 64)
surf = pygame.Surface((5, 5), SRCALPHA, 32)
c = surf.map_rgb(color)
self.assertEqual(surf.unmap_rgb(c), color)
self.assertEqual(surf.get_at((0, 0)), (0, 0, 0, 0))
surf.fill(c)
self.assertEqual(surf.get_at((0, 0)), color)
surf.fill((0, 0, 0, 0))
self.assertEqual(surf.get_at((0, 0)), (0, 0, 0, 0))
surf.set_at((0, 0), c)
self.assertEqual(surf.get_at((0, 0)), color)
def todo_test_mustlock(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.mustlock:
# Surface.mustlock(): return bool
# test if the Surface requires locking
#
# Returns True if the Surface is required to be locked to access pixel
# data. Usually pure software Surfaces do not require locking. This
# method is rarely needed, since it is safe and quickest to just lock
# all Surfaces as needed.
#
# All pygame functions will automatically lock and unlock the Surface
# data as needed. If a section of code is going to make calls that
# will repeatedly lock and unlock the Surface many times, it can be
# helpful to wrap the block inside a lock and unlock pair.
#
self.fail()
def test_set_alpha_none(self):
"""surf.set_alpha(None) disables blending"""
s = pygame.Surface((1,1), SRCALPHA, 32)
s.fill((0, 255, 0, 128))
s.set_alpha(None)
self.assertEqual(None, s.get_alpha())
s2 = pygame.Surface((1,1), SRCALPHA, 32)
s2.fill((255, 0, 0, 255))
s2.blit(s, (0, 0))
self.assertEqual(s2.get_at((0, 0))[0], 0, "the red component should be 0")
def test_set_alpha_value(self):
"""surf.set_alpha(x), where x != None, enables blending"""
s = pygame.Surface((1,1), SRCALPHA, 32)
s.fill((0, 255, 0, 128))
s.set_alpha(255)
s2 = pygame.Surface((1,1), SRCALPHA, 32)
s2.fill((255, 0, 0, 255))
s2.blit(s, (0, 0))
self.assertGreater(s2.get_at((0, 0))[0], 0, "the red component should be above 0")
def test_palette_colorkey(self):
""" test bug discovered by robertpfeiffer
https://github.com/pygame/pygame/issues/721
"""
surf = pygame.image.load(example_path(os.path.join("data", "alien2.png")))
key = surf.get_colorkey()
self.assertEqual(surf.get_palette()[surf.map_rgb(key)], key)
def test_palette_colorkey_set_px(self):
surf = pygame.image.load(example_path(os.path.join("data", "alien2.png")))
key = surf.get_colorkey()
surf.set_at((0, 0), key)
self.assertEqual(surf.get_at((0, 0)), key)
def test_palette_colorkey_fill(self):
surf = pygame.image.load(example_path(os.path.join("data", "alien2.png")))
key = surf.get_colorkey()
surf.fill(key)
self.assertEqual(surf.get_at((0, 0)), key)
def test_set_palette(self):
palette = [pygame.Color(i, i, i) for i in range(256)]
palette[10] = tuple(palette[10]) # 4 element tuple
palette[11] = tuple(palette[11])[0:3] # 3 element tuple
surf = pygame.Surface((2, 2), 0, 8)
pygame.init()
try:
pygame.display.set_mode((100, 50))
surf.set_palette(palette)
for i in range(256):
self.assertEqual(surf.map_rgb(palette[i]), i,
"palette color %i" % (i,))
c = palette[i]
surf.fill(c)
self.assertEqual(surf.get_at((0, 0)), c,
"palette color %i" % (i,))
for i in range(10):
palette[i] = pygame.Color(255 - i, 0, 0)
surf.set_palette(palette[0:10])
for i in range(256):
self.assertEqual(surf.map_rgb(palette[i]), i,
"palette color %i" % (i,))
c = palette[i]
surf.fill(c)
self.assertEqual(surf.get_at((0, 0)), c,
"palette color %i" % (i,))
self.assertRaises(ValueError, surf.set_palette,
[Color(1, 2, 3, 254)])
self.assertRaises(ValueError, surf.set_palette,
(1, 2, 3, 254))
finally:
pygame.quit()
def test_set_palette_at(self):
pygame.init()
try:
pygame.display.set_mode((100, 50))
surf = pygame.Surface((2, 2), 0, 8)
original = surf.get_palette_at(10)
replacement = Color(1, 1, 1, 255)
if replacement == original:
replacement = Color(2, 2, 2, 255)
surf.set_palette_at(10, replacement)
self.assertEqual(surf.get_palette_at(10), replacement)
next = tuple(original)
surf.set_palette_at(10, next)
self.assertEqual(surf.get_palette_at(10), next)
next = tuple(original)[0:3]
surf.set_palette_at(10, next)
self.assertEqual(surf.get_palette_at(10), next)
self.assertRaises(IndexError,
surf.set_palette_at,
256, replacement)
self.assertRaises(IndexError,
surf.set_palette_at,
-1, replacement)
finally:
pygame.quit()
def test_subsurface(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.subsurface:
# Surface.subsurface(Rect): return Surface
# create a new surface that references its parent
#
# Returns a new Surface that shares its pixels with its new parent.
# The new Surface is considered a child of the original. Modifications
# to either Surface pixels will affect each other. Surface information
# like clipping area and color keys are unique to each Surface.
#
# The new Surface will inherit the palette, color key, and alpha
# settings from its parent.
#
# It is possible to have any number of subsurfaces and subsubsurfaces
# on the parent. It is also possible to subsurface the display Surface
# if the display mode is not hardware accelerated.
#
# See the Surface.get_offset(), Surface.get_parent() to learn more
# about the state of a subsurface.
#
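# Illustrative sketch (not part of the original suite) of the parent/child
# pixel sharing described above; the rectangle is arbitrary:
#   parent = pygame.Surface((16, 16))
#   child = parent.subsurface((4, 4, 8, 8))
#   child.fill((255, 0, 0))                      # visible through `parent` too
#   assert parent.get_at((4, 4)) == child.get_at((0, 0))
#   assert child.get_offset() == (4, 4)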
surf = pygame.Surface((16, 16))
s = surf.subsurface(0,0,1,1)
s = surf.subsurface((0,0,1,1))
#s = surf.subsurface((0,0,1,1), 1)
# This form is not acceptable.
#s = surf.subsurface(0,0,10,10, 1)
self.assertRaises(ValueError, surf.subsurface, (0,0,1,1,666))
self.assertEqual(s.get_shifts(), surf.get_shifts())
self.assertEqual(s.get_masks(), surf.get_masks())
self.assertEqual(s.get_losses(), surf.get_losses())
# Issue 2 at Bitbucket.org/pygame/pygame
surf = pygame.Surface.__new__(pygame.Surface)
self.assertRaises(pygame.error, surf.subsurface, (0, 0, 0, 0))
def todo_test_unlock(self):
# __doc__ (as of 2008-08-02) for pygame.surface.Surface.unlock:
# Surface.unlock(): return None
# unlock the Surface memory from pixel access
#
# Unlock the Surface pixel data after it has been locked. The unlocked
# Surface can once again be drawn and managed by Pygame. See the
# Surface.lock() documentation for more details.
#
# All pygame functions will automatically lock and unlock the Surface
# data as needed. If a section of code is going to make calls that
# will repeatedly lock and unlock the Surface many times, it can be
# helpful to wrap the block inside a lock and unlock pair.
#
# It is safe to nest locking and unlocking calls. The surface will
# only be unlocked after the final lock is released.
#
self.fail()
def test_unmap_rgb(self):
# Special case, 8 bit-per-pixel surface (has a palette).
surf = pygame.Surface((2, 2), 0, 8)
c = (1, 1, 1) # Unlikely to be in a default palette.
i = 67
pygame.init()
try:
pygame.display.set_mode((100, 50))
surf.set_palette_at(i, c)
unmapped_c = surf.unmap_rgb(i)
self.assertEqual(unmapped_c, c)
# Confirm it is a Color instance
self.assertIsInstance(unmapped_c, pygame.Color)
finally:
pygame.quit()
# Remaining, non-palette, cases.
c = (128, 64, 12, 255)
formats = [(0, 16), (0, 24), (0, 32),
(SRCALPHA, 16), (SRCALPHA, 32)]
for flags, bitsize in formats:
surf = pygame.Surface((2, 2), flags, bitsize)
unmapped_c = surf.unmap_rgb(surf.map_rgb(c))
surf.fill(c)
comparison_c = surf.get_at((0, 0))
self.assertEqual(unmapped_c, comparison_c,
"%s != %s, flags: %i, bitsize: %i" %
(unmapped_c, comparison_c, flags, bitsize))
# Confirm it is a Color instance
self.assertIsInstance(unmapped_c, pygame.Color)
def test_scroll(self):
scrolls = [(8, 2, 3),
(16, 2, 3),
(24, 2, 3),
(32, 2, 3),
(32, -1, -3),
(32, 0, 0),
(32, 11, 0),
(32, 0, 11),
(32, -11, 0),
(32, 0, -11),
(32, -11, 2),
(32, 2, -11)]
for bitsize, dx, dy in scrolls:
surf = pygame.Surface((10, 10), 0, bitsize)
surf.fill((255, 0, 0))
surf.fill((0, 255, 0), (2, 2, 2, 2,))
comp = surf.copy()
comp.blit(surf, (dx, dy))
surf.scroll(dx, dy)
w, h = surf.get_size()
for x in range(w):
for y in range(h):
self.assertEqual(surf.get_at((x, y)),
comp.get_at((x, y)),
"%s != %s, bpp:, %i, x: %i, y: %i" %
(surf.get_at((x, y)),
comp.get_at((x, y)),
bitsize, dx, dy))
# Confirm clip rect containment
surf = pygame.Surface((20, 13), 0, 32)
surf.fill((255, 0, 0))
surf.fill((0, 255, 0), (7, 1, 6, 6))
comp = surf.copy()
clip = Rect(3, 1, 8, 14)
surf.set_clip(clip)
comp.set_clip(clip)
comp.blit(surf, (clip.x + 2, clip.y + 3), surf.get_clip())
surf.scroll(2, 3)
w, h = surf.get_size()
for x in range(w):
for y in range(h):
self.assertEqual(surf.get_at((x, y)),
comp.get_at((x, y)))
# Confirm keyword arguments and per-pixel alpha
spot_color = (0, 255, 0, 128)
surf = pygame.Surface((4, 4), pygame.SRCALPHA, 32)
surf.fill((255, 0, 0, 255))
surf.set_at((1, 1), spot_color)
surf.scroll(dx=1)
self.assertEqual(surf.get_at((2, 1)), spot_color)
surf.scroll(dy=1)
self.assertEqual(surf.get_at((2, 2)), spot_color)
surf.scroll(dy=1, dx=1)
self.assertEqual(surf.get_at((3, 3)), spot_color)
surf.scroll(dx=-3, dy=-3)
self.assertEqual(surf.get_at((0, 0)), spot_color)
class SurfaceSubtypeTest(unittest.TestCase):
"""Issue #280: Methods that return a new Surface preserve subclasses"""
def setUp(self):
pygame.display.init()
def tearDown(self):
pygame.display.quit()
class MySurface(pygame.Surface):
def __init__(self, *args, **kwds):
super(SurfaceSubtypeTest.MySurface, self).__init__(*args, **kwds)
self.an_attribute = True
def test_copy(self):
"""Ensure method copy() preserves the surface's class
When Surface is subclassed, the inherited copy() method will return
instances of the subclass. Non Surface fields are uncopied, however.
This includes instance attributes.
"""
ms1 = self.MySurface((32, 32), pygame.SRCALPHA, 32)
ms2 = ms1.copy()
self.assertTrue(isinstance(ms2, self.MySurface))
self.assertTrue(ms1.an_attribute)
self.assertRaises(AttributeError, getattr, ms2, "an_attribute")
def test_convert(self):
"""Ensure method convert() preserves the surface's class
When Surface is subclassed, the inherited convert() method will return
instances of the subclass. Non Surface fields are omitted, however.
This includes instance attributes.
"""
ms1 = self.MySurface((32, 32), 0, 24)
ms2 = ms1.convert(24)
self.assertTrue(ms2 is not ms1)
self.assertTrue(isinstance(ms2, self.MySurface))
self.assertTrue(ms1.an_attribute)
self.assertRaises(AttributeError, getattr, ms2, "an_attribute")
def test_convert_alpha(self):
"""Ensure method convert_alpha() preserves the surface's class
When Surface is subclassed, the inherited convert_alpha() method will
return instances of the subclass. Non Surface fields are omitted,
however. This includes instance attributes.
"""
pygame.display.set_mode((40, 40))
s = pygame.Surface((32, 32), pygame.SRCALPHA, 16)
ms1 = self.MySurface((32, 32), pygame.SRCALPHA, 32)
ms2 = ms1.convert_alpha(s)
self.assertTrue(ms2 is not ms1)
self.assertTrue(isinstance(ms2, self.MySurface))
self.assertTrue(ms1.an_attribute)
self.assertRaises(AttributeError, getattr, ms2, "an_attribute")
def test_subsurface(self):
"""Ensure method subsurface() preserves the surface's class
When Surface is subclassed, the inherited subsurface() method will
return instances of the subclass. Non Surface fields are uncopied,
however. This includes instance attributes.
"""
ms1 = self.MySurface((32, 32), pygame.SRCALPHA, 32)
ms2 = ms1.subsurface((4, 5, 10, 12))
self.assertTrue(isinstance(ms2, self.MySurface))
self.assertTrue(ms1.an_attribute)
self.assertRaises(AttributeError, getattr, ms2, "an_attribute")
class SurfaceGetBufferTest(unittest.TestCase):
# These tests require ctypes. They are disabled if ctypes
# is not installed.
#
try:
ArrayInterface
except NameError:
__tags__ = ('ignore', 'subprocess_ignore')
lilendian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
def _check_interface_2D(self, s):
s_w, s_h = s.get_size()
s_bytesize = s.get_bytesize();
s_pitch = s.get_pitch()
s_pixels = s._pixels_address
# check the array interface structure fields.
v = s.get_view('2')
if not IS_PYPY:
flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE
if (s.get_pitch() == s_w * s_bytesize):
flags |= PAI_FORTRAN
inter = ArrayInterface(v)
self.assertEqual(inter.two, 2)
self.assertEqual(inter.nd, 2)
self.assertEqual(inter.typekind, 'u')
self.assertEqual(inter.itemsize, s_bytesize)
self.assertEqual(inter.shape[0], s_w)
self.assertEqual(inter.shape[1], s_h)
self.assertEqual(inter.strides[0], s_bytesize)
self.assertEqual(inter.strides[1], s_pitch)
self.assertEqual(inter.flags, flags)
self.assertEqual(inter.data, s_pixels);
def _check_interface_3D(self, s):
s_w, s_h = s.get_size()
s_bytesize = s.get_bytesize();
s_pitch = s.get_pitch()
s_pixels = s._pixels_address
s_shifts = list(s.get_shifts())
# Check for RGB or BGR surface.
if s_shifts[0:3] == [0, 8, 16]:
if self.lilendian:
# RGB
offset = 0
step = 1
else:
# BGR
offset = s_bytesize - 1
step = -1
elif s_shifts[0:3] == [8, 16, 24]:
if self.lilendian:
# xRGB
offset = 1
step = 1
else:
# BGRx
offset = s_bytesize - 2
step = -1
elif s_shifts[0:3] == [16, 8, 0]:
if self.lilendian:
# BGR
offset = 2
step = -1
else:
# RGB
offset = s_bytesize - 3
step = 1
elif s_shifts[0:3] == [24, 16, 8]:
if self.lilendian:
# BGRx
offset = 2
step = -1
else:
# RGBx
offset = s_bytesize - 4
step = -1
else:
return
# check the array interface structure fields.
v = s.get_view('3')
if not IS_PYPY:
inter = ArrayInterface(v)
flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE
self.assertEqual(inter.two, 2)
self.assertEqual(inter.nd, 3)
self.assertEqual(inter.typekind, 'u')
self.assertEqual(inter.itemsize, 1)
self.assertEqual(inter.shape[0], s_w)
self.assertEqual(inter.shape[1], s_h)
self.assertEqual(inter.shape[2], 3)
self.assertEqual(inter.strides[0], s_bytesize)
self.assertEqual(inter.strides[1], s_pitch)
self.assertEqual(inter.strides[2], step)
self.assertEqual(inter.flags, flags)
self.assertEqual(inter.data, s_pixels + offset);
def _check_interface_rgba(self, s, plane):
s_w, s_h = s.get_size()
s_bytesize = s.get_bytesize();
s_pitch = s.get_pitch()
s_pixels = s._pixels_address
s_shifts = s.get_shifts()
s_masks = s.get_masks()
# Find the color plane position within the pixel.
if not s_masks[plane]:
return
alpha_shift = s_shifts[plane]
offset = alpha_shift // 8
if not self.lilendian:
offset = s_bytesize - offset - 1
# check the array interface structure fields.
v = s.get_view('rgba'[plane])
if not IS_PYPY:
inter = ArrayInterface(v)
flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE
self.assertEqual(inter.two, 2)
self.assertEqual(inter.nd, 2)
self.assertEqual(inter.typekind, 'u')
self.assertEqual(inter.itemsize, 1)
self.assertEqual(inter.shape[0], s_w)
self.assertEqual(inter.shape[1], s_h)
self.assertEqual(inter.strides[0], s_bytesize)
self.assertEqual(inter.strides[1], s_pitch)
self.assertEqual(inter.flags, flags)
self.assertEqual(inter.data, s_pixels + offset);
def test_array_interface(self):
self._check_interface_2D(pygame.Surface((5, 7), 0, 8))
self._check_interface_2D(pygame.Surface((5, 7), 0, 16))
self._check_interface_2D(pygame.Surface((5, 7), pygame.SRCALPHA, 16))
self._check_interface_3D(pygame.Surface((5, 7), 0, 24))
self._check_interface_3D(pygame.Surface((8, 4), 0, 24)) # No gaps
self._check_interface_2D(pygame.Surface((5, 7), 0, 32))
self._check_interface_3D(pygame.Surface((5, 7), 0, 32))
self._check_interface_2D(pygame.Surface((5, 7), pygame.SRCALPHA, 32))
self._check_interface_3D(pygame.Surface((5, 7), pygame.SRCALPHA, 32))
def test_array_interface_masks(self):
"""Test non-default color byte orders on 3D views"""
sz = (5, 7)
# Reversed RGB byte order
s = pygame.Surface(sz, 0, 32)
s_masks = list(s.get_masks())
masks = [0xff, 0xff00, 0xff0000]
if s_masks[0:3] == masks or s_masks[0:3] == masks[::-1]:
masks = s_masks[2::-1] + s_masks[3:4]
self._check_interface_3D(pygame.Surface(sz, 0, 32, masks))
s = pygame.Surface(sz, 0, 24)
s_masks = list(s.get_masks())
masks = [0xff, 0xff00, 0xff0000]
if s_masks[0:3] == masks or s_masks[0:3] == masks[::-1]:
masks = s_masks[2::-1] + s_masks[3:4]
self._check_interface_3D(pygame.Surface(sz, 0, 24, masks))
masks = [0xff00, 0xff0000, 0xff000000, 0]
self._check_interface_3D(pygame.Surface(sz, 0, 32, masks))
# Unsupported RGB byte orders
masks = [0xff00, 0xff, 0xff0000, 0]
self.assertRaises(ValueError,
pygame.Surface(sz, 0, 24, masks).get_view, '3')
def test_array_interface_alpha(self):
for shifts in [[0, 8, 16, 24], [8, 16, 24, 0],
[24, 16, 8, 0], [16, 8, 0, 24]]:
masks = [0xff << s for s in shifts]
s = pygame.Surface((4, 2), pygame.SRCALPHA, 32, masks)
self._check_interface_rgba(s, 3)
def test_array_interface_rgb(self):
for shifts in [[0, 8, 16, 24], [8, 16, 24, 0],
[24, 16, 8, 0], [16, 8, 0, 24]]:
masks = [0xff << s for s in shifts]
masks[3] = 0
for plane in range(3):
s = pygame.Surface((4, 2), 0, 24)
self._check_interface_rgba(s, plane)
s = pygame.Surface((4, 2), 0, 32)
self._check_interface_rgba(s, plane)
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_bytes(self):
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((10, 6), 0, 32)
a = s.get_buffer()
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertFalse(b.readonly)
b = Importer(a, buftools.PyBUF_FORMAT)
self.assertEqual(b.ndim, 0)
self.assertEqual(b.format, 'B')
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, 1)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertEqual(b.shape, (a.length,))
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 1)
self.assertTrue(b.format is None)
self.assertEqual(b.strides, (1,))
s2 = s.subsurface((1, 1, 7, 4)) # Not contiguous
a = s2.get_buffer()
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s2._pixels_address)
b = Importer(a, buftools.PyBUF_C_CONTIGUOUS)
self.assertEqual(b.ndim, 1)
self.assertEqual(b.strides, (1,))
b = Importer(a, buftools.PyBUF_F_CONTIGUOUS)
self.assertEqual(b.ndim, 1)
self.assertEqual(b.strides, (1,))
b = Importer(a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertEqual(b.ndim, 1)
self.assertEqual(b.strides, (1,))
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_0D(self):
# This is the same handler as used by get_buffer(), so just
# confirm that it succeeds for one case.
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((10, 6), 0, 32)
a = s.get_view('0')
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_1D(self):
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((10, 6), 0, 32)
a = s.get_view('1')
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, s.get_bytesize())
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertFalse(b.readonly)
b = Importer(a, buftools.PyBUF_FORMAT)
self.assertEqual(b.ndim, 0)
self.assertEqual(b.format, '=I')
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, 1)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, s.get_bytesize())
self.assertEqual(b.shape, (s.get_width() * s.get_height(),))
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 1)
self.assertTrue(b.format is None)
self.assertEqual(b.strides, (s.get_bytesize(),))
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_2D(self):
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((10, 6), 0, 32)
a = s.get_view('2')
# Non-dimensional requests (no PyBUF_ND) are handled by the
# 1D surface buffer code, so only need to confirm a success.
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, s.get_bytesize())
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
# Uniquely 2D
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, s.get_bytesize())
self.assertEqual(b.shape, s.get_size())
self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch()))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address)
b = Importer(a, buftools.PyBUF_RECORDS_RO)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, '=I')
self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch()))
b = Importer(a, buftools.PyBUF_RECORDS)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, '=I')
self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch()))
b = Importer(a, buftools.PyBUF_F_CONTIGUOUS)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, None)
self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch()))
b = Importer(a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, None)
self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch()))
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_C_CONTIGUOUS)
s2 = s.subsurface((1, 1, 7, 4)) # Not contiguous
a = s2.get_view('2')
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, s2.get_bytesize())
self.assertEqual(b.shape, s2.get_size())
self.assertEqual(b.strides, (s2.get_bytesize(), s.get_pitch()))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s2._pixels_address)
b = Importer(a, buftools.PyBUF_RECORDS)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, '=I')
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_ANY_CONTIGUOUS)
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_3D(self):
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((12, 6), 0, 24)
rmask, gmask, bmask, amask = s.get_masks()
if self.lilendian:
if rmask == 0x0000ff:
color_step = 1
addr_offset = 0
else:
color_step = -1
addr_offset = 2
else:
            if rmask == 0xff0000:
color_step = 1
addr_offset = 0
else:
color_step = -1
addr_offset = 2
a = s.get_view('3')
b = Importer(a, buftools.PyBUF_STRIDES)
w, h = s.get_size()
shape = w, h, 3
strides = 3, s.get_pitch(), color_step
self.assertEqual(b.ndim, 3)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertEqual(b.shape, shape)
self.assertEqual(b.strides, strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address + addr_offset)
b = Importer(a, buftools.PyBUF_RECORDS_RO)
self.assertEqual(b.ndim, 3)
self.assertEqual(b.format, 'B')
self.assertEqual(b.strides, strides)
b = Importer(a, buftools.PyBUF_RECORDS)
self.assertEqual(b.ndim, 3)
self.assertEqual(b.format, 'B')
self.assertEqual(b.strides, strides)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_ANY_CONTIGUOUS)
@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented')
def test_newbuf_PyBUF_flags_rgba(self):
# All color plane views are handled by the same routine,
# so only one plane need be checked.
from pygame.tests.test_utils import buftools
Importer = buftools.Importer
s = pygame.Surface((12, 6), 0, 24)
rmask, gmask, bmask, amask = s.get_masks()
if self.lilendian:
if rmask == 0x0000ff:
addr_offset = 0
else:
addr_offset = 2
else:
if rmask == 0xff0000:
addr_offset = 0
else:
addr_offset = 2
a = s.get_view('R')
b = Importer(a, buftools.PyBUF_STRIDES)
w, h = s.get_size()
shape = w, h
strides = s.get_bytesize(), s.get_pitch()
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 1)
self.assertEqual(b.shape, shape)
self.assertEqual(b.strides, strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, s._pixels_address + addr_offset)
b = Importer(a, buftools.PyBUF_RECORDS_RO)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, 'B')
self.assertEqual(b.strides, strides)
b = Importer(a, buftools.PyBUF_RECORDS)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, 'B')
self.assertEqual(b.strides, strides)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a,
buftools.PyBUF_ANY_CONTIGUOUS)


class SurfaceBlendTest(unittest.TestCase):
def setUp(self):
# Needed for 8 bits-per-pixel color palette surface tests.
pygame.init()
def tearDown(self):
pygame.quit()
_test_palette = [(0, 0, 0, 255),
(10, 30, 60, 0),
(25, 75, 100, 128),
(200, 150, 100, 200),
(0, 100, 200, 255)]
surf_size = (10, 12)
_test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2),
((5, 5), 2), ((0, 11), 3), ((4, 6), 3),
((9, 11), 4), ((5, 6), 4)]
def _make_surface(self, bitsize, srcalpha=False, palette=None):
if palette is None:
palette = self._test_palette
flags = 0
if srcalpha:
flags |= SRCALPHA
surf = pygame.Surface(self.surf_size, flags, bitsize)
if bitsize == 8:
surf.set_palette([c[:3] for c in palette])
return surf
def _fill_surface(self, surf, palette=None):
if palette is None:
palette = self._test_palette
surf.fill(palette[1], (0, 0, 5, 6))
surf.fill(palette[2], (5, 0, 5, 6))
surf.fill(palette[3], (0, 6, 5, 6))
surf.fill(palette[4], (5, 6, 5, 6))
def _make_src_surface(self, bitsize, srcalpha=False, palette=None):
surf = self._make_surface(bitsize, srcalpha, palette)
self._fill_surface(surf, palette)
return surf
def _assert_surface(self, surf, palette=None, msg=""):
if palette is None:
palette = self._test_palette
if surf.get_bitsize() == 16:
palette = [surf.unmap_rgb(surf.map_rgb(c)) for c in palette]
for posn, i in self._test_points:
self.assertEqual(surf.get_at(posn), palette[i],
"%s != %s: flags: %i, bpp: %i, posn: %s%s" %
(surf.get_at(posn),
palette[i], surf.get_flags(),
surf.get_bitsize(), posn, msg))
def test_blit_blend(self):
sources = [self._make_src_surface(8),
self._make_src_surface(16),
self._make_src_surface(16, srcalpha=True),
self._make_src_surface(24),
self._make_src_surface(32),
self._make_src_surface(32, srcalpha=True)]
destinations = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True)]
blend = [('BLEND_ADD', (0, 25, 100, 255),
lambda a, b: min(a + b, 255)),
('BLEND_SUB', (100, 25, 0, 100),
lambda a, b: max(a - b, 0)),
('BLEND_MULT', (100, 200, 0, 0),
lambda a, b: (a * b) // 256),
('BLEND_MIN', (255, 0, 0, 255), min),
('BLEND_MAX', (0, 255, 0, 255), max)]
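        # Each blend op acts per 8-bit channel: for example, BLEND_ADD on
        # channel values 200 and 100 gives min(200 + 100, 255) == 255, and
        # BLEND_MULT on 100 and 200 gives (100 * 200) // 256 == 78.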
for src in sources:
src_palette = [src.unmap_rgb(src.map_rgb(c))
for c in self._test_palette]
for dst in destinations:
for blend_name, dst_color, op in blend:
dc = dst.unmap_rgb(dst.map_rgb(dst_color))
p = []
for sc in src_palette:
c = [op(dc[i], sc[i]) for i in range(3)]
if dst.get_masks()[3]:
c.append(dc[3])
else:
c.append(255)
c = dst.unmap_rgb(dst.map_rgb(c))
p.append(c)
dst.fill(dst_color)
dst.blit(src,
(0, 0),
special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p,
(", op: %s, src bpp: %i"
", src flags: %i" %
(blend_name,
src.get_bitsize(),
src.get_flags())))
src = self._make_src_surface(32)
masks = src.get_masks()
dst = pygame.Surface(src.get_size(), 0, 32,
[masks[1], masks[2], masks[0], masks[3]])
for blend_name, dst_color, op in blend:
p = []
for src_color in self._test_palette:
c = [op(dst_color[i], src_color[i]) for i in range(3)]
c.append(255)
p.append(tuple(c))
dst.fill(dst_color)
dst.blit(src,
(0, 0),
special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p, ", %s" % blend_name)
# Blend blits are special cased for 32 to 32 bit surfaces.
#
# Confirm that it works when the rgb bytes are not the
# least significant bytes.
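        # The mask manipulation below shifts every channel mask by one byte,
        # giving a 32-bit format whose RGB bytes occupy different positions
        # than in the standard surface above.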
pat = self._make_src_surface(32)
masks = pat.get_masks()
if min(masks) == intify(0xFF000000):
masks = [longify(m) >> 8 for m in masks]
else:
masks = [intify(m << 8) for m in masks]
src = pygame.Surface(pat.get_size(), 0, 32, masks)
self._fill_surface(src)
dst = pygame.Surface(src.get_size(), 0, 32, masks)
for blend_name, dst_color, op in blend:
p = []
for src_color in self._test_palette:
c = [op(dst_color[i], src_color[i]) for i in range(3)]
c.append(255)
p.append(tuple(c))
dst.fill(dst_color)
dst.blit(src,
(0, 0),
special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p, ", %s" % blend_name)
def test_blit_blend_rgba(self):
sources = [self._make_src_surface(8),
self._make_src_surface(16),
self._make_src_surface(16, srcalpha=True),
self._make_src_surface(24),
self._make_src_surface(32),
self._make_src_surface(32, srcalpha=True)]
destinations = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True)]
blend = [('BLEND_RGBA_ADD', (0, 25, 100, 255),
lambda a, b: min(a + b, 255)),
('BLEND_RGBA_SUB', (0, 25, 100, 255),
lambda a, b: max(a - b, 0)),
('BLEND_RGBA_MULT', (0, 7, 100, 255),
lambda a, b: (a * b) // 256),
('BLEND_RGBA_MIN', (0, 255, 0, 255), min),
('BLEND_RGBA_MAX', (0, 255, 0, 255), max)]
for src in sources:
src_palette = [src.unmap_rgb(src.map_rgb(c))
for c in self._test_palette]
for dst in destinations:
for blend_name, dst_color, op in blend:
dc = dst.unmap_rgb(dst.map_rgb(dst_color))
p = []
for sc in src_palette:
c = [op(dc[i], sc[i]) for i in range(4)]
if not dst.get_masks()[3]:
c[3] = 255
c = dst.unmap_rgb(dst.map_rgb(c))
p.append(c)
dst.fill(dst_color)
dst.blit(src,
(0, 0),
special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p,
(", op: %s, src bpp: %i"
", src flags: %i" %
(blend_name,
src.get_bitsize(),
src.get_flags())))
# Blend blits are special cased for 32 to 32 bit surfaces
# with per-pixel alpha.
#
# Confirm the general case is used instead when the formats differ.
src = self._make_src_surface(32, srcalpha=True)
masks = src.get_masks()
dst = pygame.Surface(src.get_size(), SRCALPHA, 32,
(masks[1], masks[2], masks[3], masks[0]))
for blend_name, dst_color, op in blend:
p = [tuple([op(dst_color[i], src_color[i]) for i in range(4)])
for src_color in self._test_palette]
dst.fill(dst_color)
dst.blit(src,
(0, 0),
special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p, ", %s" % blend_name)
# Confirm this special case handles subsurfaces.
src = pygame.Surface((8, 10), SRCALPHA, 32)
dst = pygame.Surface((8, 10), SRCALPHA, 32)
tst = pygame.Surface((8, 10), SRCALPHA, 32)
src.fill((1, 2, 3, 4))
dst.fill((40, 30, 20, 10))
subsrc = src.subsurface((2, 3, 4, 4))
subdst = dst.subsurface((2, 3, 4, 4))
subdst.blit(subsrc, (0, 0), special_flags=BLEND_RGBA_ADD)
tst.fill((40, 30, 20, 10))
tst.fill((41, 32, 23, 14), (2, 3, 4, 4))
for x in range(8):
for y in range(10):
self.assertEqual(dst.get_at((x, y)), tst.get_at((x, y)),
"%s != %s at (%i, %i)" %
(dst.get_at((x, y)), tst.get_at((x, y)),
x, y))
def test_blit_blend_big_rect(self):
""" test that an oversized rect works ok.
"""
color = (1, 2, 3, 255)
area = (1, 1, 30, 30)
s1 = pygame.Surface((4, 4), 0, 32)
r = s1.fill(special_flags=pygame.BLEND_ADD, color=color, rect=area)
self.assertEqual(pygame.Rect((1, 1, 3, 3)), r)
self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255))
self.assertEqual(s1.get_at((1, 1)), color)
black = pygame.Color("black")
red = pygame.Color("red")
self.assertNotEqual(black, red)
surf = pygame.Surface((10, 10), 0, 32)
surf.fill(black)
subsurf = surf.subsurface(pygame.Rect(0, 1, 10, 8))
self.assertEqual(surf.get_at((0, 0)), black)
self.assertEqual(surf.get_at((0, 9)), black)
subsurf.fill(red, (0, -1, 10, 1), pygame.BLEND_RGB_ADD)
self.assertEqual(surf.get_at((0, 0)), black)
self.assertEqual(surf.get_at((0, 9)), black)
subsurf.fill(red, (0, 8, 10, 1), pygame.BLEND_RGB_ADD)
self.assertEqual(surf.get_at((0, 0)), black)
self.assertEqual(surf.get_at((0, 9)), black)
def test_GET_PIXELVALS(self):
        # surface.h GET_PIXELVALS bug regarding whether or not
# a surface has per-pixel alpha. Looking at the Amask
# is not enough. The surface's SRCALPHA flag must also
# be considered. Fix rev. 1923.
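        # Below, set_alpha(None) clears SRCALPHA while the 32-bit format keeps
        # its alpha mask, so the blit should treat the source as opaque
        # (alpha 255) rather than use the stored per-pixel alpha of 128.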
src = self._make_surface(32, srcalpha=True)
src.fill((0, 0, 0, 128))
src.set_alpha(None) # Clear SRCALPHA flag.
dst = self._make_surface(32, srcalpha=True)
dst.blit(src, (0, 0), special_flags=BLEND_RGBA_ADD)
self.assertEqual(dst.get_at((0, 0)), (0, 0, 0, 255))
def test_fill_blend(self):
destinations = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True)]
blend = [('BLEND_ADD', (0, 25, 100, 255),
lambda a, b: min(a + b, 255)),
('BLEND_SUB', (0, 25, 100, 255),
lambda a, b: max(a - b, 0)),
('BLEND_MULT', (0, 7, 100, 255),
lambda a, b: (a * b) // 256),
('BLEND_MIN', (0, 255, 0, 255), min),
('BLEND_MAX', (0, 255, 0, 255), max)]
for dst in destinations:
dst_palette = [dst.unmap_rgb(dst.map_rgb(c))
for c in self._test_palette]
for blend_name, fill_color, op in blend:
fc = dst.unmap_rgb(dst.map_rgb(fill_color))
self._fill_surface(dst)
p = []
for dc in dst_palette:
c = [op(dc[i], fc[i]) for i in range(3)]
if dst.get_masks()[3]:
c.append(dc[3])
else:
c.append(255)
c = dst.unmap_rgb(dst.map_rgb(c))
p.append(c)
dst.fill(fill_color, special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p, ", %s" % blend_name)
def test_fill_blend_rgba(self):
destinations = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True)]
blend = [('BLEND_RGBA_ADD', (0, 25, 100, 255),
lambda a, b: min(a + b, 255)),
('BLEND_RGBA_SUB', (0, 25, 100, 255),
lambda a, b: max(a - b, 0)),
('BLEND_RGBA_MULT', (0, 7, 100, 255),
lambda a, b: (a * b) // 256),
('BLEND_RGBA_MIN', (0, 255, 0, 255), min),
('BLEND_RGBA_MAX', (0, 255, 0, 255), max)]
for dst in destinations:
dst_palette = [dst.unmap_rgb(dst.map_rgb(c))
for c in self._test_palette]
for blend_name, fill_color, op in blend:
fc = dst.unmap_rgb(dst.map_rgb(fill_color))
self._fill_surface(dst)
p = []
for dc in dst_palette:
c = [op(dc[i], fc[i]) for i in range(4)]
if not dst.get_masks()[3]:
c[3] = 255
c = dst.unmap_rgb(dst.map_rgb(c))
p.append(c)
dst.fill(fill_color, special_flags=getattr(pygame, blend_name))
self._assert_surface(dst, p, ", %s" % blend_name)


class SurfaceSelfBlitTest(unittest.TestCase):
"""Blit to self tests.
This test case is in response to MotherHamster Bugzilla Bug 19.
"""
def setUp(self):
# Needed for 8 bits-per-pixel color palette surface tests.
pygame.init()
def tearDown(self):
pygame.quit()
_test_palette = [(0, 0, 0, 255),
(255, 0, 0, 0),
(0, 255, 0, 255)]
surf_size = (9, 6)
def _fill_surface(self, surf, palette=None):
if palette is None:
palette = self._test_palette
surf.fill(palette[1])
surf.fill(palette[2], (1, 2, 1, 2))
def _make_surface(self, bitsize, srcalpha=False, palette=None):
if palette is None:
palette = self._test_palette
flags = 0
if srcalpha:
flags |= SRCALPHA
surf = pygame.Surface(self.surf_size, flags, bitsize)
if bitsize == 8:
surf.set_palette([c[:3] for c in palette])
self._fill_surface(surf, palette)
return surf
def _assert_same(self, a, b):
w, h = a.get_size()
for x in range(w):
for y in range(h):
self.assertEqual(a.get_at((x, y)), b.get_at((x, y)),
("%s != %s, bpp: %i" %
(a.get_at((x, y)), b.get_at((x, y)),
a.get_bitsize())))
def test_overlap_check(self):
# Ensure overlapping blits are properly detected. There are two
# places where this is done, within SoftBlitPyGame() in alphablit.c
# and PySurface_Blit() in surface.c. SoftBlitPyGame should catch the
# per-pixel alpha surface, PySurface_Blit the colorkey and blanket
        # alpha surface. Per-pixel alpha and blanket alpha self blits are
# not properly handled by SDL 1.2.13, so Pygame does them.
bgc = (0, 0, 0, 255)
rectc_left = (128, 64, 32, 255)
rectc_right = (255, 255, 255, 255)
colors = [(255, 255, 255, 255), (128, 64, 32, 255)]
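        # Each overlap case is (src_x, src_y, dst_x, dst_y, probe_position):
        # a 50x50 area of the surface is blitted onto an overlapping spot and
        # the pixel at probe_position is checked afterwards.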
overlaps = [(0, 0, 1, 0, (50, 0)),
(0, 0, 49, 1, (98, 2)),
(0, 0, 49, 49, (98, 98)),
(49, 0, 0, 1, (0, 2)),
(49, 0, 0, 49, (0, 98))]
surfs = [pygame.Surface((100, 100), SRCALPHA, 32)]
surf = pygame.Surface((100, 100), 0, 32)
surf.set_alpha(255)
surfs.append(surf)
surf = pygame.Surface((100, 100), 0, 32)
surf.set_colorkey((0, 1, 0))
surfs.append(surf)
for surf in surfs:
for s_x, s_y, d_x, d_y, test_posn in overlaps:
surf.fill(bgc)
surf.fill(rectc_right, (25, 0, 25, 50))
surf.fill(rectc_left, (0, 0, 25, 50))
surf.blit(surf, (d_x, d_y), (s_x, s_y, 50, 50))
self.assertEqual(surf.get_at(test_posn), rectc_right)
def test_colorkey(self):
# Check a workaround for an SDL 1.2.13 surface self-blit problem
# (MotherHamster Bugzilla bug 19).
if 'ppc64le' in platform.uname():
# skip https://github.com/pygame/pygame/issues/370#issuecomment-364625291
return
pygame.display.set_mode((100, 50)) # Needed for 8bit surface
bitsizes = [8, 16, 24, 32]
for bitsize in bitsizes:
surf = self._make_surface(bitsize)
surf.set_colorkey(self._test_palette[1])
surf.blit(surf, (3, 0))
p = []
for c in self._test_palette:
c = surf.unmap_rgb(surf.map_rgb(c))
p.append(c)
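            # Make the colorkey entry fully transparent in the reference
            # palette so the RGBA comparison surface mirrors colorkey blitting.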
p[1] = (p[1][0], p[1][1], p[1][2], 0)
tmp = self._make_surface(32, srcalpha=True, palette=p)
tmp.blit(tmp, (3, 0))
tmp.set_alpha(None)
comp = self._make_surface(bitsize)
comp.blit(tmp, (0, 0))
self._assert_same(surf, comp)
def test_blanket_alpha(self):
# Check a workaround for an SDL 1.2.13 surface self-blit problem
# (MotherHamster Bugzilla bug 19).
if 'ppc64le' in platform.uname():
# skip https://github.com/pygame/pygame/issues/370#issuecomment-364625291
return
pygame.display.set_mode((100, 50)) # Needed for 8bit surface
bitsizes = [8, 16, 24, 32]
for bitsize in bitsizes:
surf = self._make_surface(bitsize)
surf.set_alpha(128)
surf.blit(surf, (3, 0))
p = []
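            # Build a reference palette carrying the blanket alpha of 128 so
            # the per-pixel-alpha comparison surface emulates the self blit.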
for c in self._test_palette:
c = surf.unmap_rgb(surf.map_rgb(c))
p.append((c[0], c[1], c[2], 128))
tmp = self._make_surface(32, srcalpha=True, palette=p)
tmp.blit(tmp, (3, 0))
tmp.set_alpha(None)
comp = self._make_surface(bitsize)
comp.blit(tmp, (0, 0))
self._assert_same(surf, comp)
def test_pixel_alpha(self):
bitsizes = [16, 32]
for bitsize in bitsizes:
surf = self._make_surface(bitsize, srcalpha=True)
comp = self._make_surface(bitsize, srcalpha=True)
comp.blit(surf, (3, 0))
surf.blit(surf, (3, 0))
self._assert_same(surf, comp)
def test_blend(self):
bitsizes = [8, 16, 24, 32]
blends = ['BLEND_ADD',
'BLEND_SUB',
'BLEND_MULT',
'BLEND_MIN',
'BLEND_MAX']
for bitsize in bitsizes:
surf = self._make_surface(bitsize)
comp = self._make_surface(bitsize)
for blend in blends:
self._fill_surface(surf)
self._fill_surface(comp)
comp.blit(surf, (3, 0),
special_flags=getattr(pygame, blend))
surf.blit(surf, (3, 0),
special_flags=getattr(pygame, blend))
self._assert_same(surf, comp)
def test_blend_rgba(self):
bitsizes = [16, 32]
blends = ['BLEND_RGBA_ADD',
'BLEND_RGBA_SUB',
'BLEND_RGBA_MULT',
'BLEND_RGBA_MIN',
'BLEND_RGBA_MAX']
for bitsize in bitsizes:
surf = self._make_surface(bitsize, srcalpha=True)
comp = self._make_surface(bitsize, srcalpha=True)
for blend in blends:
self._fill_surface(surf)
self._fill_surface(comp)
comp.blit(surf, (3, 0),
special_flags=getattr(pygame, blend))
surf.blit(surf, (3, 0),
special_flags=getattr(pygame, blend))
self._assert_same(surf, comp)
def test_subsurface(self):
# Blitting a surface to its subsurface is allowed.
surf = self._make_surface(32, srcalpha=True)
comp = surf.copy()
comp.blit(surf, (3, 0))
sub = surf.subsurface((3, 0, 6, 6))
sub.blit(surf, (0, 0))
del sub
self._assert_same(surf, comp)
        # Blitting a subsurface to its owner is forbidden because of
        # lock conflicts. This limitation allows the overlap check
        # in PySurface_Blit of surface.c to be simplified.
def do_blit(d, s):
d.blit(s, (0, 0))
sub = surf.subsurface((1, 1, 2, 2))
self.assertRaises(pygame.error, do_blit, surf, sub)


class SurfaceFillTest(unittest.TestCase):
def setUp(self):
pygame.init()
def tearDown(self):
pygame.quit()
def test_fill(self):
screen = pygame.display.set_mode((640, 480))
# Green and blue test pattern
screen.fill((0, 255, 0), (0, 0, 320, 240))
screen.fill((0, 255, 0), (320, 240, 320, 240))
screen.fill((0, 0, 255), (320, 0, 320, 240))
screen.fill((0, 0, 255), (0, 240, 320, 240))
        # Now apply a clip rect so that only the left side of the
        # screen is affected by blit operations.
screen.set_clip((0, 0, 320, 480))
        # Test fills with each special flag, and additionally without any.
screen.fill((255, 0, 0, 127), (160, 0, 320, 30), 0)
screen.fill((255, 0, 0, 127), (160, 30, 320, 30), pygame.BLEND_ADD)
screen.fill((0, 127, 127, 127), (160, 60, 320, 30), pygame.BLEND_SUB)
screen.fill((0, 63, 63, 127), (160, 90, 320, 30), pygame.BLEND_MULT)
screen.fill((0, 127, 127, 127), (160, 120, 320, 30), pygame.BLEND_MIN)
screen.fill((127, 0, 0, 127), (160, 150, 320, 30), pygame.BLEND_MAX)
screen.fill((255, 0, 0, 127), (160, 180, 320, 30), pygame.BLEND_RGBA_ADD)
screen.fill((0, 127, 127, 127), (160, 210, 320, 30), pygame.BLEND_RGBA_SUB)
screen.fill((0, 63, 63, 127), (160, 240, 320, 30), pygame.BLEND_RGBA_MULT)
screen.fill((0, 127, 127, 127), (160, 270, 320, 30), pygame.BLEND_RGBA_MIN)
screen.fill((127, 0, 0, 127), (160, 300, 320, 30), pygame.BLEND_RGBA_MAX)
screen.fill((255, 0, 0, 127), (160, 330, 320, 30), pygame.BLEND_RGB_ADD)
screen.fill((0, 127, 127, 127), (160, 360, 320, 30), pygame.BLEND_RGB_SUB)
screen.fill((0, 63, 63, 127), (160, 390, 320, 30), pygame.BLEND_RGB_MULT)
screen.fill((0, 127, 127, 127), (160, 420, 320, 30), pygame.BLEND_RGB_MIN)
screen.fill((255, 0, 0, 127), (160, 450, 320, 30), pygame.BLEND_RGB_MAX)
# Update the display so we can see the results
pygame.display.flip()
# Compare colors on both sides of window
for y in range(5, 480, 10):
self.assertEqual(screen.get_at((10, y)), screen.get_at((330, 480 - y)))
if __name__ == '__main__':
unittest.main()
| 39.397373 | 94 | 0.56033 |