| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| abad891b06140e2bef4579619552a0ef383015eb | 1,179 | py | Python | nova/tests/unit/api/openstack/compute/test_plugins/basic.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | ["Apache-2.0"] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/tests/unit/api/openstack/compute/test_plugins/basic.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | ["Apache-2.0"] | 2 | 2015-09-07T22:14:46.000Z | 2020-08-12T08:51:56.000Z | nova/tests/unit/api/openstack/compute/test_plugins/basic.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | ["Apache-2.0"] | 4 | 2017-06-23T07:37:43.000Z | 2020-12-28T09:57:22.000Z |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Basic Test Extension"""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = 'test-basic'
class BasicController(wsgi.Controller):
def index(self, req):
data = {'param': 'val'}
return data
class Basic(extensions.V3APIExtensionBase):
"""Basic Test Extension."""
name = "BasicTest"
alias = ALIAS
version = 1
def get_resources(self):
resource = extensions.ResourceExtension('test', BasicController())
return [resource]
def get_controller_extensions(self):
return []
| 26.795455 | 78 | 0.693808 |
| 4ab2538ede6f86e9d1c48cb6654cea7a35f46256 | 193 | py | Python | sns_client.py | advancedcsg/aws-scheduler | 75a317f31b65e8452320f28cd780577fe4d00f51 | ["MIT"] | null | null | null | sns_client.py | advancedcsg/aws-scheduler | 75a317f31b65e8452320f28cd780577fe4d00f51 | ["MIT"] | null | null | null | sns_client.py | advancedcsg/aws-scheduler | 75a317f31b65e8452320f28cd780577fe4d00f51 | ["MIT"] | 1 | 2020-07-10T06:23:57.000Z | 2020-07-10T06:23:57.000Z |
import boto3
client = boto3.client('sns')
def publish_sns(arn, payload):
print(f"publishing to an event : {arn}")
client.publish(
TopicArn=arn,
Message=payload
)
| 16.083333 | 44 | 0.626943 |
| 2aa550331c7f4cb93ebe4d576d3866f2cb68eace | 387 | py | Python | examples/mpl/test.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | ["BSD-3-Clause"] | null | null | null | examples/mpl/test.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | ["BSD-3-Clause"] | null | null | null | examples/mpl/test.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | ["BSD-3-Clause"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from bokeh import pyplot
from bokeh import plotting
x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
y = np.sin(x)
plt.plot(x, y, "r-")
plt.title("Matplotlib Figure in Bokeh")
# dashed lines work
#plt.plot(x,y,"r-x", linestyle="-.")
pyplot.show_bokeh(plt.gcf(), filename="mpltest.html")
plotting.session().dumpjson(file="mpltest.json")
| 21.5 | 53 | 0.702842 |
| 799796ace0c75801a5e1895b5daa633b86f12791 | 5,476 | py | Python | tensorflow/contrib/distributions/python/ops/poisson.py | wangguizhu27/tensorflow1 | 3462966ac7d3884c2153b1655e8528a0f6bac0f4 | ["Apache-2.0"] | 1 | 2020-08-27T08:17:15.000Z | 2020-08-27T08:17:15.000Z | tensorflow/contrib/distributions/python/ops/poisson.py | wangguizhu27/tensorflow1 | 3462966ac7d3884c2153b1655e8528a0f6bac0f4 | ["Apache-2.0"] | null | null | null | tensorflow/contrib/distributions/python/ops/poisson.py | wangguizhu27/tensorflow1 | 3462966ac7d3884c2153b1655e8528a0f6bac0f4 | ["Apache-2.0"] | 1 | 2019-06-19T08:43:23.000Z | 2019-06-19T08:43:23.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Poisson distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = [
"Poisson",
]
_poisson_sample_note = """
Note that the input value must be a non-negative floating point tensor with
dtype `dtype` and whose shape can be broadcast with `self.rate`. `x` is only
legal if it is non-negative and its components are equal to integer values.
"""
class Poisson(distribution.Distribution):
"""Poisson distribution.
The Poisson distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z
Z = exp(lambda).
```
where `rate = lambda` and `Z` is the normalizing constant.
"""
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="Poisson"):
"""Initialize a batch of Poisson distributions.
Args:
rate: Floating point tensor, the rate parameter of the
distribution(s). `rate` must be positive.
validate_args: Python `Boolean`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `Boolean`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: `String` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[rate]) as ns:
with ops.control_dependencies([check_ops.assert_positive(rate)] if
validate_args else []):
self._rate = array_ops.identity(rate, name="rate")
super(Poisson, self).__init__(
dtype=self._rate.dtype,
is_continuous=False,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._rate],
name=ns)
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.shape(self.rate)
def _batch_shape(self):
return self.rate.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
def _assert_valid_sample(self, x, check_integer=True):
if not self.validate_args:
return x
dependencies = [check_ops.assert_non_negative(x)]
if check_integer:
dependencies += [distribution_util.assert_integer_form(
x, message="x has non-integer components.")]
return control_flow_ops.with_dependencies(dependencies, x)
| 34.440252 | 80 | 0.709094 |
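The log-space computation in `_log_unnormalized_prob` and `_log_normalization` above implements the pmf given in the class docstring, `pmf(k; lambda) = lambda^k / k! / exp(lambda)`. A minimal standalone check in plain Python (no TensorFlow; the rate and sample value are arbitrary) showing the two forms agree:

```python
import math

rate = 3.0  # lambda
k = 4       # sample value

# Direct pmf: lambda^k / k! / exp(lambda)
pmf_direct = rate ** k / math.factorial(k) / math.exp(rate)

# Log-space form mirrored from the class above:
# log_unnormalized_prob = k * log(rate) - lgamma(k + 1), log_normalization = rate
log_pmf = k * math.log(rate) - math.lgamma(k + 1) - rate

assert abs(pmf_direct - math.exp(log_pmf)) < 1e-12
print(round(pmf_direct, 6))  # 0.168031
```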
| 4cc8d74f18244259c2dbe3ab50411d7e005feacd | 6,616 | py | Python | pytracer/interface/api.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | ["MIT"] | 9 | 2017-11-20T18:17:27.000Z | 2022-01-27T23:00:31.000Z | pytracer/interface/api.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | ["MIT"] | 4 | 2021-06-08T19:03:51.000Z | 2022-03-11T23:18:44.000Z | pytracer/interface/api.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | ["MIT"] | 1 | 2017-11-20T22:48:01.000Z | 2017-11-20T22:48:01.000Z |
"""
api.py
pytracer.interface package
Interfacing from the scene descriptions.
Created by Jiayao on Aug 21, 2017
"""
from __future__ import absolute_import
from typing import TYPE_CHECKING
import pytracer.utility as util
if TYPE_CHECKING:
from pytracer.interface.option import Option
from pytracer.interface.parameter import Param
from pytracer.transform import Transform
def system_init(option: Option):
from pytracer.spectral import Spectrum
import pytracer.interface as inter
if inter.API_STATUS != inter.API_UNINIT:
raise RuntimeError("system_init() already called.")
inter.API_STATUS = inter.API_OPTIONS
inter.GLOBAL_OPTION = option
inter.RENDER_OPTION = inter.RenderOption()
inter.GRAPHICS_STATE = GraphicsState()
Spectrum.init()
def system_clean():
import pytracer.interface as inter
if inter.API_STATUS == inter.API_UNINIT:
return
inter.API_STATUS = inter.API_UNINIT
inter.GLOBAL_OPTION = None
inter.RENDER_OPTION = None
def check_system_inited(api_func):
import pytracer.interface as inter
if inter.API_STATUS == inter.API_UNINIT:
raise RuntimeError("{}: system not inited.".format(api_func.__name__))
return api_func
@check_system_inited
def trans_identity():
import pytracer.interface as inter
import pytracer.transform as trans
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] = trans.Transform()
@check_system_inited
def trans_translate(dx):
import pytracer.interface as inter
import pytracer.geometry as geo
import pytracer.transform as trans
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] *= trans.Transform.translate(geo.Vector.from_arr(dx))
@check_system_inited
def trans_rotate(angle, dx: list):
import pytracer.interface as inter
import pytracer.geometry as geo
import pytracer.transform as trans
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] *= trans.Transform.rotate(angle, geo.Vector.from_arr(dx))
@check_system_inited
def trans_scale(sc: list):
import pytracer.interface as inter
import pytracer.transform as trans
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] *= trans.Transform.scale(sc[0], sc[1], sc[2])
@check_system_inited
def trans_look_at(eye: list, at: list, up: list):
import pytracer.interface as inter
import pytracer.geometry as geo
import pytracer.transform as trans
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] *= trans.Transform.look_at(geo.Point.from_arr(eye),
geo.Point.from_arr(at),
geo.Vector.from_arr(up))
@check_system_inited
def trans_concat(trans: Transform):
import pytracer.interface as inter
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] *= trans
@check_system_inited
def trans_set(trans: Transform):
import pytracer.interface as inter
for i, _ in enumerate(inter.TRANSFORM_SET):
inter.TRANSFORM_SET[i] = trans
# coordinate system
@check_system_inited
def add_coordinate_system(name: str):
"""Make a named copy of the current transformations."""
import pytracer.interface as inter
inter.COORDINATE_SYSTEM[name] = inter.TRANSFORM_SET
@check_system_inited
def set_coordinate_system(name: str):
"""Make a named copy of the current transformations."""
import pytracer.interface as inter
if name not in inter.COORDINATE_SYSTEM:
raise RuntimeError
inter.TRANSFORM_SET = inter.COORDINATE_SYSTEM[name]
# Render Options
@check_system_inited
def set_transformation_time(start=0., end=1.):
import pytracer.interface as inter
inter.RENDER_OPTION.trans_start_time = start
inter.RENDER_OPTION.trans_end_time = end
# Filters
@check_system_inited
def set_pixel_filter(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.filter_name = name.lower()
inter.RENDER_OPTION.filter_param = param
@check_system_inited
def set_film(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.film_name = name.lower()
inter.RENDER_OPTION.film_param = param
@check_system_inited
def set_camera(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.camera_name = name.lower()
inter.RENDER_OPTION.camera_param = param
inter.RENDER_OPTION.cam2wld = [trans.inverse() for trans in inter.TRANSFORM_SET]
inter.COORDINATE_SYSTEM["camera"] = inter.RENDER_OPTION.cam2wld
@check_system_inited
def set_sampler(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.sampler_name = name.lower()
inter.RENDER_OPTION.sampler_param = param
@check_system_inited
def set_aggregator(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.aggregator_name = name.lower()
inter.RENDER_OPTION.aggregator_param = param
@check_system_inited
def set_renderer(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.renderer_name = name.lower()
inter.RENDER_OPTION.renderer_param = param
@check_system_inited
def set_surface(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.surface_name = name.lower()
inter.RENDER_OPTION.surface_param = param
@check_system_inited
def set_volume(name: str, param: Param):
import pytracer.interface as inter
inter.RENDER_OPTION.volume_name = name.lower()
inter.RENDER_OPTION.volume_param = param
# Scene Description
@check_system_inited
def world_begin():
import pytracer.interface as inter
inter.API_STATUS = inter.API_WORLD
trans_identity()
add_coordinate_system("world")
@check_system_inited
def attribute_begin():
import pytracer.interface as inter
inter.GRAPHICS_STATE_STACK.append(inter.GRAPHICS_STATE)
inter.TRANSFORM_STACK.append(inter.TRANSFORM_SET)
@check_system_inited
def attribute_end():
import pytracer.interface as inter
if len(inter.GRAPHICS_STATE_STACK) == 0:
util.logging('Error', 'Unmatched attribute end, ignoring')
return
inter.GRAPHICS_STATE = inter.GRAPHICS_STATE_STACK.pop()
inter.TRANSFORM_SET = inter.TRANSFORM_STACK.pop()
@check_system_inited
def transform_begin():
import pytracer.interface as inter
inter.TRANSFORM_STACK.append(inter.TRANSFORM_SET)
@check_system_inited
def transform_end():
import pytracer.interface as inter
if len(inter.TRANSFORM_STACK) == 0:
util.logging('Error', 'Unmatched attribute end, ignoring')
return
inter.TRANSFORM_SET = inter.TRANSFORM_STACK.pop()
# Local Classes
class GraphicsState(object):
"""Holds the graphics states."""
def __init__(self):
pass
def __repr__(self):
return "{}\n".format(self.__class__)
| 25.544402 | 82 | 0.778114 |
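The `check_system_inited` helper above is a guard decorator: it raises if the module-level API state is still uninitialized, and notably it performs the check when the decorated function is defined, not each time it is called. A self-contained sketch of the same pattern (the names `STATE` and `require_init` are illustrative, not part of pyTracer):

```python
# Illustrative module-level state standing in for pytracer.interface.API_STATUS.
STATE = {"inited": True}  # pretend system_init() has already run

def require_init(api_func):
    # Like check_system_inited above, the check happens at decoration time.
    if not STATE["inited"]:
        raise RuntimeError("{}: system not inited.".format(api_func.__name__))
    return api_func

@require_init
def trans_identity_sketch():
    return "reset transforms"

print(trans_identity_sketch())  # reset transforms
```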
| aa41b134a29d9928d7cac582d19c219b42a7321f | 5,423 | py | Python | isi_sdk_8_2_0/isi_sdk_8_2_0/models/audit_topic_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/audit_topic_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/audit_topic_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AuditTopicCreateParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'max_cached_messages': 'int',
'name': 'str'
}
attribute_map = {
'max_cached_messages': 'max_cached_messages',
'name': 'name'
}
def __init__(self, max_cached_messages=None, name=None): # noqa: E501
"""AuditTopicCreateParams - a model defined in Swagger""" # noqa: E501
self._max_cached_messages = None
self._name = None
self.discriminator = None
if max_cached_messages is not None:
self.max_cached_messages = max_cached_messages
self.name = name
@property
def max_cached_messages(self):
"""Gets the max_cached_messages of this AuditTopicCreateParams. # noqa: E501
Specifies the maximum number of messages that can be sent and received at the same time. Messages that are sent and received at the same time can be lost if a system crash occurs. You can prevent message loss by setting this property to 0, which sets audit logs to synchronous. # noqa: E501
:return: The max_cached_messages of this AuditTopicCreateParams. # noqa: E501
:rtype: int
"""
return self._max_cached_messages
@max_cached_messages.setter
def max_cached_messages(self, max_cached_messages):
"""Sets the max_cached_messages of this AuditTopicCreateParams.
Specifies the maximum number of messages that can be sent and received at the same time. Messages that are sent and received at the same time can be lost if a system crash occurs. You can prevent message loss by setting this property to 0, which sets audit logs to synchronous. # noqa: E501
:param max_cached_messages: The max_cached_messages of this AuditTopicCreateParams. # noqa: E501
:type: int
"""
if max_cached_messages is not None and max_cached_messages > 16384: # noqa: E501
raise ValueError("Invalid value for `max_cached_messages`, must be a value less than or equal to `16384`") # noqa: E501
if max_cached_messages is not None and max_cached_messages < 0: # noqa: E501
raise ValueError("Invalid value for `max_cached_messages`, must be a value greater than or equal to `0`") # noqa: E501
self._max_cached_messages = max_cached_messages
@property
def name(self):
"""Gets the name of this AuditTopicCreateParams. # noqa: E501
Specifies the name of the audit topic. # noqa: E501
:return: The name of this AuditTopicCreateParams. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AuditTopicCreateParams.
Specifies the name of the audit topic. # noqa: E501
:param name: The name of this AuditTopicCreateParams. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 255:
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if name is not None and len(name) < 0:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuditTopicCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 35.677632 | 299 | 0.619952 |
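Assuming the generated `AuditTopicCreateParams` class above is in scope (the row metadata suggests the package path `isi_sdk_8_2_0.models`, but treat the import as an assumption), the setters enforce the documented bounds and `to_dict()` round-trips the attributes; a short sketch:

```python
# Assumes AuditTopicCreateParams from the file above is defined or imported.
params = AuditTopicCreateParams(max_cached_messages=0, name="audit_topic")
print(params.to_dict())  # {'max_cached_messages': 0, 'name': 'audit_topic'}

try:
    # The setter rejects values above the 16384 ceiling checked above.
    params.max_cached_messages = 20000
except ValueError as exc:
    print(exc)
```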
| 6716539bb76a430784548200ee3a8a28dc63b906 | 760 | py | Python | salt/output/json_out.py | d--j/salt | 579f900be67a80e1a77674bc6aa21fec836c1c4c | ["Apache-2.0"] | 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | salt/output/json_out.py | epoelke/salt | 80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2 | ["Apache-2.0"] | null | null | null | salt/output/json_out.py | epoelke/salt | 80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2 | ["Apache-2.0"] | 1 | 2020-03-07T07:04:55.000Z | 2020-03-07T07:04:55.000Z |
'''
The JSON output module converts the return data into JSON.
'''
# Import python libs
import json
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Rename to json
'''
return 'json'
def output(data):
'''
Print the output data in JSON
'''
try:
if 'output_indent' in __opts__:
if __opts__['output_indent'] >= 0:
return json.dumps(data, default=repr, indent=__opts__['output_indent'])
return json.dumps(data, default=repr)
return json.dumps(data, default=repr, indent=4)
except TypeError:
log.debug('An error occurred while outputting JSON', exc_info=True)
# Return valid JSON for unserializable objects
return json.dumps({})
| 23.030303 | 87 | 0.639474 |
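The `output()` function above passes `__opts__['output_indent']` through to `json.dumps` when it is non-negative and otherwise emits compact JSON, with `default=repr` keeping unserializable values from raising. A standalone sketch of that `json.dumps` behaviour (no Salt required; the sample data is made up):

```python
import datetime
import json

data = {"result": True, "when": datetime.datetime(2020, 1, 1, 12, 0)}

# datetime is not JSON-serializable, so default=repr renders it via repr().
print(json.dumps(data, default=repr, indent=4))   # pretty-printed form
print(json.dumps(data, default=repr))             # compact fallback
```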
| 0c0c0eeb2eed81d7b9e5589370d3b2472e82bfcf | 1,779 | py | Python | src/main/resources/google/cloud/compute/instance/planningScripts/steps_for_destroy.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | ["MIT"] | null | null | null | src/main/resources/google/cloud/compute/instance/planningScripts/steps_for_destroy.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | ["MIT"] | null | null | null | src/main/resources/google/cloud/compute/instance/planningScripts/steps_for_destroy.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | ["MIT"] | null | null | null |
#
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
instance_name = previousDeployed.instanceName if previousDeployed.instanceName else previousDeployed.name
context.addStepWithCheckpoint(steps.jython(
description='Destroy instance {} on {}'.format(instance_name, previousDeployed.container.name),
script="google/cloud/compute/instance/compute_destroy.py",
order=16
), delta)
context.addStep(steps.jython(
description="Wait for instance {} to be fully destroy".format(instance_name),
script="google/cloud/compute/instance/compute_destroy_running.py",
order=17
))
context.addStep(steps.wait(
description="Wait for instance {} to be fully destroyed (2)".format(instance_name),
seconds=previousDeployed.waitOnDestroy,
order=18
))
| 59.3 | 462 | 0.786397 |
| 9871043e63d1c483d8d8fadf36a596fdc991cb54 | 4,375 | py | Python | source/eval.py | allenai/learning_from_interaction | a266bc16d682832aa854348fa557a30d86b84674 | ["Apache-2.0"] | 11 | 2020-10-27T00:05:55.000Z | 2021-08-25T08:42:34.000Z | source/eval.py | allenai/learning_from_interaction | a266bc16d682832aa854348fa557a30d86b84674 | ["Apache-2.0"] | 1 | 2021-06-02T01:59:03.000Z | 2021-06-02T01:59:03.000Z | source/eval.py | allenai/learning_from_interaction | a266bc16d682832aa854348fa557a30d86b84674 | ["Apache-2.0"] | null | null | null |
import os
import argparse
import json
import torch
from models.clustering_models import ClusteringModel
from pipeline.evaluator import Evaluator
from replay_memory.replay_pil import ReplayPILDataset
from config import MemoryConfigPIL, TestingConfig, ClusteringModelConfig
from config import global_config
from tools.logger import init_logging, LOGGER
from tools.coco_tools import save_coco_dataset
def get_args():
parser = argparse.ArgumentParser(
description="self-supervised-objects eval",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"model_folder",
type=str,
help="required trained model folder name",
)
parser.add_argument(
"dataset_folder",
type=str,
help="required dataset folder name",
)
parser.add_argument(
"dataset",
type=int,
help="required dataset type should be 0 (NovelObjects) or 1 (NovelSpaces) and match the one used for training",
)
parser.add_argument(
"-c",
"--checkpoint",
required=False,
default=900,
type=int,
help="checkpoint to evaluate",
)
parser.add_argument(
"-g",
"--model_gpu",
required=False,
default=0,
type=int,
help="gpu id to run model",
)
parser.add_argument(
"-l",
"--loaders_gpu",
required=False,
default=0,
type=int,
help="gpu id to run thor data loaders",
)
parser.add_argument(
"-i",
"--interaction_threshold",
required=False,
default=-100.0,
type=float,
help="interaction logits threshold",
)
parser.add_argument(
"-p",
"--checkpoint_prefix",
required=False,
default="clustering_model_weights_",
type=str,
help="prefix for checkpoints in output folder",
)
parser.add_argument(
"-d",
"--det_file",
required=False,
default=None,
type=str,
help="precomputed detections result",
)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
init_logging()
LOGGER.info("Running eval with args {}".format(args))
output_folder = os.path.normpath(args.model_folder)
dataset_folder = os.path.normpath(args.dataset_folder)
dataset = args.dataset
assert os.path.isdir(output_folder), 'Output folder does not exist'
assert os.path.isdir(dataset_folder), 'Dataset folder does not exist'
assert dataset in [0, 1], 'Dataset argument should be either 0 (NovelObjects) or 1 (NovelSpaces)'
results_folder = os.path.join(output_folder, "inference")
os.makedirs(results_folder, exist_ok=True)
LOGGER.info("Writing output to {}".format(results_folder))
data_file = ['NovelObjects__test.json', 'NovelSpaces__test.json'][dataset]
data_path = os.path.join(dataset_folder, data_file)
coco_gt_path = save_coco_dataset(data_path, results_folder)
if args.det_file is None:
global_config.model_gpu = args.model_gpu
global_config.actor_gpu = args.loaders_gpu
model = ClusteringModel(ClusteringModelConfig()).cuda(global_config.model_gpu)
cp_name = os.path.join(output_folder, "{}{}.pth".format(args.checkpoint_prefix, args.checkpoint))
LOGGER.info("Loading checkpoint {}".format(cp_name))
model.load_state_dict(torch.load(cp_name, map_location="cpu"))
id = "{}__cp{}".format(os.path.basename(output_folder), args.checkpoint)
eval = Evaluator(model, ReplayPILDataset(MemoryConfigPIL()), loss_function=None, tester_config=TestingConfig())
det_file = eval.inference(data_path, results_folder, args.interaction_threshold, id, interactable_classes=[0, 1, 2])
else:
det_file = args.det_file
LOGGER.info("Using precomputed detections in {}".format(det_file))
results = {}
for anno_type in ['bbox', 'segm', 'mass']:
results.update(Evaluator.evaluate(coco_gt_path, det_file, annotation_type=anno_type))
results_file = det_file.replace("_inf.json", "_results.json")
with open(results_file, "w") as f:
json.dump(results, f, indent=4, sort_keys=True)
LOGGER.info("Full results saved in {}".format(results_file))
LOGGER.info("Eval done")
| 31.702899 | 124 | 0.662171 |
| 41c29e64d7b1f1207bce2967c2ece9198dd9bf42 | 1,191 | py | Python | Aula33/Aula33-4/dao/endereco_db.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | ["bzip2-1.0.6", "MIT"] | null | null | null | Aula33/Aula33-4/dao/endereco_db.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | ["bzip2-1.0.6", "MIT"] | null | null | null | Aula33/Aula33-4/dao/endereco_db.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | ["bzip2-1.0.6", "MIT"] | null | null | null |
#----- Import the MySQL library
import MySQLdb
from model.endereco import Endereco
class EnderecoDb:
conexao = MySQLdb.connect(host='mysql.topskills.study', database='topskills01', user='topskills01', passwd='ts2019')
cursor = conexao.cursor()
def listar_todos(self):
comando_sql_select = "SELECT * FROM 01_MDG_ENDERECO"
self.cursor.execute(comando_sql_select)
resultado = self.cursor.fetchall()
lista_endereco_classe = self.converter_tabela_classe(resultado)
return lista_endereco_classe
def buscar_por_id(self, id):
comando_sql_select = f"SELECT * FROM 01_MDG_ENDERECO WHERE ID= {id}"
self.cursor.execute(comando_sql_select)
resultado = self.cursor.fetchone()
return resultado
def converter_tabela_classe(self, lista_tuplas):
lista_endereco = []
for e in lista_tuplas:
e1 = Endereco()
e1.id = e[0]
e1.logradouro = e[1]
e1.numero= e[2]
e1.complemento = e[3]
e1.bairro = e[4]
e1.cidade = e[5]
e1.cep = e[6]
lista_endereco.append(e1)
return lista_endereco
| 34.028571 | 120 | 0.630563 |
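One detail worth noting in `buscar_por_id` above is that the id is interpolated straight into the SQL string. A hedged sketch of the parameterized variant (MySQLdb uses `%s` placeholders; the function name and shape here are illustrative, not the repository's code):

```python
def buscar_por_id_parametrizado(cursor, id):
    # Let the driver escape the value instead of formatting it into the query.
    comando_sql_select = "SELECT * FROM 01_MDG_ENDERECO WHERE ID = %s"
    cursor.execute(comando_sql_select, (id,))
    return cursor.fetchone()
```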
| b465ea6842f0ea8537f4875ff50c4978b2c7b171 | 5,198 | py | Python | pycycle/elements/test/test_mixer.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | ["Apache-2.0"] | null | null | null | pycycle/elements/test/test_mixer.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | ["Apache-2.0"] | null | null | null | pycycle/elements/test/test_mixer.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | ["Apache-2.0"] | null | null | null |
""" Tests the duct component. """
import unittest
import os
import numpy as np
import openmdao.api as om
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.constants import AIR_ELEMENTS, AIR_FUEL_ELEMENTS
from pycycle.mp_cycle import Cycle
from pycycle.elements.mixer import Mixer
from pycycle.elements.flow_start import FlowStart
from pycycle.connect_flow import connect_flow
from pycycle.thermo.cea.species_data import janaf
class MixerTestcase(unittest.TestCase):
def test_mix_same(self):
# mix two identical streams and make sure you get twice the area and the same total pressure
p = Problem()
cycle = p.model = Cycle()
cycle.set_input_defaults('P', 17., units='psi')
cycle.set_input_defaults('T', 500., units='degR')
cycle.set_input_defaults('MN', 0.5)
cycle.set_input_defaults('W', 100., units='lbm/s')
cycle.add_subsystem('start1', FlowStart(), promotes=['P', 'T', 'MN', 'W'])
cycle.add_subsystem('start2', FlowStart(), promotes=['P', 'T', 'MN', 'W'])
cycle.add_subsystem('mixer', Mixer(design=True, Fl_I1_elements=AIR_ELEMENTS, Fl_I2_elements=AIR_ELEMENTS))
cycle.pyc_connect_flow('start1.Fl_O', 'mixer.Fl_I1')
cycle.pyc_connect_flow('start2.Fl_O', 'mixer.Fl_I2')
p.set_solver_print(level=-1)
p.setup()
p['mixer.balance.P_tot'] = 17
p.run_model()
tol = 2e-7
assert_near_equal(p['mixer.Fl_O:stat:area'], 2*p['start1.Fl_O:stat:area'], tolerance=tol)
assert_near_equal(p['mixer.Fl_O:tot:P'], p['P'], tolerance=tol)
assert_near_equal(p['mixer.ER'], 1, tolerance=tol)
def test_mix_diff(self):
        # mix two streams at different total pressures and check the resulting area, total pressure, and extraction ratio
p = Problem()
cycle = p.model = Cycle()
cycle.set_input_defaults('start1.P', 17., units='psi')
cycle.set_input_defaults('start2.P', 15., units='psi')
cycle.set_input_defaults('T', 500., units='degR')
cycle.set_input_defaults('MN', 0.5)
cycle.set_input_defaults('W', 100., units='lbm/s')
cycle.add_subsystem('start1', FlowStart(), promotes=['MN', 'T', 'W'])
cycle.add_subsystem('start2', FlowStart(), promotes=['MN', 'T', 'W'])
cycle.add_subsystem('mixer', Mixer(design=True, Fl_I1_elements=AIR_ELEMENTS, Fl_I2_elements=AIR_ELEMENTS))
cycle.pyc_connect_flow('start1.Fl_O', 'mixer.Fl_I1')
cycle.pyc_connect_flow('start2.Fl_O', 'mixer.Fl_I2')
p.set_solver_print(level=-1)
p.setup()
p.run_model()
tol = 2e-7
assert_near_equal(p['mixer.Fl_O:stat:area'], 653.26524074, tolerance=tol)
assert_near_equal(p['mixer.Fl_O:tot:P'], 15.7943609, tolerance=tol)
assert_near_equal(p['mixer.ER'], 1.1333333333, tolerance=tol)
def _build_problem(self, designed_stream=1, complex=False):
p = Problem()
cycle = p.model = Cycle()
cycle.set_input_defaults('start1.P', 9.218, units='psi')
cycle.set_input_defaults('start1.T', 1524.32, units='degR')
cycle.set_input_defaults('start1.MN', 0.4463)
cycle.set_input_defaults('start1.W', 161.49, units='lbm/s')
cycle.set_input_defaults('start2.P', 8.68, units='psi')
cycle.set_input_defaults('start2.T', 524., units='degR')
cycle.set_input_defaults('start2.MN', 0.4463)
cycle.set_input_defaults('start2.W', 158., units='lbm/s')
cycle.add_subsystem('start1', FlowStart(elements=AIR_FUEL_ELEMENTS))
cycle.add_subsystem('start2', FlowStart(elements=AIR_ELEMENTS))
cycle.add_subsystem('mixer', Mixer(design=True, designed_stream=designed_stream,
Fl_I1_elements=AIR_FUEL_ELEMENTS, Fl_I2_elements=AIR_ELEMENTS))
cycle.pyc_connect_flow('start1.Fl_O', 'mixer.Fl_I1')
cycle.pyc_connect_flow('start2.Fl_O', 'mixer.Fl_I2')
p.setup(force_alloc_complex=complex)
p.set_solver_print(level=-1)
return p
def test_mix_air_with_airfuel(self):
p = self._build_problem(designed_stream=1)
# p.model.mixer.impulse_converge.nonlinear_solver.options['maxiter'] = 10
p.run_model()
tol = 5e-7
assert_near_equal(p['mixer.Fl_O:stat:area'], 2786.86877031, tolerance=tol)
assert_near_equal(p['mixer.Fl_O:tot:P'], 8.8881475, tolerance=tol)
assert_near_equal(p['mixer.ER'], 1.06198157, tolerance=tol)
# p = self._build_problem(designed_stream=2)
# p.model.mixer.impulse_converge.nonlinear_solver.options['maxiter'] = 10
# p.run_model()
def test_mixer_partials(self):
p = self._build_problem(designed_stream=1, complex=True)
p.run_model()
partials = p.check_partials(includes=['mixer.area_calc*', 'mixer.mix_flow*', 'mixer.imp_out*'], out_stream=None, method='cs')
assert_check_partials(partials, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
unittest.main()
| 37.395683 | 133 | 0.651982 |
| 55ff229f7aab00af1a551f9bda039dbf18e47754 | 2,411 | py | Python | caravaggio_rest_api/utils.py | brunohenriquy/django-caravaggio-rest-api | 25abe3666dae63e88b8f2cec9b4c8deac980207f | ["MIT"] | 1 | 2019-10-30T20:14:37.000Z | 2019-10-30T20:14:37.000Z | caravaggio_rest_api/utils.py | joaomedeiros95/django-caravaggio-rest-api | cb2647c55597623174992b555c949f4d08503115 | ["MIT"] | null | null | null | caravaggio_rest_api/utils.py | joaomedeiros95/django-caravaggio-rest-api | cb2647c55597623174992b555c949f4d08503115 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 BuildGroup Data Services Inc.
# All rights reserved.
import dateutil.parser
from datetime import datetime
try:
from dse.cqlengine import columns
except ImportError:
from cassandra.cqlengine import columns
from django.db import connections
from django_cassandra_engine.models import DjangoCassandraModel
from django_cassandra_engine.utils import get_engine_from_db_alias
def mk_datetime(datetime_str):
"""
    Process ISO 8601 date time formats https://en.wikipedia.org/wiki/ISO_8601
"""
return dateutil.parser.parse(datetime_str)
def quarter(date):
return (date.month - 1) // 3 + 1
def week_of_year(date):
"""
    Our weeks start on Mondays
%W - week number of the current year, starting with the first
Monday as the first day of the first week
:param date: a datetime object
:return: the week of the year
"""
return date.strftime("%W")
def default(o):
"""Used to dump objects into json when the objects have datetime members"""
if type(o) is datetime:
return o.isoformat()
if isinstance(o, (columns.UUID, columns.TimeUUID)):
return str(o)
def get_database(model, alias=None):
if alias:
return connections[alias]
for alias in connections:
engine = get_engine_from_db_alias(alias)
if issubclass(model, DjangoCassandraModel):
if engine == "django_cassandra_engine":
return connections[alias]
elif not engine == "django_cassandra_engine":
return connections[alias]
raise AttributeError("Database not found!")
def get_keyspace(alias=None):
if alias:
return connections[alias]
for alias in connections:
engine = get_engine_from_db_alias(alias)
if engine == "django_cassandra_engine":
return connections[alias].settings_dict.get('NAME', '')
raise AttributeError("Database not found!")
def delete_all_records(model_clazz, database=None):
if issubclass(model_clazz, DjangoCassandraModel):
conn = get_database(model_clazz, database)
conn.connection.execute(
"TRUNCATE {};".format(model_clazz.objects.column_family_name))
else:
model_clazz.objects.all().delete()
def get_primary_keys_values(instance, model):
return {pk: getattr(instance, pk)
for pk in model._primary_keys.keys()}
| 27.089888 | 79 | 0.69017 |
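To make the `quarter` and `week_of_year` semantics above concrete (weeks numbered by `%W`, which starts counting from the first Monday of the year), a short stdlib-only check; it assumes the two helpers defined in the file above are in scope:

```python
from datetime import datetime

# 2021-01-04 was the first Monday of 2021, so %W reports week "01";
# the days before it fall into week "00".
print(week_of_year(datetime(2021, 1, 4)))  # '01'
print(week_of_year(datetime(2021, 1, 1)))  # '00'

# (month - 1) // 3 + 1: May belongs to the second quarter.
print(quarter(datetime(2021, 5, 1)))       # 2
```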
| da7aaa3a1eaf77be7695d6a23b385f65806c627e | 701 | py | Python | pyramidvue/tests.py | timgates42/pyramidVue | 4e066ee6c8c23b27d56ebb87b6b32df65c338d4c | ["MIT"] | 37 | 2017-06-06T18:09:25.000Z | 2021-05-28T21:22:48.000Z | pyramidvue/tests.py | timgates42/pyramidVue | 4e066ee6c8c23b27d56ebb87b6b32df65c338d4c | ["MIT"] | 8 | 2017-10-08T07:10:25.000Z | 2022-02-26T03:25:56.000Z | pyramidvue/tests.py | timgates42/pyramidVue | 4e066ee6c8c23b27d56ebb87b6b32df65c338d4c | ["MIT"] | 6 | 2017-07-10T07:10:27.000Z | 2020-10-26T17:58:02.000Z |
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['project'], 'pyramidVue')
class FunctionalTests(unittest.TestCase):
def setUp(self):
from pyramidvue import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def test_root(self):
res = self.testapp.get('/', status=200)
self.assertTrue(b'Vue' in res.body)
| 23.366667 | 55 | 0.636234 |
| be344feab1b88658cfb3916bda031b9d9b0e2826 | 2,300 | py | Python | examples/copy_example.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | ["BSD-3-Clause"] | 85 | 2016-08-07T16:46:58.000Z | 2022-03-23T01:44:02.000Z | examples/copy_example.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | ["BSD-3-Clause"] | 109 | 2016-08-02T18:40:04.000Z | 2021-08-23T08:08:02.000Z | examples/copy_example.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | ["BSD-3-Clause"] | 29 | 2016-11-29T03:42:47.000Z | 2022-01-23T17:37:11.000Z |
import argparse
import os
import sys
import logging
from carto.auth import APIKeyAuthClient
from carto.sql import SQLClient
from carto.sql import CopySQLClient
# Logger (better than print)
logging.basicConfig(
level=logging.INFO,
format=' %(asctime)s - %(levelname)s - %(message)s',
datefmt='%I:%M:%S %p')
logger = logging.getLogger()
# set input arguments
parser = argparse.ArgumentParser(description='Example of CopySQLClient usage (file-based interface)')
parser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',
default=os.environ.get('CARTO_API_URL', ''),
help='Set the base URL. For example:' +
' https://username.carto.com/ ' +
'(defaults to env variable CARTO_API_URL)')
parser.add_argument('--api_key', dest='CARTO_API_KEY',
default=os.environ.get('CARTO_API_KEY', ''),
help='Api key of the account' +
' (defaults to env variable CARTO_API_KEY)')
args = parser.parse_args()
# Set authentication to CARTO
if args.CARTO_BASE_URL and args.CARTO_API_KEY:
auth_client = APIKeyAuthClient(
args.CARTO_BASE_URL, args.CARTO_API_KEY)
else:
logger.error('You need to provide valid credentials, run with '
'-h parameter for details')
sys.exit(1)
# Create and cartodbfy a table
sqlClient = SQLClient(auth_client)
sqlClient.send("""
CREATE TABLE IF NOT EXISTS copy_example (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""")
sqlClient.send("SELECT CDB_CartodbfyTable(current_schema, 'copy_example')")
copyClient = CopySQLClient(auth_client)
# COPY FROM example
logger.info("COPY'ing FROM file...")
query = ('COPY copy_example (the_geom, name, age) '
'FROM stdin WITH (FORMAT csv, HEADER true)')
result = copyClient.copyfrom_file_path(query, 'files/copy_from.csv')
logger.info('result = %s' % result)
# COPY TO example
query = 'COPY copy_example TO stdout WITH (FORMAT csv, HEADER true)'
output_file = 'files/copy_export.csv'
copyClient.copyto_file_path(query, output_file)
logger.info('Table copied to %s' % output_file)
# Truncate the table to make this example repeatable
sqlClient.send('TRUNCATE TABLE copy_example RESTART IDENTITY')
| 31.944444 | 101 | 0.687391 |
| a15ba0a42d3caf424e0f4de3198694cd5dba355a | 1,152 | py | Python | 2021/src/Utils.py | nimroha/HashCode | a98e341cb1f569bb9baf2005946c9cde168ae362 | ["MIT"] | null | null | null | 2021/src/Utils.py | nimroha/HashCode | a98e341cb1f569bb9baf2005946c9cde168ae362 | ["MIT"] | null | null | null | 2021/src/Utils.py | nimroha/HashCode | a98e341cb1f569bb9baf2005946c9cde168ae362 | ["MIT"] | 1 | 2021-02-26T03:08:37.000Z | 2021-02-26T03:08:37.000Z |
import numpy as np
import matplotlib.pyplot as plt
import pickle
from argparse import ArgumentTypeError
def plotHist(lst, title):
max = np.max(lst)
min = np.min(lst)
plt.hist(lst, np.arange(start=min, stop=max))
plt.title(title)
plt.show()
def savePickle(savePath, obj):
"""
pickle dump wrapper
:param savePath: path to save
:param obj: object to save
"""
with open(savePath, 'wb') as fp:
pickle.dump(obj, fp)
def loadPickle(loadPath):
"""
pickle load wrapper
:param loadPath: path to load
:return: python object
"""
with open(loadPath, 'rb') as fp:
return pickle.load(fp)
def validateInputRange(valid):
"""
validate the input problem arg is valid
:param valid: list of valid problem inputs
:return: the argument if it's valid
:raise ArgumentTypeError: if it's not valid
"""
def func(arg):
if (not isinstance(arg, str) or
len(set(arg) - set(valid)) > 0):
raise ArgumentTypeError(f'input argument must be any subset (in any order) of "{valid}", got "{arg}"')
return arg
return func
| 20.945455 | 114 | 0.626736 |
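`validateInputRange` above is written to be plugged into argparse as a `type` callable; a short sketch of that usage (the argument name and the valid-character set are made up for illustration):

```python
from argparse import ArgumentParser

# Assumes validateInputRange from the file above is in scope.
parser = ArgumentParser()
parser.add_argument('problem', type=validateInputRange('abcdef'))

print(parser.parse_args(['cab']).problem)  # 'cab' is a subset of "abcdef", accepted
# parser.parse_args(['xyz']) would exit with an error because the checker
# raises ArgumentTypeError for characters outside "abcdef".
```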
| 9f720caa971609bff07006f563d48fb4e57e02a9 | 21,733 | py | Python | heat/tests/aws/test_security_group.py | stackriot/heat | 9ed612906e388eda8bf850420cbceef54e05841c | ["Apache-2.0"] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat/tests/aws/test_security_group.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | ["Apache-2.0"] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat/tests/aws/test_security_group.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | ["Apache-2.0"] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from unittest import mock
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutronclient
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
NovaSG = collections.namedtuple('NovaSG',
' '.join([
'name',
'id',
'rules',
'description',
]))
class SecurityGroupTest(common.HeatTestCase):
test_template_neutron = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: HTTP and SSH access
VpcId: aaaa
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort : "80"
ToPort : "80"
CidrIp : 0.0.0.0/0
- IpProtocol: tcp
SourceSecurityGroupId: wwww
SecurityGroupEgress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 10.0.1.0/24
- SourceSecurityGroupName: xxxx
'''
def setUp(self):
super(SecurityGroupTest, self).setUp()
self.m_csg = self.patchobject(neutronclient.Client,
'create_security_group')
self.m_csgr = self.patchobject(
neutronclient.Client, 'create_security_group_rule')
self.m_ssg = self.patchobject(neutronclient.Client,
'show_security_group')
self.m_dsgr = self.patchobject(
neutronclient.Client, 'delete_security_group_rule')
self.m_dsg = self.patchobject(
neutronclient.Client, 'delete_security_group')
self.m_usg = self.patchobject(
neutronclient.Client, 'update_security_group')
self.patchobject(resource.Resource, 'is_using_neutron',
return_value=True)
self.sg_name = utils.PhysName('test_stack', 'the_sg')
def mock_no_neutron(self):
self.patchobject(resource.Resource, 'is_using_neutron',
return_value=False)
def create_stack(self, templ):
self.stack = self.parse_stack(template_format.parse(templ))
self.assertIsNone(self.stack.create())
return self.stack
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = template.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, tmpl)
stack.store()
return stack
def assertResourceState(self, rsrc, ref_id, metadata=None):
metadata = metadata or {}
self.assertIsNone(rsrc.validate())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(ref_id, rsrc.FnGetRefId())
self.assertEqual(metadata, dict(rsrc.metadata_get()))
def validate_create_security_group_rule_calls(self):
expected = [
mock.call(
{'security_group_rule': {
'security_group_id': 'aaaa', 'protocol': 'tcp',
'port_range_max': 22, 'direction': 'ingress',
'remote_group_id': None, 'ethertype': 'IPv4',
'remote_ip_prefix': '0.0.0.0/0', 'port_range_min': 22}}
),
mock.call(
{'security_group_rule': {
'security_group_id': 'aaaa', 'protocol': 'tcp',
'port_range_max': 80, 'direction': 'ingress',
'remote_group_id': None, 'ethertype': 'IPv4',
'remote_ip_prefix': '0.0.0.0/0', 'port_range_min': 80}}
),
mock.call(
{'security_group_rule': {
'security_group_id': 'aaaa', 'protocol': 'tcp',
'port_range_max': None, 'direction': 'ingress',
'remote_group_id': 'wwww', 'ethertype': 'IPv4',
'remote_ip_prefix': None, 'port_range_min': None}}
),
mock.call(
{'security_group_rule': {
'security_group_id': 'aaaa', 'protocol': 'tcp',
'port_range_max': 22, 'direction': 'egress',
'remote_group_id': None, 'ethertype': 'IPv4',
'remote_ip_prefix': '10.0.1.0/24', 'port_range_min': 22}}
),
mock.call(
{'security_group_rule': {
'security_group_id': 'aaaa', 'protocol': None,
'port_range_max': None, 'direction': 'egress',
'remote_group_id': 'xxxx', 'ethertype': 'IPv4',
'remote_ip_prefix': None, 'port_range_min': None}})
]
self.assertEqual(expected, self.m_csgr.call_args_list)
def validate_delete_security_group_rule(self):
self.assertEqual(
[mock.call('aaaa-1'),
mock.call('aaaa-2'),
mock.call('bbbb'),
mock.call('cccc'),
mock.call('dddd'),
mock.call('eeee'),
mock.call('ffff'),
],
self.m_dsgr.call_args_list)
def validate_stubout_neutron_create_security_group(self):
self.m_csg.assert_called_once_with({
'security_group': {
'name': self.sg_name,
'description': 'HTTP and SSH access'
}
})
self.validate_delete_security_group_rule()
self.validate_create_security_group_rule_calls()
def stubout_neutron_create_security_group(self, mock_csgr=True):
self.m_csg.return_value = {
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': self.sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [{
"direction": "egress",
"ethertype": "IPv4",
"id": "aaaa-1",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}, {
"direction": "egress",
"ethertype": "IPv6",
"id": "aaaa-2",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "aaaa",
"tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
}],
'id': 'aaaa'
}
}
if mock_csgr:
self.m_csgr.side_effect = [
{
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': 22,
'ethertype': 'IPv4',
'port_range_max': 22,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'bbbb'
}},
{
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': 80,
'ethertype': 'IPv4',
'port_range_max': 80,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'cccc'
}
},
{
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'dddd'
}
},
{
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'port_range_min': 22,
'ethertype': 'IPv4',
'port_range_max': 22,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'eeee'
}
},
{
'security_group_rule': {
'direction': 'egress',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': None,
'security_group_id': 'aaaa',
'id': 'ffff'
}
}
]
def stubout_neutron_get_security_group(self):
self.m_ssg.return_value = {
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': 22,
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 22
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': 80,
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 80
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': 22,
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 22
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'xxxx',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}}
def test_security_group_neutron(self):
# create script
self.stubout_neutron_create_security_group()
self.stubout_neutron_get_security_group()
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
stack.delete()
self.validate_stubout_neutron_create_security_group()
self.m_ssg.assert_called_once_with('aaaa')
self.m_dsg.assert_called_once_with('aaaa')
def test_security_group_neutron_exception(self):
# create script
self.m_csg.return_value = {
'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': self.sg_name,
'description': 'HTTP and SSH access',
'security_group_rules': [],
'id': 'aaaa'
}
}
self.m_csgr.side_effect = neutron_exc.Conflict
# delete script
self.m_dsgr.side_effect = neutron_exc.NeutronClientException(
status_code=404)
self.m_ssg.side_effect = [
{'security_group': {
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'name': 'sc1',
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': 22,
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 22
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': 80,
'id': 'cccc',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 80
}, {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': None,
'id': 'dddd',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': 'wwww',
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}, {
'direction': 'egress',
'protocol': 'tcp',
'port_range_max': 22,
'id': 'eeee',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': '10.0.1.0/24',
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': 22
}, {
'direction': 'egress',
'protocol': None,
'port_range_max': None,
'id': 'ffff',
'ethertype': 'IPv4',
'security_group_id': 'aaaa',
'remote_group_id': None,
'remote_ip_prefix': None,
'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
'port_range_min': None
}],
'id': 'aaaa'}},
neutron_exc.NeutronClientException(status_code=404)]
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
scheduler.TaskRunner(sg.delete)()
sg.state_set(sg.CREATE, sg.COMPLETE, 'to delete again')
sg.resource_id = 'aaaa'
stack.delete()
self.m_csg.assert_called_once_with({
'security_group': {
'name': self.sg_name,
'description': 'HTTP and SSH access'
}
})
self.validate_create_security_group_rule_calls()
self.assertEqual(
[mock.call('aaaa'), mock.call('aaaa')],
self.m_ssg.call_args_list)
self.assertEqual(
[mock.call('bbbb'), mock.call('cccc'), mock.call('dddd'),
mock.call('eeee'), mock.call('ffff')], self.m_dsgr.call_args_list)
def test_security_group_neutron_update(self):
# create script
self.stubout_neutron_create_security_group(mock_csgr=False)
# update script
# delete old not needed rules
self.stubout_neutron_get_security_group()
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
# make updated template
props = copy.deepcopy(sg.properties.data)
props['SecurityGroupIngress'] = [
{'IpProtocol': 'tcp',
'FromPort': '80',
'ToPort': '80',
'CidrIp': '0.0.0.0/0'},
{'IpProtocol': 'tcp',
'FromPort': '443',
'ToPort': '443',
'CidrIp': '0.0.0.0/0'},
{'IpProtocol': 'tcp',
'SourceSecurityGroupId': 'zzzz'},
]
props['SecurityGroupEgress'] = [
{'IpProtocol': 'tcp',
'FromPort': '22',
'ToPort': '22',
'CidrIp': '0.0.0.0/0'},
{'SourceSecurityGroupName': 'xxxx'},
]
after = rsrc_defn.ResourceDefinition(sg.name, sg.type(), props)
# create missing rules
self.m_csgr.side_effect = [
{
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': 443,
'ethertype': 'IPv4',
'port_range_max': 443,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'bbbb'}
}, {
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': 'zzzz',
'remote_ip_prefix': None,
'port_range_min': None,
'ethertype': 'IPv4',
'port_range_max': None,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'dddd'}
}, {
'security_group_rule': {
'direction': 'egress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': 22,
'ethertype': 'IPv4',
'port_range_max': 22,
'protocol': 'tcp',
'security_group_id': 'aaaa',
'id': 'eeee'
}
}
]
scheduler.TaskRunner(sg.update, after)()
self.assertEqual((sg.UPDATE, sg.COMPLETE), sg.state)
self.m_dsgr.assert_has_calls(
[mock.call('aaaa-1'), mock.call('aaaa-2'), mock.call('eeee'),
mock.call('dddd'), mock.call('bbbb')],
any_order=True)
self.m_ssg.assert_called_once_with('aaaa')
def test_security_group_neutron_update_with_empty_rules(self):
# create script
self.stubout_neutron_create_security_group()
# update script
# delete old not needed rules
self.stubout_neutron_get_security_group()
stack = self.create_stack(self.test_template_neutron)
sg = stack['the_sg']
self.assertResourceState(sg, 'aaaa')
# make updated template
props = copy.deepcopy(sg.properties.data)
del props['SecurityGroupEgress']
after = rsrc_defn.ResourceDefinition(sg.name, sg.type(), props)
scheduler.TaskRunner(sg.update, after)()
self.assertEqual((sg.UPDATE, sg.COMPLETE), sg.state)
self.m_ssg.assert_called_once_with('aaaa')
self.m_dsgr.assert_has_calls(
[mock.call('aaaa-1'), mock.call('aaaa-2'), mock.call('eeee'),
mock.call('ffff')],
any_order=True)
| 38.465487
| 79
| 0.473151
|
f359aeb80388c40a4268afd8e21d138dedc28c84
| 2,817
|
py
|
Python
|
altimu10v5/lis3mdl.py
|
SvetoslavKuzmanov/altimu10v5
|
87c10bd9918360632d5ea1a356e5cd3fe06eb33f
|
[
"MIT"
] | 4
|
2018-03-27T05:15:02.000Z
|
2022-03-18T13:17:37.000Z
|
Diddyborg_python/lis3mdl.py
|
EEA-sensors/elec-e8740-project-code-template
|
ef7020a9ed4f2ee25756dd907a16d602bffeb6fe
|
[
"Apache-2.0"
] | 1
|
2018-06-03T23:35:18.000Z
|
2018-06-03T23:35:18.000Z
|
Diddyborg_python/lis3mdl.py
|
EEA-sensors/elec-e8740-project-code-template
|
ef7020a9ed4f2ee25756dd907a16d602bffeb6fe
|
[
"Apache-2.0"
] | 6
|
2018-07-31T09:52:10.000Z
|
2021-08-30T17:54:27.000Z
|
# -*- coding: utf-8 -*-
"""Python library module for LIS3MDL magnetometer.
This module for the Raspberry Pi computer helps interface the LIS3MDL
magnetometer. The library makes it easy to read the raw magnetometer
values through the I²C interface.
The datasheet for the LIS3MDL is available at
[https://www.pololu.com/file/download/LIS3MDL.pdf?file_id=0J1089]
"""
from i2c import I2C
from constants import *
class LIS3MDL(I2C):
""" Set up and access LIS3MDL magnetometer.
"""
# Output registers used by the magnetometer
magnetometer_registers = [
LIS3MDL_OUT_X_L, # low byte of X value
LIS3MDL_OUT_X_H, # high byte of X value
LIS3MDL_OUT_Y_L, # low byte of Y value
LIS3MDL_OUT_Y_H, # high byte of Y value
LIS3MDL_OUT_Z_L, # low byte of Z value
LIS3MDL_OUT_Z_H, # high byte of Z value
]
def __init__(self, bus_id=1):
""" Set up I2C connection and initialize some flags and values.
"""
super(LIS3MDL, self).__init__(bus_id)
self.is_magnetometer_enabled = False
def __del__(self):
""" Clean up. """
try:
# Power down magnetometer
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x03)
super(LIS3MDL, self).__del__()
except:
pass
def enable(self):
""" Enable and set up the the magnetometer and determine
whether to auto increment registers during I2C read operations.
"""
# Disable magnetometer and temperature sensor first
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG1, 0x00)
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x03)
# Enable device in continuous conversion mode
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x00)
# Initial value for CTRL_REG1
ctrl_reg1 = 0x00
# Ultra-high-performance mode for X and Y
# Output data rate 10Hz
# binary value -> 01110000b, hex value -> 0x70
ctrl_reg1 += 0x70
# +/- 4 gauss full scale
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG2, 0x00)
# Ultra-high-performance mode for Z
# binary value -> 00001100b, hex value -> 0x0c
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG4, 0x0c)
self.is_magnetometer_enabled = True
# Write calculated value to the CTRL_REG1 register
self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG1, ctrl_reg1)
def get_magnetometer_raw(self):
""" Return 3D vector of raw magnetometer data.
"""
# Check if magnetometer has been enabled
if not self.is_magnetometer_enabled:
raise(Exception('Magnetometer is not enabled'))
return self.read_3d_sensor(LIS3MDL_ADDR, self.magnetometer_registers)
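# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the class above, assuming the package's i2c and
# constants modules are importable and that get_magnetometer_raw() returns an
# (x, y, z) sequence as elsewhere in this library.
if __name__ == '__main__':
    magnetometer = LIS3MDL(bus_id=1)
    magnetometer.enable()
    reading = magnetometer.get_magnetometer_raw()
    print("raw magnetometer reading: {}".format(reading))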
| 32.755814
| 77
| 0.665247
|
a3c0d2b152e221651ed9886183d0d558f38ba8b8
| 7,065
|
py
|
Python
|
src/twisted/names/secondary.py
|
clokep/twisted
|
79a26b0aa4b1b81b46cc64d203644b35e455e46b
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/names/secondary.py
|
clokep/twisted
|
79a26b0aa4b1b81b46cc64d203644b35e455e46b
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/names/secondary.py
|
clokep/twisted
|
79a26b0aa4b1b81b46cc64d203644b35e455e46b
|
[
"Unlicense",
"MIT"
] | 1
|
2021-12-13T10:46:13.000Z
|
2021-12-13T10:46:13.000Z
|
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
__all__ = ['SecondaryAuthority', 'SecondaryAuthorityService']
from twisted.internet import task, defer
from twisted.names import dns
from twisted.names import common
from twisted.names import client
from twisted.names import resolve
from twisted.names.authority import FileAuthority
from twisted.python import log, failure
from twisted.python.compat import nativeString
from twisted.application import service
class SecondaryAuthorityService(service.Service):
"""
A service that keeps one or more authorities up to date by doing hourly
zone transfers from a master.
@ivar primary: IP address of the master.
@type primary: L{str}
@ivar domains: An authority for each domain mirrored from the master.
@type domains: L{list} of L{SecondaryAuthority}
"""
calls = None
_port = 53
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@type primary: L{str}
@param domains: A sequence of domain names for which to perform
zone transfers.
@type domains: L{list} of L{bytes}
"""
self.primary = nativeString(primary)
self.domains = [SecondaryAuthority(primary, d) for d in domains]
@classmethod
def fromServerAddressAndDomains(cls, serverAddress, domains):
"""
Construct a new L{SecondaryAuthorityService} from a tuple giving a
        server address and a list of domain names for which this
is an authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
        @param domains: A sequence of C{bytes} giving the domains to transfer.
@return: A new instance of L{SecondaryAuthorityService}.
"""
primary, port = serverAddress
service = cls(primary, [])
service._port = port
service.domains = [
SecondaryAuthority.fromServerAddressAndDomain(serverAddress, d)
for d in domains]
return service
def getAuthority(self):
"""
Get a resolver for the transferred domains.
@rtype: L{ResolverChain}
"""
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
class SecondaryAuthority(FileAuthority):
"""
An Authority that keeps itself updated by performing zone transfers.
@ivar primary: The IP address of the server from which zone transfers will
be attempted.
@type primary: C{str}
@ivar _port: The port number of the server from which zone transfers will
be attempted.
@type: C{int}
@ivar domain: The domain for which this is the secondary authority.
@type: C{bytes}
@ivar _reactor: The reactor to use to perform the zone transfers, or
L{None} to use the global reactor.
"""
transferring = False
soa = records = None
_port = 53
_reactor = None
def __init__(self, primaryIP, domain):
"""
@param domain: The domain for which this will be the secondary
authority.
@type domain: L{bytes} or L{str}
"""
# Yep. Skip over FileAuthority.__init__. This is a hack until we have
# a good composition-based API for the complicated DNS record lookup
# logic we want to share.
common.ResolverBase.__init__(self)
self.primary = nativeString(primaryIP)
self.domain = dns.domainString(domain)
@classmethod
def fromServerAddressAndDomain(cls, serverAddress, domain):
"""
Construct a new L{SecondaryAuthority} from a tuple giving a server
address and a C{bytes} giving the name of a domain for which this is an
authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domain: A C{bytes} giving the domain to transfer.
@type domain: L{bytes}
@return: A new instance of L{SecondaryAuthority}.
"""
primary, port = serverAddress
secondary = cls(primary, domain)
secondary._port = port
return secondary
def transfer(self):
"""
Attempt a zone transfer.
@returns: A L{Deferred} that fires with L{None} when attempted zone
transfer has completed.
"""
# FIXME: This logic doesn't avoid duplicate transfers
# https://twistedmatrix.com/trac/ticket/9754
        if self.transferring:  # never True: only the misspelled flag below is ever set
            return
        self.transfering = True  # typo for 'transferring', so the guard above stays disabled (see FIXME)
reactor = self._reactor
if reactor is None:
from twisted.internet import reactor
resolver = client.Resolver(
servers=[(self.primary, self._port)], reactor=reactor)
return resolver.lookupZone(self.domain
).addCallback(self._cbZone
).addErrback(self._ebZone
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
# No transfer has occurred yet. Fail non-authoritatively so that
# the caller can try elsewhere.
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority._lookup(self, name, cls, type, timeout)
def _cbZone(self, zone):
ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (rec.name.name.lower(), rec.payload)
else:
r.setdefault(rec.name.name.lower(), []).append(rec.payload)
def _ebZone(self, failure):
log.msg("Updating %s from %s failed during zone transfer" % (self.domain, self.primary))
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
        self.transferring = False
log.msg("Transferring %s from %s failed after zone transfer" % (self.domain, self.primary))
log.err(failure)
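# --- Hedged usage sketch (not part of the original module) ---
# One plausible way to wire the classes above into a name server, based only on
# what is defined/imported in this file plus twisted.names.server. The master
# address, port, and domain below are placeholders, not real infrastructure.
def _example_secondary_server():
    from twisted.internet import reactor
    from twisted.names import server

    svc = SecondaryAuthorityService.fromServerAddressAndDomains(
        ('203.0.113.10', 53), [b'example.com'])
    factory = server.DNSServerFactory(authorities=svc.domains)
    protocol = dns.DNSDatagramProtocol(controller=factory)
    svc.startService()
    reactor.listenUDP(10053, protocol)
    reactor.listenTCP(10053, factory)
    reactor.run()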
| 31.968326
| 99
| 0.639207
|
ab5fe50b9feb29d0941d996ad1d085d60838e920
| 626
|
py
|
Python
|
send.py
|
apallath/ClipCross
|
f174d103c581541cb35f7296ff362d944020d72b
|
[
"MIT"
] | null | null | null |
send.py
|
apallath/ClipCross
|
f174d103c581541cb35f7296ff362d944020d72b
|
[
"MIT"
] | null | null | null |
send.py
|
apallath/ClipCross
|
f174d103c581541cb35f7296ff362d944020d72b
|
[
"MIT"
] | null | null | null |
from socket import *
from win32clipboard import *
import sys
OpenClipboard()
try:
text=GetClipboardData(CF_TEXT)
except:
print "Error in getting clipboard data!"
sys.exit()
finally:
CloseClipboard()
print "ClipCross Alpha"
host = raw_input("Enter IP address of machine you want to connect to: ")
port = 6000
server_address = (host,port)
try:
sock = socket()
sock.connect((host,port))
sock.sendto(text, server_address)
s= sock.recv(1024)
print "Message from server: %s" %(s)
sock.close()
except:
print "Could not connect to network"
sys.exit()
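# --- Hedged illustration (not part of this script) ---
# A made-up minimal listener the sender above could talk to; it is NOT the
# project's actual receiving script. It accepts one connection on port 6000,
# reads the clipboard text sent by the client, and replies with an ack.
import socket as _socket_mod

def _example_listener():
    srv = _socket_mod.socket(_socket_mod.AF_INET, _socket_mod.SOCK_STREAM)
    srv.bind(('', 6000))
    srv.listen(1)
    conn, addr = srv.accept()
    data = conn.recv(4096)
    conn.sendall('received %d bytes' % len(data))
    conn.close()
    srv.close()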
| 20.193548
| 73
| 0.653355
|
304e47e0fefda26f06277b9b74915872f0c3bc7a
| 873
|
py
|
Python
|
script.module.urlresolver/lib/urlresolver/plugins/bitvid.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
script.module.urlresolver/lib/urlresolver/plugins/bitvid.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.urlresolver/lib/urlresolver/plugins/bitvid.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 3
|
2019-09-30T19:52:05.000Z
|
2020-04-12T21:20:56.000Z
|
"""
urlresolver XBMC Addon
Copyright (C) 2017 Anis3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __generic_resolver__ import GenericResolver
class BitvidResolver(GenericResolver):
name = "bitvid.sx"
domains = ["bitvid.sx"]
pattern = '(?://|\.)(bitvid\.sx)/(?:embed[/-])?([0-9A-Za-z]+)'
| 37.956522
| 68
| 0.752577
|
6e1d6c30716011c9c6a3ea61cf6be53ee1ee7bc1
| 10,916
|
py
|
Python
|
pydic/tests/test_services.py
|
felixcarmona/pydic
|
e0aa2ec7ca34d8ce1578770bff7e64a2704a9229
|
[
"MIT"
] | null | null | null |
pydic/tests/test_services.py
|
felixcarmona/pydic
|
e0aa2ec7ca34d8ce1578770bff7e64a2704a9229
|
[
"MIT"
] | null | null | null |
pydic/tests/test_services.py
|
felixcarmona/pydic
|
e0aa2ec7ca34d8ce1578770bff7e64a2704a9229
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from pydic import Services, ServicesException, Parameters
class SimpleService:
def __init__(self):
pass
def say(self):
return 'hello'
class SimpleServiceWithConstructorArguments:
def __init__(self, name, surname):
self._name = name
self._surname = surname
def say(self):
return 'hello %s %s' % (self._name, self._surname)
class SimpleServiceWithCallsWithArguments:
def __init__(self):
self._name = None
self._surname = None
def set_name(self, name):
self._name = name
def set_surname(self, surname):
self._surname = surname
def say(self):
return 'hello %s %s' % (self._name, self._surname)
class SimpleServiceWithCallWithArguments:
def __init__(self):
self._name = None
self._surname = None
def set_name_surname(self, name, surname):
self._name = name
self._surname = surname
def say(self):
return 'hello %s %s' % (self._name, self._surname)
class SimpleServiceWithCallsWithoutArguments:
def __init__(self):
self._name = None
self._surname = None
def set_name(self):
self._name = 'Felix'
def set_surname(self):
self._surname = 'Carmona'
def say(self):
return 'hello %s %s' % (self._name, self._surname)
class CarService:
def __init__(self, driver):
self._driver = driver
def drive(self):
return '%s is driving' % self._driver.get_name()
class DriverService:
def __init__(self, name):
self._name = name
def get_name(self):
return self._name
class ServicesTestCase(TestCase):
def test_get_most_simple_service(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello', service.say())
same_service = services.get('simple')
self.assertTrue(service is same_service)
def test_fail_when_tries_to_get_an_unknown_service(self):
services = Services({})
self.assertRaises(ServicesException, services.get, 'unknown_service')
def test_service_definition_referencing_other_service_definition(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
},
'alias_of_service': '@simple'
}
services = Services(definitions)
service = services.get('alias_of_service')
self.assertEqual('hello', service.say())
same_service = services.get('simple')
self.assertTrue(service is same_service)
def test_escape_service(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': {
'name': '@@foo',
'surname': 'abc'
}
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello @foo abc', service.say())
def test_escape_parameter(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': {
'name': "\{\{ foo \}\} {{ surname }}",
'surname': 'abc'
}
}
}
parameters = {
'surname': 'Carmona'
}
services = Services(definitions, Parameters(parameters))
service = services.get('simple')
self.assertEqual('hello {{ foo }} Carmona abc', service.say())
def test_service_definition_with_parameter_argument(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': {
'name': '{{ my_user_name }}',
'surname': '{{ my_user_surname }}xxx'
}
}
}
parameters = Parameters({'my_user_name': 'Felix', 'my_user_surname': 'Carmona'})
services = Services(definitions, parameters)
service = services.get('simple')
self.assertEqual('hello Felix Carmonaxxx', service.say())
def test_fail_when_tries_to_get_a_malformed_definition(self):
definitions = {
'simple': {
'xxx': 'aaa'
}
}
services = Services(definitions)
self.assertRaises(ServicesException, services.get, 'simple')
def test_service_with_constructor_arguments_as_dict(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': {
'name': 'Felix',
'surname': 'Carmona'
}
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello Felix Carmona', service.say())
def test_service_with_constructor_arguments_as_list(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': ['Felix', 'Carmona']
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello Felix Carmona', service.say())
def test_fail_when_definition_arguments_are_not_dict_or_tuple_or_list(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
'arguments': 'Felix'
}
}
services = Services(definitions)
self.assertRaises(ServicesException, services.get, 'simple')
def test_service_with_calls_with_arguments_as_list(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithCallsWithArguments',
'calls': [
['set_name', ['Felix']],
['set_surname', ['Carmona']]
]
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello Felix Carmona', service.say())
def test_service_with_calls_with_arguments_as_dict(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithCallWithArguments',
'calls': [
['set_name_surname', {'surname': 'Carmona', 'name': 'Felix'}]
]
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello Felix Carmona', service.say())
def test_service_with_calls_without_arguments(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithCallsWithoutArguments',
'calls': [
'set_name',
'set_surname'
]
}
}
services = Services(definitions)
service = services.get('simple')
self.assertEqual('hello Felix Carmona', service.say())
def test_service_with_sub_dependency(self):
definitions = {
'car': {
'class': 'pydic.tests.test_services.CarService',
'arguments': ['@driver']
},
'driver': {
'class': 'pydic.tests.test_services.DriverService',
'arguments': ['{{ driver_name }}']
}
}
parameters = Parameters({'driver_name': 'Felix'})
services = Services(definitions, parameters)
service = services.get('car')
self.assertEqual('Felix is driving', service.drive())
def test_fail_when_call_function_arguments_are_malformed(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithCallWithArguments',
'calls': [
['set_name_surname', 1]
]
}
}
services = Services(definitions)
self.assertRaises(ServicesException, services.get, 'simple')
def test_fail_when_call_function_not_exists_in_service(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleServiceWithCallsWithoutArguments',
'calls': [
'set_namex'
]
}
}
services = Services(definitions)
self.assertRaises(ServicesException, services.get, 'simple')
def test_set_service(self):
simple = SimpleService()
services = Services()
services.set('simple', simple)
same_simple = services.get('simple')
self.assertEqual('hello', same_simple.say())
def test_has_service(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
}
}
services = Services(definitions)
self.assertTrue(services.has('simple'))
self.assertFalse(services.has('foo_service'))
def test_remove_service(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
}
}
services = Services(definitions)
services.get('simple')
services.remove('simple')
self.assertFalse(services.has('simple'))
def test_add_services(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
}
}
services = Services(definitions)
services.add({'new_service_one': SimpleService(), 'new_service_two': SimpleService()})
self.assertTrue(services.has('simple'))
self.assertTrue(services.has('new_service_one'))
self.assertTrue(services.has('new_service_two'))
def test_get_keys_services(self):
definitions = {
'simple': {
'class': 'pydic.tests.test_services.SimpleService'
},
'other': {
'class': 'pydic.tests.test_services.SimpleService'
}
}
services = Services(definitions)
services.add({'new_service_one': SimpleService(), 'new_service_two': SimpleService()})
expected = ['other', 'simple', 'new_service_one', 'new_service_two']
actual = services.keys()
self.assertEqual(set(expected), set(actual))
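# --- Hedged usage sketch (not part of the original test module) ---
# A minimal, self-contained example of the container API exercised by the tests
# above. The 'greeter' service name and parameter values are made up, and it
# assumes the pydic test package is importable under its dotted path.
if __name__ == '__main__':
    definitions = {
        'greeter': {
            'class': 'pydic.tests.test_services.SimpleServiceWithConstructorArguments',
            'arguments': {'name': '{{ first_name }}', 'surname': 'Carmona'},
        }
    }
    services = Services(definitions, Parameters({'first_name': 'Felix'}))
    print(services.get('greeter').say())  # -> hello Felix Carmona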
| 32.295858
| 94
| 0.566599
|
a5729ab658a92ee337cc5926ededd37ccbead72e
| 34,369
|
py
|
Python
|
dask/bag/tests/test_bag.py
|
epervago/dask
|
958732ce6c51ef6af39db4727d948bfa66a0a8d6
|
[
"BSD-3-Clause"
] | null | null | null |
dask/bag/tests/test_bag.py
|
epervago/dask
|
958732ce6c51ef6af39db4727d948bfa66a0a8d6
|
[
"BSD-3-Clause"
] | null | null | null |
dask/bag/tests/test_bag.py
|
epervago/dask
|
958732ce6c51ef6af39db4727d948bfa66a0a8d6
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import, division, print_function
import pytest
import math
import os
import sys
from collections import Iterator
from distutils.version import LooseVersion
import partd
from toolz import merge, join, filter, identity, valmap, groupby, pluck
import dask
import dask.bag as db
from dask.bag.core import (Bag, lazify, lazify_task, map, collect,
reduceby, reify, partition, inline_singleton_lists,
optimize, from_delayed)
from dask.async import get_sync
from dask.compatibility import BZ2File, GzipFile, PY2
from dask.utils import filetexts, tmpfile, tmpdir, open
from dask.utils_test import inc, add
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert sorted(b._keys()) == sorted(dsk.keys())
def test_map():
c = b.map(inc)
expected = merge(dsk, dict(((c.name, i), (reify, (map, inc, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
assert c.name == b.map(inc).name
def test_map_function_with_multiple_arguments():
b = db.from_sequence([(1, 10), (2, 20), (3, 30)], npartitions=3)
assert list(b.map(lambda x, y: x + y).compute(get=dask.get)) == [11, 22, 33]
assert list(b.map(list).compute()) == [[1, 10], [2, 20], [3, 30]]
class A(object):
def __init__(self, a, b, c):
pass
class B(object):
def __init__(self, a):
pass
def test_map_with_constructors():
assert db.from_sequence([[1, 2, 3]]).map(A).compute()
assert db.from_sequence([1, 2, 3]).map(B).compute()
assert db.from_sequence([[1, 2, 3]]).map(B).compute()
failed = False
try:
db.from_sequence([[1,]]).map(A).compute()
except TypeError:
failed = True
assert failed
def test_map_with_builtins():
b = db.from_sequence(range(3))
assert ' '.join(b.map(str)) == '0 1 2'
assert b.map(str).map(tuple).compute() == [('0',), ('1',), ('2',)]
assert b.map(str).map(tuple).map(any).compute() == [True, True, True]
b2 = b.map(lambda n: [(n, n + 1), (2 * (n - 1), -n)])
assert b2.map(dict).compute() == [{0: 1, -2: 0}, {1: 2, 0: -1}, {2: -2}]
assert b.map(lambda n: (n, n + 1)).map(pow).compute() == [0, 1, 8]
assert b.map(bool).compute() == [False, True, True]
assert db.from_sequence([(1, 'real'), ('1', 'real')]).map(hasattr).compute() == \
[True, False]
def test_map_with_kwargs():
b = db.from_sequence(range(100), npartitions=10)
assert b.map(lambda x, factor=0: x * factor,
factor=2).sum().compute() == 9900.0
assert b.map(lambda x, total=0: x / total,
total=b.sum()).sum().compute() == 1.0
assert b.map(lambda x, factor=0, total=0: x * factor / total,
total=b.sum(),
factor=2).sum().compute() == 2.0
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(reify, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
assert c.name == b.filter(iseven).name
def test_remove():
f = lambda x: x % 2 == 0
c = b.remove(f)
assert list(c) == [1, 3] * 3
assert c.name == b.remove(f).name
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
@pytest.mark.parametrize('func', [str, repr])
def test_repr(func):
assert str(b.npartitions) in func(b)
assert b.name[:5] in func(b)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
assert b.pluck([1, 0]).name == b.pluck([1, 0]).name
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
pytest.raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
assert b.pluck(0, None).name == b.pluck(0, None).name
assert b.pluck(0).name != b.pluck(0, None).name
def test_unzip():
b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2))
one, two, three = b.unzip(3)
assert list(one) == list(range(100))
assert list(three) == [i + 2 for i in range(100)]
assert one.name == b.unzip(3)[0].name
assert one.name != two.name
def test_fold():
c = b.fold(add)
assert c.compute() == sum(L)
assert c.key == b.fold(add).key
c2 = b.fold(add, initial=10)
assert c2.key != c.key
assert c2.compute() == sum(L) + 10 * b.npartitions
assert c2.key == b.fold(add, initial=10).key
c = db.from_sequence(range(5), npartitions=3)
def binop(acc, x):
acc = acc.copy()
acc.add(x)
return acc
d = c.fold(binop, set.union, initial=set())
assert d.compute() == set(c)
assert d.key == c.fold(binop, set.union, initial=set()).key
d = db.from_sequence('hello')
assert set(d.fold(lambda a, b: ''.join([a, b]), initial='').compute()) == set('hello')
e = db.from_sequence([[1], [2], [3]], npartitions=2)
with dask.set_options(get=get_sync):
assert set(e.fold(add, initial=[]).compute()) == set([1, 2, 3])
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
assert b.distinct().name == b.distinct().name
assert 'distinct' in b.distinct().name
assert b.distinct().count().compute() == 5
def test_frequencies():
c = b.frequencies()
assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
c2 = b.frequencies(split_every=2)
assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
assert c.name == b.frequencies().name
assert c.name != c2.name
assert c2.name == b.frequencies(split_every=2).name
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
c = b.topk(4, key=lambda x: -x)
assert list(c) == [0, 0, 0, 1]
c2 = b.topk(4, key=lambda x: -x, split_every=2)
assert list(c2) == [0, 0, 0, 1]
assert c.name != c2.name
assert b.topk(4).name == b.topk(4).name
def test_topk_with_non_callable_key():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]
assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]
assert b.topk(2, key=1).name == b.topk(2, key=1).name
def test_topk_with_multiarg_lambda():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert b.any().compute() is True
assert b.all().compute() is False
assert b.all().key == b.all().key
assert b.all().key != b.any().key
def test_reduction_names():
assert b.sum().name.startswith('sum')
assert b.reduction(sum, sum).name.startswith('sum')
assert any(isinstance(k, str) and k.startswith('max')
for k in b.reduction(sum, max).dask)
assert b.reduction(sum, sum, name='foo').name.startswith('foo')
def test_tree_reductions():
b = db.from_sequence(range(12))
c = b.reduction(sum, sum, split_every=2)
d = b.reduction(sum, sum, split_every=6)
e = b.reduction(sum, sum, split_every=5)
assert c.compute() == d.compute() == e.compute()
assert len(c.dask) > len(d.dask)
c = b.sum(split_every=2)
d = b.sum(split_every=5)
assert c.compute() == d.compute()
assert len(c.dask) > len(d.dask)
assert c.key != d.key
assert c.key == b.sum(split_every=2).key
assert c.key != b.sum().key
def test_mean():
assert b.mean().compute(get=dask.get) == 2.0
assert float(b.mean()) == 2.0
def test_non_splittable_reductions():
np = pytest.importorskip('numpy')
data = list(range(100))
c = db.from_sequence(data, npartitions=10)
assert c.mean().compute() == np.mean(data)
assert c.std().compute(get=dask.get) == np.std(data)
def test_std():
assert b.std().compute(get=dask.get) == math.sqrt(2.0)
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert b.var().compute(get=dask.get) == 2.0
assert float(b.var()) == 2.0
def test_join():
c = b.join([1, 2, 3], on_self=isodd, on_other=iseven)
assert list(c) == list(join(iseven, [1, 2, 3], isodd, list(b)))
assert (list(b.join([1, 2, 3], isodd)) ==
list(join(isodd, [1, 2, 3], isodd, list(b))))
assert c.name == b.join([1, 2, 3], on_self=isodd, on_other=iseven).name
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
assert c.name == b.foldby(iseven, add, 0, add, 0).name
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
assert b.map_partitions(len).name == b.map_partitions(len).name
assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name
def test_map_partitions_with_kwargs():
b = db.from_sequence(range(100), npartitions=10)
assert b.map_partitions(
lambda X, factor=0: [x * factor for x in X],
factor=2).sum().compute() == 9900.0
assert b.map_partitions(
lambda X, total=0: [x / total for x in X],
total=b.sum()).sum().compute() == 1.0
assert b.map_partitions(
lambda X, factor=0, total=0: [x * factor / total for x in X],
total=b.sum(),
factor=2).sum().compute() == 2.0
def test_random_sample_size():
"""
Number of randomly sampled elements are in the expected range.
"""
a = db.from_sequence(range(1000), npartitions=5)
# we expect a size of approx. 100, but leave large margins to avoid
# random failures
assert 10 < len(list(a.random_sample(0.1, 42))) < 300
def test_random_sample_prob_range():
"""
Specifying probabilities outside the range [0, 1] raises ValueError.
"""
a = db.from_sequence(range(50), npartitions=5)
with pytest.raises(ValueError):
a.random_sample(-1)
with pytest.raises(ValueError):
a.random_sample(1.1)
def test_random_sample_repeated_computation():
"""
Repeated computation of a defined random sampling operation
generates identical results.
"""
a = db.from_sequence(range(50), npartitions=5)
b = a.random_sample(0.2)
assert list(b) == list(b) # computation happens here
def test_random_sample_different_definitions():
"""
Repeatedly defining a random sampling operation yields different results
upon computation if no random seed is specified.
"""
a = db.from_sequence(range(50), npartitions=5)
assert list(a.random_sample(0.5)) != list(a.random_sample(0.5))
assert a.random_sample(0.5).name != a.random_sample(0.5).name
def test_random_sample_random_state():
"""
Sampling with fixed random seed generates identical results.
"""
a = db.from_sequence(range(50), npartitions=5)
b = a.random_sample(0.5, 1234)
c = a.random_sample(0.5, 1234)
assert list(b) == list(c)
def test_lazify_task():
task = (sum, (reify, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (reify, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (reify, (map, inc, (reify, (filter, iseven, 'y'))))
b = (reify, (map, inc, (filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
def test_lazify():
a = {'x': (reify, (map, inc, (reify, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (reify, (map, inc, (filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_inline_singleton_lists():
inp = {'b': (list, 'a'),
'c': (f, 'b', 1)}
out = {'c': (f, (list, 'a'), 1)}
assert inline_singleton_lists(inp) == out
out = {'c': (f, 'a', 1)}
assert optimize(inp, ['c']) == out
inp = {'b': (list, 'a'),
'c': (f, 'b', 1),
'd': (f, 'b', 2)}
assert inline_singleton_lists(inp) == inp
inp = {'b': (4, 5)} # doesn't inline constants
assert inline_singleton_lists(inp) == inp
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_take_npartitions():
assert list(b.take(6, npartitions=2)) == [0, 1, 2, 3, 4, 0]
assert b.take(6, npartitions=-1) == (0, 1, 2, 3, 4, 0)
assert b.take(3, npartitions=-1) == (0, 1, 2)
with pytest.raises(ValueError):
b.take(1, npartitions=5)
@pytest.mark.skipif(sys.version_info[:2] == (3,3),
reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_take_npartitions_warn():
with pytest.warns(None):
b.take(100)
with pytest.warns(None):
b.take(7)
with pytest.warns(None):
b.take(7, npartitions=2)
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
def test_from_castra():
pytest.importorskip('castra')
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
blosc = pytest.importorskip('blosc')
if LooseVersion(blosc.__version__) == '1.3.0':
pytest.skip()
df = pd.DataFrame({'x': list(range(100)),
'y': [str(i) for i in range(100)]})
a = dd.from_pandas(df, 10)
with tmpfile('.castra') as fn:
c = a.to_castra(fn)
default = db.from_castra(c)
with_columns = db.from_castra(c, 'x')
with_index = db.from_castra(c, 'x', index=True)
assert (list(default) == [{'x': i, 'y': str(i)}
for i in range(100)] or
list(default) == [(i, str(i)) for i in range(100)])
assert list(with_columns) == list(range(100))
assert list(with_index) == list(zip(range(100), range(100)))
assert default.name != with_columns.name != with_index.name
assert with_index.name == db.from_castra(c, 'x', index=True).name
@pytest.mark.slow
def test_from_url():
a = db.from_url(['http://google.com', 'http://github.com'])
assert a.npartitions == 2
b = db.from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst')
assert b.npartitions == 1
assert b'Dask\n' in b.take(10)
def test_read_text():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert (set(line.strip() for line in db.read_text(fns)) ==
set('ABCD'))
assert (set(line.strip() for line in db.read_text('a*.log')) ==
set('ABCD'))
pytest.raises(ValueError, lambda: db.read_text('non-existent-*-path'))
def test_read_text_large():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(('Hello, world!' + os.linesep).encode() * 100)
b = db.read_text(fn, blocksize=100)
c = db.read_text(fn)
assert len(b.dask) > 5
assert list(map(str, b.str.strip())) == list(map(str, c.str.strip()))
d = db.read_text([fn], blocksize=100)
assert list(b) == list(d)
def test_read_text_encoding():
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write((u'你好!' + os.linesep).encode('gb18030') * 100)
b = db.read_text(fn, blocksize=100, encoding='gb18030')
c = db.read_text(fn, encoding='gb18030')
assert len(b.dask) > 5
assert (list(b.str.strip().map(lambda x: x.encode('utf-8'))) ==
list(c.str.strip().map(lambda x: x.encode('utf-8'))))
d = db.read_text([fn], blocksize=100, encoding='gb18030')
assert list(b) == list(d)
def test_read_text_large_gzip():
with tmpfile('gz') as fn:
f = GzipFile(fn, 'wb')
f.write(b'Hello, world!\n' * 100)
f.close()
with pytest.raises(ValueError):
db.read_text(fn, blocksize=50, linedelimiter='\n')
c = db.read_text(fn)
assert c.npartitions == 1
@pytest.mark.slow
def test_from_s3():
# note we don't test connection modes with aws_access_key and
# aws_secret_key because these are not on travis-ci
pytest.importorskip('s3fs')
five_tips = (u'total_bill,tip,sex,smoker,day,time,size\n',
u'16.99,1.01,Female,No,Sun,Dinner,2\n',
u'10.34,1.66,Male,No,Sun,Dinner,3\n',
u'21.01,3.5,Male,No,Sun,Dinner,3\n',
u'23.68,3.31,Male,No,Sun,Dinner,2\n')
# test compressed data
e = db.read_text('s3://tip-data/t*.gz', storage_options=dict(anon=True))
assert e.take(5) == five_tips
# test all keys in bucket
c = db.read_text('s3://tip-data/*', storage_options=dict(anon=True))
assert c.npartitions == 4
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
assert z.name != b2.name
assert z.name == x.product(y).name
def test_partition_collect():
with partd.Pickle() as p:
partition(identity, range(6), 3, p)
assert set(p.get(0)) == set([0, 3])
assert set(p.get(1)) == set([1, 4])
assert set(p.get(2)) == set([2, 5])
assert sorted(collect(identity, 0, p, '')) == [(0, [0]), (3, [3])]
def test_groupby():
c = b.groupby(identity)
result = dict(c)
assert result == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert c.npartitions == b.npartitions
assert c.name == b.groupby(identity).name
assert c.name != b.groupby(lambda x: x + 1).name
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
result2 = dict(result)
assert result2 == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
assert c.name == db.concat([a, b]).name
assert b.concat().name != a.concat().name
assert b.concat().name == b.concat().name
b = db.from_sequence([1, 2, 3]).map(lambda x: x * [1, 2, 3])
assert list(b.concat()) == [1, 2, 3] * sum([1, 2, 3])
def test_concat_after_map():
a = db.from_sequence([1, 2])
b = db.from_sequence([4, 5])
result = db.concat([a.map(inc), b])
assert list(result) == [2, 3, 4, 5]
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
pytest.importorskip('dask.dataframe')
pd = pytest.importorskip('pandas')
b = db.from_sequence([(1, 2), (10, 20), (100, 200)], npartitions=2)
df = b.to_dataframe()
assert list(df.columns) == list(pd.DataFrame(list(b)).columns)
df = b.to_dataframe(columns=['a', 'b'])
assert df.npartitions == b.npartitions
assert list(df.columns) == ['a', 'b']
assert df.a.compute().values.tolist() == list(b.pluck(0))
assert df.b.compute().values.tolist() == list(b.pluck(1))
b = db.from_sequence([{'a': 1, 'b': 2},
{'a': 10, 'b': 20},
{'a': 100, 'b': 200}], npartitions=2)
df2 = b.to_dataframe()
assert (df2.compute().values == df.compute().values).all()
assert df2._name == b.to_dataframe()._name
assert df2._name != df._name
meta = pd.DataFrame({'a': [1], 'b': [2]}).iloc[0:0]
df3 = b.to_dataframe(columns=meta)
assert df2._name == df3._name
assert (df3.compute().values == df2.compute().values).all()
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=2)
df4 = b.to_dataframe()
assert len(df4.columns) == 1
assert list(df4.compute()) == list(pd.DataFrame(list(b)))
ext_open = [('gz', GzipFile), ('', open)]
if not PY2:
ext_open.append(('bz2', BZ2File))
@pytest.mark.parametrize('ext,myopen', ext_open)
def test_to_textfiles(ext, myopen):
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), compute=False)
dask.compute(*c, get=dask.get)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
def test_to_textfiles_name_function_preserves_order():
seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
b = db.from_sequence(seq, npartitions=16)
with tmpdir() as dn:
b.to_textfiles(dn)
out = db.read_text(os.path.join(dn, "*"), encoding='ascii').map(str).map(str.strip).compute()
assert seq == out
@pytest.mark.skipif(sys.version_info[:2] == (3,3), reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_to_textfiles_name_function_warn():
seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
a = db.from_sequence(seq, npartitions=16)
with tmpdir() as dn:
with pytest.warns(None):
a.to_textfiles(dn, name_function=str)
def test_to_textfiles_encoding():
b = db.from_sequence([u'汽车', u'苹果', u'天气'], npartitions=2)
for ext, myopen in [('gz', GzipFile), ('bz2', BZ2File), ('', open)]:
if ext == 'bz2' and PY2:
continue
with tmpdir() as dir:
c = b.to_textfiles(os.path.join(dir, '*.' + ext), encoding='gb18030', compute=False)
dask.compute(*c)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'rb')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode('gb18030')
assert u'天气' in text
f.close()
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b])
assert os.path.exists(a)
assert os.path.exists(b)
with tmpdir() as dirname:
B.to_textfiles(dirname)
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
pytest.raises(ValueError, lambda: B.to_textfiles(5))
def test_to_textfiles_endlines():
b = db.from_sequence(['a', 'b', 'c'], npartitions=1)
with tmpfile() as fn:
b.to_textfiles([fn])
with open(fn, 'r') as f:
result = f.readlines()
assert result == ['a\n', 'b\n', 'c']
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
pytest.raises(AttributeError, lambda: b.str.sfohsofhf)
assert b.str.match('*Smith').name == b.str.match('*Smith').name
assert b.str.match('*Smith').name != b.str.match('*John').name
def test_string_namespace_with_unicode():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
def test_str_empty_split():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.split()) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
def test_map_with_iterator_function():
b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)
def f(L):
for x in L:
yield x + 1
c = b.map(f)
assert list(c) == [[2, 3, 4], [5, 6, 7]]
def test_ensure_compute_output_is_concrete():
b = db.from_sequence([1, 2, 3])
result = b.map(lambda x: x + 1).compute()
assert not isinstance(result, Iterator)
class BagOfDicts(db.Bag):
def get(self, key, default=None):
return self.map(lambda d: d.get(key, default))
def set(self, key, value):
def setter(d):
d[key] = value
return d
return self.map(setter)
def test_bag_class_extend():
dictbag = BagOfDicts(*db.from_sequence([{'a': {'b': 'c'}}])._args)
assert dictbag.get('a').get('b').compute()[0] == 'c'
assert dictbag.get('a').set('d', 'EXTENSIBILITY!!!').compute()[0] == \
{'b': 'c', 'd': 'EXTENSIBILITY!!!'}
assert isinstance(dictbag.get('a').get('b'), BagOfDicts)
def test_gh715():
bin_data = u'\u20ac'.encode('utf-8')
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(bin_data)
a = db.read_text(fn)
assert a.compute()[0] == bin_data.decode('utf-8')
def test_bag_compute_forward_kwargs():
x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1)
x.compute(bogus_keyword=10)
def test_to_delayed():
from dask.delayed import Delayed
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
a, b, c = b.map(inc).to_delayed()
assert all(isinstance(x, Delayed) for x in [a, b, c])
assert b.compute() == [4, 5]
b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)
t = b.sum().to_delayed()
assert isinstance(t, Delayed)
assert t.compute() == 21
def test_from_delayed():
from dask.delayed import delayed
a, b, c = delayed([1, 2, 3]), delayed([4, 5, 6]), delayed([7, 8, 9])
bb = from_delayed([a, b, c])
assert bb.name == from_delayed([a, b, c]).name
assert isinstance(bb, Bag)
assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
asum_value = delayed(lambda X: sum(X))(a)
asum_item = db.Item.from_delayed(asum_value)
assert asum_value.compute() == asum_item.compute() == 6
def test_range():
for npartitions in [1, 7, 10, 28]:
b = db.range(100, npartitions=npartitions)
assert len(b.dask) == npartitions
assert b.npartitions == npartitions
assert list(b) == list(range(100))
@pytest.mark.parametrize("npartitions", [1, 7, 10, 28])
def test_zip(npartitions, hi=1000):
evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions)
odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions)
pairs = db.zip(evens, odds)
assert pairs.npartitions == npartitions
assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2)))
def test_repartition():
for x, y in [(10, 5), (7, 3), (5, 1), (5, 4)]:
b = db.from_sequence(range(20), npartitions=x)
c = b.repartition(y)
assert b.npartitions == x
assert c.npartitions == y
assert list(b) == c.compute(get=dask.get)
try:
b.repartition(100)
except NotImplementedError as e:
assert '100' in str(e)
@pytest.mark.skipif('not db.core._implement_accumulate')
def test_accumulate():
parts = [[1, 2, 3], [4, 5], [], [6, 7]]
dsk = dict((('test', i), p) for (i, p) in enumerate(parts))
b = db.Bag(dsk, 'test', len(parts))
r = b.accumulate(add)
assert r.name == b.accumulate(add).name
assert r.name != b.accumulate(add, -1).name
assert r.compute() == [1, 3, 6, 10, 15, 21, 28]
assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27]
assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29]
b = db.from_sequence([1, 2, 3], npartitions=1)
assert b.accumulate(add).compute() == [1, 3, 6]
def test_groupby_tasks():
b = db.from_sequence(range(160), npartitions=4)
out = b.groupby(lambda x: x % 10, max_branch=4, method='tasks')
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
b = db.from_sequence(range(1000), npartitions=100)
out = b.groupby(lambda x: x % 123, method='tasks')
assert len(out.dask) < 100**2
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
b = db.from_sequence(range(10000), npartitions=345)
out = b.groupby(lambda x: x % 2834, max_branch=24, method='tasks')
partitions = dask.get(out.dask, out._keys())
for a in partitions:
for b in partitions:
if a is not b:
assert not set(pluck(0, a)) & set(pluck(0, b))
def test_groupby_tasks_names():
b = db.from_sequence(range(160), npartitions=4)
func = lambda x: x % 10
func2 = lambda x: x % 20
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) ==
set(b.groupby(func, max_branch=4, method='tasks').dask))
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) !=
set(b.groupby(func, max_branch=2, method='tasks').dask))
assert (set(b.groupby(func, max_branch=4, method='tasks').dask) !=
set(b.groupby(func2, max_branch=4, method='tasks').dask))
@pytest.mark.parametrize('size,npartitions,groups', [(1000, 20, 100),
(12345, 234, 1042)])
def test_groupby_tasks_2(size, npartitions, groups):
func = lambda x: x % groups
b = db.range(size, npartitions=npartitions).groupby(func, method='tasks')
result = b.compute(get=dask.get)
assert dict(result) == groupby(func, range(size))
def test_groupby_tasks_3():
func = lambda x: x % 10
b = db.range(20, npartitions=5).groupby(func, method='tasks', max_branch=2)
result = b.compute(get=dask.get)
assert dict(result) == groupby(func, range(20))
# assert b.npartitions == 5
def test_to_textfiles_empty_partitions():
with tmpdir() as d:
b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str)
b.to_textfiles(os.path.join(d, '*.txt'))
assert len(os.listdir(d)) == 5
def test_reduction_empty():
b = db.from_sequence(range(10), npartitions=100)
assert b.filter(lambda x: x % 2 == 0).max().compute(get=dask.get) == 8
assert b.filter(lambda x: x % 2 == 0).min().compute(get=dask.get) == 0
class StrictReal(int):
def __eq__(self, other):
assert isinstance(other, StrictReal)
return self.real == other.real
def __ne__(self, other):
assert isinstance(other, StrictReal)
return self.real != other.real
def test_reduction_with_non_comparable_objects():
b = db.from_sequence([StrictReal(x) for x in range(10)], partition_size=2)
assert b.fold(max, max).compute(get=dask.get) == StrictReal(9)
def test_reduction_with_sparse_matrices():
sp = pytest.importorskip('scipy.sparse')
b = db.from_sequence([sp.csr_matrix([0]) for x in range(4)], partition_size=2)
def sp_reduce(a, b):
return sp.vstack([a, b])
assert b.fold(sp_reduce, sp_reduce).compute(get=dask.get).shape == (4, 1)
def test_empty():
list(db.from_sequence([])) == []
def test_bag_picklable():
from pickle import loads, dumps
b = db.from_sequence(range(100))
b2 = loads(dumps(b))
assert b.compute() == b2.compute()
s = b.sum()
s2 = loads(dumps(s))
assert s.compute() == s2.compute()
def test_msgpack_unicode():
b = db.from_sequence([{"a": 1}]).groupby("a")
result = b.compute(get=dask.async.get_sync)
assert dict(result) == {1: [{'a': 1}]}
def test_bag_with_single_callable():
f = lambda: None
b = db.from_sequence([f])
assert list(b.compute(get=dask.get)) == [f]
def test_optimize_fuse_keys():
x = db.range(10, npartitions=2)
y = x.map(inc)
z = y.map(inc)
dsk = z._optimize(z.dask, z._keys())
assert not set(y.dask) & set(dsk)
dsk = z._optimize(z.dask, z._keys(), fuse_keys=y._keys())
assert all(k in dsk for k in y._keys())
def test_reductions_are_lazy():
current = [None]
def part():
for i in range(10):
current[0] = i
yield i
def func(part):
assert current[0] == 0
return sum(part)
b = Bag({('foo', 0): part()}, 'foo', 1)
res = b.reduction(func, sum)
assert res.compute(get=dask.get) == sum(range(10))
def test_repeated_groupby():
b = db.range(10, npartitions=4)
c = b.groupby(lambda x: x % 3)
assert valmap(len, dict(c)) == valmap(len, dict(c))
| 30.990983
| 105
| 0.577468
|
543664183cf1babb63ffa4eaa831e7193aa73f45
| 2,134
|
py
|
Python
|
phase_cells/nature_method2020/plot_metric_loss.py
|
shenghh2015/segmentation_models
|
473c528c724f62ff38ac127747dd8babb7de6b85
|
[
"MIT"
] | null | null | null |
phase_cells/nature_method2020/plot_metric_loss.py
|
shenghh2015/segmentation_models
|
473c528c724f62ff38ac127747dd8babb7de6b85
|
[
"MIT"
] | null | null | null |
phase_cells/nature_method2020/plot_metric_loss.py
|
shenghh2015/segmentation_models
|
473c528c724f62ff38ac127747dd8babb7de6b85
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import sys
def plot_separate(file_name, loss_list, title_list):
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
rows, cols, size = 1,1,5
font_size = 30; label_size = 25; line_width = 2.5
fig = Figure(tight_layout=True,figsize=(8, 6)); ax = fig.subplots(rows,cols)
ax.plot(loss_list[0],linewidth = line_width);ax.plot(loss_list[1], linewidth=line_width)
ax.set_ylabel(title_list[0], fontsize = font_size);ax.set_xlabel('Epochs',fontsize = font_size);ax.legend(['train','valid'],fontsize = font_size)
ax.tick_params(axis = 'x', labelsize = label_size); ax.tick_params(axis = 'y', labelsize = label_size);
ax.set_xlim([0,len(loss_list[0])])
canvas = FigureCanvasAgg(fig); canvas.print_figure(file_name, dpi=100)
model_root_dir = '/data/models/report_results/'
model_names = os.listdir(model_root_dir)
# nb_epochs = 100
nb_epochs = 80
#model_name = 'single-net-Unet-bone-efficientnetb2-pre-True-epoch-200-batch-3-lr-0.0005-dim-992-train-1100-rot-0-set-cell_cycle_1984-ext-True-loss-focal+dice-up-upsampling-filters-256'
for model_name in model_names:
train_dir = os.path.join(model_root_dir, model_name, 'train_dir')
if os.path.exists(train_dir):
train_loss = 4*np.loadtxt(train_dir+'/train_loss.txt')
val_loss = 4*np.loadtxt(train_dir+'/val_loss.txt')
train_dice = np.loadtxt(train_dir+'/train_f1-score.txt')
val_dice = np.loadtxt(train_dir+'/val_f1-score.txt')
if nb_epochs < len(train_loss):
train_loss = train_loss[:nb_epochs]
val_loss = val_loss[:nb_epochs]
train_dice = train_dice[:nb_epochs]
val_dice = val_dice[:nb_epochs]
file_name = train_dir + '/loss_dice_history.png'
loss_list = [train_loss, val_loss]
metric_list = [train_dice, val_dice]
#title_list = ['Focal loss', 'Dice score']
#plot_history(file_name, loss_list, metric_list, title_list)
file_name = train_dir + '/loss_history_{}.png'.format(nb_epochs)
plot_separate(file_name, loss_list, ['Focal loss'])
file_name = train_dir + '/dice_history.png'
plot_separate(file_name, metric_list, ['Dice score'])
| 46.391304
| 184
| 0.752577
|
8dc23c61e2100b217d30f351b85c8c498df2881d
| 1,133
|
py
|
Python
|
miningsimulator/views.py
|
emergent-consensus/simulator
|
7853177f5d6bbfd6f674a96013dad822172f724c
|
[
"MIT"
] | null | null | null |
miningsimulator/views.py
|
emergent-consensus/simulator
|
7853177f5d6bbfd6f674a96013dad822172f724c
|
[
"MIT"
] | null | null | null |
miningsimulator/views.py
|
emergent-consensus/simulator
|
7853177f5d6bbfd6f674a96013dad822172f724c
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, Response, render_template
from flask_bootstrap import Bootstrap
from flask_socketio import SocketIO
from app import connectionmanager, miningnetwork, socketapi
import os
import logging
logging.basicConfig()
app = Flask(__name__)
Bootstrap(app)
socketio = SocketIO(app)
sockets = socketapi.SocketAPI(socketio)
def on_block_found(block):
sockets.send_block_found(block)
print block
network = miningnetwork.MiningNetwork(on_block_found, None)
connectionmanager = connectionmanager.ConnectionManager(sockets, network)
@app.route("/")
def main_page():
return network_page()
@app.route("/network")
def network_page():
response = app.make_response(render_template('network.html'))
connectionmanager.get_or_set_cookie(request.cookies, response)
return response
@socketio.on('disconnect', namespace='/mining')
def disconnect():
connectionmanager.disconnect(request.sid)
@socketio.on('identify', namespace='/mining')
def identify(data):
connectionmanager.add_user(data["userid"], request.sid)
if __name__ == "__main__":
socketio.run(app)
| 25.75
| 73
| 0.774051
|
2b0d27b80bfd2d2892bf8b806f3196b800701250
| 2,730
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_pragma793.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma793.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma793.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=18
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
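# Overview: `make_circuit` builds a fixed 4-qubit circuit (H/X/Z/CZ/CNOT/SWAP
# gates plus one rx rotation) on grid qubits; the circuit is then compiled for
# the Sycamore gate set, sampled for 2000 repetitions, and the bitstring
# histogram is written to a CSV file together with the circuit itself.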
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.Z.on(input_qubit[3])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=15
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=16
c.append(cirq.H.on(input_qubit[1])) # number=17
c.append(cirq.X.on(input_qubit[1])) # number=11
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.rx(2.808583832309275).on(input_qubit[2])) # number=7
c.append(cirq.Y.on(input_qubit[3])) # number=13
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[3])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma793.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 32.891566
| 92
| 0.646154
|
d8ebae69baa1859200dda65dcc8fa725fb3159d9
| 6,596
|
py
|
Python
|
Tools/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
|
VincentWei/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | 6
|
2017-05-31T01:46:45.000Z
|
2018-06-12T10:53:30.000Z
|
Tools/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
|
FMSoftCN/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | null | null | null |
Tools/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
|
FMSoftCN/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | 2
|
2017-07-17T06:02:42.000Z
|
2018-09-19T10:08:38.000Z
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebKit's Python module for interacting with patches."""
import logging
import re
_log = logging.getLogger("webkitpy.common.checkout.diff_parser")
_regexp_compile_cache = {}
def match(pattern, string):
"""Matches the string with the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = re.compile(pattern)
return _regexp_compile_cache[pattern].match(string)
def git_diff_to_svn_diff(line):
"""Converts a git formatted diff line to a svn formatted line.
Args:
line: A string representing a line of the diff.
"""
conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"),
("^new file.*", lambda matched: "\n"),
("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"),
("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"),
("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n"))
for pattern, conversion in conversion_patterns:
matched = match(pattern, line)
if matched:
return conversion(matched)
return line
def get_diff_converter(first_diff_line):
"""Gets a converter function of diff lines.
Args:
first_diff_line: The first filename line of a diff file.
If this line is git formatted, we'll return a
converter from git to SVN.
"""
if match(r"^diff --git \w/", first_diff_line):
return git_diff_to_svn_diff
return lambda input: input
_INITIAL_STATE = 1
_DECLARED_FILE_PATH = 2
_PROCESSING_CHUNK = 3
class DiffFile:
"""Contains the information for one file in a patch.
The field "lines" is a list which contains tuples in this format:
(deleted_line_number, new_line_number, line_string)
If deleted_line_number is zero, it means this line is newly added.
If new_line_number is zero, it means this line is deleted.
"""
def __init__(self, filename):
self.filename = filename
self.lines = []
def add_new_line(self, line_number, line):
self.lines.append((0, line_number, line))
def add_deleted_line(self, line_number, line):
self.lines.append((line_number, 0, line))
def add_unchanged_line(self, deleted_line_number, new_line_number, line):
self.lines.append((deleted_line_number, new_line_number, line))
class DiffParser:
"""A parser for a patch file.
The field "files" is a dict whose key is the filename and value is
a DiffFile object.
"""
def __init__(self, diff_input):
"""Parses a diff.
Args:
diff_input: An iterable object.
"""
state = _INITIAL_STATE
self.files = {}
current_file = None
old_diff_line = None
new_diff_line = None
for line in diff_input:
line = line.rstrip("\n")
if state == _INITIAL_STATE:
transform_line = get_diff_converter(line)
line = transform_line(line)
file_declaration = match(r"^Index: (?P<FilePath>.+)", line)
if file_declaration:
filename = file_declaration.group('FilePath')
current_file = DiffFile(filename)
self.files[filename] = current_file
state = _DECLARED_FILE_PATH
continue
lines_changed = match(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@", line)
if lines_changed:
if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK:
_log.error('Unexpected line change without file path '
'declaration: %r' % line)
old_diff_line = int(lines_changed.group('OldStartLine'))
new_diff_line = int(lines_changed.group('NewStartLine'))
state = _PROCESSING_CHUNK
continue
if state == _PROCESSING_CHUNK:
if line.startswith('+'):
current_file.add_new_line(new_diff_line, line[1:])
new_diff_line += 1
elif line.startswith('-'):
current_file.add_deleted_line(old_diff_line, line[1:])
old_diff_line += 1
elif line.startswith(' '):
current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:])
old_diff_line += 1
new_diff_line += 1
elif line == '\\ No newline at end of file':
# Nothing to do. We may still have some added lines.
pass
else:
_log.error('Unexpected diff format when parsing a '
'chunk: %r' % line)
| 39.73494
| 163
| 0.619012
|
9ca078f5096248492c19ba88c5d59b442777ee1e
| 3,391
|
py
|
Python
|
pdf_data.py
|
yk-st/foreign_tax
|
157a844ce348fb8d43a9f1d1ee6280fc5eca6d78
|
[
"MIT"
] | 1
|
2021-11-19T01:54:05.000Z
|
2021-11-19T01:54:05.000Z
|
pdf_data.py
|
yk-st/foreign_tax
|
157a844ce348fb8d43a9f1d1ee6280fc5eca6d78
|
[
"MIT"
] | null | null | null |
pdf_data.py
|
yk-st/foreign_tax
|
157a844ce348fb8d43a9f1d1ee6280fc5eca6d78
|
[
"MIT"
] | null | null | null |
from re import split
from pdfminer.high_level import extract_text
import re
import os
from decimal import Decimal
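# Overview: scan every PDF statement under DIR, extract its text with pdfminer,
# locate each holding by its ticker line, and collect the dividend amount
# (配当金等金額) and foreign withholding tax (外国源泉徴収額) in both USD and JPY.
# The per-holding records are then summed to produce the figures reported for
# the tax return.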
DIR = "/Users/saitouyuuki/Desktop/src/2021-exempt/"
exempt_d={}
exempts_l=[]
# Ticker symbol matcher
ticker_r = re.compile(r"[0-9]{3}-[a-zA-Z]*")
# '%' matcher for the dividend amount (配当金等金額) and foreign withholding tax (外国源泉徴収額) fields
delimiter_r = re.compile(r"%")
# date matcher for the yen-denominated dividend amount (配当金等金額(円)) and foreign withholding tax (外国源泉徴収額(円)) fields
delimiter_r2 = re.compile(r"[0-9]{4}/[0-9]{2}/[0-9]{2}")
for filename in os.listdir(DIR):
if filename.endswith(".pdf"):
dividend_b=False
dividend_Yen_b=False
gT_b=False
counter=0
        # Extract the text from the PDF
text = extract_text(os.path.join(DIR, filename))
lines=text.split('\n')
#print(text)
for line in lines:
# ticker
if re.match(ticker_r,line) != None:
print("-----------------------")
exempt_d['ティッカー']=re.match(ticker_r,line).group()
counter=0
dividend_b=True
dividend_Yen_b=False
gT_b=False
print(re.match(ticker_r,line).group())
            # when gt shows up it's time to get the dividend, foreign tax, etc.
if re.match(delimiter_r,line) != None and dividend_b:
#print(re.match(delimiter_r,line).group())
counter=0
if dividend_b:
counter = counter + 1
if line == 'gT' and dividend_b:
gT_b = True
counter = 0
if not gT_b:
if dividend_b and counter == 11 :
print(line)
exempt_d['配当金等金額']=line
if dividend_b and counter == 13 :
print(line)
exempt_d['外国源泉徴収額']=line
if gT_b:
if dividend_b and counter == 4 :
print(line)
exempt_d['配当金等金額']=line
if dividend_b and counter == 6 :
print(line)
exempt_d['外国源泉徴収額']=line
# if date format shows up
if re.match(delimiter_r2,line) != None and dividend_b:
#print(re.match(delimiter_r2,line).group())
counter=0
                # turn on the yen-denominated dividend (配当金等金額YEN) mode
dividend_Yen_b=True
dividend_b=False
if dividend_Yen_b:
counter = counter + 1
if dividend_Yen_b and counter == 7 :
print(line)
exempt_d['配当金等金額YEN']=line.replace(',','')
if dividend_Yen_b and counter == 9 :
print(line)
exempt_d['外国源泉徴収額YEN']=line.replace(',','')
        # Save the result for this file
exempts_l.append(exempt_d)
exempt_d={}
print(exempts_l)
# Check the totals
配当金等金額_sum=Decimal(0.0)
外国源泉徴収額_sum=Decimal(0.0)
配当金等金額_yen_sum=Decimal(0.0)
外国源泉徴収額_yen_sum=Decimal(0.0)
for data in exempts_l:
配当金等金額_sum=配当金等金額_sum + Decimal(data['配当金等金額'])
外国源泉徴収額_sum=外国源泉徴収額_sum + Decimal(data['外国源泉徴収額'])
配当金等金額_yen_sum=配当金等金額_yen_sum + Decimal(data['配当金等金額YEN'])
外国源泉徴収額_yen_sum=外国源泉徴収額_yen_sum + Decimal(data['外国源泉徴収額YEN'])
print("---------確定申告に記載する値-------------")
print("配当金等金額->" + str(配当金等金額_sum) + "$")
print("外国源泉徴収額->" + str(外国源泉徴収額_sum) + "$")
print("配当金等金額YEN->" + str(配当金等金額_yen_sum) + "円")
print("外国源泉徴収額YEN->" + str(外国源泉徴収額_yen_sum) + "円")
| 34.252525
| 75
| 0.535535
|
6a76df0da261671fca4b666b750cb092e5eb8167
| 597
|
py
|
Python
|
apis/migrations/0002_api_user.py
|
ale180192/openapi-viewer-back
|
22dedf5f21438e8f7ff89d5e17ff2ff711bdd167
|
[
"Apache-2.0"
] | 2
|
2020-03-02T04:16:47.000Z
|
2020-05-20T06:21:32.000Z
|
apis/migrations/0002_api_user.py
|
ale180192/openapi-viewer-back
|
22dedf5f21438e8f7ff89d5e17ff2ff711bdd167
|
[
"Apache-2.0"
] | 6
|
2020-06-06T01:28:39.000Z
|
2022-02-10T12:06:42.000Z
|
apis/migrations/0002_api_user.py
|
ale180192/openapi-viewer-back
|
22dedf5f21438e8f7ff89d5e17ff2ff711bdd167
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2 on 2020-02-26 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
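# Overview: this migration adds a `user` foreign key (CASCADE on delete,
# related_name='apis') to the existing Api model, linking each Api record to a
# user.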
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('apis', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='api',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='apis', to=settings.AUTH_USER_MODEL),
),
]
| 24.875
| 131
| 0.664992
|
97e90ead0be16ebd7c187144dd1507536727c22e
| 5,057
|
py
|
Python
|
huey/tests/test_signals.py
|
vb8448/huey
|
345857dd01293af70a3a72dcb8052e34b38574b1
|
[
"MIT"
] | 1
|
2021-01-28T15:26:45.000Z
|
2021-01-28T15:26:45.000Z
|
huey/tests/test_signals.py
|
vb8448/huey
|
345857dd01293af70a3a72dcb8052e34b38574b1
|
[
"MIT"
] | 1
|
2021-01-31T08:01:24.000Z
|
2021-01-31T08:01:24.000Z
|
huey/tests/test_signals.py
|
vb8448/huey
|
345857dd01293af70a3a72dcb8052e34b38574b1
|
[
"MIT"
] | 1
|
2021-01-31T07:59:29.000Z
|
2021-01-31T07:59:29.000Z
|
from huey.signals import *
from huey.tests.base import BaseTestCase
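# Overview: these tests register a catch-all signal handler that records every
# (signal, task) pair emitted while tasks run, then assert the expected signal
# ordering for plain execution, scheduling, errors and retries, revocation,
# task locks, and handler (dis)connection.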
class TestSignals(BaseTestCase):
def setUp(self):
super(TestSignals, self).setUp()
self._state = []
@self.huey.signal()
def signal_handle(signal, task, *args):
self._state.append((signal, task, args))
def assertSignals(self, expected):
self.assertEqual([s[0] for s in self._state], expected)
self._state = []
def test_signals_simple(self):
@self.huey.task()
def task_a(n):
return n + 1
r = task_a(3)
self.assertSignals([])
self.assertEqual(self.execute_next(), 4)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
r = task_a.schedule((2,), delay=60)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_SCHEDULED])
r = task_a(None)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])
def test_signal_complete_result_ready(self):
@self.huey.task()
def task_a(n):
return n + 1
results = []
@self.huey.signal(SIGNAL_COMPLETE)
def on_complete(sig, task, *_):
results.append(self.huey.result(task.id))
r = task_a(2)
self.assertEqual(self.execute_next(), 3)
self.assertEqual(results, [3])
def test_signals_on_retry(self):
@self.huey.task(retries=1)
def task_a(n):
return n + 1
r = task_a(None)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])
@self.huey.task(retries=1, retry_delay=60)
def task_b(n):
return n + 1
r = task_b(None)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING,
SIGNAL_SCHEDULED])
def test_signals_revoked(self):
@self.huey.task()
def task_a(n):
return n + 1
task_a.revoke(revoke_once=True)
r = task_a(2)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_REVOKED])
r = task_a(3)
self.assertEqual(self.execute_next(), 4)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
def test_signals_locked(self):
@self.huey.task()
@self.huey.lock_task('lock-a')
def task_a(n):
return n + 1
r = task_a(1)
self.assertSignals([])
self.assertEqual(self.execute_next(), 2)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
with self.huey.lock_task('lock-a'):
r = task_a(2)
self.assertSignals([])
self.assertTrue(self.execute_next() is None)
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_LOCKED])
def test_specific_handler(self):
extra_state = []
@self.huey.signal(SIGNAL_EXECUTING)
def extra_handler(signal, task):
extra_state.append(task.args[0])
@self.huey.task()
def task_a(n):
return n + 1
r = task_a(3)
self.assertEqual(extra_state, [])
self.assertEqual(self.execute_next(), 4)
self.assertEqual(extra_state, [3])
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
r2 = task_a(1)
self.assertEqual(self.execute_next(), 2)
self.assertEqual(extra_state, [3, 1])
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
self.huey.disconnect_signal(extra_handler, SIGNAL_EXECUTING)
r3 = task_a(2)
self.assertEqual(self.execute_next(), 3)
self.assertEqual(extra_state, [3, 1])
self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
def test_multi_handlers(self):
state1 = []
state2 = []
@self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
def handler1(signal, task):
state1.append(signal)
@self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
def handler2(signal, task):
state2.append(signal)
@self.huey.task()
def task_a(n):
return n + 1
r = task_a(1)
self.assertEqual(self.execute_next(), 2)
self.assertEqual(state1, ['executing', 'complete'])
self.assertEqual(state2, ['executing', 'complete'])
self.huey.disconnect_signal(handler1, SIGNAL_COMPLETE)
self.huey.disconnect_signal(handler2)
r2 = task_a(2)
self.assertEqual(self.execute_next(), 3)
self.assertEqual(state1, ['executing', 'complete', 'executing'])
self.assertEqual(state2, ['executing', 'complete'])
| 31.02454
| 77
| 0.606684
|
a7bec55db505161faafdf71c929ae51d5a8fa418
| 14,253
|
py
|
Python
|
fragmenstein/monster/_utility.py
|
matteoferla/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 41
|
2020-04-09T14:11:39.000Z
|
2022-03-15T15:44:14.000Z
|
fragmenstein/monster/_utility.py
|
LaYeqa/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 13
|
2020-12-02T13:13:59.000Z
|
2022-01-14T11:29:46.000Z
|
fragmenstein/monster/_utility.py
|
LaYeqa/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 6
|
2020-09-07T10:47:51.000Z
|
2021-09-23T14:22:39.000Z
|
########################################################################################################################
__doc__ = \
"""
These are extras for the Monster step
"""
########################################################################################################################
from typing import List, Optional, Tuple, Dict
from warnings import warn
from rdkit import Chem
from rdkit.Chem import AllChem, rdFMCS, Draw
import json
try:
from IPython.display import SVG, display
except ImportError:
warn('No Jupyter notebook installed. `.draw_nicely` will not work.')
SVG = lambda *args, **kwargs: print('Install IPython...')
display = lambda *args, **kwargs: print('Install IPython...')
from ._communal import _MonsterCommunal
from .positional_mapping import GPM
########################################################################################################################
class _MonsterUtil(_MonsterCommunal, GPM):
@classmethod
def get_combined_rmsd(cls, followup_moved: Chem.Mol, followup_placed: Optional[Chem.Mol] = None,
hits: Optional[List[Chem.Mol]] = None) -> float:
"""
        Deprecated.
        The inbuilt RMSD calculations in RDKit align the two molecules; this method does not align them.
        It also handles the case of multiple hits.
        For a Euclidean distance, the square root of the sum of the squared differences in each coordinate is taken.
        For a regular RMSD, the still-squared distance is averaged before taking the root.
        Here the average is taken across all the atom pairs between each hit and the followup.
        Therefore, followup atoms that map to atoms in more than one hit are scored multiple times.
        When called as a classmethod, ``followup_placed`` and ``hits`` must be provided; as an instance method they need not be.
:param followup_moved: followup compound moved by Igor or similar
:param followup_placed: followup compound as placed by Monster
:param hits: list of hits.
:return: combined RMSD
"""
# class or instance?
if followup_placed is None: # instance
            assert hasattr(cls, '__class__'), 'if called as a classmethod the placed followup needs to be provided.'
followup_placed = cls.positioned_mol
if hits is None: # instance
assert hasattr(cls, '__class__'), 'if called as a classmethod the list of hits need to be provided.'
hits = cls.hits
for i in range(followup_placed.GetNumAtoms()):
assert followup_placed.GetAtomWithIdx(i).GetSymbol() == followup_moved.GetAtomWithIdx(
i).GetSymbol(), 'The atoms order is changed.'
if followup_moved.GetNumAtoms() > followup_placed.GetNumAtoms():
warn(
f'Followup moved {followup_moved.GetNumAtoms()} has more atoms that followup placed {followup_placed.GetNumAtoms()}. Assuming these are hydrogens.')
# calculate
tatoms = 0
d = 0
for hit in hits:
mapping = list(cls.get_positional_mapping(followup_placed, hit).items())
tatoms += len(mapping)
if len(mapping) == 0:
continue
d += cls._get_square_deviation(followup_moved, hit, mapping)
        return (d / tatoms) ** 0.5
@classmethod
def get_pair_rmsd(cls, molA, molB, mapping: List[Tuple[int, int]]) -> float:
return (cls._get_square_deviation(molA, molB, mapping) / len(mapping)) ** 0.5
def _get_square_deviation(self, molA, molB, mapping):
confA = molA.GetConformer()
confB = molB.GetConformer()
return sum([(confA.GetAtomPosition(a).x - confB.GetAtomPosition(b).x) ** 2 +
(confA.GetAtomPosition(a).y - confB.GetAtomPosition(b).y) ** 2 +
(confA.GetAtomPosition(a).z - confB.GetAtomPosition(b).z) ** 2 for a, b in mapping])
@property
def num_common(self) -> int:
template = self._get_last_template()
mcs = rdFMCS.FindMCS([template, self.initial_mol],
atomCompare=rdFMCS.AtomCompare.CompareElements,
bondCompare=rdFMCS.BondCompare.CompareOrder)
return Chem.MolFromSmarts(mcs.smartsString).GetNumAtoms()
def _get_last_template(self):
if 'chimera' in self.modifications:
template = self.modifications['chimera']
elif 'scaffold' in self.modifications:
template = self.modifications['scaffold']
else:
            raise KeyError('There is no chimeric or reg scaffold/template to compare to.')
        return template
@property
def percent_common(self) -> int:
return round(self.num_common / self.initial_mol.GetNumAtoms() * 100)
def stdev_from_mol(self, mol: Chem.Mol = None):
"""
these values are stored from Monster for scaffold, chimera and positioned_mol
:param mol: Chem.Mol
:return: stdev list for each atom
"""
if mol is None:
mol = self.positioned_mol
return [atom.GetDoubleProp('_Stdev') if atom.HasProp('_Stdev') else 0 for atom in mol.GetAtoms()]
def max_from_mol(self, mol: Chem.Mol = None):
if mol is None:
mol = self.positioned_mol
return [atom.GetDoubleProp('_Max') if atom.HasProp('_Max') else 0 for atom in mol.GetAtoms()]
def origin_from_mol(self, mol: Chem.Mol = None):
"""
these values are stored from Monster for scaffold, chimera and positioned_mol
:param mol: Chem.Mol
        :return: list of origins for each atom
"""
if mol is None:
mol = self.positioned_mol
if mol.HasProp('_Origins'):
return json.loads(mol.GetProp('_Origins'))
origin = []
for atom in mol.GetAtoms():
if atom.HasProp('_Origin'):
x = atom.GetProp('_Origin')
if x == 'none':
origin.append([])
else:
origin.append(json.loads(x))
else:
origin.append([])
return origin
def guess_origins(self, mol: Chem.Mol = None, hits: Optional[List[Chem.Mol]] = None):
"""
Given a positioned mol guess its origins...
:param mol:
:return:
"""
if hits is None:
hits = self.hits
mappings = []
for h, hit in enumerate(hits):
hname = hit.GetProp('_Name')
for hi, mi in self.get_positional_mapping(hit, mol).items():
atom = mol.GetAtomWithIdx(mi)
if atom.HasProp('_Novel') and atom.GetBoolProp('_Novel') == True:
continue # flagged to avoid.
elif atom.HasProp('_Origin') and atom.GetProp('_Origin') != 'none':
origin = json.loads(atom.GetProp('_Origin'))
else:
origin = []
origin.append(f'{hname}.{hi}')
atom.SetProp('_Origin', json.dumps(origin))
# class attribute for next method
_i = 0
def save_temp(self, mol):
"""
This is a silly debug-by-print debug method. drop it in where you want to spy on stuff.
"""
        Chem.MolToMolFile(mol, f'debug_temp{self._i}.mol', kekulize=False)
self._i += 1
def save_commonality(self, filename: Optional[str] = None):
"""
Saves an SVG of the followup fragmenstein monster with the common atoms with the chimeric scaffold highlighted.
:param filename: optinal filename to save it as. Otherwise returns a Draw.MolDraw2DSVG object.
:return:
"""
template = self._get_last_template()
mcs = rdFMCS.FindMCS([template, self.positioned_mol],
atomCompare=rdFMCS.AtomCompare.CompareElements,
bondCompare=rdFMCS.BondCompare.CompareOrder,
ringMatchesRingOnly=True)
common = Chem.MolFromSmarts(mcs.smartsString)
match = self.positioned_mol.GetSubstructMatch(common)
d = self.draw_nicely(self.positioned_mol, show=False, highlightAtoms=match)
if filename is None:
return d
else:
assert '.svg' in filename, 'Can only save SVGs.'
with open(filename, 'w') as w:
w.write(d.GetDrawingText())
def make_pse(self, filename='test.pse', extra_mols: Optional[Chem.Mol] = None):
"""
This is specifically for debugging the full fragment merging mode.
        For general use, please use the Victor method ``make_pse``.
:param filename:
:return:
"""
assert '.pse' in filename, 'Must be a pymol pse extension!'
import pymol2
with pymol2.PyMOL() as pymol:
tints = iter(
['wheat', 'palegreen', 'lightblue', 'paleyellow', 'lightpink', 'palecyan', 'lightorange', 'bluewhite'])
# pymol.cmd.bg_color('white')
for h, hit in enumerate(self.hits):
pymol.cmd.read_molstr(Chem.MolToMolBlock(hit, kekulize=False), f'hit{h}')
pymol.cmd.color(next(tints), f'hit{h} and name C*')
if 'scaffold' in self.modifications:
pymol.cmd.read_molstr(Chem.MolToMolBlock(self.modifications['scaffold'], kekulize=False), f'scaffold')
pymol.cmd.color('tv_blue', f'scaffold and name C*')
if 'chimera' in self.modifications:
pymol.cmd.read_molstr(Chem.MolToMolBlock(self.modifications['chimera'], kekulize=False), f'chimera')
pymol.cmd.color('cyan', f'chimera and name C*')
if self.positioned_mol:
pymol.cmd.read_molstr(Chem.MolToMolBlock(self.positioned_mol, kekulize=False), f'followup')
pymol.cmd.color('tv_green', f'followup and name C*')
if self.mol_options:
for i, mol in enumerate(self.mol_options):
pymol.cmd.read_molstr(Chem.MolToMolBlock(mol, kekulize=False), f'opt{i}')
pymol.cmd.color('grey50', f'opt{i} and name C*')
pymol.cmd.hide('sticks')
pymol.cmd.hide('cartoon') # there should not be....
pymol.cmd.show('lines', 'not polymer')
if 'chimera' in self.modifications:
pymol.cmd.show('sticks', 'chimera')
if self.positioned_mol:
pymol.cmd.show('sticks', 'followup')
if extra_mols:
for mol in extra_mols:
name = mol.GetProp('_Name')
pymol.cmd.read_molstr(Chem.MolToMolBlock(mol, kekulize=False), name)
pymol.cmd.color('magenta', f'{name} and name C*')
pymol.cmd.save(filename)
def draw_nicely(self, mol, show=True, **kwargs) -> Draw.MolDraw2DSVG:
"""
Draw with atom indices for Jupyter notebooks.
:param mol:
:param kwargs: Key value pairs get fed into ``PrepareAndDrawMolecule``.
:return:
"""
if mol.HasProp('_Name'):
print(mol.GetProp('_Name'))
d = Draw.MolDraw2DSVG(400, 400)
d.drawOptions().addAtomIndices = True
d.drawOptions().addStereoAnnotation = True
d.drawOptions().prepareMolsBeforeDrawing = False
d.drawOptions().dummiesAreAttachments = True
x = Chem.Mol(mol)
AllChem.Compute2DCoords(x)
Chem.SanitizeMol(x, catchErrors=True)
try:
# x = Chem.MolFromSmiles(Chem.MolToSmiles(x, kekuleSmiles=False), sanitize=False)
Draw.PrepareAndDrawMolecule(d, x, **kwargs)
d.FinishDrawing()
if show:
display(SVG(d.GetDrawingText()))
return d
except Exception as err:
warn(f'*{err.__class__.__name__}* : {err}')
display(x)
def mmff_minimise(self, mol: Optional[Chem.Mol] = None) -> None:
"""
Minimises a mol, or self.positioned_mol if not provided, with MMFF constrained to 2 Å.
Gets called by Victor if the flag .monster_mmff_minimisation is true during PDB template construction.
:param mol: opt. mol. modified in place.
:return: None
"""
if mol is None and self.positioned_mol is None:
raise ValueError('No valid molecule')
elif mol is None:
mol = self.positioned_mol
else:
pass # mol is fine
# protect
for atom in mol.GetAtomsMatchingQuery(Chem.rdqueries.AtomNumEqualsQueryAtom(0)):
atom.SetBoolProp('_IsDummy', True)
atom.SetAtomicNum(16)
#
mol.UpdatePropertyCache()
# Chem.GetSymmSSSR(mol)
# Chem.MolToMolFile(mol, 'test.mol')
Chem.SanitizeMol(mol)
#
p = AllChem.MMFFGetMoleculeProperties(mol, 'MMFF94')
if p is None:
self.journal.error(f'MMFF cannot work on a molecule that has errors!')
return None
ff = AllChem.MMFFGetMoleculeForceField(mol, p)
# restrain
for atom in mol.GetAtomsMatchingQuery(Chem.rdqueries.HasPropQueryAtom('_Novel', negate=True)):
i = atom.GetIdx()
ff.MMFFAddPositionConstraint(i, 2, 10)
for atom in mol.GetAtomsMatchingQuery(Chem.rdqueries.HasPropQueryAtom('_IsDummy')):
i = atom.GetIdx()
ff.MMFFAddPositionConstraint(i, 0.1, 10)
try:
m = ff.Minimize()
if m == -1:
                self.journal.error('MMFF Minimisation could not be started')
            elif m == 0:
                self.journal.info('MMFF Minimisation was successful')
            elif m == 1:
                self.journal.info('MMFF Minimisation ran, but it did not converge')
else:
self.journal.critical("Iä! Iä! Cthulhu fhtagn! Ph'nglui mglw'nafh Cthulhu R'lyeh wgah'nagl fhtagn")
except RuntimeError as error:
self.journal.error(f'MMFF minimisation failed {error.__class__.__name__}: {error}')
# deprotect
for atom in mol.GetAtomsMatchingQuery(Chem.rdqueries.HasPropQueryAtom('_IsDummy')):
atom.SetAtomicNum(0)
| 43.587156
| 164
| 0.585631
|
e2395b86b152ddefb08f63b1dacb5f7b0f14b502
| 7,338
|
py
|
Python
|
ecal/fit_peak_pos.py
|
rhambach/TEMareels
|
92a907f483baeb919dd485895c56454f0b552c76
|
[
"MIT"
] | null | null | null |
ecal/fit_peak_pos.py
|
rhambach/TEMareels
|
92a907f483baeb919dd485895c56454f0b552c76
|
[
"MIT"
] | null | null | null |
ecal/fit_peak_pos.py
|
rhambach/TEMareels
|
92a907f483baeb919dd485895c56454f0b552c76
|
[
"MIT"
] | 1
|
2019-03-20T21:05:24.000Z
|
2019-03-20T21:05:24.000Z
|
"""
fitting of peak positions in shifted EELS spectra for
energy-calibrations
IMPLEMENTATION:
- gauss fit for ZLP (highest peak in spectrum)
- correlation with plasmon spectrum for second highest peak
(The position corresponds to the center of the reference spectrum.)
TODO:
- make implementation more general: just fit left and right peak
using either a reference spectrum or a model function
Copyright (c) 2013, rhambach.
This file is part of the TEMareels package and released
under the MIT-Licence. See LICENCE file for details.
"""
import numpy as np
import matplotlib.pylab as plt
import scipy.signal as sig;
import scipy.optimize as opt;
from TEMareels.tools.models import gauss;
from TEMareels.tools.msa import MSA;
import TEMareels.tools.tifffile as tiff;
def fit_zlp(spectra, medfilt_radius=5, verbosity=0, border=10, ampl_cut=0.5, sort=False):
"""
fitting gauss to highest peak in spectrum
RETURNS
(Nspectra,3)-array with fitting parameters (center, height, width)
"""
if verbosity>2: print "-- fitting zero-loss peak ---------------------------";
Nspectra, Npx = spectra.shape;
x = np.arange(Npx);
peaks = np.zeros((Nspectra,3));
for s in xrange(Nspectra):
line = sig.medfilt(spectra[s],medfilt_radius);
imax = np.argmax(line); # initial guess for ZLP
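    # initial guess for the gauss fit: centre at the argmax, height seeded with
    # the summed intensity in a ±5 px window, and a width of ~10 px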
peaks[s], pconv = \
opt.curve_fit(gauss,x,line,p0=(imax, np.sum(line[imax-5:imax+5]), 10));
if verbosity>2:
print "#%03d: "%s, "pos max: ", imax, ", fit params: ", peaks[s]
# remove outliers
# - peak height < 50% of max. amplitude in all spectra
peaks = np.asarray(peaks);
height = peaks[:,1];
peaks[ height < ampl_cut*np.nanmax(height) ] = np.nan;
# - peak pos close to the border (10px)
pos = peaks[:,0];
peaks[ (pos<border) | (Npx - pos<border) ] = np.nan;
# return sorted arrays
if sort:
i=np.argsort(peaks[:,0])[::-1];
return peaks[i], spectra[i];
else:
return peaks, spectra;
def fit_plasmon(spectra, ref, xmin=None, xmax=None, medfilt_radius=5, border=10, ampl_cut=0.5, verbosity=0):
"""
fitting reference peak by finding the best correlation with
the original spectrum within a restricted range [xmin, xmax]
NOTE: A gauss fit to the plasmon peak is rather poor due to
     its asymmetry. We need a precision of about 1px.
RETURNS:
(Nspectra,2)-array containing the position of the best overlap
with respect to the center of the reference spectum and
the maximal intensity in the spectrum
"""
if verbosity>2: print "-- fitting plasmon peak ------------------------------";
Nspectra, Npx = spectra.shape;
if xmin is None: xmin = np.zeros(Nspectra);
else: xmin = np.asarray(xmin,dtype=int);
if xmax is None: xmax = np.ones(Nspectra)*Npx;
else: xmax = np.asarray(xmax,dtype=int);
peaks = [[]]*Nspectra;
for s in xrange(Nspectra):
# skip lines, where no ZLP was found (nan is -2147483648 after conversion to int)
if xmin[s]<0 or xmax[s]<0:
peaks[s] = [np.nan, np.nan];
continue;
line = sig.medfilt(spectra[s],medfilt_radius);
x = np.arange(xmin[s],xmax[s],dtype=int);
    line = line[x]; # region of interest
conv = sig.convolve(line,ref[::-1],'same');
peaks[s] = [x[np.argmax(conv)], line.max() ];
    ## Alternatively: try to fit an (asymmetric) model function
#try:
# peaks[s], pconv = \
# opt.curve_fit(gauss,x,line,p0=(x[imax], line[imax], 50.));
#except: # Catch any fitting errors
# peaks[s], cov = [np.nan,]*3, None
#plt.plot(x,line); plt.plot(x,gauss(x,*peaks[s]));
#plt.show();
if verbosity>2:
#print s, peaks[s]
print "#%03d: pos max: %5s, "%(s,peaks[0]), "fit params: ", peaks[s]
# remove outliers
# - peak height < 50% of max. amplitude in all spectra
peaks = np.asarray(peaks);
height = peaks[:,1];
peaks[ height < ampl_cut*np.nanmax(height) ] = np.nan;
# - peak pos close to the border (10px)
pos = peaks[:,0];
peaks[ (pos<border) | (Npx - pos<border) ] = np.nan;
return peaks;
def plot_peaks(spectra, ref, zl, pl, filename=''):
plt.figure();
plt.title("Debug: Peak fitting for '%s'" % filename);
plt.xlabel("y-position [px]");
plt.ylabel("Intensity");
Nspectra, Npx = spectra.shape;
for s in xrange(Nspectra):
scale = 1./spectra.max();
offset= -s*0.1;
# plot data
plt.plot(spectra[s]*scale + offset,'k',linewidth=2);
# plot first peak
p,A,w = zl[s];
x = np.arange(-2*w, 2*w) + p;
plt.plot(x,gauss(x,*zl[s])*scale + offset,'r');
# plot second peak
if ref is not None:
p,A = pl[s];
x = np.arange(len(ref)) - len(ref)/2 + p;
plt.plot(x,ref/ref.max()*A*scale + offset,'g');
#plt.xlim(xmin=0,xmax=Npx);
def get_peak_pos(filename, refname=None, medfilt_radius=5, sort=False, border=10, ampl_cut=0.5, verbosity=1):
"""
calculate the position-dependent energy dispersion from
the distance between two peaks (ZLP and plasmon reference)
filename ... file containing the spectrum image (Nspectra, Npx)
refname ... (opt) filename of reference spectrum for second peak
medfilt_radius... (opt) median filter radius for smoothing of spectra
sort ... (opt) if True, sort spectra according to ZLP position
border ... (opt) skip peaks which are too close to the border (in pixel)
ampl_cut ... (opt) skip peaks with amplitude smaller than ampl_cut*maximum
verbosity... (opt) 0 (silent), 1 (minimal), 2 (plot), 3 (debug)
RETURNS
x(N), zl(N) or
x(N), zl(N), pl(N) which are one-dimensional arrays of length N
containing the x-value of the spectrum, the zero-loss and
plasmon-peak position.
(N=Nspectra)
"""
# 1. read EELS spectra of series
if verbosity>0: print "Loading spectra from file '%s'"%filename;
IN = tiff.imread(filename); # Ny, Ns+1
data = IN[:,:-1];
x = IN[:,-1]; # last line in image corresponds
                # to energy values
# 2. fit ZLP to spectra
zl,spectra = fit_zlp(data, border=border, medfilt_radius=medfilt_radius,
ampl_cut=ampl_cut, verbosity=verbosity, sort=sort);
if refname is None:
if verbosity>2: plot_peaks(spectra, None, zl, None, filename=filename);
return x,zl;
# 3. fit second peak from correlation with reference spectrum
spectra_noZLP=spectra.copy();
for s in range(len(spectra)): # for each spectrum, we remove the ZLP
x0,I,fwhm = zl[s]; # parameters from ZLP
xmin,xmax = max(0,x0-5*fwhm), min(len(spectra[s]),x0+5*fwhm);
spectra_noZLP[s,xmin:xmax]=0;
REF = MSA(refname).get_data();
pl = fit_plasmon(spectra_noZLP, REF, border=border,
ampl_cut=ampl_cut, medfilt_radius=medfilt_radius, verbosity=verbosity);
if verbosity>2: plot_peaks(spectra, REF, zl, pl, filename=filename);
return x,zl,pl
# -- main ----------------------------------------
if __name__ == '__main__':
ref = "../tests/Ereference.msa"; # ref: maximum must be at the center !
dat = "../tests/Eseries1.tif"; # spectra
get_peak_pos(dat,ref, sort=False, border=80, verbosity=3);
plt.show();
| 34.130233
| 109
| 0.626874
|
06c705153f88c7a618a8529cab462287c82efaf7
| 82,027
|
py
|
Python
|
plenum/cli/cli.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/cli/cli.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/cli/cli.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import glob
import shutil
from os.path import basename, dirname
from typing import Iterable
from jsonpickle import json
from ledger.compact_merkle_tree import CompactMerkleTree
from ledger.genesis_txn.genesis_txn_file_util import create_genesis_txn_init_ledger
from ledger.genesis_txn.genesis_txn_initiator_from_file import GenesisTxnInitiatorFromFile
from ledger.ledger import Ledger
from plenum.cli.command import helpCmd, statusNodeCmd, statusClientCmd, \
loadPluginsCmd, clientSendCmd, clientShowCmd, newKeyCmd, \
newWalletCmd, renameWalletCmd, useWalletCmd, saveWalletCmd, \
listWalletCmd, listIdsCmd, useIdCmd, addGenesisTxnCmd, \
createGenesisTxnFileCmd, changePromptCmd, exitCmd, quitCmd, Command
from plenum.cli.command import licenseCmd
from plenum.cli.command import newClientCmd
from plenum.cli.command import newNodeCmd
from plenum.cli.command import statusCmd
from plenum.cli.constants import SIMPLE_CMDS, CLI_CMDS, NODE_OR_CLI, NODE_CMDS, \
PROMPT_ENV_SEPARATOR, WALLET_FILE_EXTENSION, NO_ENV
from plenum.cli.helper import getUtilGrams, getNodeGrams, getClientGrams, \
getAllGrams
from plenum.cli.phrase_word_completer import PhraseWordCompleter
from plenum.client.wallet import Wallet, WalletStorageHelper
from plenum.common.constants import TXN_TYPE, TARGET_NYM, DATA, IDENTIFIER, \
NODE, ALIAS, NODE_IP, NODE_PORT, CLIENT_PORT, CLIENT_IP, VERKEY, BY, \
CLIENT_STACK_SUFFIX
from plenum.common.exceptions import NameAlreadyExists, KeysNotFoundException
from plenum.common.keygen_utils import learnKeysFromOthers, tellKeysToOthers, areKeysSetup
from plenum.common.plugin_helper import loadPlugins
from plenum.common.signer_did import DidSigner
from plenum.common.stack_manager import TxnStackManager
from plenum.common.tools import lazy_field
from plenum.common.transactions import PlenumTransactions
from prompt_toolkit.utils import is_windows, is_conemu_ansi
from storage.kv_in_memory import KeyValueStorageInMemory
from stp_core.crypto.util import cleanSeed, seedFromHex
from stp_core.network.port_dispenser import genHa
from stp_core.types import HA
from plenum.common.config_helper import PNodeConfigHelper
import configparser
import os
from configparser import ConfigParser
from collections import OrderedDict
import time
import ast
from functools import reduce, partial
import sys
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.shortcuts import create_prompt_application, \
create_asyncio_eventloop
from prompt_toolkit.layout.lexers import SimpleLexer
from prompt_toolkit.styles import PygmentsStyle
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from pygments.token import Token
from plenum.client.client import Client
from plenum.common.util import getMaxFailures, \
firstValue, randomString, bootstrapClientKeys, \
getFriendlyIdentifier, \
normalizedWalletFileName, getWalletFilePath, \
getLastSavedWalletFileName
from stp_core.common.log import \
getlogger, Logger
from plenum.server.node import Node
from plenum.common.types import NodeDetail
from plenum.server.plugin_loader import PluginLoader
from plenum.server.replica import Replica
from plenum.common.config_util import getConfig
from plenum.__metadata__ import __version__
from plenum.cli.command_history import CliFileHistory
if is_windows():
from prompt_toolkit.terminal.win32_output import Win32Output # noqa
from prompt_toolkit.terminal.conemu_output import ConEmuOutput # noqa
else:
from prompt_toolkit.terminal.vt100_output import Vt100_Output # noqa
class CustomOutput(Vt100_Output):
"""
    Subclassing Vt100_Output just to override the `ask_for_cpr` method, which
    normally prints an escape character on the console; here that escape
    character is not printed.
"""
def ask_for_cpr(self):
"""
Asks for a cursor position report (CPR).
"""
self.flush()
class Cli:
isElectionStarted = False
primariesSelected = 0
electedPrimaries = set()
name = 'plenum'
properName = 'Plenum'
fullName = 'Plenum protocol'
NodeClass = Node
ClientClass = Client
defaultWalletName = 'Default'
_genesisTransactions = []
# noinspection PyPep8
def __init__(self, looper, basedirpath: str, ledger_base_dir: str, nodeReg=None, cliNodeReg=None,
output=None, debug=False, logFileName=None, config=None,
useNodeReg=False, withNode=True, unique_name=None,
override_tags=None, nodes_chroot: str=None):
self.unique_name = unique_name
self.curClientPort = None
self.basedirpath = os.path.expanduser(basedirpath)
self.ledger_base_dir = os.path.expanduser(ledger_base_dir)
self._config = config or getConfig(self.basedirpath)
Logger().enableCliLogging(self.out,
override_tags=override_tags)
self.looper = looper
self.withNode = withNode
self.__init_registry(useNodeReg, nodeReg, cliNodeReg)
# Used to store created clients
self.clients = {} # clientName -> Client
# To store the created requests
self.requests = {}
# To store the nodes created
self.nodes = {}
self.externalClientKeys = {} # type: Dict[str,str]
self.cliCmds = CLI_CMDS
self.nodeCmds = NODE_CMDS
self.helpablesCommands = self.cliCmds | self.nodeCmds
self.simpleCmds = SIMPLE_CMDS
self.commands = {'list', 'help'} | self.simpleCmds
self.cliActions = {'send', 'show'}
self.commands.update(self.cliCmds)
self.commands.update(self.nodeCmds)
self.node_or_cli = NODE_OR_CLI
self.nodeNames = list(self.nodeReg.keys()) + ["all"]
self.debug = debug
self.plugins = {}
self.pluginPaths = []
self.defaultClient = None
self.activeDID = None
# Wallet and Client are the same from user perspective for now
self._activeClient = None
self._wallets = {} # type: Dict[str, Wallet]
self._activeWallet = None # type: Wallet
self.keyPairs = {}
self.nodes_chroot = nodes_chroot
'''
examples:
status
new node Alpha
new node all
new client Joe
client Joe send <Cmd>
client Joe show 1
'''
self.utilGrams = getUtilGrams()
self.nodeGrams = getNodeGrams()
self.clientGrams = getClientGrams()
self._allGrams = []
self._lexers = {}
self.clientWC = WordCompleter([])
self._completers = {}
self.initializeInputParser()
self.style = PygmentsStyle.from_defaults({
Token.Operator: '#33aa33 bold',
Token.Gray: '#424242',
Token.Number: '#aa3333 bold',
Token.Name: '#ffff00 bold',
Token.Heading: 'bold',
Token.TrailingInput: 'bg:#662222 #ffffff',
Token.BoldGreen: '#33aa33 bold',
Token.BoldOrange: '#ff4f2f bold',
Token.BoldBlue: '#095cab bold'})
self.voidMsg = "<none>"
# Create an asyncio `EventLoop` object. This is a wrapper around the
# asyncio loop that can be passed into prompt_toolkit.
eventloop = create_asyncio_eventloop(looper.loop)
self.pers_hist = CliFileHistory(
command_filter=self.mask_seed, filename='.{}-cli-history'.format(self.name))
# Create interface.
app = create_prompt_application('{}> '.format(self.name),
lexer=self.grammarLexer,
completer=self.grammarCompleter,
style=self.style,
history=self.pers_hist)
self.currPromptText = self.name
if output:
out = output
else:
if is_windows():
if is_conemu_ansi():
out = ConEmuOutput(sys.__stdout__)
else:
out = Win32Output(sys.__stdout__)
else:
out = CustomOutput.from_pty(sys.__stdout__, true_color=True)
self.cli = CommandLineInterface(
application=app,
eventloop=eventloop,
output=out)
# Patch stdout in something that will always print *above* the prompt
# when something is written to stdout.
sys.stdout = self.cli.stdout_proxy()
if logFileName:
Logger().enableFileLogging(logFileName)
self.logger = getlogger("cli")
self.print("\n{}-CLI (c) 2017 Evernym, Inc.".format(self.properName))
self._actions = []
if nodeReg:
self.print("Node registry loaded.")
self.showNodeRegistry()
self.print("Type 'help' for more information.")
self.print("Running {} {}\n".format(self.properName,
self.getCliVersion()))
tp = loadPlugins(self.basedirpath)
self.logger.debug("total plugins loaded in cli: {}".format(tp))
self.restoreLastActiveWallet()
self.checkIfCmdHandlerAndCmdMappingExists()
@property
def pool_ledger_dir(self):
return self.ledger_base_dir
def __init_registry(self, useNodeReg=False, nodeReg=None, cliNodeReg=None):
self.nodeRegLoadedFromFile = False
if not (useNodeReg and nodeReg and len(nodeReg) and
cliNodeReg and len(cliNodeReg)):
self.__init_registry_from_ledger()
else:
self.nodeReg = nodeReg
self.cliNodeReg = cliNodeReg
self.nodeRegistry = {}
for nStkNm, nha in self.nodeReg.items():
cStkNm = nStkNm + CLIENT_STACK_SUFFIX
self.nodeRegistry[nStkNm] = NodeDetail(
HA(*nha), cStkNm, HA(*self.cliNodeReg[cStkNm]))
def __init_registry_from_ledger(self):
self.nodeRegLoadedFromFile = True
genesis_txn_initiator = GenesisTxnInitiatorFromFile(
self.pool_ledger_dir, self.config.poolTransactionsFile)
ledger = Ledger(CompactMerkleTree(),
dataDir=self.pool_ledger_dir,
fileName=self.config.poolTransactionsFile,
genesis_txn_initiator=genesis_txn_initiator,
transactionLogStore=KeyValueStorageInMemory())
nodeReg, cliNodeReg, _ = TxnStackManager.parseLedgerForHaAndKeys(
ledger)
ledger.stop()
self.nodeReg = nodeReg
self.cliNodeReg = cliNodeReg
def close(self):
"""
Stops all the created clients and nodes.
"""
for key in self.clients:
self.clients[key].stop()
for key in self.nodes:
self.nodes[key].stop()
def _getCmdMappingError(self, cmdHandlerFuncName, mappingFuncName):
msg = "Command mapping not provided for '{}' command handler. " \
"\nPlease add proper mapping for that command handler " \
"(in function '{}') with corresponding command object.".\
format(cmdHandlerFuncName, mappingFuncName)
sep = "\n" + "*" * 125 + "\n"
msg = sep + msg + sep
return msg
def checkIfCmdHandlerAndCmdMappingExists(self):
for cmdHandlerFunc in self.actions:
funcName = cmdHandlerFunc.__name__.replace("_", "")
if funcName not in self.cmdHandlerToCmdMappings().keys():
raise Exception(self._getCmdMappingError(
cmdHandlerFunc.__name__,
self.cmdHandlerToCmdMappings.__name__))
@staticmethod
def getCliVersion():
return __version__
@property
def genesisTransactions(self):
return self._genesisTransactions
def reset(self):
self._genesisTransactions = []
@property
def actions(self):
if not self._actions:
self._actions = [self._simpleAction, self._helpAction,
self._newNodeAction, self._newClientAction,
self._statusNodeAction, self._statusClientAction,
self._loadPluginDirAction,
self._clientCommand, self._addKeyAction,
self._newKeyAction, self._listIdsAction,
self._useIdentifierAction, self._addGenesisAction,
self._createGenTxnFileAction, self._changePrompt,
self._newWallet, self._renameWallet,
self._useWalletAction, self._saveWalletAction,
self._listWalletsAction]
return self._actions
@property
def config(self):
if self._config:
return self._config
else:
self._config = getConfig()
return self._config
@lazy_field
def walletSaver(self):
return WalletStorageHelper(self.getWalletsBaseDir(),
dmode=self.config.WALLET_DIR_MODE,
fmode=self.config.WALLET_FILE_MODE)
@property
def allGrams(self):
if not self._allGrams:
self._allGrams = [self.utilGrams, self.nodeGrams, self.clientGrams]
return self._allGrams
@property
def completers(self):
if not self._completers:
self._completers = {
'node_command': WordCompleter(self.nodeCmds),
'client_command': WordCompleter(self.cliCmds),
'client': WordCompleter(['client']),
'command': WordCompleter(self.commands),
'node_or_cli': WordCompleter(self.node_or_cli),
'node_name': WordCompleter(self.nodeNames),
'more_nodes': WordCompleter(self.nodeNames),
'helpable': WordCompleter(self.helpablesCommands),
'load_plugins': PhraseWordCompleter('load plugins from'),
'client_name': self.clientWC,
'second_client_name': self.clientWC,
'cli_action': WordCompleter(self.cliActions),
'simple': WordCompleter(self.simpleCmds),
'add_key': PhraseWordCompleter('add key'),
'for_client': PhraseWordCompleter('for client'),
'new_key': PhraseWordCompleter('new key'),
'new_wallet': PhraseWordCompleter('new wallet'),
'rename_wallet': PhraseWordCompleter('rename wallet'),
'list_ids': PhraseWordCompleter('list ids'),
'list_wallet': PhraseWordCompleter('list wallets'),
'become': WordCompleter(['become']),
'use_id': PhraseWordCompleter('use DID'),
'use_wallet': PhraseWordCompleter('use wallet'),
'save_wallet': PhraseWordCompleter('save wallet'),
'add_gen_txn': PhraseWordCompleter('add genesis transaction'),
'prompt': WordCompleter(['prompt']),
'create_gen_txn_file': PhraseWordCompleter(
'create genesis transaction file')
}
return self._completers
@property
def lexers(self):
if not self._lexers:
lexerNames = {
'node_command',
'command',
'helpable',
'load_plugins',
'load',
'node_or_cli',
'node_name',
'more_nodes',
'simple',
'client_command',
'add_key',
'verkey',
'for_client',
'DID',
'new_key',
'list_ids',
'list_wallets',
'become',
'use_id',
'prompt',
'new_wallet',
'use_wallet',
'save_wallet',
'rename_wallet',
'add_genesis',
'create_gen_txn_file'
}
lexers = {n: SimpleLexer(Token.Keyword) for n in lexerNames}
self._lexers = {**lexers}
return self._lexers
def _renameWalletFile(self, oldWalletName, newWalletName):
walletsDir = self.getContextBasedWalletsBaseDir()
oldWalletFilePath = getWalletFilePath(
walletsDir, normalizedWalletFileName(oldWalletName))
if os.path.exists(oldWalletFilePath):
newWalletFilePath = getWalletFilePath(
walletsDir, normalizedWalletFileName(newWalletName))
if os.path.exists(newWalletFilePath):
self.print("A persistent wallet file already exists for "
"new wallet name. Please choose new wallet name.")
return False
os.rename(oldWalletFilePath, newWalletFilePath)
return True
def _renameWallet(self, matchedVars):
if matchedVars.get('rename_wallet'):
fromName = matchedVars.get('from')
toName = matchedVars.get('to')
conflictFound = self._checkIfIdentifierConflicts(
toName, checkInAliases=False, checkInSigners=False)
if not conflictFound:
fromWallet = self.wallets.get(fromName) if fromName \
else self.activeWallet
if not fromWallet:
self.print('Wallet {} not found'.format(fromName))
return True
if not self._renameWalletFile(fromName, toName):
return True
fromWallet.name = toName
del self.wallets[fromName]
self.wallets[toName] = fromWallet
self.print('Wallet {} renamed to {}'.format(fromName, toName))
return True
def _newWallet(self, matchedVars):
if matchedVars.get('new_wallet'):
name = matchedVars.get('name')
conflictFound = self._checkIfIdentifierConflicts(
name, checkInAliases=False, checkInSigners=False)
if not conflictFound:
self._saveActiveWallet()
self._createWallet(name)
return True
def _changePrompt(self, matchedVars):
if matchedVars.get('prompt'):
promptText = matchedVars.get('name')
self._setPrompt(promptText)
return True
def _createGenTxnFileAction(self, matchedVars):
if matchedVars.get('create_gen_txn_file'):
ledger = create_genesis_txn_init_ledger(
self.pool_ledger_dir, self.config.poolTransactionsFile)
ledger.reset()
for item in self.genesisTransactions:
ledger.add(item)
self.print('Genesis transaction file created at {} '
.format(ledger._transactionLog.db_path))
ledger.stop()
return True
def _addGenesisAction(self, matchedVars):
if matchedVars.get('add_gen_txn'):
if matchedVars.get(TARGET_NYM):
return self._addOldGenesisCommand(matchedVars)
else:
return self._addNewGenesisCommand(matchedVars)
def _addNewGenesisCommand(self, matchedVars):
typ = self._getType(matchedVars)
nodeName, nodeData, DID = None, None, None
jsonNodeData = json.loads(matchedVars.get(DATA))
for key, value in jsonNodeData.items():
if key == BY:
DID = value
else:
nodeName, nodeData = key, value
withData = {ALIAS: nodeName}
if typ == NODE:
nodeIp, nodePort = nodeData.get('node_address').split(':')
clientIp, clientPort = nodeData.get('client_address').split(':')
withData[NODE_IP] = nodeIp
withData[NODE_PORT] = int(nodePort)
withData[CLIENT_IP] = clientIp
withData[CLIENT_PORT] = int(clientPort)
newMatchedVars = {TXN_TYPE: typ, DATA: json.dumps(withData),
TARGET_NYM: nodeData.get(VERKEY),
IDENTIFIER: DID}
return self._addOldGenesisCommand(newMatchedVars)
def _addOldGenesisCommand(self, matchedVars):
destId = getFriendlyIdentifier(matchedVars.get(TARGET_NYM))
typ = self._getType(matchedVars)
txn = {
TXN_TYPE: typ,
TARGET_NYM: destId,
}
if matchedVars.get(IDENTIFIER):
txn[IDENTIFIER] = getFriendlyIdentifier(
matchedVars.get(IDENTIFIER))
if matchedVars.get(DATA):
txn[DATA] = json.loads(matchedVars.get(DATA))
self.genesisTransactions.append(txn)
self.print('Genesis transaction added')
return True
def _buildClientIfNotExists(self, config=None):
if not self._activeClient:
if not self.activeWallet:
print("Wallet is not initialized")
return
# Need a unique name so nodes can differentiate
name = self.name + randomString(6)
self.newClient(clientName=name, config=config)
def _getType(self, matchedVars):
typeVar = matchedVars.get(TXN_TYPE)
try:
type = PlenumTransactions(typeVar)
return type.value
except ValueError:
pass
try:
type = PlenumTransactions[typeVar]
return type.value
except KeyError:
pass
self.print("Invalid transaction type. Valid types are: {}". format(
", ".join(map(lambda r: r.name, PlenumTransactions))), Token.Error)
return None
@property
def wallets(self):
return self._wallets
@property
def activeWallet(self) -> Wallet:
if not self._activeWallet:
if self.wallets:
self.activeWallet = firstValue(self.wallets)
else:
self.activeWallet = self._createWallet()
return self._activeWallet
@activeWallet.setter
def activeWallet(self, wallet):
self._activeWallet = wallet
self.print('Active wallet set to "{}"'.format(wallet.name))
@property
def activeClient(self):
self._buildClientIfNotExists()
return self._activeClient
@activeClient.setter
def activeClient(self, client):
self._activeClient = client
self.print("Active client set to " + client.alias)
@staticmethod
def relist(seq):
return '(' + '|'.join(seq) + ')'
def initializeInputParser(self):
self.initializeGrammar()
self.initializeGrammarLexer()
self.initializeGrammarCompleter()
def initializeGrammar(self):
# TODO Do we really need both self.allGrams and self.grams
self.grams = getAllGrams(*self.allGrams)
self.grammar = compile("".join(self.grams))
def initializeGrammarLexer(self):
self.grammarLexer = GrammarLexer(self.grammar, lexers=self.lexers)
def initializeGrammarCompleter(self):
self.grammarCompleter = GrammarCompleter(self.grammar, self.completers)
def print(self, msg, token=None, newline=True):
if newline:
msg += "\n"
tkn = token or ()
part = partial(self.cli.print_tokens, [(tkn, msg)])
if self.debug:
part()
else:
self.cli.run_in_terminal(part)
def printVoid(self):
self.print(self.voidMsg)
def out(self, record, extra_cli_value=None):
"""
Callback so that this cli can manage colors
:param record: a log record served up from a custom handler
:param extra_cli_value: the "cli" value in the extra dictionary
:return:
"""
if extra_cli_value in ("IMPORTANT", "ANNOUNCE"):
self.print(record.msg, Token.BoldGreen) # green
elif extra_cli_value in ("WARNING",):
self.print(record.msg, Token.BoldOrange) # orange
elif extra_cli_value in ("STATUS",):
self.print(record.msg, Token.BoldBlue) # blue
elif extra_cli_value in ("PLAIN", "LOW_STATUS"):
self.print(record.msg, Token) # white
else:
self.print(record.msg, Token)
def cmdHandlerToCmdMappings(self):
        # The keys of the 'mappings' dictionary are the action handler function
        # names without the leading underscore. Every such function name must be
        # mapped here; if you do not want a command to show up in the help, map
        # it to None, but the mapping itself must be present. This forces the
        # developer to either write a help message for the CLI command or make
        # an explicit decision not to show it in the help.
mappings = OrderedDict()
mappings['helpAction'] = helpCmd
mappings['statusAction'] = statusCmd
mappings['changePrompt'] = changePromptCmd
mappings['newNodeAction'] = newNodeCmd
mappings['newClientAction'] = newClientCmd
mappings['statusNodeAction'] = statusNodeCmd
mappings['statusClientAction'] = statusClientCmd
# mappings['keyShareAction'] = keyShareCmd
mappings['loadPluginDirAction'] = loadPluginsCmd
mappings['newWallet'] = newWalletCmd
mappings['renameWallet'] = renameWalletCmd
mappings['useWalletAction'] = useWalletCmd
mappings['saveWalletAction'] = saveWalletCmd
mappings['listWalletsAction'] = listWalletCmd
mappings['newKeyAction'] = newKeyCmd
mappings['useIdentifierAction'] = useIdCmd
mappings['listIdsAction'] = listIdsCmd
mappings['newNodeAction'] = newNodeCmd
mappings['newClientAction'] = newClientCmd
mappings['statusNodeAction'] = statusNodeCmd
mappings['statusClientAction'] = statusClientCmd
mappings['clientSendMsgCommand'] = clientSendCmd
mappings['clientShowMsgCommand'] = clientShowCmd
mappings['addGenesisAction'] = addGenesisTxnCmd
mappings['createGenTxnFileAction'] = createGenesisTxnFileCmd
mappings['licenseAction'] = licenseCmd
mappings['quitAction'] = quitCmd
mappings['exitAction'] = exitCmd
# below action handlers are those who handles multiple commands and so
# these will point to 'None' and specific commands will point to their
# corresponding help msgs.
mappings['clientCommand'] = None
mappings['simpleAction'] = None
# TODO: These seems to be obsolete, so either we need to remove these
# command handlers or let it point to None
mappings['addKeyAction'] = None # obsolete command
return mappings
def getTopComdMappingKeysForHelp(self):
return ['helpAction', 'statusAction']
def getComdMappingKeysToNotShowInHelp(self):
return ['quitAction']
def getBottomComdMappingKeysForHelp(self):
return ['licenseAction', 'exitAction']
def getDefaultOrderedCmds(self):
topCmdKeys = self.getTopComdMappingKeysForHelp()
removeCmdKeys = self.getComdMappingKeysToNotShowInHelp()
bottomCmdsKeys = self.getBottomComdMappingKeysForHelp()
topCmds = [self.cmdHandlerToCmdMappings().get(k) for k in topCmdKeys]
bottomCmds = [self.cmdHandlerToCmdMappings().get(k)
for k in bottomCmdsKeys]
middleCmds = [v for k, v in self.cmdHandlerToCmdMappings().items()
if k not in topCmdKeys and
k not in bottomCmdsKeys and
k not in removeCmdKeys]
return [c for c in (topCmds + middleCmds +
bottomCmds) if c is not None]
def _printGivenCmdsHelpMsgs(self, cmds: Iterable[Command], gapsInLines=1,
sort=False, printHeader=True, showUsageFor=[]):
helpMsgStr = ""
if printHeader:
helpMsgStr += "{}-CLI, a simple command-line interface for a {}.".\
format(self.properName, self.fullName)
helpMsgStr += "\n Commands:"
if sort:
cmds = sorted(cmds, key=lambda hm: hm.id)
for cmd in cmds:
helpMsgLines = cmd.title.split("\n")
helpMsgFormattedLine = "\n ".join(helpMsgLines)
helpMsgStr += "{} {} - {}".format(
'\n' * gapsInLines, cmd.id, helpMsgFormattedLine)
if cmd.id in showUsageFor:
helpMsgStr += "\n Usage:\n {}".\
format(cmd.usage)
self.print("\n{}\n".format(helpMsgStr))
def getHelpCmdIdsToShowUsage(self):
return ["help"]
def printHelp(self):
self._printGivenCmdsHelpMsgs(
self.getDefaultOrderedCmds(),
sort=False,
printHeader=True,
showUsageFor=self.getHelpCmdIdsToShowUsage())
@staticmethod
def joinTokens(tokens, separator=None, begin=None, end=None):
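        # Interleave `separator` between the given (token, text) pairs and
        # optionally wrap the result with `begin`/`end`; plain strings are
        # promoted to (Token, text) tuples.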
if separator is None:
separator = (Token, ', ')
elif isinstance(separator, str):
separator = (Token, separator)
r = reduce(lambda x, y: x + [separator, y] if x else [y], tokens, [])
if begin is not None:
b = (Token, begin) if isinstance(begin, str) else begin
r = [b] + r
if end:
if isinstance(end, str):
r.append((Token, end))
return r
def printTokens(self, tokens, separator=None, begin=None, end=None):
x = self.joinTokens(tokens, separator, begin, end)
self.cli.print_tokens(x, style=self.style)
def printNames(self, names, newline=False):
tokens = [(Token.Name, n) for n in names]
self.printTokens(tokens)
if newline:
self.printTokens([(Token, "\n")])
def showValidNodes(self):
self.printTokens([(Token, "Valid node names are: ")])
self.printNames(self.nodeReg.keys(), newline=True)
def showNodeRegistry(self):
t = []
for name in self.nodeReg:
ip, port = self.nodeReg[name]
t.append((Token.Name, " " + name))
t.append((Token, ": {}:{}\n".format(ip, port)))
self.cli.print_tokens(t, style=self.style)
def loadFromFile(self, file: str) -> None:
cfg = ConfigParser()
cfg.read(file)
self.nodeReg = Cli.loadNodeReg(cfg)
self.cliNodeReg = Cli.loadCliNodeReg(cfg)
@classmethod
def loadNodeReg(cls, cfg: ConfigParser) -> OrderedDict:
return cls._loadRegistry(cfg, 'node_reg')
@classmethod
def loadCliNodeReg(cls, cfg: ConfigParser) -> OrderedDict:
try:
return cls._loadRegistry(cfg, 'client_node_reg')
except configparser.NoSectionError:
return OrderedDict()
@classmethod
def _loadRegistry(cls, cfg: ConfigParser, reg: str):
registry = OrderedDict()
for n in cfg.items(reg):
host, port = n[1].split()
registry.update({n[0]: (host, int(port))})
return registry
def getStatus(self):
self.print('Nodes: ', newline=False)
if not self.nodes:
self.print("No nodes are running. Try typing 'new node <name>'.")
else:
self.printNames(self.nodes, newline=True)
if not self.clients:
clients = "No clients are running. Try typing 'new client <name>'."
else:
clients = ",".join(self.clients.keys())
self.print("Clients: " + clients)
f = getMaxFailures(len(self.nodes))
self.print("f-value (number of possible faulty nodes): {}".format(f))
if f != 0:
node = list(self.nodes.values())[0]
mPrimary = node.replicas[node.instances.masterId].primaryName
bPrimary = node.replicas[node.instances.backupIds[0]].primaryName
self.print("Instances: {}".format(f + 1))
if mPrimary:
self.print(" Master (primary is on {})".
format(Replica.getNodeName(mPrimary)))
if bPrimary:
self.print(" Backup (primary is on {})".
format(Replica.getNodeName(bPrimary)))
else:
self.print("Instances: "
"Not enough nodes to create protocol instances")
# def keyshare(self, nodeName):
# node = self.nodes.get(nodeName, None)
# if node is not None:
# node = self.nodes[nodeName]
# node.startKeySharing()
# elif nodeName not in self.nodeReg:
# tokens = [(Token.Error, "Invalid node name '{}'.".format(nodeName))]
# self.printTokens(tokens)
# self.showValidNodes()
# return
# else:
# tokens = [(Token.Error, "Node '{}' not started.".format(nodeName))]
# self.printTokens(tokens)
# self.showStartedNodes()
# return
def showStartedNodes(self):
self.printTokens([(Token, "Started nodes are: ")])
startedNodes = self.nodes.keys()
if startedNodes:
self.printNames(self.nodes.keys(), newline=True)
else:
self.print("None", newline=True)
def isOkToRunNodeDependentCommands(self):
if not self.withNode:
self.print("This command is only available if you start "
"this cli with command line argument --with-node "
"(and it assumes you have installed indy-node "
"dependency)")
return False
if not self.NodeClass:
self.print("This command requires indy-node dependency, "
"please install it and then resume.")
return False
return True
def newNode(self, nodeName: str):
if not self.isOkToRunNodeDependentCommands():
return
if len(self.clients) > 0 and not self.hasAnyKey:
return
if nodeName in self.nodes:
self.print("Node {} already exists.".format(nodeName))
return
if nodeName == "all":
nodeRegKeys = self.nodeReg.keys()
nodesKeys = self.nodes.keys()
names = set(nodeRegKeys) - set(nodesKeys)
elif nodeName not in self.nodeReg:
tokens = [
(Token.Error, "Invalid node name '{}'. ".format(nodeName))]
self.printTokens(tokens)
self.showValidNodes()
return
else:
names = [nodeName]
nodes = []
for name in names:
try:
nodeRegistry = None if self.nodeRegLoadedFromFile \
else self.nodeRegistry
config_helper = PNodeConfigHelper(name, self.config, chroot=self.nodes_chroot)
learnKeysFromOthers(config_helper.keys_dir, name,
self.nodes.values())
node = self.NodeClass(name,
nodeRegistry=nodeRegistry,
config_helper=config_helper,
pluginPaths=self.pluginPaths,
config=self.config)
except KeysNotFoundException as e:
self.print(str(e), Token.BoldOrange)
return
self.nodes[name] = node
self.looper.add(node)
if not self.nodeRegLoadedFromFile:
# node.startKeySharing()
tellKeysToOthers(node, self.nodes.values())
if len(self.clients) > 0:
self.bootstrapKey(self.activeWallet, node)
for DID, verkey in self.externalClientKeys.items():
node.clientAuthNr.addIdr(DID, verkey)
nodes.append(node)
return nodes
def ensureValidClientId(self, clientName):
"""
        Ensure the client id is not already in use and does not start with
        a node name.
:param clientName:
:return:
"""
if clientName in self.clients:
raise ValueError("Client {} already exists.".format(clientName))
if any([clientName.startswith(nm) for nm in self.nodeNames]):
raise ValueError("Client name cannot start with node names, "
"which are {}."
.format(', '.join(self.nodeReg.keys())))
def statusClient(self, clientName):
if clientName == "all":
for nm in self.clients:
self.statusClient(nm)
return
if clientName not in self.clients:
self.print("client not found", Token.Error)
else:
self.print(" Name: " + clientName)
client = self.clients[clientName] # type: Client
self.printTokens([(Token.Heading, 'Status for client:'),
(Token.Name, client.name)],
separator=' ', end='\n')
self.print(" age (seconds): {:.0f}".format(
time.perf_counter() - client.created))
self.print(" status: {}".format(client.status.name))
self.print(" connected to: ", newline=False)
if client.nodestack.conns:
self.printNames(client.nodestack.conns, newline=True)
else:
self.printVoid()
if self.activeWallet and self.activeWallet.defaultId:
wallet = self.activeWallet
idr = wallet.defaultId
self.print(" DID: {}".format(idr))
self.print(
" Verification key: {}".format(wallet.getVerkey(idr)))
def statusNode(self, nodeName):
if nodeName == "all":
for nm in self.nodes:
self.statusNode(nm)
return
if nodeName not in self.nodes:
self.print("Node {} not found".format(nodeName), Token.Error)
else:
self.print("\n Name: " + nodeName)
node = self.nodes[nodeName] # type: Node
ip, port = self.nodeReg.get(nodeName)
nha = "0.0.0.0:{}".format(port)
self.print(" Node listener: " + nha)
ip, port = self.cliNodeReg.get(nodeName + CLIENT_STACK_SUFFIX)
cha = "0.0.0.0:{}".format(port)
self.print(" Client listener: " + cha)
self.print(" Status: {}".format(node.status.name))
self.print(' Connections: ', newline=False)
connecteds = node.nodestack.connecteds
if connecteds:
self.printNames(connecteds, newline=True)
else:
self.printVoid()
notConnecteds = list({r for r in self.nodes.keys()
if r not in connecteds and
r != nodeName})
if notConnecteds:
self.print(' Not connected: ', newline=False)
self.printNames(notConnecteds, newline=True)
self.print(" Replicas: {}".format(len(node.replicas)),
newline=False)
if node.hasPrimary:
if node.has_master_primary:
self.print(" (primary of Master)")
else:
self.print(" (primary of Backup)")
else:
self.print(" (no primary replicas)")
self.print(" Up time (seconds): {:.0f}".
format(time.perf_counter() - node.created))
self.print(" Clients: ", newline=False)
clients = node.clientstack.connecteds
if clients:
self.printNames(clients, newline=True)
else:
self.printVoid()
def newClient(self, clientName,
config=None):
try:
self.ensureValidClientId(clientName)
if not areKeysSetup(clientName, self.basedirpath):
client_addr = genHa(ip='0.0.0.0')
else:
raise Exception("Usage of deprecated code")
nodeReg = None if self.nodeRegLoadedFromFile else self.cliNodeReg
client = self.ClientClass(clientName,
ha=client_addr,
nodeReg=nodeReg,
basedirpath=self.pool_ledger_dir,
config=config)
self.activeClient = client
self.looper.add(client)
self.clients[clientName] = client
self.clientWC.words = list(self.clients.keys())
return client
except ValueError as ve:
self.print(ve.args[0], Token.Error)
@staticmethod
def bootstrapKey(wallet, node, DID=None):
DID = DID or wallet.defaultId
assert DID, "Client has no DID"
node.clientAuthNr.addIdr(DID, wallet.getVerkey(DID))
def clientExists(self, clientName):
return clientName in self.clients
def printMsgForUnknownClient(self):
self.print("No such client. See: 'help new client' for more details")
def printMsgForUnknownWallet(self, walletName):
self.print("No such wallet {}.".format(walletName))
def sendMsg(self, clientName, msg):
client = self.clients.get(clientName, None)
wallet = self.wallets.get(clientName, None) # type: Wallet
if client:
if wallet:
req = wallet.signOp(msg)
request, errs = client.submitReqs(req)
if request:
rqst = request[0]
self.requests[rqst.key] = rqst
self.print("Request sent, request id: {}".format(
req.reqId), Token.BoldBlue)
else:
for err in errs:
self.print("Request error: {}".format(
err), Token.Error)
else:
try:
self._createWallet(clientName)
self.printNoKeyMsg()
except NameAlreadyExists:
self.print(
"Wallet with name {} is not in use, please select it by using 'use wallet {}' command" .format(
clientName, clientName))
else:
self.printMsgForUnknownClient()
def getReply(self, clientName, DID, reqId):
reqId = int(reqId)
client = self.clients.get(clientName, None)
if client and (DID, reqId) in self.requests:
reply, status = client.getReply(DID, reqId)
self.print("Reply for the request: {}".format(reply))
self.print("Status: {}".format(status))
elif not client:
self.printMsgForUnknownClient()
else:
self.print(
"No such request. See: 'help client show request status' for more details")
async def shell(self, *commands, interactive=True):
"""
Coroutine that runs command, including those from an interactive
command line.
:param commands: an iterable of commands to run first
:param interactive: when True, this coroutine will process commands
entered on the command line.
:return:
"""
# First handle any commands passed in
for command in commands:
if not command.startswith("--"):
self.print("\nRunning command: '{}'...\n".format(command))
self.parse(command)
# then handle commands from the prompt
while interactive:
try:
result = await self.cli.run_async()
cmd = result.text if result else ""
cmds = cmd.strip().splitlines()
for c in cmds:
self.parse(c)
except Exit:
break
except (EOFError, KeyboardInterrupt):
self._saveActiveWallet()
break
self.print('Goodbye.')
def _simpleAction(self, matchedVars):
if matchedVars.get('simple'):
cmd = matchedVars.get('simple')
if cmd == 'status':
self.getStatus()
elif cmd == 'license':
self._showLicense()
elif cmd in ['exit', 'quit']:
self._saveActiveWallet()
raise Exit
return True
def _showLicense(self):
self.print("""
Copyright 2016 Evernym, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""")
def getMatchedHelpableMsg(self, helpable):
cmd_prefix = ' '.join(helpable.split(' ')[:2])
matchedHelpMsgs = [hm for hm in self.cmdHandlerToCmdMappings().values()
if hm and hm.id == cmd_prefix]
if matchedHelpMsgs:
return matchedHelpMsgs[0]
return None
def _helpAction(self, matchedVars):
if matchedVars.get('command') == 'help':
helpable = matchedVars.get('helpable')
if helpable:
matchedHelpMsg = self.getMatchedHelpableMsg(helpable)
if matchedHelpMsg:
self.print(str(matchedHelpMsg))
else:
self.print("No such command found: {}\n".format(helpable))
self.printHelp()
else:
self.printHelp()
return True
def _newNodeAction(self, matchedVars):
if matchedVars.get('node_command') == 'new':
self.createEntities('node_name', 'more_nodes',
matchedVars, self.newNode)
return True
def _newClientAction(self, matchedVars):
if matchedVars.get('client_command') == 'new':
self.createEntities('client_name', 'more_clients',
matchedVars, self.newClient)
return True
def _statusNodeAction(self, matchedVars):
if matchedVars.get('node_command') == 'status':
node = matchedVars.get('node_name')
self.statusNode(node)
return True
def _statusClientAction(self, matchedVars):
if matchedVars.get('client_command') == 'status':
client = matchedVars.get('client_name')
self.statusClient(client)
return True
# def _keyShareAction(self, matchedVars):
# if matchedVars.get('node_command') == 'keyshare':
# name = matchedVars.get('node_name')
# self.keyshare(name)
# return True
def _clientCommand(self, matchedVars):
if matchedVars.get('client') == 'client':
client_name = matchedVars.get('client_name')
client_action = matchedVars.get('cli_action')
if client_action == 'send':
msg = matchedVars.get('msg')
try:
actualMsgRepr = ast.literal_eval(msg)
except Exception as ex:
self.print("error evaluating msg expression: {}".
format(ex), Token.BoldOrange)
return True
self.sendMsg(client_name, actualMsgRepr)
return True
elif client_action == 'show':
req_id = matchedVars.get('req_id')
self.getReply(client_name, self.activeWallet.defaultId, req_id)
return True
def _loadPluginDirAction(self, matchedVars):
if matchedVars.get('load_plugins') == 'load plugins from':
pluginsPath = matchedVars.get('plugin_dir')
try:
plugins = PluginLoader(
pluginsPath).plugins # type: Dict[str, Set]
for pluginSet in plugins.values():
for plugin in pluginSet:
if hasattr(
plugin, "supportsCli") and plugin.supportsCli:
plugin.cli = self
parserReInitNeeded = False
if hasattr(plugin, "grams") and \
isinstance(plugin.grams,
list) and plugin.grams:
self._allGrams.append(plugin.grams)
parserReInitNeeded = True
# TODO Need to check if `plugin.cliActionNames`
# conflicts with any of `self.cliActions`
if hasattr(
plugin,
"cliActionNames") and isinstance(
plugin.cliActionNames,
set) and plugin.cliActionNames:
self.cliActions.update(plugin.cliActionNames)
# TODO: Find better way to reinitialize completers
# , also might need to reinitialize lexers
self._completers = {}
parserReInitNeeded = True
if parserReInitNeeded:
self.initializeInputParser()
self.cli.application.buffer.completer = \
self.grammarCompleter
self.cli.application.layout.children[
1].children[
1].content.content.lexer = self.grammarLexer
if hasattr(plugin, "actions") and \
isinstance(plugin.actions, list):
self._actions.extend(plugin.actions)
self.plugins.update(plugins)
self.pluginPaths.append(pluginsPath)
except FileNotFoundError as ex:
_, err = ex.args
self.print(err, Token.BoldOrange)
return True
def _addKeyAction(self, matchedVars):
if matchedVars.get('add_key') == 'add key':
verkey = matchedVars.get('verkey')
# TODO make verkey case insensitive
DID = matchedVars.get('DID')
if DID in self.externalClientKeys:
self.print("DID already added", Token.Error)
return
self.externalClientKeys[DID] = verkey
for n in self.nodes.values():
n.clientAuthNr.addIdr(DID, verkey)
return True
def _addSignerToGivenWallet(self, signer, wallet: Wallet=None,
showMsg: bool=False):
if not wallet:
wallet = self._createWallet()
wallet.addIdentifier(signer=signer)
if showMsg:
self.print("Key created in wallet " + wallet.name)
def _newSigner(self,
wallet=None,
DID=None,
seed=None,
alias=None):
cseed = cleanSeed(seed)
signer = DidSigner(identifier=DID, seed=cseed, alias=alias)
self._addSignerToGivenWallet(signer, wallet, showMsg=True)
self.print("DID for key is {}".format(signer.identifier))
self.print("Verification key is {}".format(signer.verkey))
if alias:
self.print("Alias for DID is {}".format(signer.alias))
self._setActiveIdentifier(signer.identifier)
self.bootstrapClientKeys(signer.identifier, signer.verkey,
self.nodes.values())
return signer
@staticmethod
def bootstrapClientKeys(idr, verkey, nodes):
bootstrapClientKeys(idr, verkey, nodes)
def isValidSeedForNewKey(self, seed):
if seed:
seed = seed.strip()
if len(seed) != 32 and not seedFromHex(seed):
self.print(
'Seed needs to be 32 or 64 characters (if hex) long '
'but is {} characters long'.format(
len(seed)), Token.Error)
return False
return True
def _newKeyAction(self, matchedVars):
if matchedVars.get('new_key') == 'new key':
seed = matchedVars.get('seed')
if not self.isValidSeedForNewKey(seed):
return True
alias = matchedVars.get('alias')
if alias:
alias = alias.strip()
self._newSigner(seed=seed, alias=alias, wallet=self.activeWallet)
return True
def _buildWalletClass(self, nm):
return self.walletClass(nm)
@property
def walletClass(self):
return Wallet
def _createWallet(self, walletName=None):
nm = walletName or self.defaultWalletName
while True:
conflictFound = self._checkIfIdentifierConflicts(
nm, checkInAliases=False, checkInSigners=False,
printAppropriateMsg=False)
if not conflictFound:
break
if walletName and conflictFound:
raise NameAlreadyExists
nm = "{}_{}".format(nm, randomString(5))
if nm in self.wallets:
self.print("Wallet {} already exists".format(nm))
wallet = self._wallets[nm]
self.activeWallet = wallet # type: Wallet
return wallet
wallet = self._buildWalletClass(nm)
self._wallets[nm] = wallet
self.print("New wallet {} created".format(nm))
self.activeWallet = wallet
# TODO when the command is implemented
# if nm == self.defaultWalletName:
# self.print("Note, you can rename this wallet by:")
# self.print(" rename wallet {} to NewName".format(nm))
return wallet
def _listWalletsAction(self, matchedVars):
if matchedVars.get('list_wallets') == 'list wallets':
# TODO move file system related routine to WalletStorageHelper
walletBaseDir = self.getWalletsBaseDir()
contextDirPath = self.getContextBasedWalletsBaseDir()
dirs_to_scan = self.getAllSubDirNamesForWallets()
if contextDirPath not in dirs_to_scan:
dirs_to_scan.insert(0, contextDirPath)
dirs_to_scan = [os.path.join(walletBaseDir, e)
for e in dirs_to_scan]
anyWalletFound = False
for dir in dirs_to_scan:
# removed os path separator at the end
cleaned_dir_name = dir.rstrip(os.sep)
dir_name = basename(cleaned_dir_name)
files = glob.glob(
"{}/*.{}".format(cleaned_dir_name, WALLET_FILE_EXTENSION))
persistedWalletNames = []
unpersistedWalletNames = []
if len(files) > 0:
for f in files:
walletName = Cli.getWalletKeyName(basename(f))
persistedWalletNames.append(walletName)
if contextDirPath == cleaned_dir_name:
unpersistedWalletNames = [
n for n in self.wallets.keys()
if n.lower() not in persistedWalletNames]
if len(persistedWalletNames) > 0 or \
len(unpersistedWalletNames) > 0:
anyWalletFound = True
self.print("\nContext Name: {}".format(
dir_name), newline=False)
self.print(" (path:{})".format(dir), Token.Gray)
if len(persistedWalletNames) > 0:
self.print(" Persisted wallets:")
for pwn in persistedWalletNames:
f = os.path.join(cleaned_dir_name,
normalizedWalletFileName(pwn))
lastModifiedTime = time.ctime(os.path.getmtime(f))
isThisActiveWallet = True if contextDirPath == cleaned_dir_name and \
self._activeWallet is not None and \
self._activeWallet.name.lower() == pwn.lower() \
else False
activeWalletMsg = " [Active wallet, may have some unsaved changes]" \
if isThisActiveWallet else ""
activeWalletSign = "* " if isThisActiveWallet \
else " "
self.print(
" {}{}{}".format(
activeWalletSign,
pwn,
activeWalletMsg),
newline=False)
self.print(" (last modified at: {})".
format(lastModifiedTime), Token.Gray)
if len(unpersistedWalletNames) > 0:
self.print(" Un-persisted wallets:")
for n in unpersistedWalletNames:
self.print(" {}".format(n))
if not anyWalletFound:
self.print("No wallets exists")
return True
def _listIdsAction(self, matchedVars):
if matchedVars.get('list_ids') == 'list ids':
if self._activeWallet:
self.print("Active wallet: {}".
format(self._activeWallet.name), newline=False)
if self._activeWallet.defaultId:
self.print(
" (active DID: {})\n". format(
self._activeWallet.defaultId),
Token.Gray)
if len(self._activeWallet.listIds()) > 0:
self.print("DIDs:")
withVerkeys = matchedVars.get(
'with_verkeys') == 'with verkeys'
for id in self._activeWallet.listIds():
verKey = ""
if withVerkeys:
aliasId = self._activeWallet.aliasesToIds.get(id)
actualId = aliasId if aliasId else id
signer = self._activeWallet.idsToSigners.get(
actualId)
verKey = ", verkey: {}".format(signer.verkey)
self.print(" {}{}".format(id, verKey))
else:
self.print("\nNo DIDs")
else:
self.print("No active wallet found.")
return True
def checkIfPersistentWalletExists(self, name, inContextDir=None):
toBeWalletFileName = normalizedWalletFileName(name)
contextDir = inContextDir or self.getContextBasedWalletsBaseDir()
toBeWalletFilePath = getWalletFilePath(
contextDir, toBeWalletFileName)
if os.path.exists(toBeWalletFilePath):
return toBeWalletFilePath
def _checkIfIdentifierConflicts(self, origName, checkInWallets=True,
checkInAliases=True, checkInSigners=True,
printAppropriateMsg=True,
checkPersistedFile=True):
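        # Conflicts are searched, in order, among loaded wallet names, wallet
        # aliases, DIDs and (optionally) wallet files persisted on disk.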
def _checkIfWalletExists(origName, checkInWallets=True,
checkInAliases=True, checkInSigners=True,
checkPersistedFile=True):
if origName:
name = origName.lower()
allAliases = []
allSigners = []
allWallets = []
for wk, wv in self.wallets.items():
if checkInAliases:
allAliases.extend(
[k.lower() for k in wv.aliasesToIds.keys()])
if checkInSigners:
allSigners.extend(list(wv.listIds()))
if checkInWallets:
allWallets.append(wk.lower())
if name in allWallets:
return True, 'wallet'
if name in allAliases:
return True, 'alias'
if name in allSigners:
return True, 'DID'
if checkPersistedFile:
toBeWalletFilePath = self.checkIfPersistentWalletExists(
origName)
if toBeWalletFilePath:
return True, 'wallet (stored at: {})'.\
format(toBeWalletFilePath)
return False, None
else:
return False, None
status, foundIn = _checkIfWalletExists(origName, checkInWallets,
checkInAliases, checkInSigners,
checkPersistedFile)
if foundIn and printAppropriateMsg:
self.print('"{}" conflicts with an existing {}. '
'Please choose a new name.'.
format(origName, foundIn), Token.Warning)
return status
def _loadWalletIfExistsAndNotLoaded(
self, name, copyAs=None, override=False):
wallet = self._getWalletByName(name)
if not wallet:
walletFileName = normalizedWalletFileName(name)
self.restoreWalletByName(walletFileName, copyAs=copyAs,
override=override)
def _loadFromPath(self, path, copyAs=None, override=False):
if os.path.exists(path):
self.restoreWalletByPath(path, copyAs=copyAs, override=override)
def _getWalletByName(self, name) -> Wallet:
wallets = {k.lower(): v for k, v in self.wallets.items()}
return wallets.get(name.lower())
def checkIfWalletBelongsToCurrentContext(self, wallet):
self.logger.debug("wallet context check: {}".format(wallet.name))
self.logger.debug(" wallet.getEnvName: {}".format(wallet.getEnvName))
self.logger.debug(" active env: {}".format(self.getActiveEnv))
if wallet.getEnvName and wallet.getEnvName != self.getActiveEnv:
self.logger.debug(" doesn't belong to the context")
return False
return True
def _isWalletFilePathBelongsToCurrentContext(self, filePath):
contextBasedWalletsBaseDir = self.getContextBasedWalletsBaseDir()
fileBaseDir = dirname(filePath)
self.logger.debug("wallet file path: {}".format(filePath))
self.logger.debug(" contextBasedWalletsBaseDir: {}".
format(contextBasedWalletsBaseDir))
self.logger.debug(" fileBaseDir: {}".format(fileBaseDir))
if contextBasedWalletsBaseDir != fileBaseDir:
self.logger.debug(" doesn't belong to the context")
return False
return True
def getAllSubDirNamesForWallets(self):
return [NO_ENV]
def checkIfWalletPathBelongsToCurrentContext(self, filePath):
walletsBaseDir = self.getWalletsBaseDir()
baseWalletDirName = dirname(filePath)
if not self._isWalletFilePathBelongsToCurrentContext(filePath):
self.print("\nWallet base directory is: {}"
"\nGiven wallet file {} "
"should be in one of it's sub directories "
"(you can create it if it doesn't exists) "
"according to the environment it belongs to."
"\nPossible sub directory names are: {}".
format(walletsBaseDir, filePath,
self.getAllSubDirNamesForWallets()))
return False
curContextDirName = self.getContextBasedWalletsBaseDir()
if baseWalletDirName != curContextDirName:
self.print(
self.getWalletFileIncompatibleForGivenContextMsg(filePath))
return False
return True
def getWalletFileIncompatibleForGivenContextMsg(self, filePath):
noEnvWalletsBaseDir = self.getNoEnvWalletsBaseDir()
baseWalletDirName = dirname(filePath)
msg = "Given wallet file ({}) doesn't belong to current context.".\
format(filePath)
if baseWalletDirName == noEnvWalletsBaseDir:
msg += "\nPlease disconnect and try again."
else:
msg += "\nPlease connect to '{}' environment and try again.".\
format(basename(baseWalletDirName))
return msg
def _searchAndSetWallet(self, name, copyAs=None, override=False):
if self._activeWallet and self._activeWallet.name.lower() == name.lower():
self.print("Wallet already in use.")
return True
if os.path.isabs(name) and os.path.exists(name):
self._loadFromPath(name, copyAs=copyAs, override=override)
else:
self._loadWalletIfExistsAndNotLoaded(name, copyAs=copyAs,
override=override)
wallet = self._getWalletByName(name)
        if wallet and (not self._activeWallet or self._activeWallet.name != wallet.name):
self._saveActiveWallet()
self.activeWallet = wallet
if not wallet:
self.print("No such wallet found in current context.")
return True
def _saveWalletAction(self, matchedVars):
if matchedVars.get('save_wallet') == 'save wallet':
name = matchedVars.get('wallet')
if not self._activeWallet:
self.print("No active wallet to be saved.\n")
return True
if name:
wallet = self._getWalletByName(name)
if not wallet:
self.print("No such wallet loaded or exists.")
return True
elif wallet.name != self._activeWallet.name:
self.print("Given wallet is not active "
"and it must be already saved.")
return True
self._saveActiveWallet()
return True
def _useWalletAction(self, matchedVars):
if matchedVars.get('use_wallet') == 'use wallet':
name = matchedVars.get('wallet')
override = True if matchedVars.get('override') else False
copyAs = matchedVars.get('copy_as_name')
self._searchAndSetWallet(name, copyAs=copyAs, override=override)
return True
def _setActiveIdentifier(self, idrOrAlias):
if self.activeWallet:
wallet = self.activeWallet
if idrOrAlias not in wallet.aliasesToIds and \
idrOrAlias not in wallet.idsToSigners:
return False
idrFromAlias = wallet.aliasesToIds.get(idrOrAlias)
# If alias found
if idrFromAlias:
self.activeDID = idrFromAlias
self.activeAlias = idrOrAlias
else:
alias = [k for k, v
in wallet.aliasesToIds.items()
if v == idrOrAlias]
self.activeAlias = alias[0] if alias else None
self.activeDID = idrOrAlias
wallet.defaultId = self.activeDID
self.print("Current DID set to {}".
format(self.activeAlias or self.activeDID))
return True
return False
def _useIdentifierAction(self, matchedVars):
if matchedVars.get('use_id') == 'use DID':
nymOrAlias = matchedVars.get('DID')
found = self._setActiveIdentifier(nymOrAlias)
if not found:
self.print("No such DID found in current wallet")
return True
def _setPrompt(self, promptText):
app = create_prompt_application('{}> '.format(promptText),
lexer=self.grammarLexer,
completer=self.grammarCompleter,
style=self.style,
history=self.pers_hist)
self.cli.application = app
self.currPromptText = promptText
# getTokens = lambda _: [(Token.Prompt, promptText + "> ")]
# self.cli.application.layout.children[1].children[0]\
# .content.content.get_tokens = getTokens
def performEnvCompatibilityCheck(self, wallet, walletFilePath):
if not self.checkIfWalletBelongsToCurrentContext(wallet):
self.print(self.getWalletFileIncompatibleForGivenContextMsg(
walletFilePath))
return False
if not self.checkIfWalletPathBelongsToCurrentContext(walletFilePath):
return False
return True
@property
def getWalletContextMistmatchMsg(self):
return "The active wallet '{}' doesn't belong to current " \
"environment. \nBefore you perform any transaction signing, " \
"please create or activate compatible wallet.".\
format(self._activeWallet.name)
def printWarningIfIncompatibleWalletIsRestored(self, walletFilePath):
if not self.checkIfWalletBelongsToCurrentContext(self._activeWallet) \
or not self._isWalletFilePathBelongsToCurrentContext(walletFilePath):
self.print(self.getWalletContextMistmatchMsg)
self.print("Any changes made to this wallet won't be persisted.",
Token.BoldOrange)
def performValidationCheck(self, wallet, walletFilePath, override=False):
if not self.performEnvCompatibilityCheck(wallet, walletFilePath):
return False
conflictFound = self._checkIfIdentifierConflicts(
wallet.name, checkInAliases=False, checkInSigners=False,
checkPersistedFile=False, printAppropriateMsg=False)
if conflictFound and not override:
self.print(
"A wallet with given name already loaded, "
"here are few options:\n"
"1. If you still want to load given persisted wallet at the "
"risk of overriding the already loaded wallet, then add this "
"clause to same command and retry: override\n"
"2. If you want to create a copy of persisted wallet with "
"different name, then, add this clause to "
"same command and retry: copy-as <new-wallet-name>")
return False
return True
def restoreWalletByPath(self, walletFilePath, copyAs=None, override=False):
try:
wallet = self.walletSaver.loadWallet(walletFilePath)
if copyAs:
wallet.name = copyAs
if not self.performValidationCheck(wallet, walletFilePath,
override):
return False
# As the persisted wallet restored and validated successfully,
# before we restore it, lets save active wallet (if exists)
if self._activeWallet:
self._saveActiveWallet()
self._wallets[wallet.name] = wallet
self.print('\nSaved wallet "{}" restored'.
format(wallet.name), newline=False)
self.print(" ({})".format(walletFilePath), Token.Gray)
self.activeWallet = wallet
self.activeDID = wallet.defaultId
self.printWarningIfIncompatibleWalletIsRestored(walletFilePath)
return True
except (ValueError, AttributeError) as e:
self.logger.warning(
"error occurred while restoring wallet {}: {}".
format(walletFilePath, e))
except IOError as e:
self.logger.debug("No such wallet file exists ({})".
format(walletFilePath))
def restoreLastActiveWallet(self):
baseFileName = None
try:
walletPath = self.getContextBasedWalletsBaseDir()
baseFileName = getLastSavedWalletFileName(walletPath)
self._searchAndSetWallet(os.path.join(walletPath, baseFileName))
except ValueError as e:
if not str(e) == "max() arg is an empty sequence":
self.errorDuringRestoringLastActiveWallet(baseFileName, e)
except Exception as e:
self.errorDuringRestoringLastActiveWallet(baseFileName, e)
def errorDuringRestoringLastActiveWallet(self, baseFileName, e):
self.logger.warning("Error occurred during restoring last "
"active wallet ({}), error: {}".
format(baseFileName, str(e)))
raise e
def restoreWalletByName(self, walletFileName, copyAs=None, override=False):
walletFilePath = getWalletFilePath(
self.getContextBasedWalletsBaseDir(), walletFileName)
self.restoreWalletByPath(
walletFilePath, copyAs=copyAs, override=override)
@staticmethod
def getWalletKeyName(walletFileName):
return walletFileName.replace(
".{}".format(WALLET_FILE_EXTENSION), "")
@staticmethod
def getPromptAndEnv(cliName, currPromptText):
if PROMPT_ENV_SEPARATOR not in currPromptText:
return cliName, NO_ENV
else:
return currPromptText.rsplit(PROMPT_ENV_SEPARATOR, 1)
def getActiveWalletPersitentFileName(self):
fileName = self._activeWallet.name if self._activeWallet \
else self.name
return normalizedWalletFileName(fileName)
@property
def walletFileName(self):
return self.getActiveWalletPersitentFileName()
def getNoEnvWalletsBaseDir(self):
return os.path.expanduser(
os.path.join(self.getWalletsBaseDir(), NO_ENV))
def getWalletsBaseDir(self):
return os.path.expanduser(os.path.join(self.basedirpath,
self.config.walletsDir))
def getContextBasedWalletsBaseDir(self):
walletsBaseDir = self.getWalletsBaseDir()
prompt, envName = Cli.getPromptAndEnv(self.name,
self.currPromptText)
envWalletDir = walletsBaseDir
if envName != "":
envWalletDir = os.path.join(walletsBaseDir, envName)
return envWalletDir
def isAnyWalletFileExistsForGivenEnv(self, env):
walletPath = self.getWalletsBaseDir()
envWalletPath = os.path.join(walletPath, env)
pattern = "{}/*.{}".format(envWalletPath, WALLET_FILE_EXTENSION)
return self.isAnyWalletFileExistsForGivenContext(pattern)
def isAnyWalletFileExistsForGivenContext(self, pattern):
# TODO move that to WalletStorageHelper
files = glob.glob(pattern)
if files:
return True
else:
return False
def isAnyWalletFileExistsForCurrentContext(self):
walletPath = self.getContextBasedWalletsBaseDir()
pattern = "{}/*.{}".format(walletPath, WALLET_FILE_EXTENSION)
return self.isAnyWalletFileExistsForGivenContext(pattern)
@property
def getActiveEnv(self):
return None
def updateEnvNameInWallet(self):
pass
def performCompatibilityCheckBeforeSave(self):
if self._activeWallet.getEnvName != self.getActiveEnv:
walletEnvName = self._activeWallet.getEnvName \
if self._activeWallet.getEnvName else "a different"
currEnvName = " ({})".format(self.getActiveEnv) \
if self.getActiveEnv else ""
self.print("Active wallet belongs to '{}' environment and can't "
"be saved to the current environment{}.".
format(walletEnvName, currEnvName),
Token.BoldOrange)
return False
return True
def _saveActiveWalletInDir(self, contextDir, printMsgs=True):
try:
walletFilePath = self.walletSaver.saveWallet(
self._activeWallet,
getWalletFilePath(contextDir, self.walletFileName))
if printMsgs:
self.print('Active wallet "{}" saved'.format(
self._activeWallet.name), newline=False)
self.print(' ({})'.format(walletFilePath), Token.Gray)
except IOError as ex:
self.logger.info("Error occurred while saving wallet. " +
"error no.{}, error.{}"
.format(ex.errno, ex.strerror))
def _saveActiveWallet(self):
if self._activeWallet:
            # Only save if the user already has a wallet; accessing the
            # `activeWallet` property here would otherwise create one.
self.updateEnvNameInWallet()
if not self.performCompatibilityCheckBeforeSave():
return False
walletsDir = self.getContextBasedWalletsBaseDir()
self._saveActiveWalletInDir(walletsDir, printMsgs=True)
def mask_seed(self, cmd_text):
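        # Replace the token that follows each "seed" keyword with "[redacted]"
        # so seeds never reach the command log, e.g.
        # "new key with seed S3cr3tS3ed" -> "new key with seed [redacted]".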
parts = cmd_text.split()
prev_seed = False
for idx, val in enumerate(parts):
if prev_seed:
parts[idx] = "[redacted]"
prev_seed = (val == "seed")
return " ".join(parts)
def parse(self, cmdText):
cmdText = cmdText.strip()
m = self.grammar.match(cmdText)
# noinspection PyProtectedMember
if m and len(m.variables()._tuples):
matchedVars = m.variables()
self.logger.info(
"CLI command entered: {}".format(
self.mask_seed(cmdText)), extra={
"cli": False})
for action in self.actions:
r = action(matchedVars)
if r:
break
else:
self.invalidCmd(cmdText)
else:
if cmdText != "":
self.invalidCmd(cmdText)
@staticmethod
def createEntities(name: str, moreNames: str, matchedVars, initializer):
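        # Collect the primary name plus any comma-separated extra names from
        # `matchedVars` and call `initializer` for each; seed/DID are only
        # forwarded when exactly one name was given.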
entity = matchedVars.get(name)
more = matchedVars.get(moreNames)
more = more.split(',') if more is not None and len(more) > 0 else []
names = [n for n in [entity] + more if len(n) != 0]
seed = matchedVars.get("seed")
DID = matchedVars.get("nym")
if len(names) == 1 and (seed or DID):
initializer(names[0].strip(), seed=seed, identifier=DID)
else:
for name in names:
initializer(name.strip())
def invalidCmd(self, cmdText):
matchedHelpMsg = self.getMatchedHelpableMsg(cmdText)
if matchedHelpMsg:
self.print("Invalid syntax: '{}'".format(cmdText))
self.print(str(matchedHelpMsg))
else:
self.print("Invalid command: '{}'".format(cmdText))
self.printHelp()
# def nextAvailableClientAddr(self, curClientPort=8100):
# self.curClientPort = self.curClientPort or curClientPort
# # TODO: Find a better way to do this
# self.curClientPort += random.randint(1, 200)
# host = "0.0.0.0"
# try:
# checkPortAvailable((host, self.curClientPort))
# assert not isPortUsed(self.basedirpath, self.curClientPort), \
# "Port used by a remote"
# return host, self.curClientPort
# except Exception as ex:
# tokens = [(Token.Error, "Cannot bind to port {}: {}, "
# "trying another port.\n".
# format(self.curClientPort, ex))]
# self.printTokens(tokens)
# return self.nextAvailableClientAddr(self.curClientPort)
@property
def hasAnyKey(self):
if not self._activeWallet or not self._activeWallet.defaultId:
self.printNoKeyMsg()
return False
return True
def printNoKeyMsg(self):
self.print("No key present in wallet")
self.printUsage(("new key [with seed <32 byte string>]", ))
def printUsage(self, msgs):
self.print("\nUsage:")
for m in msgs:
self.print(' {}'.format(m))
self.print("\n")
# TODO: Do we keep this? What happens when we allow the CLI to connect
# to remote nodes?
def cleanUp(self):
dataPath = os.path.join(self.basedirpath, "data")
try:
shutil.rmtree(dataPath, ignore_errors=True)
except FileNotFoundError:
pass
def __hash__(self):
return hash((self.name, self.unique_name, self.basedirpath))
def __eq__(self, other):
return (self.name, self.unique_name, self.basedirpath) == \
            (other.name, other.unique_name, other.basedirpath)
class Exit(Exception):
pass
| 39.974172
| 119
| 0.573104
|
7bc4ffdff2074a7dd1a2664af7adbe879862ba32
| 1,769
|
py
|
Python
|
django_project/billing/views.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
django_project/billing/views.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
django_project/billing/views.py
|
aliyaandabekova/DJANGO_PROJECT
|
7b94f80fa56acf936da014aa5d91da79457bf4eb
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
from .forms import CardCreateForm,PayForm, TransactionForm
from .models import Card
def cardCreate(request):
form = CardCreateForm(initial={'profile':request.user.profile})
if request.method == 'POST':
form = CardCreateForm(request.POST)
if form.is_valid():
form.save()
            return HttpResponse('Card created successfully!')
return render(request,'card_form.html',{'form':form})
def incrementBalance(request):
form = PayForm()
if request.method == 'POST':
form = PayForm(request.POST)
if form.is_valid():
amount = form.cleaned_data.get('amount')
profile = request.user.profile
card = Card.objects.get(profile=profile)
if card.balance >= amount:
card.balance -= amount
profile.wallet += amount
card.save()
profile.save()
return render(request,'pay_form.html',{'form':form})
def transactionPage(request):
    form = TransactionForm()
if request.method == 'POST':
form = TransactionForm(request.POST)
if form.is_valid():
from_profile = request.user.profile
to_profile = form.instance.to_profile
amount = form.instance.amount
            # compare model instances by equality (pk), not object identity
            if from_profile != to_profile:
if from_profile.wallet >= amount:
from_profile.wallet -= amount
to_profile.wallet += amount
from_profile.save()
to_profile.save()
form.instance.from_profile = from_profile
form.save()
return render(request,'transaction_form.html',{'form':form})
| 38.456522
| 67
| 0.600339
|
09bdaf9fa0a6c7f037e16c29f3c31d565ef27bd2
| 2,165
|
py
|
Python
|
azure-mgmt-batchai/azure/mgmt/batchai/models/file.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-batchai/azure/mgmt/batchai/models/file.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-batchai/azure/mgmt/batchai/models/file.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class File(Model):
"""Properties of the file or directory.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Name of the file.
:vartype name: str
:ivar file_type: Contains information about file type. Possible values
include: 'file', 'directory'
:vartype file_type: str or ~azure.mgmt.batchai.models.FileType
:ivar download_url: Will contain an URL to download the corresponding
file. The downloadUrl is not returned for directories.
:vartype download_url: str
    :ivar last_modified: The time at which the file was last modified.
:vartype last_modified: datetime
    :ivar content_length: The file size.
:vartype content_length: long
"""
_validation = {
'name': {'readonly': True},
'file_type': {'readonly': True},
'download_url': {'readonly': True},
'last_modified': {'readonly': True},
'content_length': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'file_type': {'key': 'fileType', 'type': 'str'},
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'content_length': {'key': 'properties.contentLength', 'type': 'long'},
}
def __init__(self, **kwargs):
super(File, self).__init__(**kwargs)
self.name = None
self.file_type = None
self.download_url = None
self.last_modified = None
self.content_length = None
| 36.694915
| 80
| 0.609238
|
122e4183e2a98897ac0a71cf75fa9fdf7e7e3295
| 586
|
py
|
Python
|
var/spack/repos/builtin/packages/sandbox/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/sandbox/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/sandbox/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Sandbox(AutotoolsPackage):
"""sandbox'd LD_PRELOAD hack by Gentoo Linux"""
homepage = "https://www.gentoo.org/proj/en/portage/sandbox/"
url = "https://dev.gentoo.org/~mgorny/dist/sandbox-2.12.tar.xz"
version('2.12', sha256='265a490a8c528237c55ad26dfd7f62336fa5727c82358fc9cfbaa2e52c47fc50')
depends_on('gawk', type='build')
| 32.555556
| 94
| 0.737201
|
c9afbd465f28e2e08b6f5e71619f7d7ded93e8ad
| 2,273
|
py
|
Python
|
test/distributed/fsdp/test_fsdp_multiple_forward.py
|
ljhOfGithub/pytorch
|
c568f7b16f2a98d72ff5b7c6c6161b67b2c27514
|
[
"Intel"
] | 2
|
2020-03-13T06:57:49.000Z
|
2020-05-17T04:18:14.000Z
|
test/distributed/fsdp/test_fsdp_multiple_forward.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | 1
|
2019-07-23T15:23:32.000Z
|
2019-07-23T15:32:23.000Z
|
test/distributed/fsdp/test_fsdp_multiple_forward.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | 2
|
2019-07-23T14:37:31.000Z
|
2019-07-23T14:47:13.000Z
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn import Linear, Module
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
get_full_params,
)
from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Model(Module):
def __init__(self, wrap_fsdp):
super().__init__()
# keep everything deterministic for model initialization
torch.manual_seed(0)
self.inner = Linear(4, 4)
if wrap_fsdp:
self.inner = FSDP(self.inner)
self.outer = Linear(4, 5)
def forward(self, x):
# Forward twice.
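        # Re-using self.inner twice in one pass is the point of this test:
        # the (optionally FSDP-wrapped) module must support multiple forwards.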
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
class TestMultiForward(FSDPTest):
def _dist_train(self, wrap_fsdp):
# keep everything deterministic for input data
torch.manual_seed(0)
model = Model(wrap_fsdp).cuda()
if wrap_fsdp:
model = FSDP(model)
else:
model = DistributedDataParallel(model, device_ids=[self.rank])
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if wrap_fsdp:
return get_full_params(model)
return list(model.parameters())
@skip_if_lt_x_gpu(2)
def test_multi_forward(self):
# DDP
ddp_state = self._dist_train(wrap_fsdp=False)
# FSDP
fsdp_state = self._dist_train(wrap_fsdp=True)
self.assertEqual(ddp_state, fsdp_state)
if __name__ == "__main__":
run_tests()
| 26.430233
| 82
| 0.650242
|
6b5672afad589a19b1252b223067f45d787a4eb3
| 30
|
py
|
Python
|
quel/__init__.py
|
eppingere/hackcmu18-backend
|
696c050c4ce5acdf49aeaeeaded730a33443f5bd
|
[
"MIT"
] | 2
|
2018-09-22T00:18:06.000Z
|
2018-09-23T04:49:29.000Z
|
quel/__init__.py
|
eppingere/hackcmu18-backend
|
696c050c4ce5acdf49aeaeeaded730a33443f5bd
|
[
"MIT"
] | null | null | null |
quel/__init__.py
|
eppingere/hackcmu18-backend
|
696c050c4ce5acdf49aeaeeaded730a33443f5bd
|
[
"MIT"
] | null | null | null |
from .sort import sort_a_list
| 15
| 29
| 0.833333
|
3d746a2ab4eeb7e97cb109141356c26daa107ce7
| 221
|
py
|
Python
|
frappe/patches/v13_0/disable_system_update_notification.py
|
fproldan/frappe
|
7547bb04d7375b546d9662899dd13c31b8ecc3fb
|
[
"MIT"
] | null | null | null |
frappe/patches/v13_0/disable_system_update_notification.py
|
fproldan/frappe
|
7547bb04d7375b546d9662899dd13c31b8ecc3fb
|
[
"MIT"
] | 17
|
2021-03-22T18:47:14.000Z
|
2022-03-15T12:21:00.000Z
|
frappe/patches/v13_0/disable_system_update_notification.py
|
fproldan/frappe
|
7547bb04d7375b546d9662899dd13c31b8ecc3fb
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("core", "doctype", "system_settings")
frappe.db.set_value('System Settings', None, "disable_system_update_notification", 1)
| 27.625
| 89
| 0.769231
|
7b9567176c3066c9795afed144ab00601d9bc24d
| 83
|
py
|
Python
|
template.py
|
byarmis/AdventOfCode
|
9c91808c2ea06d49f7e726779ac44918a99136f0
|
[
"Unlicense"
] | 3
|
2020-08-05T10:18:59.000Z
|
2022-01-19T08:28:16.000Z
|
template.py
|
byarmis/AdventOfCode
|
9c91808c2ea06d49f7e726779ac44918a99136f0
|
[
"Unlicense"
] | 2
|
2016-03-24T15:28:51.000Z
|
2019-12-10T03:54:47.000Z
|
template.py
|
byarmis/AdventOfCode
|
9c91808c2ea06d49f7e726779ac44918a99136f0
|
[
"Unlicense"
] | 4
|
2020-08-19T05:06:16.000Z
|
2021-02-03T09:53:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
pass
| 13.833333
| 26
| 0.566265
|
5cd9fe69482202e3241424e9da5b38564d6f8200
| 2,467
|
py
|
Python
|
aiida/utils/capturing.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/utils/capturing.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/utils/capturing.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | 1
|
2018-12-21T11:10:09.000Z
|
2018-12-21T11:10:09.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from cStringIO import StringIO
import sys
class Capturing(object):
"""
This class captures stdout and returns it
(as a list, split by lines).
Note: if you raise a SystemExit, you have to catch it outside.
E.g., in our tests, this works::
import sys
with self.assertRaises(SystemExit):
with Capturing() as output:
sys.exit()
But out of the testing environment, the code instead just exits.
To use it, access the obj.stdout_lines, or just iterate over the object
:param capture_stderr: if True, also captures sys.stderr. To access the
lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
"""
def __init__(self, capture_stderr=False):
self.stdout_lines = list()
super(Capturing, self).__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = list()
else:
self.stderr_lines = None
def __enter__(self):
self._stdout = sys.stdout
self._stringioout = StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout # free up some memory
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr # free up some memory
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
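# A minimal usage sketch (not part of the original AiiDA module): capture a
# print() call and read the captured lines back afterwards.
if __name__ == '__main__':
    with Capturing(capture_stderr=True) as output:
        print('hello from inside the context manager')
    print(output.stdout_lines)  # -> ['hello from inside the context manager']
    print(output.stderr_lines)  # -> [] since nothing was written to stderr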
| 35.753623
| 79
| 0.568707
|
3ed802774b9a1329007a9dc332e1a02e8db62687
| 914
|
py
|
Python
|
Python/Algorithms/Dynamic Programming/MultiStageGraph.py
|
arghyadeep99/DS_Algorithms
|
ac1ea351204f8cf37c41033e40338270f042118e
|
[
"MIT"
] | 2
|
2019-03-21T04:41:05.000Z
|
2019-05-09T05:01:03.000Z
|
Python/Algorithms/Dynamic Programming/MultiStageGraph.py
|
arghyadeep99/DS_Algorithms
|
ac1ea351204f8cf37c41033e40338270f042118e
|
[
"MIT"
] | null | null | null |
Python/Algorithms/Dynamic Programming/MultiStageGraph.py
|
arghyadeep99/DS_Algorithms
|
ac1ea351204f8cf37c41033e40338270f042118e
|
[
"MIT"
] | null | null | null |
from prettytable import PrettyTable
pr1=PrettyTable()
stages, minimum, n = 4, float('inf'), 8
cost, d, path = [0] * 9, [0] * 9, [0] * 9
graph=[[0,0,0,0,0,0,0,0,0],[0,0,2,1,3,0,0,0,0],[0,0,0,0,0,2,3,0,0],[0,0,0,0,0,6,7,0,0],[0,0,0,0,0,6,8,9,0],[0,0,0,0,0,0,0,0,6],[0,0,0,0,0,0,0,0,4],[0,0,0,0,0,0,0,0,5],[0,0,0,0,0,0,0,0,0]]
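# graph[i][k] is the edge cost from vertex i to vertex k (0 means no edge);
# vertex 0 is unused, so the 8 vertices of this 4-stage graph are numbered 1..8.
# Backward dynamic programming: cost[i] = min over successors k of
# (graph[i][k] + cost[k]), with d[i] recording the successor chosen.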
for i in range(n-1,0,-1):
minimum=float('inf')
for k in range(i+1,n+1):
if graph[i][k]!=0 and graph[i][k]+cost[k]<minimum:
minimum=graph[i][k]+cost[k]
d[i]=k
cost[i]=minimum
pr1.field_names=['Vertex','Cost','Destination']
zipped=list(zip(list(i for i in range(9)), cost,d))
for row in zipped:
pr1.add_row(row)
print (pr1.get_string(header=True, border=True))
path[1],path[stages]=1,n
for i in range(2,stages):
path[i]=d[path[i-1]]
print('The path is: ')
for i in range(1,stages):
print(path[i],'->', end=' ')
print(path[stages])
| 36.56
| 187
| 0.609409
|
0d619081bee4402576d6e915a8358efd7c42126c
| 15,078
|
py
|
Python
|
blahtex/__init__.py
|
amuramatsu/blahtex-py
|
9644ec8e4edaee707c8e7f2d3094f469cb1c3727
|
[
"BSD-3-Clause"
] | null | null | null |
blahtex/__init__.py
|
amuramatsu/blahtex-py
|
9644ec8e4edaee707c8e7f2d3094f469cb1c3727
|
[
"BSD-3-Clause"
] | null | null | null |
blahtex/__init__.py
|
amuramatsu/blahtex-py
|
9644ec8e4edaee707c8e7f2d3094f469cb1c3727
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License
#
# Copyright (c) 2020, MURAMATSU Atshshi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import _blahtex # type: ignore
import enum
import textwrap
BlahtexException = _blahtex.BlahtexException
class Blahtex(object):
'''
Usage
=====
>> from blahtex import Blahtex
    >> bl = Blahtex(spacing=Blahtex.SPACING.RELAXED)
>> bl.indented = True
>> bl.convert(r'\sqrt{3} * \pi')
'<math xmlns="http://www.w3.org/1998/Math/MathML" display="inline"><mrow><msqrt><mn>3</mn></msqrt><mo>*</mo><mi>π</mi></mrow></math>'
Options
-------
    The options of blahtex are exposed as attributes of the Blahtex object.
    indented: bool
        Output each MathML tag on a separate line, with indenting.
        Default is False.
    texvc_compatibility: bool
        Enables use of commands that are specific to texvc but are not
        standard TeX/LaTeX/AMS-LaTeX commands.
        Default is False.
    spacing: Blahtex.SPACING
        Controls how much MathML spacing markup to use (i.e. ``<mspace>`` tags,
        and ``lspace/rspace`` attributes). Blahtex always uses TeX's rules
        (or an approximation thereof) to compute how much space to place
        between symbols in the equation, but this option describes how often
        it will actually emit MathML spacing markup to implement its spacing
        decisions.
        Blahtex.SPACING.STRICT
            Output spacing markup everywhere possible; leave as little choice
            as possible to the MathML renderer. This will result in the most
            bloated output, but hopefully will look as much like TeX output as
            possible.
        Blahtex.SPACING.MODERATE
            Output spacing commands whenever blahtex thinks a typical MathML
            renderer is likely to do something visually unsatisfactory without
            additional help. The aim is to get good agreement with TeX
            without overly bloated MathML markup. (It's very difficult to get
            this right, so I expect it to be under continual review.)
        Blahtex.SPACING.RELAXED
            Only output spacing commands when the user specifically asks for
            them, using TeX commands like ``\,`` or ``\quad``.
        Default is Blahtex.SPACING.RELAXED.
The magic command ``\strictspacing`` will override this setting.
    disallow_plane_1: bool
        If True, any character outside the Unicode Basic Multilingual Plane
        is replaced by an XML numeric entity.
        Default is False.
    mathml_encoding: Blahtex.ENCODING
        Controls the way blahtex outputs MathML characters.
        Blahtex.ENCODING.RAW
            Output unicode characters.
        Blahtex.ENCODING.NUMERIC
            Use XML numeric entities.
        Blahtex.ENCODING.SHORT
            Use **short** MathML entity names.
        Blahtex.ENCODING.LONG
            Use **long** MathML entity names.
        Default is Blahtex.ENCODING.RAW.
    other_encoding: Blahtex.ENCODING
        Controls the way blahtex outputs characters other than ASCII/MathML
        characters.
        Blahtex.ENCODING.RAW
            Output unicode characters.
        Blahtex.ENCODING.NUMERIC
            Use XML numeric entities.
        Default is Blahtex.ENCODING.RAW.
mathml_version1_fonts: bool
        Forbids use of the ``mathvariant`` attribute, which is only available
in MathML 2.0 or later. Instead, blahtex will use MathML version 1.x
font attributes: ``fontfamily``, ``fontstyle`` and ``fontweight``,
which are all deprecated in MathML 2.0.
Default is False.
'''
class ENCODING(enum.Enum):
RAW = 0
NUMERIC = 1
SHORT = 2
LONG = 3
class SPACING(enum.Enum):
STRICT = 0
MODERATE = 1
RELAXED = 2
def __init__(self, **opts):
'''Constructor.
You can set options by keyword arguments.
'''
super().__setattr__('_core', _blahtex.Blahtex())
super().__setattr__('_inputted', False)
o = {
"disallow_plane_1": False,
"spacing": self.SPACING.RELAXED,
"mathml_encoding": self.ENCODING.RAW,
"other_encoding": self.ENCODING.RAW,
}
o.update(opts)
self.set_options(o)
def __setattr__(self, key, value):
if key == "indented":
self._core.indented = value
elif key == "texvc_compatibility":
self._core.texvc_compatibility = value
elif key == "spacing":
if value == self.SPACING.STRICT:
v = _blahtex.MathmlOptions.SpacingControl.STRICT
elif value == self.SPACING.MODERATE:
v = _blahtex.MathmlOptions.SpacingControl.MODERATE
elif value == self.SPACING.RELAXED:
v = _blahtex.MathmlOptions.SpacingControl.RELAXED
else:
raise ValueError(
"spacing must be one of "
"Blahtex.SPACING.{STRICT,MODERATE,RELAXED}")
self._core.mathml_options.spacing_control = v
elif key == "disallow_plane_1":
self._core.mathml_options.allow_plane1 = not value
self._core.encoding_options.allow_plane1 = not value
elif key == "mathml_encoding":
if value == self.ENCODING.RAW:
v = _blahtex.EncodingOptions.MathmlEncoding.RAW
elif value == self.ENCODING.NUMERIC:
v = _blahtex.EncodingOptions.MathmlEncoding.NUMERIC
elif value == self.ENCODING.LONG:
v = _blahtex.EncodingOptions.MathmlEncoding.LONG
elif value == self.ENCODING.SHORT:
v = _blahtex.EncodingOptions.MathmlEncoding.SHORT
else:
raise ValueError(
"mathml_encoding must be one of "
"Blahtex.ENCODING.{RAW,NUMERIC,LONG,SHORT}")
self._core.encoding_options.mathml_encoding = v
elif key == "other_encoding":
if value == self.ENCODING.RAW:
v = True
elif value == self.ENCODING.NUMERIC:
v = False
else:
raise ValueError(
"other_encoding must be one of "
"Blahtex.ENCODING.{RAW,NUMERIC}")
self._core.encoding_options.other_encoding_raw = v
elif key == "mathml_version1_fonts":
self._core.mathml_options.use_version1_font_attributes = value
elif key == "use_ucs_package":
self._core.purified_tex_options.allow_ucs = value
elif key == "use_cjk_package":
self._core.purified_tex_options.allow_cjk = value
elif key == "use_preview_package":
self._core.purified_tex_options.allow_preview = value
elif key == "japanese_font":
self._core.purified_tex_options.japanese_font = value
elif key == "latex_preamble":
self._core.purified_tex_options.latex_preamble = value
elif key == "latex_before_math":
self._core.purified_tex_options.latex_before_math = value
else:
raise ValueError("Unknown attribute '{}'".format(key))
def __getattr__(self, key):
if key == "indented":
return self._core.indented
elif key == "texvc_compatibility":
return self._core.texvc_compatibility
elif key == "spacing":
v = self._core.mathml_options.spacing_control
if v == _blahtex.MathmlOptions.SpacingControl.STRICT:
return self.SPACING.STRICT
elif v == _blahtex.MathmlOptions.SpacingControl.MODERATE:
return self.SPACING.MODERATE
elif v == _blahtex.MathmlOptions.SpacingControl.RELAXED:
return self.SPACING.RELAXED
else:
raise Exception()
elif key == "disallow_plane_1":
return not self._core.mathml_options.allow_plane1
elif key == "mathml_encoding":
v = self._core.encoding_options.mathml_encoding
if v == _blahtex.EncodingOptions.MathmlEncoding.RAW:
return self.ENCODING.RAW
elif v == _blahtex.EncodingOptions.MathmlEncoding.NUMERIC:
return self.ENCODING.NUMERIC
elif v == _blahtex.EncodingOptions.MathmlEncoding.LONG:
return self.ENCODING.LONG
elif v == _blahtex.EncodingOptions.MathmlEncoding.SHORT:
return self.ENCODING.SHORT
else:
raise Exception()
elif key == "other_encoding":
if self._core.encoding_options.other_encoding_raw:
return self.ENCODING.RAW
else:
return self.ENCODING.NUMERIC
elif key == "mathml_version1_fonts":
return self._core.mathml_options.use_version1_font_attributes
elif key == "use_ucs_package":
return self._core.purified_tex_options.allow_ucs
elif key == "use_cjk_package":
return self._core.purified_tex_options.allow_cjk
elif key == "use_preview_package":
return self._core.purified_tex_options.allow_preview
elif key == "japanese_font":
return self._core.purified_tex_options.japanese_font
elif key == "latex_preamble":
return self._core.purified_tex_options.latex_preamble
elif key == "latex_before_math":
return self._core.purified_tex_options.latex_before_math
def set_options(self, *args, **kargs) -> None:
'''Set options of blahtex.
        You can set options with keyword arguments, like
        >> bl.set_options(indented=True)
        or with a dictionary, like
        >> opts = { 'spacing': bl.SPACING.STRICT }
        >> bl.set_options(opts)
        The list of options is given in the docstring of this class.
'''
if len(args):
if len(args) == 1 and isinstance(args[0], dict):
opts = args[0]
else:
                raise ValueError('Argument must be a dictionary '
                                 'or keyword arguments')
else:
opts = kargs
for k, v in opts.items():
setattr(self, k, v)
def get_options(self) -> dict:
'''Get all options of blahtex.
        The list of options is given in the docstring of this class.
Returns
-------
dict
Options
'''
result = {}
for key in ("indented", "texvc_compatibility", "spacing",
"disallow_plane_1", "mathml_encoding", "other_encoding",
"mathml_version1_fonts",
"use_ucs_package", "use_cjk_package", "use_preview_package",
"japanese_font", "latex_preamble", "latex_before_math"):
result[key] = getattr(self, key)
return result
def process_input(self, s: str, display_math: bool=False) -> None:
'''Set input TeX-string to blahtex.
        Parameters
        ----------
        s : str
            TeX/LaTeX/AMS-LaTeX string. Supported commands are listed in
            the blahtex-0.9 documentation.
        display_math : bool = False
            If True, the input string is treated as display math. When this
            argument is False (default), the TeX string is treated as inline math.
Raises
------
BlahtexException
If s is not recognized by blahtex.
'''
super().__setattr__('_inputted', True)
self._core.purified_tex_options.display_math = display_math
self._core.process_input(s, display_math)
def get_mathml(self) -> str:
'''Get MathML string converted by blahtex.
Returns
-------
str
            MathML converted from the TeX string
Raises
------
ValueError
If no TeX-string is inputted by ``process_input()``.
'''
if not self._inputted:
raise ValueError("no TeX-string is processed")
if self._core.purified_tex_options.display_math:
display = "block"
else:
display = "inline"
head = ('<math xmlns="http://www.w3.org/1998/Math/MathML" ' +
'display="{}">'.format(display))
body = self._core.get_mathml()
if self.indented:
return head + "\n" + textwrap.indent(body, " ") + "</math>\n"
return head + body + "</math>"
def get_purified_tex(self) -> str:
if not self._inputted:
raise ValueError("no TeX-string is processed")
return self._core.get_purified_tex()
def get_purified_tex_only(self) -> str:
if not self._inputted:
raise ValueError("no TeX-string is processed")
return self._core.get_purified_tex_only()
def convert(self, latex: str, display_math: bool=False) -> str:
'''Convert TeX-string to MathML.
        Parameters
        ----------
        latex : str
            TeX/LaTeX/AMS-LaTeX string. Supported commands are listed in
            the blahtex-0.9 documentation.
        display_math : bool = False
            If True, the input string is treated as display math. When this
            argument is False (default), the TeX string is treated as inline math.
        Returns
        -------
        str
            MathML converted from the TeX string
Raises
------
BlahtexException
If s is not recognized by blahtex.
'''
self.process_input(latex, display_math)
return self.get_mathml()
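# Minimal usage sketch of the class above. It assumes the compiled `_blahtex`
# extension module is importable on this system; the formula and option values
# are arbitrary examples of the documented API.
if __name__ == "__main__":
    bl = Blahtex(spacing=Blahtex.SPACING.MODERATE, indented=False)
    bl.texvc_compatibility = False
    # convert() runs process_input() followed by get_mathml()
    print(bl.convert(r"\frac{1}{2} + \sqrt{x}", display_math=True))
    print(bl.get_options())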
| 38.366412
| 137
| 0.616262
|
ca6b9333870c2bdac870bc495bdcfab6490e60c8
| 962
|
py
|
Python
|
final_project/run.py
|
sabbir420/Automation-with-Python
|
c689f3132ae2c55e5f5e46539e98e17547463d54
|
[
"MIT"
] | null | null | null |
final_project/run.py
|
sabbir420/Automation-with-Python
|
c689f3132ae2c55e5f5e46539e98e17547463d54
|
[
"MIT"
] | null | null | null |
final_project/run.py
|
sabbir420/Automation-with-Python
|
c689f3132ae2c55e5f5e46539e98e17547463d54
|
[
"MIT"
] | 2
|
2020-10-04T06:55:18.000Z
|
2022-03-04T19:31:39.000Z
|
#! /usr/bin/env python3
import os, glob
import requests
text_files = glob.glob("/home/student-04-e1a4e4b25306/supplier-data/descriptions/*.txt")
keys = ["name", "weight", "description","image_name"]
feed_list = []
#parsing through the text files
for files in text_files:
with open(files) as f:
dict = {}
reader = f.read().split("\n")
for i in range(3):
dict.update({keys[i] : reader[i]}) #append values to the dictionary
files = os.path.basename(files)
img = files.replace(".txt",".jpeg" )
dict.update({keys[3] : img}) #appending the image files
        feed_list.append(dict) # creating a list of dictionaries
#convert the weight value to integer
for keys in feed_list:
i = keys["weight"][:3]
keys["weight"] = i
for keys in feed_list:
keys["weight"] = int(keys["weight"])
#feed data to the website
url = "http://127.0.0.1/fruits/"
for i in range(len(feed_list)):
response = requests.post(url, json=feed_list[i])
response.raise_for_status()
| 27.485714
| 88
| 0.691268
|
54dd527fc7920703a50c2a0f7eaeb4d0e86218dd
| 3,258
|
py
|
Python
|
src/om_aiv_navigation/om_aiv_navigation/localize_at_point.py
|
zach-goh/Omron_AMR_ROS2
|
50d98b31cd1d9a1e694a92c3f59d7f173ecd5a52
|
[
"BSD-3-Clause"
] | 2
|
2022-01-04T02:55:51.000Z
|
2022-03-18T05:43:33.000Z
|
src/om_aiv_navigation/om_aiv_navigation/localize_at_point.py
|
zach-goh/Omron_AMR_ROS2
|
50d98b31cd1d9a1e694a92c3f59d7f173ecd5a52
|
[
"BSD-3-Clause"
] | 3
|
2021-08-25T13:56:12.000Z
|
2021-10-13T00:51:14.000Z
|
src/om_aiv_navigation/om_aiv_navigation/localize_at_point.py
|
zach-goh/Omron_AMR_ROS2
|
50d98b31cd1d9a1e694a92c3f59d7f173ecd5a52
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
from __future__ import print_function
import rclpy
import sys
import math
from rclpy.action import ActionClient
from rclpy.node import Node
from om_aiv_msg.action import Action
from geometry_msgs.msg import PoseWithCovarianceStamped
LOCALIZE_TO_POINT_COMMAND = "localizetopoint "
INITIAL_POSE_TOPIC = "initialpose"
class LocalizeAtPoint(Node):
# initializes class as action client to ARCL action server
def __init__(self):
super().__init__("action_client")
self._action_client = ActionClient(self, Action, 'action_server')
self.subscription = self.create_subscription(PoseWithCovarianceStamped, INITIAL_POSE_TOPIC, self.subscription_callback, 10)
    # callback for the subscription to the "initialpose" topic
def subscription_callback(self, msg):
position = msg.pose.pose.position
orientation = msg.pose.pose.orientation
degree = self.euler_from_quaternion(orientation.w, orientation.x, orientation.y, orientation.z)[2]
        degree *= 57.296 # convert radians to degrees
# ensure that AMR does not over-turn
if degree > 180:
degree -=360
localize_to_point_coordinates = str(int(position.x*1000)) + " " + str(int(position.y*1000)) + " " + str(int(degree))
# 0 0 for xySpread and angleSpread
localize_to_point_coordinates = localize_to_point_coordinates + " 0 0"
self.send_goto_point(localize_to_point_coordinates)
def send_goto_point(self, coords):
self.command = LOCALIZE_TO_POINT_COMMAND + coords
self._action_client.wait_for_server()
self.goal = Action.Goal()
self.goal.command = self.command
self.goal.identifier = ["Localizing at point"]
self._future = self._action_client.send_goal_async(self.goal, feedback_callback=self.feedback_callback)
self._future.add_done_callback(self.response_callback)
def response_callback(self, future):
if not future.result().accepted:
self.get_logger().info('Goal Rejected!')
return
return ("Goal:" + self.command)
def feedback_callback(self, feedback_msg):
feedback = feedback_msg.feedback.feed_msg
self.get_logger().info(feedback)
def euler_from_quaternion(self, rw, rx, ry, rz):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (rw * rx + ry * rz)
t1 = +1.0 - 2.0 * (rx * rx + ry * ry)
roll_x = round(math.atan2(t0, t1), 5)
t2 = +2.0 * (rw * ry - rz * rx)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = round(math.asin(t2), 5)
t3 = +2.0 * (rw * rz + rx * ry)
t4 = +1.0 - 2.0 * (ry * ry + rz * rz)
yaw_z = round(math.atan2(t3, t4), 5)
        return [roll_x, pitch_y, yaw_z] # roll, pitch, yaw in radians
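        # Illustrative check: the identity quaternion (w=1, x=y=z=0) maps to
        # [0.0, 0.0, 0.0], and w = z = 0.7071 (a 90 degree yaw about z) gives
        # yaw_z ~= 1.5708 rad.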
def main(args=None):
rclpy.init(args=args)
action_client = LocalizeAtPoint()
rclpy.spin(action_client)
if __name__ == '__main__':
main()
| 38.329412
| 131
| 0.645795
|
b0b51bc9ad66546a0a7195c35e605de582131d48
| 1,435
|
py
|
Python
|
homework_03/18_stelian_todorichkov/test_circle.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | 7
|
2021-10-05T14:54:55.000Z
|
2022-02-16T06:07:12.000Z
|
homework_03/18_stelian_todorichkov/test_circle.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | 2
|
2021-12-04T10:49:46.000Z
|
2022-02-28T06:09:06.000Z
|
homework_03/18_stelian_todorichkov/test_circle.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | null | null | null |
from circle import Circle, Point, RealativePosition
def test_same():
circle1 = Circle(Point(0.0, 0.0), 5.0)
circle2 = Circle(Point(0.0, 0.0), 5.0)
assert circle1.find_relative_position(circle2) == RealativePosition.SAME
def test_intersecting():
circle1 = Circle(Point(0.0, 0.0), 2.0)
circle2 = Circle(Point(5.0, 0.0), 5.0)
assert circle1.find_relative_position(circle2) == RealativePosition.INTERSECTING
def test_touching():
#outside
circle1 = Circle(Point(-1.0, 1.0), 3.0)
circle2 = Circle(Point(3.0, -2.0), 2.0)
assert circle1.find_relative_position(circle2) == RealativePosition.TOUCHING
#inside
circle1 = Circle(Point(-1.0, 0.0), 3.0)
circle2 = Circle(Point(3.0, -3.0), 8.0)
assert circle1.find_relative_position(circle2) == RealativePosition.TOUCHING
def test_no_common_points():
    # completely outside
circle1 = Circle(Point(0.0, 0.0), 3.0)
circle2 = Circle(Point(5.0, 0.0), 9.0)
assert circle1.find_relative_position(circle2) == RealativePosition.NO_COMMON_POINTS
    # common center
circle1 = Circle(Point(0.0, 0.0), 3.0)
circle2 = Circle(Point(0.0, 0.0), 9.0)
assert circle1.find_relative_position(circle2) == RealativePosition.NO_COMMON_POINTS
    # completely inside
circle1 = Circle(Point(0.0, 0.0), 9.0)
circle2 = Circle(Point(1.0, 0.0), 3.0)
assert circle1.find_relative_position(circle2) == RealativePosition.NO_COMMON_POINTS
| 37.763158
| 88
| 0.696864
|
b094a21dba009c5bdc56ed5155ffc215a06eb6eb
| 2,219
|
py
|
Python
|
data/db/distribute_db_import.py
|
saikatgomes/recsys
|
6bbc831ec87fdb0ea01112d6cfe22676577c029b
|
[
"Apache-2.0"
] | 1
|
2017-03-08T07:53:25.000Z
|
2017-03-08T07:53:25.000Z
|
data/db/distribute_db_import.py
|
saikatgomes/recsys
|
6bbc831ec87fdb0ea01112d6cfe22676577c029b
|
[
"Apache-2.0"
] | null | null | null |
data/db/distribute_db_import.py
|
saikatgomes/recsys
|
6bbc831ec87fdb0ea01112d6cfe22676577c029b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys,glob,io,requests,json,time,datetime,os.path,socket,random,re
from lxml import html
from random import shuffle, randint
BASE_URL="http://imdb.com"
DATA_DIR="../data/tweet"
OUT_DIR="../data/imdb/parts"
RATING_ONLY=0;
C_COUNT=0
M_COUNT=0
MOVIES=[]
# current time for logging
def getTime(f=1):
ts = time.time()
fmt='%Y-%m-%d--%H-%M-%S-%f'
dt=""
if f==0:
dt = datetime.datetime.fromtimestamp(ts).strftime(fmt)
else:
dt = datetime.datetime.fromtimestamp(ts).strftime(fmt)+"|"+socket.gethostname()+"|"+str(C_COUNT)+"|"+str(M_COUNT)+"|"
return dt
def process():
DIR="../imdb/parts"
fileList=glob.glob(DIR+'/*.json')
ln=len(fileList)
idx=range(0,ln)
shuffle(idx)
for i in range(0,ln):
aFile=fileList[idx[i]]
f_num=aFile[aFile.find("_")+1:aFile.find(".json")]
out_file=DIR+"/movies_"+f_num+".db_done"
print("SRG: outfile->"+out_file)
if os.path.isfile(out_file):
print(getTime(1)+"Aleady processed "+aFile)
else:
lockfile=aFile+".lock"
#print lockfile
if os.path.isfile(lockfile):
with open(lockfile,'r') as lk:
msg=lk.read()
print(getTime(1)+aFile+" is "+msg)
else:
with open(lockfile,"w") as lk:
lk.write("Currently proccessed by "+socket.gethostname())
print(getTime(1)+"Processing "+aFile)
try:
#get_movie_url(aFile,out_file)
#CALL YOUR FUNCTION HERE
#ONCE IT IS DONE IMPORTING CREATE out_file
print("WILL BE PROCESSING "+aFile)
except:
print "ERRR"
e=sys.exc_info()[0]
print e
fail_file=DIR+"/movies_"+f_num+".db_fail"
with open(fail_file,"w") as fl:
fl.write("Failed at "+socket.gethostname())
fl.write("ERROR:")
                        fl.write(str(e))
os.remove(lockfile)
if __name__ == "__main__":
process()
#get_movie_url()
| 29.986486
| 125
| 0.525913
|
9de12a355fb2013533bbb7541ce208323ca679cd
| 18,861
|
py
|
Python
|
vcr/patch.py
|
Smosker/vcrpy
|
a56a0726d4f325f963696087d83c82b78e2a3464
|
[
"MIT"
] | null | null | null |
vcr/patch.py
|
Smosker/vcrpy
|
a56a0726d4f325f963696087d83c82b78e2a3464
|
[
"MIT"
] | null | null | null |
vcr/patch.py
|
Smosker/vcrpy
|
a56a0726d4f325f963696087d83c82b78e2a3464
|
[
"MIT"
] | 2
|
2017-12-14T07:59:31.000Z
|
2017-12-26T10:07:55.000Z
|
'''Utilities for patching in cassettes'''
import functools
import itertools
from .compat import contextlib, mock
from .stubs import VCRHTTPConnection, VCRHTTPSConnection
from six.moves import http_client as httplib
# Save some of the original types for the purposes of unpatching
_HTTPConnection = httplib.HTTPConnection
_HTTPSConnection = httplib.HTTPSConnection
# Try to save the original types for boto3
try:
import botocore.vendored.requests.packages.urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
pass
else:
_Boto3VerifiedHTTPSConnection = cpool.VerifiedHTTPSConnection
_cpoolBoto3HTTPConnection = cpool.HTTPConnection
_cpoolBoto3HTTPSConnection = cpool.HTTPSConnection
cpool = None
# Try to save the original types for urllib3
try:
import urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
pass
else:
_VerifiedHTTPSConnection = cpool.VerifiedHTTPSConnection
_cpoolHTTPConnection = cpool.HTTPConnection
_cpoolHTTPSConnection = cpool.HTTPSConnection
# Try to save the original types for requests
try:
if not cpool:
import requests.packages.urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
pass
else:
_VerifiedHTTPSConnection = cpool.VerifiedHTTPSConnection
_cpoolHTTPConnection = cpool.HTTPConnection
_cpoolHTTPSConnection = cpool.HTTPSConnection
# Try to save the original types for httplib2
try:
import httplib2
except ImportError: # pragma: no cover
pass
else:
_HTTPConnectionWithTimeout = httplib2.HTTPConnectionWithTimeout
_HTTPSConnectionWithTimeout = httplib2.HTTPSConnectionWithTimeout
_SCHEME_TO_CONNECTION = httplib2.SCHEME_TO_CONNECTION
# Try to save the original types for boto
try:
import boto.https_connection
except ImportError: # pragma: no cover
pass
else:
_CertValidatingHTTPSConnection = boto.https_connection.CertValidatingHTTPSConnection
# Try to save the original types for Tornado
try:
import tornado.simple_httpclient
except ImportError: # pragma: no cover
pass
else:
_SimpleAsyncHTTPClient_fetch_impl = \
tornado.simple_httpclient.SimpleAsyncHTTPClient.fetch_impl
try:
import tornado.curl_httpclient
except ImportError: # pragma: no cover
pass
else:
_CurlAsyncHTTPClient_fetch_impl = \
tornado.curl_httpclient.CurlAsyncHTTPClient.fetch_impl
try:
import aiohttp.client
except ImportError: # pragma: no cover
pass
else:
_AiohttpClientSessionRequest = aiohttp.client.ClientSession._request
class CassettePatcherBuilder(object):
def _build_patchers_from_mock_triples_decorator(function):
@functools.wraps(function)
def wrapped(self, *args, **kwargs):
return self._build_patchers_from_mock_triples(
function(self, *args, **kwargs)
)
return wrapped
def __init__(self, cassette):
self._cassette = cassette
self._class_to_cassette_subclass = {}
def build(self):
return itertools.chain(
self._httplib(), self._requests(), self._boto3(), self._urllib3(),
self._httplib2(), self._boto(), self._tornado(), self._aiohttp(),
self._build_patchers_from_mock_triples(
self._cassette.custom_patches
),
)
def _build_patchers_from_mock_triples(self, mock_triples):
for args in mock_triples:
patcher = self._build_patcher(*args)
if patcher:
yield patcher
def _build_patcher(self, obj, patched_attribute, replacement_class):
if not hasattr(obj, patched_attribute):
return
return mock.patch.object(obj, patched_attribute,
self._recursively_apply_get_cassette_subclass(
replacement_class))
def _recursively_apply_get_cassette_subclass(self, replacement_dict_or_obj):
"""One of the subtleties of this class is that it does not directly
replace HTTPSConnection with `VCRRequestsHTTPSConnection`, but a
subclass of the aforementioned class that has the `cassette`
class attribute assigned to `self._cassette`. This behavior is
necessary to properly support nested cassette contexts.
This function exists to ensure that we use the same class
object (reference) to patch everything that replaces
VCRRequestHTTP[S]Connection, but that we can talk about
patching them with the raw references instead, and without
worrying about exactly where the subclass with the relevant
value for `cassette` is first created.
The function is recursive because it looks in to dictionaries
and replaces class values at any depth with the subclass
described in the previous paragraph.
"""
if isinstance(replacement_dict_or_obj, dict):
for key, replacement_obj in replacement_dict_or_obj.items():
replacement_obj = self._recursively_apply_get_cassette_subclass(
replacement_obj)
replacement_dict_or_obj[key] = replacement_obj
return replacement_dict_or_obj
if hasattr(replacement_dict_or_obj, 'cassette'):
replacement_dict_or_obj = self._get_cassette_subclass(
replacement_dict_or_obj)
return replacement_dict_or_obj
def _get_cassette_subclass(self, klass):
if klass.cassette is not None:
return klass
if klass not in self._class_to_cassette_subclass:
subclass = self._build_cassette_subclass(klass)
self._class_to_cassette_subclass[klass] = subclass
return self._class_to_cassette_subclass[klass]
def _build_cassette_subclass(self, base_class):
bases = (base_class,)
if not issubclass(base_class, object): # Check for old style class
bases += (object,)
return type('{0}{1}'.format(base_class.__name__, self._cassette._path),
bases, dict(cassette=self._cassette))
@_build_patchers_from_mock_triples_decorator
def _httplib(self):
yield httplib, 'HTTPConnection', VCRHTTPConnection
yield httplib, 'HTTPSConnection', VCRHTTPSConnection
def _requests(self):
try:
from .stubs import requests_stubs
except ImportError: # pragma: no cover
return ()
return self._urllib3_patchers(cpool, requests_stubs)
def _boto3(self):
try:
import botocore.vendored.requests.packages.urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
return ()
from .stubs import boto3_stubs
return self._urllib3_patchers(cpool, boto3_stubs)
def _patched_get_conn(self, connection_pool_class, connection_class_getter):
get_conn = connection_pool_class._get_conn
@functools.wraps(get_conn)
def patched_get_conn(pool, timeout=None):
connection = get_conn(pool, timeout)
connection_class = (
pool.ConnectionCls if hasattr(pool, 'ConnectionCls')
else connection_class_getter())
# We need to make sure that we are actually providing a
# patched version of the connection class. This might not
# always be the case because the pool keeps previously
# used connections (which might actually be of a different
# class) around. This while loop will terminate because
# eventually the pool will run out of connections.
while not isinstance(connection, connection_class):
connection = get_conn(pool, timeout)
return connection
return patched_get_conn
def _patched_new_conn(self, connection_pool_class, connection_remover):
new_conn = connection_pool_class._new_conn
@functools.wraps(new_conn)
def patched_new_conn(pool):
new_connection = new_conn(pool)
connection_remover.add_connection_to_pool_entry(pool, new_connection)
return new_connection
return patched_new_conn
def _urllib3(self):
try:
import urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
return ()
from .stubs import urllib3_stubs
return self._urllib3_patchers(cpool, urllib3_stubs)
@_build_patchers_from_mock_triples_decorator
def _httplib2(self):
try:
import httplib2 as cpool
except ImportError: # pragma: no cover
pass
else:
from .stubs.httplib2_stubs import VCRHTTPConnectionWithTimeout
from .stubs.httplib2_stubs import VCRHTTPSConnectionWithTimeout
yield cpool, 'HTTPConnectionWithTimeout', VCRHTTPConnectionWithTimeout
yield cpool, 'HTTPSConnectionWithTimeout', VCRHTTPSConnectionWithTimeout
yield cpool, 'SCHEME_TO_CONNECTION', {'http': VCRHTTPConnectionWithTimeout,
'https': VCRHTTPSConnectionWithTimeout}
@_build_patchers_from_mock_triples_decorator
def _boto(self):
try:
import boto.https_connection as cpool
except ImportError: # pragma: no cover
pass
else:
from .stubs.boto_stubs import VCRCertValidatingHTTPSConnection
yield cpool, 'CertValidatingHTTPSConnection', VCRCertValidatingHTTPSConnection
@_build_patchers_from_mock_triples_decorator
def _tornado(self):
try:
import tornado.simple_httpclient as simple
except ImportError: # pragma: no cover
pass
else:
from .stubs.tornado_stubs import vcr_fetch_impl
new_fetch_impl = vcr_fetch_impl(
self._cassette, _SimpleAsyncHTTPClient_fetch_impl
)
yield simple.SimpleAsyncHTTPClient, 'fetch_impl', new_fetch_impl
try:
import tornado.curl_httpclient as curl
except ImportError: # pragma: no cover
pass
else:
from .stubs.tornado_stubs import vcr_fetch_impl
new_fetch_impl = vcr_fetch_impl(
self._cassette, _CurlAsyncHTTPClient_fetch_impl
)
yield curl.CurlAsyncHTTPClient, 'fetch_impl', new_fetch_impl
@_build_patchers_from_mock_triples_decorator
def _aiohttp(self):
try:
import aiohttp.client as client
except ImportError: # pragma: no cover
pass
else:
from .stubs.aiohttp_stubs import vcr_request
new_request = vcr_request(
self._cassette, _AiohttpClientSessionRequest
)
yield client.ClientSession, '_request', new_request
def _urllib3_patchers(self, cpool, stubs):
http_connection_remover = ConnectionRemover(
self._get_cassette_subclass(stubs.VCRRequestsHTTPConnection)
)
https_connection_remover = ConnectionRemover(
self._get_cassette_subclass(stubs.VCRRequestsHTTPSConnection)
)
mock_triples = (
(cpool, 'VerifiedHTTPSConnection', stubs.VCRRequestsHTTPSConnection),
(cpool, 'HTTPConnection', stubs.VCRRequestsHTTPConnection),
(cpool, 'HTTPSConnection', stubs.VCRRequestsHTTPSConnection),
(cpool, 'is_connection_dropped', mock.Mock(return_value=False)), # Needed on Windows only
(cpool.HTTPConnectionPool, 'ConnectionCls', stubs.VCRRequestsHTTPConnection),
(cpool.HTTPSConnectionPool, 'ConnectionCls', stubs.VCRRequestsHTTPSConnection),
)
# These handle making sure that sessions only use the
# connections of the appropriate type.
mock_triples += ((cpool.HTTPConnectionPool, '_get_conn',
self._patched_get_conn(cpool.HTTPConnectionPool,
lambda: cpool.HTTPConnection)),
(cpool.HTTPSConnectionPool, '_get_conn',
self._patched_get_conn(cpool.HTTPSConnectionPool,
lambda: cpool.HTTPSConnection)),
(cpool.HTTPConnectionPool, '_new_conn',
self._patched_new_conn(cpool.HTTPConnectionPool,
http_connection_remover)),
(cpool.HTTPSConnectionPool, '_new_conn',
self._patched_new_conn(cpool.HTTPSConnectionPool,
https_connection_remover)))
return itertools.chain(self._build_patchers_from_mock_triples(mock_triples),
(http_connection_remover, https_connection_remover))
class ConnectionRemover(object):
def __init__(self, connection_class):
self._connection_class = connection_class
self._connection_pool_to_connections = {}
def add_connection_to_pool_entry(self, pool, connection):
if isinstance(connection, self._connection_class):
self._connection_pool_to_connections.setdefault(pool, set()).add(connection)
def remove_connection_to_pool_entry(self, pool, connection):
if isinstance(connection, self._connection_class):
self._connection_pool_to_connections[self._connection_class].remove(connection)
def __enter__(self):
return self
def __exit__(self, *args):
for pool, connections in self._connection_pool_to_connections.items():
readd_connections = []
while pool.pool and not pool.pool.empty() and connections:
connection = pool.pool.get()
if isinstance(connection, self._connection_class):
connections.remove(connection)
else:
readd_connections.append(connection)
for connection in readd_connections:
pool._put_conn(connection)
def reset_patchers():
yield mock.patch.object(httplib, 'HTTPConnection', _HTTPConnection)
yield mock.patch.object(httplib, 'HTTPSConnection', _HTTPSConnection)
try:
import requests
if requests.__build__ < 0x021603:
# Avoid double unmock if requests 2.16.3
# First, this is pointless, requests.packages.urllib3 *IS* urllib3 (see packages.py)
# Second, this is unmocking twice the same classes with different namespaces
# and is creating weird issues and bugs:
# > AssertionError: assert <class 'urllib3.connection.HTTPConnection'>
# > is <class 'requests.packages.urllib3.connection.HTTPConnection'>
# This assert should work!!!
# Note that this also means that now, requests.packages is never imported
# if requests 2.16.3 or greater is used with VCRPy.
import requests.packages.urllib3.connectionpool as cpool
else:
raise ImportError("Skip requests not vendored anymore")
except ImportError: # pragma: no cover
pass
else:
# unpatch requests v1.x
yield mock.patch.object(cpool, 'VerifiedHTTPSConnection', _VerifiedHTTPSConnection)
yield mock.patch.object(cpool, 'HTTPConnection', _cpoolHTTPConnection)
# unpatch requests v2.x
if hasattr(cpool.HTTPConnectionPool, 'ConnectionCls'):
yield mock.patch.object(cpool.HTTPConnectionPool, 'ConnectionCls',
_cpoolHTTPConnection)
yield mock.patch.object(cpool.HTTPSConnectionPool, 'ConnectionCls',
_cpoolHTTPSConnection)
if hasattr(cpool, 'HTTPSConnection'):
yield mock.patch.object(cpool, 'HTTPSConnection', _cpoolHTTPSConnection)
try:
import urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
pass
else:
yield mock.patch.object(cpool, 'VerifiedHTTPSConnection', _VerifiedHTTPSConnection)
yield mock.patch.object(cpool, 'HTTPConnection', _cpoolHTTPConnection)
yield mock.patch.object(cpool, 'HTTPSConnection', _cpoolHTTPSConnection)
if hasattr(cpool.HTTPConnectionPool, 'ConnectionCls'):
yield mock.patch.object(cpool.HTTPConnectionPool, 'ConnectionCls', _cpoolHTTPConnection)
yield mock.patch.object(cpool.HTTPSConnectionPool, 'ConnectionCls', _cpoolHTTPSConnection)
try:
import botocore.vendored.requests.packages.urllib3.connectionpool as cpool
except ImportError: # pragma: no cover
pass
else:
# unpatch requests v1.x
yield mock.patch.object(cpool, 'VerifiedHTTPSConnection', _Boto3VerifiedHTTPSConnection)
yield mock.patch.object(cpool, 'HTTPConnection', _cpoolBoto3HTTPConnection)
# unpatch requests v2.x
if hasattr(cpool.HTTPConnectionPool, 'ConnectionCls'):
yield mock.patch.object(cpool.HTTPConnectionPool, 'ConnectionCls',
_cpoolBoto3HTTPConnection)
yield mock.patch.object(cpool.HTTPSConnectionPool, 'ConnectionCls',
_cpoolBoto3HTTPSConnection)
if hasattr(cpool, 'HTTPSConnection'):
yield mock.patch.object(cpool, 'HTTPSConnection', _cpoolBoto3HTTPSConnection)
try:
import httplib2 as cpool
except ImportError: # pragma: no cover
pass
else:
yield mock.patch.object(cpool, 'HTTPConnectionWithTimeout', _HTTPConnectionWithTimeout)
yield mock.patch.object(cpool, 'HTTPSConnectionWithTimeout', _HTTPSConnectionWithTimeout)
yield mock.patch.object(cpool, 'SCHEME_TO_CONNECTION', _SCHEME_TO_CONNECTION)
try:
import boto.https_connection as cpool
except ImportError: # pragma: no cover
pass
else:
yield mock.patch.object(cpool, 'CertValidatingHTTPSConnection',
_CertValidatingHTTPSConnection)
try:
import tornado.simple_httpclient as simple
except ImportError: # pragma: no cover
pass
else:
yield mock.patch.object(
simple.SimpleAsyncHTTPClient,
'fetch_impl',
_SimpleAsyncHTTPClient_fetch_impl,
)
try:
import tornado.curl_httpclient as curl
except ImportError: # pragma: no cover
pass
else:
yield mock.patch.object(
curl.CurlAsyncHTTPClient,
'fetch_impl',
_CurlAsyncHTTPClient_fetch_impl,
)
@contextlib.contextmanager
def force_reset():
with contextlib.ExitStack() as exit_stack:
for patcher in reset_patchers():
exit_stack.enter_context(patcher)
yield
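# Illustrative usage note (a sketch, not taken from the vcrpy docs): the
# patchers built above are ordinary context managers, so temporarily restoring
# the unpatched connection classes looks like:
#
#     with force_reset():
#         ...  # real, unrecorded HTTP traffic can happen here
#
# and CassettePatcherBuilder(cassette).build() yields the patchers that a
# cassette enters while it is active.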
| 39.959746
| 102
| 0.66831
|
8c828f51679330db431d1d2fa0a60eb275113e7b
| 9,293
|
py
|
Python
|
tests/parser/functions/test_raw_call.py
|
abdullathedruid/vyper
|
02b1b207f453b704cf1c491741bc85be9168a373
|
[
"Apache-2.0"
] | 2
|
2022-02-08T16:17:10.000Z
|
2022-03-06T11:01:46.000Z
|
tests/parser/functions/test_raw_call.py
|
abdullathedruid/vyper
|
02b1b207f453b704cf1c491741bc85be9168a373
|
[
"Apache-2.0"
] | 4
|
2018-12-06T23:21:02.000Z
|
2022-02-07T15:28:01.000Z
|
tests/parser/functions/test_raw_call.py
|
charles-cooper/vyper
|
bbbd8618f8427d416d6751214dd560872f8848f3
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from hexbytes import HexBytes
from vyper import compiler
from vyper.builtin_functions import get_create_forwarder_to_bytecode
from vyper.exceptions import ArgumentException, StateAccessViolation
pytestmark = pytest.mark.usefixtures("memory_mocker")
def test_max_outsize_exceeds_returndatasize(get_contract):
source_code = """
@external
def foo() -> Bytes[7]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=7)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_raw_call_non_memory(get_contract):
source_code = """
_foo: Bytes[5]
@external
def foo() -> Bytes[5]:
self._foo = b"moose"
return raw_call(0x0000000000000000000000000000000000000004, self._foo, max_outsize=5)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_returndatasize_exceeds_max_outsize(get_contract):
source_code = """
@external
def foo() -> Bytes[3]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=3)
"""
c = get_contract(source_code)
assert c.foo() == b"moo"
def test_returndatasize_matches_max_outsize(get_contract):
source_code = """
@external
def foo() -> Bytes[5]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=5)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_multiple_levels(w3, get_contract_with_gas_estimation):
inner_code = """
@external
def returnten() -> int128:
return 10
"""
c = get_contract_with_gas_estimation(inner_code)
outer_code = """
@external
def create_and_call_returnten(inp: address) -> int128:
x: address = create_forwarder_to(inp)
o: int128 = extract32(raw_call(x, convert("\xd0\x1f\xb1\xb8", Bytes[4]), max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501
return o
@external
def create_and_return_forwarder(inp: address) -> address:
x: address = create_forwarder_to(inp)
return x
"""
c2 = get_contract_with_gas_estimation(outer_code)
assert c2.create_and_call_returnten(c.address) == 10
c2.create_and_call_returnten(c.address, transact={})
_, preamble, callcode = get_create_forwarder_to_bytecode()
c3 = c2.create_and_return_forwarder(c.address, call={})
c2.create_and_return_forwarder(c.address, transact={})
c3_contract_code = w3.toBytes(w3.eth.get_code(c3))
assert c3_contract_code[:10] == HexBytes(preamble)
assert c3_contract_code[-15:] == HexBytes(callcode)
print("Passed forwarder test")
# TODO: This one is special
# print(f'Gas consumed: {(chain.head_state.receipts[-1].gas_used - chain.head_state.receipts[-2].gas_used - chain.last_tx.intrinsic_gas_used)}') # noqa: E501
def test_multiple_levels2(assert_tx_failed, get_contract_with_gas_estimation):
inner_code = """
@external
def returnten() -> int128:
assert False
return 10
"""
c = get_contract_with_gas_estimation(inner_code)
outer_code = """
@external
def create_and_call_returnten(inp: address) -> int128:
x: address = create_forwarder_to(inp)
o: int128 = extract32(raw_call(x, convert("\xd0\x1f\xb1\xb8", Bytes[4]), max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501
return o
@external
def create_and_return_forwarder(inp: address) -> address:
return create_forwarder_to(inp)
"""
c2 = get_contract_with_gas_estimation(outer_code)
assert_tx_failed(lambda: c2.create_and_call_returnten(c.address))
print("Passed forwarder exception test")
def test_delegate_call(w3, get_contract):
inner_code = """
a: address # this is required for storage alignment...
owners: public(address[5])
@external
def set_owner(i: int128, o: address):
self.owners[i] = o
"""
inner_contract = get_contract(inner_code)
outer_code = """
owner_setter_contract: public(address)
owners: public(address[5])
@external
def __init__(_owner_setter: address):
self.owner_setter_contract = _owner_setter
@external
def set(i: int128, owner: address):
# delegate setting owners to other contract.s
cdata: Bytes[68] = concat(method_id("set_owner(int128,address)"), convert(i, bytes32), convert(owner, bytes32)) # noqa: E501
raw_call(
self.owner_setter_contract,
cdata,
gas=msg.gas,
max_outsize=0,
is_delegate_call=True
)
"""
a0, a1, a2 = w3.eth.accounts[:3]
outer_contract = get_contract(outer_code, *[inner_contract.address])
# Test setting on inners contract's state setting works.
inner_contract.set_owner(1, a2, transact={})
assert inner_contract.owners(1) == a2
# Confirm outer contract's state is empty and contract to call has been set.
assert outer_contract.owner_setter_contract() == inner_contract.address
assert outer_contract.owners(1) is None
# Call outer contract, that make a delegate call to inner_contract.
tx_hash = outer_contract.set(1, a1, transact={})
assert w3.eth.get_transaction_receipt(tx_hash)["status"] == 1
assert outer_contract.owners(1) == a1
def test_gas(get_contract, assert_tx_failed):
inner_code = """
bar: bytes32
@external
def foo(_bar: bytes32):
self.bar = _bar
"""
inner_contract = get_contract(inner_code)
outer_code = """
@external
def foo_call(_addr: address):
cdata: Bytes[40] = concat(
method_id("foo(bytes32)"),
0x0000000000000000000000000000000000000000000000000000000000000001
)
raw_call(_addr, cdata, max_outsize=0{})
"""
# with no gas value given, enough will be forwarded to complete the call
outer_contract = get_contract(outer_code.format(""))
outer_contract.foo_call(inner_contract.address)
# manually specifying a sufficient amount should succeed
outer_contract = get_contract(outer_code.format(", gas=50000"))
outer_contract.foo_call(inner_contract.address)
# manually specifying an insufficient amount should fail
outer_contract = get_contract(outer_code.format(", gas=15000"))
assert_tx_failed(lambda: outer_contract.foo_call(inner_contract.address))
def test_static_call(get_contract):
target_source = """
@external
@view
def foo() -> int128:
return 42
"""
caller_source = """
@external
@view
def foo(_addr: address) -> int128:
_response: Bytes[32] = raw_call(
_addr,
method_id("foo()"),
max_outsize=32,
is_static_call=True,
)
return convert(_response, int128)
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert caller.foo(target.address) == 42
def test_static_call_fails_nonpayable(get_contract, assert_tx_failed):
target_source = """
baz: int128
@external
def foo() -> int128:
self.baz = 31337
return self.baz
"""
caller_source = """
@external
@view
def foo(_addr: address) -> int128:
_response: Bytes[32] = raw_call(
_addr,
method_id("foo()"),
max_outsize=32,
is_static_call=True,
)
return convert(_response, int128)
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert_tx_failed(lambda: caller.foo(target.address))
def test_checkable_raw_call(get_contract, assert_tx_failed):
target_source = """
baz: int128
@external
def fail1(should_raise: bool):
if should_raise:
raise "fail"
# test both paths for raw_call -
# they are different depending if callee has or doesn't have returntype
@external
def fail2(should_raise: bool) -> int128:
if should_raise:
self.baz = self.baz + 1
return self.baz
"""
caller_source = """
@external
@view
def foo(_addr: address, should_raise: bool) -> uint256:
success: bool = True
response: Bytes[32] = b""
success, response = raw_call(
_addr,
_abi_encode(should_raise, method_id=method_id("fail1(bool)")),
max_outsize=32,
is_static_call=True,
revert_on_failure=False,
)
assert success == (not should_raise)
return 1
@external
@view
def bar(_addr: address, should_raise: bool) -> uint256:
success: bool = True
response: Bytes[32] = b""
success, response = raw_call(
_addr,
_abi_encode(should_raise, method_id=method_id("fail2(bool)")),
max_outsize=32,
is_static_call=True,
revert_on_failure=False,
)
assert success == (not should_raise)
return 2
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert caller.foo(target.address, True) == 1
assert caller.foo(target.address, False) == 1
assert caller.bar(target.address, True) == 2
assert caller.bar(target.address, False) == 2
uncompilable_code = [
(
"""
@external
@view
def foo(_addr: address):
raw_call(_addr, method_id("foo()"))
""",
StateAccessViolation,
),
(
"""
@external
def foo(_addr: address):
raw_call(_addr, method_id("foo()"), is_delegate_call=True, is_static_call=True)
""",
ArgumentException,
),
]
@pytest.mark.parametrize("source_code,exc", uncompilable_code)
def test_invalid_type_exception(source_code, exc):
with pytest.raises(exc):
compiler.compile_code(source_code)
| 26.627507
| 162
| 0.696976
|
5fd383b7dee34a5dfdb36134a44adbf2699c5dfe
| 17,393
|
py
|
Python
|
Next_gen_systems/Blockchain/myblockchain_mod.py
|
UAH-s-Telematics-Engineering-Tasks/ortega_collado_telematics_reports
|
7759fda9552c5a59f37b137c20357d4da0f29e21
|
[
"MIT"
] | null | null | null |
Next_gen_systems/Blockchain/myblockchain_mod.py
|
UAH-s-Telematics-Engineering-Tasks/ortega_collado_telematics_reports
|
7759fda9552c5a59f37b137c20357d4da0f29e21
|
[
"MIT"
] | null | null | null |
Next_gen_systems/Blockchain/myblockchain_mod.py
|
UAH-s-Telematics-Engineering-Tasks/ortega_collado_telematics_reports
|
7759fda9552c5a59f37b137c20357d4da0f29e21
|
[
"MIT"
] | 1
|
2020-05-17T18:50:36.000Z
|
2020-05-17T18:50:36.000Z
|
from hashlib import sha256
from flask import Flask, jsonify, request
import requests, json, time, copy
class Block:
def __init__(self, index, transactions, timestamp, previous_hash, nonce = 0):
self.index = index
self.transactions = transactions
self.timestamp = timestamp
self.previous_hash = previous_hash
self.nonce = nonce
def compute_hash(self):
block_string = json.dumps(self.__dict__, sort_keys = True)
return sha256(block_string.encode()).hexdigest()
class Blockchain:
difficulty = 4
max_unconfirmed_txs = 4
def __init__(self):
self.unconfirmed_transactions = []
self.chain = []
self.create_genesis_block()
@property
def last_block(self):
return self.chain[-1]
###################################################################################################################################################
################################################################# New/Tweaked Methods #############################################################
###################################################################################################################################################
def create_genesis_block(self):
genesis_block = Block(0, [], 0, "0")
        # We could hard-code the nonce value (nonce = 25163) since the genesis block will always be the same!
        # In that case there is no need to call proof_of_work(); we would simply compute the plain hash by
        # uncommenting the compute_hash() line below. (Use either that line or the proof_of_work() one, not both.)
genesis_block.hash = self.proof_of_work(genesis_block)
# genesis_block.hash = genesis_block.compute_hash()
self.chain.append(genesis_block)
    # This property returns the length of the list of transactions waiting to be confirmed
@property
def unconfirmed_tx(self):
return len(self.unconfirmed_transactions)
    # As we will see in an endpoint further down, this property returns a recursive copy of the chain.
    # That is, we copy the Block objects of the chain and we also copy all the attributes of those
    # blocks! Otherwise the references held inside the blocks themselves would always be the same
    # and we would end up overwriting the transactions of a block, for example.
@property
def chain_cpy(self):
        # To return this list we use a list comprehension that builds a list with each
        # block deep-copied. In short, we generate a chain identical to the one we have but completely independent.
return [copy.deepcopy(blk) for blk in self.chain]
@classmethod
def check_chain_validity(cls, chain):
previous_hash = "0"
for block in chain:
block_hash = block.hash
delattr(block, "hash")
            # We have to remove the reward for having mined the block
try:
block.transactions.pop()
except IndexError:
                # The genesis block has no transactions, so calling pop() on an empty list raises this exception...
                # We simply ignore it and move on
pass
            # If the block is not valid, bail out returning False
if (not cls.is_valid_proof(block, block_hash) or previous_hash != block.previous_hash) and block.timestamp != 0:
return False
block.hash, previous_hash = block_hash, block_hash
return True
    # These two methods just change a couple of class attributes to the values we pass in.
@staticmethod
def change_difficulty(diff):
Blockchain.difficulty = diff
@staticmethod
def change_max_pending_txs(pend):
Blockchain.max_unconfirmed_txs = pend
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
def add_block(self, block, proof):
previous_hash = self.last_block.hash
if previous_hash != block.previous_hash or not Blockchain.is_valid_proof(block, proof):
return False
reward_trx = {
'Sender': "Blockchain Master",
'Recipient': "Node_Identifier",
'Amount': 1,
'Timestamp': time.time(),
}
        self.add_new_transaction(reward_trx)  # use this instance rather than the global node blockchain
block.hash = proof
self.chain.append(block)
return True
@staticmethod
def proof_of_work(block):
computed_hash = block.compute_hash()
while not computed_hash.startswith('0' * Blockchain.difficulty):
block.nonce += 1
computed_hash = block.compute_hash()
return computed_hash
def add_new_transaction(self, transaction):
self.unconfirmed_transactions.append(transaction)
@staticmethod
def is_valid_proof(block, block_hash):
return block_hash.startswith('0' * Blockchain.difficulty) and block_hash == block.compute_hash()
def mine(self):
if not self.unconfirmed_transactions:
return False
last_block = self.last_block
new_block = Block(index = last_block.index + 1,
transactions = self.unconfirmed_transactions,
timestamp = time.time(),
previous_hash = last_block.hash)
proof = self.proof_of_work(new_block)
self.add_block(new_block, proof)
self.unconfirmed_transactions = []
return True
app = Flask(__name__)
blockchain = Blockchain()
peers = set()
###################################################################################################################################################
############################################################### New/Tweaked Endpoints #############################################################
###################################################################################################################################################
# When we talk about an "endpoint" we mean the "end" of the URLs used in the requests made to the server. Each
# @app.route decorator defines a new endpoint.
# Here we just pick up the value passed in a JSON object to change the difficulty we impose on blocks.
# If you import the Postman collection in the file 'Postman_API_calls.json' in this same folder you can see an example
# of the JSON object that must go in the call.
@app.route('/change_difficulty', methods = ['POST'])
def update_difficulty():
tx_data = request.get_json()
    # Check that the JSON object is well formed
if not tx_data.get("n_diff"):
return "Invalid data...", 400
n_diff = tx_data['n_diff']
    # Check that the value makes sense
if n_diff <= 0:
return "Bad value...", 400
    # Call the method we defined for changing the difficulty.
blockchain.change_difficulty(n_diff)
return "New difficulty -> {}".format(n_diff), 201
# This endpoint is just like the previous one, but it changes the maximum number of pending transactions allowed.
@app.route('/change_max_pending_txs', methods = ['POST'])
def update_max_pending_txs():
tx_data = request.get_json()
if not tx_data.get("n_pend"):
return "Invalid data...", 400
n_pend = tx_data['n_pend']
if n_pend <= 0:
return "Bad value...", 400
blockchain.change_max_pending_txs(n_pend)
return "New maximum pending transactions -> {}".format(n_pend), 201
@app.route('/validate_chain', methods = ['GET'])
def validate_chain():
    # If we passed the chain by reference we would modify the original...
    # that is why we pass the chain_cpy attribute, which calls the chain_cpy()
    # property of the Blockchain class to make a recursive copy of the chain
    # itself and return it
if blockchain.check_chain_validity(blockchain.chain_cpy):
return "Valid chain!", 200
return "The chain has been tampered with!", 200
# With this endpoint we can tamper with a block based on what is passed in a JSON object.
# The parameters to specify are the block to modify, the transaction within that block and the
# new recipient of the transaction. The values identifying the block and the transaction are indices
# ranging from 0 to 'number of blocks - 1' or 'number of transactions - 1' respectively.
# If invalid values are given the server will raise an error and possibly hang... An improvement would be
# to check that the values are valid against the length of the chain and of the transactions of the
# selected block :P
# That check can be done with len() or by catching the IndexError exception that is raised
# when the index is out of range for the list in question.
@app.route('/tamper_chain', methods = ['POST'])
def tamper_chain():
tx_data = request.get_json()
if tx_data.get("blk_n") == None or tx_data.get("tx_n") == None or tx_data.get("new_recipient") == None:
return "Invalid data...", 400
blk_n = tx_data['blk_n']
tx_n = tx_data['tx_n']
new_recipient = tx_data['new_recipient']
if blk_n < 0 or blk_n > len(blockchain.chain) - 1:
return "Invalid data...", 400
blockchain.chain[blk_n].transactions[tx_n]['Recipient'] = new_recipient
return "Changed the chain!", 200
# The only thing added here is a check for whether the pending transactions have reached the maximum, in which case a block is mined automatically.
@app.route('/new_transaction', methods = ['POST'])
def new_transaction():
tx_data = request.get_json()
required_fields = ["Recipient", "Sender", "Amount"]
for field in required_fields:
if not tx_data.get(field):
return "Invalid transaction data", 400
tx_data["timestamp"] = time.time()
blockchain.add_new_transaction(tx_data)
    # If we have reached the maximum...
if blockchain.unconfirmed_tx == Blockchain.max_unconfirmed_txs:
        # Mine a block by making a request from the server to the server itself. It may seem odd but it is perfectly possible
        # thanks to the loopback address.
requests.get('http://127.0.0.1:{}/mine'.format(port))
return "Automatically mined block #{} as we reached {} pending transactions".format(blockchain.last_block.index, Blockchain.max_unconfirmed_txs), 201
return "Success", 201
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods = ['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
@app.route('/chain', methods = ['GET'])
def get_chain():
chain_data = []
for block in blockchain.chain:
chain_data.append(block.__dict__)
response = {
"length": len(chain_data),
"chain": chain_data,
"peers": list(peers)
}
return jsonify(response), 200
@app.route('/mine', methods = ['GET'])
def mine_unconfirmed_transactions():
result = blockchain.mine()
if not result:
return "No transactions to mine"
else:
        # Get the length of our blockchain
chain_length = len(blockchain.chain)
consensus()
if chain_length == len(blockchain.chain):
            # If our blockchain was the longest one, announce to the rest that we have added a new block.
announce_new_block(blockchain.last_block)
return "Block #{} is mined.".format(blockchain.last_block.index)
@app.route('/register_node', methods = ['POST'])
def register_new_peers():
node_address = request.get_json()["node_address"]
if not node_address:
return "Invalid data", 400
    # Add the node to the list
peers.add(node_address)
    # Hand over the current blockchain so that it can sync up with us
return get_chain()
@app.route('/register_with', methods = ['POST'])
def register_with_existing_node():
node_address = request.get_json()["node_address"]
if not node_address:
return "Invalid data", 400
data = {"node_address": request.host_url}
headers = {'Content-Type': "application/json"}
    # Make a request to register ourselves with the node specified in the call to '/register_with'
response = requests.post(node_address + "/register_node", data = json.dumps(data), headers = headers)
    # If we registered successfully
if response.status_code == 200:
global blockchain
global peers
        # Update our copy of the blockchain and the list of peers (nodes) in the network
chain_dump = response.json()['chain']
blockchain = create_chain_from_dump(chain_dump)
peers.update(response.json()['peers'])
        # Return a 200 OK
return "Registration successful", 200
else:
        # Any error is handled through the response API
return response.content, response.status_code
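# --- Illustrative client sketch (not part of the original node code) ---
# How a freshly started node could join an existing network by calling its
# own '/register_with' endpoint; the ports below are assumptions.
def _example_join_network(my_port=8001, peer_port=8000):
    payload = {"node_address": "http://127.0.0.1:{}".format(peer_port)}
    return requests.post("http://127.0.0.1:{}/register_with".format(my_port), json=payload)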
def create_chain_from_dump(chain_dump):
generated_blockchain = Blockchain()
for idx, block_data in enumerate(chain_dump):
if idx == 0:
            # Skip the genesis block, since it was already created when instantiating an empty blockchain.
continue
block = Block(block_data["index"],
block_data["transactions"],
block_data["timestamp"],
block_data["previous_hash"],
block_data["nonce"])
proof = block_data['hash']
        # add_block() checks that the block is valid before adding it. If a block
        # was not added, raise an exception warning that the chain we received is
        # not correct and has been tampered with.
added = generated_blockchain.add_block(block, proof)
if not added:
raise Exception("The chain dump is tampered!!")
    # If everything went well, return the reconstructed chain.
return generated_blockchain
@app.route('/add_block', methods = ['POST'])
def verify_and_add_block():
block_data = request.get_json()
block = Block(block_data["index"],
block_data["transactions"],
block_data["timestamp"],
block_data["previous_hash"],
block_data["nonce"])
proof = block_data['hash']
added = blockchain.add_block(block, proof)
if not added:
return "The block was discarded by the node", 400
return "Block added to the chain", 201
# Requests to '/pending_tx' return the node's list of pending transactions as
# a JSON object
@app.route('/pending_tx')
def get_pending_tx():
return json.dumps(blockchain.unconfirmed_transactions)
def consensus():
global blockchain
longest_chain = None
current_len = len(blockchain.chain)
for node in peers:
        # Get an object containing the chain length through the
        # '/chain' endpoint defined on line 387
response = requests.get('{}chain'.format(node))
        # Access elements of this object
length = response.json()['length']
chain = response.json()['chain']
if length > current_len and blockchain.check_chain_validity(chain):
current_len = length
longest_chain = chain
if longest_chain:
blockchain = longest_chain
return True
return False
def announce_new_block(block):
for peer in peers:
url = "{}add_block".format(peer)
headers = {'Content-Type': "application/json"}
requests.post(url, data = json.dumps(block.__dict__, sort_keys = True), headers = headers)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default = 8000, type = int, help = 'port to listen on')
args = parser.parse_args()
port = args.port
app.run(host = '0.0.0.0', port = port)
| 40.543124
| 158
| 0.588858
|
43e9c9f31ee37cd2340c2682b8290fca1c91cecc
| 10,288
|
py
|
Python
|
tokenization.py
|
a1da4/bert-japanese
|
a8e74022d589cefb00a3467ca43f38e62db06908
|
[
"Apache-2.0"
] | null | null | null |
tokenization.py
|
a1da4/bert-japanese
|
a8e74022d589cefb00a3467ca43f38e62db06908
|
[
"Apache-2.0"
] | null | null | null |
tokenization.py
|
a1da4/bert-japanese
|
a8e74022d589cefb00a3467ca43f38e62db06908
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team,
# and Masatoshi Suzuki.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Japanese BERT models."""
import collections
import logging
import os
import unicodedata
from transformers import BertTokenizer, WordpieceTokenizer
from transformers.tokenization_bert import load_vocab
logger = logging.getLogger(__name__)
class MecabBertTokenizer(BertTokenizer):
"""BERT tokenizer for Japanese text; MeCab tokenization + WordPiece"""
def __init__(self, vocab_file, do_lower_case=False,
do_basic_tokenize=True, do_wordpiece_tokenize=True,
mecab_dict_path=None, unk_token='[UNK]', sep_token='[SEP]',
pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
"""Constructs a MecabBertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file.
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization with MeCab before wordpiece.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
cls_token=cls_token, mask_token=mask_token, **kwargs)
self.vocab = load_vocab(vocab_file)
print(self.vocab)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
print(self.max_len_single_sentence)
print(self.max_len_sentences_pair)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'.".format(vocab_file))
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
self.do_wordpiece_tokenize = do_wordpiece_tokenize
if do_basic_tokenize:
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case,
mecab_dict_path=mecab_dict_path)
if do_wordpiece_tokenize:
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=self.unk_token)
def _tokenize(self, text):
if self.do_basic_tokenize:
tokens = self.basic_tokenizer.tokenize(text,
never_split=self.all_special_tokens)
else:
tokens = [text]
if self.do_wordpiece_tokenize:
split_tokens = [sub_token for token in tokens
for sub_token in self.wordpiece_tokenizer.tokenize(token)]
else:
split_tokens = tokens
return split_tokens
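# --- Illustrative usage sketch (not part of the original module) ---
# 'vocab.txt' is an assumed path to a one-wordpiece-per-line vocabulary, and a
# working MeCab installation is assumed; the output depends on the vocabulary.
def _example_mecab_wordpiece_tokenization(vocab_path='vocab.txt'):
    tokenizer = MecabBertTokenizer(vocab_file=vocab_path, do_lower_case=False)
    # MeCab first splits the sentence into morphemes; WordPiece then splits
    # out-of-vocabulary morphemes into subword units.
    return tokenizer.tokenize('吾輩は猫である。')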
class MecabCharacterBertTokenizer(BertTokenizer):
"""BERT character tokenizer for with information of MeCab tokenization"""
def __init__(self, vocab_file, do_lower_case=False, do_basic_tokenize=True,
mecab_dict_path=None, unk_token='[UNK]', sep_token='[SEP]',
pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
"""Constructs a MecabCharacterBertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file.
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization with MeCab before wordpiece.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
cls_token=cls_token, mask_token=mask_token, **kwargs)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'.".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case,
mecab_dict_path=mecab_dict_path,
preserve_spaces=True)
self.wordpiece_tokenizer = CharacterTokenizer(vocab=self.vocab,
unk_token=self.unk_token,
with_markers=True)
def _convert_token_to_id(self, token):
"""Converts a token (str/unicode) to an id using the vocab."""
if token[:2] == '##':
token = token[2:]
return self.vocab.get(token, self.vocab.get(self.unk_token))
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) to a single string."""
out_string = ' '.join(tokens).replace('##', '').strip()
return out_string
class MecabBasicTokenizer(object):
"""Runs basic tokenization with MeCab morphological parser."""
def __init__(self, do_lower_case=False, never_split=None,
mecab_dict_path=None, preserve_spaces=False):
"""Constructs a MecabBasicTokenizer.
Args:
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
**preserve_spaces**: (`optional`) boolean (default True)
Whether to preserve whitespaces in the output tokens.
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
import MeCab
if mecab_dict_path is not None:
self.mecab = MeCab.Tagger('-d {}'.format(mecab_dict_path))
else:
self.mecab = MeCab.Tagger()
self.preserve_spaces = preserve_spaces
def tokenize(self, text, never_split=None, with_info=False, **kwargs):
"""Tokenizes a piece of text."""
never_split = self.never_split + (never_split if never_split is not None else [])
text = unicodedata.normalize('NFKC', text)
tokens = []
token_infos = []
cursor = 0
for line in self.mecab.parse(text).split('\n'):
if line == 'EOS':
if self.preserve_spaces and len(text[cursor:]) > 0:
tokens.append(text[cursor:])
token_infos.append(None)
break
token, token_info = line.split('\t')
token_start = text.index(token, cursor)
token_end = token_start + len(token)
if self.preserve_spaces and cursor < token_start:
tokens.append(text[cursor:token_start])
token_infos.append(None)
if self.do_lower_case and token not in never_split:
token = token.lower()
tokens.append(token)
token_infos.append(token_info)
cursor = token_end
assert len(tokens) == len(token_infos)
if with_info:
return tokens, token_infos
else:
return tokens
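# --- Illustrative usage sketch (not part of the original module) ---
# With preserve_spaces=True the whitespace between MeCab tokens is returned as
# separate tokens; a working MeCab installation is assumed.
def _example_mecab_basic_tokenize(text='吾輩は猫である。'):
    tokenizer = MecabBasicTokenizer(preserve_spaces=True)
    return tokenizer.tokenize(text)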
class CharacterTokenizer(object):
"""Runs Character tokenziation."""
def __init__(self, vocab, unk_token,
max_input_chars_per_word=100, with_markers=True):
"""Constructs a CharacterTokenizer.
Args:
vocab: Vocabulary object.
unk_token: A special symbol for out-of-vocabulary token.
            with_markers: If True, "##" is prepended to each output character except
                the first one.
"""
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
self.with_markers = with_markers
def tokenize(self, text):
"""Tokenizes a piece of text into characters.
For example:
input = "apple"
output = ["a", "##p", "##p", "##l", "##e"] (if self.with_markers is True)
output = ["a", "p", "p", "l", "e"] (if self.with_markers is False)
Args:
text: A single token or whitespace separated tokens.
This should have already been passed through `BasicTokenizer`.
Returns:
A list of characters.
"""
output_tokens = []
for i, char in enumerate(text):
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
if self.with_markers and i != 0:
output_tokens.append('##' + char)
else:
output_tokens.append(char)
return output_tokens
| 39.722008
| 91
| 0.606532
|
5825d43645cb9f0eb50dede80a06842331dd20e5
| 6,789
|
py
|
Python
|
tests/integration/test_uninstall.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 6,263
|
2017-01-20T17:41:36.000Z
|
2022-02-15T20:48:57.000Z
|
tests/integration/test_uninstall.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 1,100
|
2017-01-20T19:41:52.000Z
|
2017-12-06T09:15:13.000Z
|
tests/integration/test_uninstall.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 366
|
2017-01-21T10:06:52.000Z
|
2021-11-25T17:09:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import shutil
import pytest
from pipenv.utils import temp_environ
@pytest.mark.uninstall
@pytest.mark.install
def test_uninstall_requests(PipenvInstance):
    # Uninstalling requests can fail even when uninstalling Django below
# succeeds, if requests was de-vendored.
# See https://github.com/pypa/pipenv/issues/3644 for problems
# caused by devendoring
with PipenvInstance() as p:
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
c = p.pipenv("run python -m requests.help")
assert c.return_code == 0
c = p.pipenv("uninstall requests")
assert c.return_code == 0
assert "requests" not in p.pipfile["dev-packages"]
c = p.pipenv("run python -m requests.help")
assert c.return_code > 0
@pytest.mark.uninstall
def test_uninstall_django(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install Django==1.11.13")
assert c.return_code == 0
assert "django" in p.pipfile["packages"]
assert "django" in p.lockfile["default"]
assert "pytz" in p.lockfile["default"]
c = p.pipenv("run python -m django --version")
assert c.return_code == 0
c = p.pipenv("uninstall Django")
assert c.return_code == 0
assert "django" not in p.pipfile["dev-packages"]
assert "django" not in p.lockfile["develop"]
assert p.lockfile["develop"] == {}
c = p.pipenv("run python -m django --version")
assert c.return_code > 0
@pytest.mark.install
@pytest.mark.uninstall
def test_mirror_uninstall(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
mirror_url = os.environ.pop(
"PIPENV_TEST_INDEX", "https://pypi.python.org/simple"
)
assert "pypi.org" not in mirror_url
c = p.pipenv("install Django==1.11.13 --pypi-mirror {0}".format(mirror_url))
assert c.return_code == 0
assert "django" in p.pipfile["packages"]
assert "django" in p.lockfile["default"]
assert "pytz" in p.lockfile["default"]
# Ensure the --pypi-mirror parameter hasn't altered the Pipfile or Pipfile.lock sources
assert len(p.pipfile["source"]) == 1
assert len(p.lockfile["_meta"]["sources"]) == 1
assert "https://pypi.org/simple" == p.pipfile["source"][0]["url"]
assert "https://pypi.org/simple" == p.lockfile["_meta"]["sources"][0]["url"]
c = p.pipenv("run python -m django --version")
assert c.return_code == 0
c = p.pipenv("uninstall Django --pypi-mirror {0}".format(mirror_url))
assert c.return_code == 0
assert "django" not in p.pipfile["dev-packages"]
assert "django" not in p.lockfile["develop"]
assert p.lockfile["develop"] == {}
# Ensure the --pypi-mirror parameter hasn't altered the Pipfile or Pipfile.lock sources
assert len(p.pipfile["source"]) == 1
assert len(p.lockfile["_meta"]["sources"]) == 1
assert "https://pypi.org/simple" == p.pipfile["source"][0]["url"]
assert "https://pypi.org/simple" == p.lockfile["_meta"]["sources"][0]["url"]
c = p.pipenv("run python -m django --version")
assert c.return_code > 0
@pytest.mark.files
@pytest.mark.install
@pytest.mark.uninstall
def test_uninstall_all_local_files(PipenvInstance, testsroot):
file_name = "tablib-0.12.1.tar.gz"
# Not sure where travis/appveyor run tests from
source_path = os.path.abspath(os.path.join(testsroot, "pypi", "tablib", file_name))
with PipenvInstance(chdir=True) as p:
shutil.copy(source_path, os.path.join(p.path, file_name))
os.mkdir(os.path.join(p.path, "tablib"))
c = p.pipenv("install {}".format(file_name))
assert c.return_code == 0
c = p.pipenv("uninstall --all")
assert c.return_code == 0
assert "tablib" in c.out
# Uninstall --all is not supposed to remove things from the pipfile
# Note that it didn't before, but that instead local filenames showed as hashes
assert "tablib" in p.pipfile["packages"]
@pytest.mark.install
@pytest.mark.uninstall
def test_uninstall_all_dev(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install --dev Django==1.11.13 six")
assert c.return_code == 0
c = p.pipenv("install tablib")
assert c.return_code == 0
assert "tablib" in p.pipfile["packages"]
assert "django" in p.pipfile["dev-packages"]
assert "six" in p.pipfile["dev-packages"]
assert "tablib" in p.lockfile["default"]
assert "django" in p.lockfile["develop"]
assert "six" in p.lockfile["develop"]
c = p.pipenv('run python -c "import django"')
assert c.return_code == 0
c = p.pipenv("uninstall --all-dev")
assert c.return_code == 0
assert p.pipfile["dev-packages"] == {}
assert "django" not in p.lockfile["develop"]
assert "six" not in p.lockfile["develop"]
assert "tablib" in p.pipfile["packages"]
assert "tablib" in p.lockfile["default"]
c = p.pipenv('run python -c "import django"')
assert c.return_code > 0
c = p.pipenv('run python -c "import tablib"')
assert c.return_code == 0
@pytest.mark.uninstall
def test_normalize_name_uninstall(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
# Pre comment
[packages]
Requests = "*"
python_DateUtil = "*"
"""
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
c = p.pipenv("uninstall python_dateutil")
assert "Requests" in p.pipfile["packages"]
assert "python_DateUtil" not in p.pipfile["packages"]
with open(p.pipfile_path) as f:
contents = f.read()
assert "# Pre comment" in contents
@pytest.mark.install
@pytest.mark.uninstall
def test_uninstall_all_dev_with_shared_dependencies(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install pytest")
assert c.return_code == 0
c = p.pipenv("install --dev six")
assert c.return_code == 0
c = p.pipenv("uninstall --all-dev")
assert c.return_code == 0
assert "six" in p.lockfile["develop"]
@pytest.mark.uninstall
def test_uninstall_missing_parameters(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install requests")
assert c.return_code == 0
c = p.pipenv("uninstall")
assert c.return_code != 0
assert "No package provided!" in c.err
| 34.115578
| 95
| 0.628222
|
842abd3429eccf4f74606f30aadbde84363cd512
| 5,839
|
py
|
Python
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 681
|
2019-09-09T19:34:37.000Z
|
2022-03-31T12:17:58.000Z
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 212
|
2019-09-18T14:43:44.000Z
|
2022-03-27T22:21:00.000Z
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 157
|
2019-09-12T05:06:05.000Z
|
2022-03-29T14:47:24.000Z
|
import numpy as np
from gym.spaces import Box
from scipy.spatial.transform import Rotation
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerPushEnvV2(SawyerXYZEnv):
"""
Motivation for V2:
V1 was very difficult to solve because the observation didn't say where
to move after reaching the puck.
Changelog from V1 to V2:
- (7/7/20) Removed 3 element vector. Replaced with 3 element position
of the goal (for consistency with other environments)
- (6/15/20) Added a 3 element vector to the observation. This vector
points from the end effector to the goal coordinate.
i.e. (self._target_pos - pos_hand)
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
TARGET_RADIUS=0.05
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.02)
obj_high = (0.1, 0.7, 0.02)
goal_low = (-0.1, 0.8, 0.01)
goal_high = (0.1, 0.9, 0.02)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0., 0.6, 0.02]),
'hand_init_pos': np.array([0., 0.6, 0.2]),
}
self.goal = np.array([0.1, 0.8, 0.02])
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([+1, +1, +1, +1]),
)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.num_resets = 0
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_push_v2.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
obj = obs[4:7]
(
reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place
) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS),
'near_object': float(tcp_to_obj <= 0.03),
'grasp_success': float(
self.touching_main_object and
(tcp_opened > 0) and
(obj[2] - 0.02 > self.obj_init_pos[2])
),
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_quat_objects(self):
return Rotation.from_matrix(
self.data.get_geom_xmat('objGeom')
).as_quat()
def _get_pos_objects(self):
return self.get_body_com('obj')
def fix_extreme_obj_pos(self, orig_init_pos):
        # This is to account for the meshes of the geom and the object not
        # being aligned. If this is not done, the object could be initialized
        # in an extreme position.
diff = self.get_body_com('obj')[:2] - \
self.get_body_com('obj')[:2]
adjusted_pos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0,
# and geom_pos[2] is the object height
return [
adjusted_pos[0],
adjusted_pos[1],
self.get_body_com('obj')[-1]
]
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.fix_extreme_obj_pos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
if self.random_init:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
self._set_obj_xyz(self.obj_init_pos)
self.num_resets += 1
return self._get_obs()
def compute_reward(self, action, obs):
obj = obs[4:7]
tcp_opened = obs[3]
tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
target_to_obj = np.linalg.norm(obj - self._target_pos)
target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=target_to_obj_init,
sigmoid='long_tail',
)
object_grasped = self._gripper_caging_reward(
action,
obj,
object_reach_radius=0.01,
obj_radius=0.015,
pad_success_thresh=0.05,
xz_thresh=0.005,
high_density=True
)
reward = 2 * object_grasped
if tcp_to_obj < 0.02 and tcp_opened > 0:
reward += 1. + reward + 5. * in_place
if target_to_obj < self.TARGET_RADIUS:
reward = 10.
return (
reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place
)
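# --- Illustrative sketch (not part of the original module) ---
# How the in_place term of the reward decays with the puck-to-target distance;
# the distances and the 0.3 margin below are arbitrary example values.
def _example_in_place_reward(margin=0.3):
    for dist in (0.3, 0.15, 0.05, 0.0):
        r = reward_utils.tolerance(dist,
                                   bounds=(0, SawyerPushEnvV2.TARGET_RADIUS),
                                   margin=margin,
                                   sigmoid='long_tail')
        print(dist, r)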
| 32.988701
| 93
| 0.57801
|
d235ce422ff23540520acb984183b8d8c19f27d7
| 2,794
|
py
|
Python
|
testcases/logger_test.py
|
Richard-L-Johnson/pyalgotrader
|
ad2bcc6b25c06c66eee4a8d522ce844504d8ec62
|
[
"Apache-2.0"
] | 3,719
|
2015-01-06T09:00:02.000Z
|
2022-03-31T20:55:01.000Z
|
testcases/logger_test.py
|
Richard-L-Johnson/pyalgotrader
|
ad2bcc6b25c06c66eee4a8d522ce844504d8ec62
|
[
"Apache-2.0"
] | 122
|
2015-01-01T17:06:22.000Z
|
2022-03-22T13:33:38.000Z
|
testcases/logger_test.py
|
Richard-L-Johnson/pyalgotrader
|
ad2bcc6b25c06c66eee4a8d522ce844504d8ec62
|
[
"Apache-2.0"
] | 1,428
|
2015-01-01T17:07:38.000Z
|
2022-03-31T10:02:37.000Z
|
# PyAlgoTrade
#
# Copyright 2011-2018 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
from testcases import common
class TestCase(common.TestCase):
    # Check that strategy and custom logs have the proper datetime, that is, the bars' datetime.
def testBacktestingLog1(self):
code = """from testcases import logger_test_1
logger_test_1.main()
"""
res = common.run_python_code(code)
expectedLines = [
"2000-01-01 00:00:00 strategy [INFO] bla",
"2000-01-01 00:00:00 custom [INFO] ble",
]
self.assertEqual(res.get_output_lines(), expectedLines)
self.assertTrue(res.exit_ok())
    # Check that strategy and custom logs have the proper datetime, that is, the bars' datetime.
def testBacktestingLog2(self):
code = """from testcases import logger_test_2
logger_test_2.main()
"""
res = common.run_python_code(code)
self.assertEqual(len(res.get_output_lines()), 3)
self.assertEqual(res.get_output_lines()[0], "2000-01-01 00:00:00 strategy [INFO] bla")
self.assertEqual(
res.get_output_lines()[1],
"2000-01-02 00:00:00 broker.backtesting [DEBUG] Not enough cash to fill orcl order [1] for 1 share/s"
)
self.assertEqual(res.get_output_lines()[2], "2000-01-02 00:00:00 strategy [INFO] bla")
self.assertTrue(res.exit_ok())
    # Check that strategy and custom logs have the proper datetime, that is, the current date.
def testNonBacktestingLog3(self):
code = """from testcases import logger_test_3
logger_test_3.main()
"""
res = common.run_python_code(code)
now = datetime.datetime.now()
self.assertEqual(len(res.get_output_lines()), 2)
for line in res.get_output_lines(True):
self.assertEqual(line.find(str(now.date())), 0)
self.assertNotEqual(res.get_output_lines()[0].find("strategy [INFO] bla"), -1)
self.assertNotEqual(res.get_output_lines()[1].find("custom [INFO] ble"), -1)
self.assertTrue(res.exit_ok())
| 40.492754
| 117
| 0.659986
|
8b8289b55cb02101265255283d074585270abaa2
| 15,285
|
py
|
Python
|
nova/tests/unit/scheduler/test_scheduler_utils.py
|
viggates/nova-1
|
d2b7ce91f2d4c34a0794efc710b9e96574f9f605
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/test_scheduler_utils.py
|
viggates/nova-1
|
d2b7ce91f2d4c34a0794efc710b9e96574f9f605
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/test_scheduler_utils.py
|
viggates/nova-1
|
d2b7ce91f2d4c34a0794efc710b9e96574f9f605
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import notifications
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
def test_build_request_spec_without_image(self):
image = None
instance = {'uuid': 'fake-uuid'}
instance_type = {'flavorid': 'fake-id'}
self.mox.StubOutWithMock(flavors, 'extract_flavor')
flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
self.mox.ReplayAll()
request_spec = scheduler_utils.build_request_spec(self.context, image,
[instance])
self.assertEqual({}, request_spec['image'])
@mock.patch.object(flavors, 'extract_flavor')
def test_build_request_spec_with_object(self, extract_flavor):
instance_type = {'flavorid': 'fake-id'}
instance = fake_instance.fake_instance_obj(self.context)
extract_flavor.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context, None,
[instance])
self.assertIsInstance(request_spec['instance_properties'], dict)
def _test_set_vm_state_and_notify(self, request_spec,
expected_uuids):
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
self.mox.StubOutWithMock(compute_utils,
'add_instance_fault_from_exc')
self.mox.StubOutWithMock(notifications, 'send_update')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(rpc, 'get_notifier')
notifier = self.mox.CreateMockAnything()
rpc.get_notifier(service).AndReturn(notifier)
old_ref = 'old_ref'
new_ref = 'new_ref'
inst_obj = 'inst_obj'
for _uuid in expected_uuids:
db.instance_update_and_get_original(
self.context, _uuid, updates,
columns_to_join=['system_metadata']).AndReturn(
(old_ref, new_ref))
notifications.send_update(self.context, old_ref, inst_obj,
service=service)
compute_utils.add_instance_fault_from_exc(
self.context,
new_ref, exc_info, mox.IsA(tuple))
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
notifier.error(self.context, event_type, payload)
self.mox.ReplayAll()
with mock.patch.object(objects.Instance, '_from_db_object',
return_value=inst_obj):
scheduler_utils.set_vm_state_and_notify(self.context,
service,
method,
updates,
exc_info,
request_spec,
db)
def test_set_vm_state_and_notify_rs_uuids(self):
expected_uuids = ['1', '2', '3']
request_spec = dict(instance_uuids=expected_uuids)
self._test_set_vm_state_and_notify(request_spec, expected_uuids)
def test_set_vm_state_and_notify_uuid_from_instance_props(self):
expected_uuids = ['fake-uuid']
request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
self._test_set_vm_state_and_notify(request_spec, expected_uuids)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if not force_hosts and not force_nodes:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if with_retry and not force_hosts and not force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if with_retry and not force_hosts and not force_nodes:
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
@mock.patch.object(scheduler_utils, '_max_attempts')
def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
_max_attempts.return_value = 2
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc=[msg]))
nvh = self.assertRaises(exception.NoValidHost,
scheduler_utils.populate_retry,
filter_properties, 'fake-uuid')
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, nvh.message)
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, ['fake_uuid'], group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy]),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_affinity_filters(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context,
['fake-uuid'])
self.assertIsNone(group_info)
def test_get_group_details_with_no_instance_uuids(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
wrong_filter = {
'affinity': 'ServerGroupAntiAffinityFilter',
'anti-affinity': 'ServerGroupAffinityFilter',
}
self.flags(scheduler_default_filters=[wrong_filter[policy]])
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
self.assertRaises(exception.NoValidHost,
scheduler_utils._get_group_details,
self.context, ['fake-uuid'])
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'])
spec = {'instance_uuids': ['fake-uuid']}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, ['fake-uuid'],
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_uuids': ['fake-uuid']}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, ['fake-uuid'],
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_uuids': ['fake-uuid']}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
| 42.34072
| 78
| 0.585541
|
c6def6a49ebf562826846a7d5e9f3597eb239b36
| 3,878
|
py
|
Python
|
src/commands/return_inventory.py
|
seisatsu/DennisMUD-ESP32
|
b63d4b914c5e8d0f9714042997c64919b20be842
|
[
"MIT"
] | 19
|
2018-10-02T03:58:46.000Z
|
2021-04-09T13:09:23.000Z
|
commands/return_inventory.py
|
seisatsu/Dennis
|
8f1892f21beba6b21b4f7b9ba3062296bb1dc4b9
|
[
"MIT"
] | 100
|
2018-09-22T22:54:35.000Z
|
2021-04-16T17:46:34.000Z
|
commands/return_inventory.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 1
|
2022-03-07T08:10:59.000Z
|
2022-03-07T08:10:59.000Z
|
#######################
# Dennis MUD #
# return_inventory.py #
# Copyright 2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "return inventory"
CATEGORIES = ["items", "ownership"]
USAGE = "return inventory"
DESCRIPTION = """Return all of the items in your inventory to their primary owners.
If the primary owner already has an item (for example if it's duplified), you will just lose it.
If you are the primary owner of an item, nothing will happen with it.
Ex. `return inventory` to return all items in your inventory."""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argc=0):
return False
# Cycle through our inventory, keeping track of how many items we returned and kept.
retcount = 0
keepcount = 0
for itemid in console.user["inventory"]:
# Lookup the target item and perform item checks.
thisitem = COMMON.check_item(NAME, console, itemid, reason=False)
if not thisitem:
console.log.error("Item in user inventory does not exist: {0} :: {1}".format(console.user["name"], itemid))
console.msg("{0}: ERROR: Item in your inventory does not exist: {0}".format(NAME, itemid))
continue
# Make sure the item's primary owner exists.
targetuser = COMMON.check_user(NAME, console, thisitem["owners"][0], live=True, reason=False)
if not targetuser:
console.log.error("Primary owner for item does not exist: {0} :: {1}".format(itemid, thisitem["owners"][0]))
console.msg("{0}: ERROR: Primary owner does not exist for this item: {0}".format(NAME,
thisitem["owners"][0]))
continue
# Make sure we are not the primary owner. Otherwise, remove the item from our inventory.
if thisitem["owners"][0] == console.user["name"]:
keepcount += 1
continue
# Remove the item from our inventory.
console.user["inventory"].remove(itemid)
retcount += 1
# If the item isn't already in the primary owner's inventory, put it there.
if itemid not in targetuser["inventory"]:
targetuser["inventory"].append(itemid)
console.database.upsert_user(targetuser)
# If they are online, notify the primary owner that they have received the item.
console.shell.msg_user(thisitem["owners"][0], "{0} appeared in your inventory.".format(
COMMON.format_item(NAME, thisitem["name"], upper=True)))
# Finished.
console.msg("{0}: Total items returned: {1}; items kept: {2}".format(NAME, retcount, keepcount))
console.database.upsert_user(console.user)
return True
| 45.623529
| 120
| 0.65936
|
8dc901142dd1b9965849a5fd11e6c03fd5cb33fe
| 916
|
py
|
Python
|
sdk/python/pulumi_aws_native/greengrassv2/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/greengrassv2/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/greengrassv2/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ComponentVersionLambdaEventSourceType',
'ComponentVersionLambdaExecutionParametersInputPayloadEncodingType',
'ComponentVersionLambdaFilesystemPermission',
'ComponentVersionLambdaLinuxProcessParamsIsolationMode',
]
class ComponentVersionLambdaEventSourceType(str, Enum):
PUB_SUB = "PUB_SUB"
IOT_CORE = "IOT_CORE"
class ComponentVersionLambdaExecutionParametersInputPayloadEncodingType(str, Enum):
JSON = "json"
BINARY = "binary"
class ComponentVersionLambdaFilesystemPermission(str, Enum):
RO = "ro"
RW = "rw"
class ComponentVersionLambdaLinuxProcessParamsIsolationMode(str, Enum):
GREENGRASS_CONTAINER = "GreengrassContainer"
NO_CONTAINER = "NoContainer"
| 27.757576
| 83
| 0.766376
|
d30504cf61e9b575a443cbd9bef1f572daddf155
| 1,353
|
py
|
Python
|
examples/pytorch-lightning/hyperparams-as-output/mnist_trainer.py
|
DAGsHub/DAGsHub
|
e1c61bed6afee3cad0b3157aa86406d0da53a455
|
[
"MIT"
] | 37
|
2019-11-12T13:49:02.000Z
|
2022-03-15T06:59:56.000Z
|
examples/pytorch-lightning/hyperparams-as-output/mnist_trainer.py
|
DAGsHub/DAGsHub
|
e1c61bed6afee3cad0b3157aa86406d0da53a455
|
[
"MIT"
] | 59
|
2019-11-13T15:16:16.000Z
|
2022-03-14T15:01:26.000Z
|
examples/pytorch-lightning/hyperparams-as-output/mnist_trainer.py
|
DAGsHub/DAGsHub
|
e1c61bed6afee3cad0b3157aa86406d0da53a455
|
[
"MIT"
] | 7
|
2020-05-09T09:32:05.000Z
|
2021-11-24T16:02:28.000Z
|
"""
This file runs the main training/val loop, etc... using Lightning Trainer
"""
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from dagshub.pytorch_lightning import DAGsHubLogger
def import_model():
"""
Ugly hack to be able to import the model from ../mnist_model.py
"""
import os
examples_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import sys
sys.path.append(examples_dir)
from mnist_model import MnistModel
return MnistModel
if __name__ == '__main__':
parser = ArgumentParser(add_help=False)
parser.add_argument('--gpus', type=str, default=None)
MnistModel = import_model()
    # give the module a chance to add its own params
    # good practice to define LightningModule-specific params in the module
parser = MnistModel.add_model_specific_args(parser)
# parse params
hparams = parser.parse_args()
# init module
model = MnistModel(hparams.batch_size, hparams.learning_rate)
# most basic trainer, uses good defaults
trainer = Trainer(
max_epochs=hparams.max_nb_epochs,
gpus=hparams.gpus,
val_check_interval=0.2,
logger=DAGsHubLogger(), # This is the main point - use the DAGsHub logger!
default_root_dir='lightning_logs',
)
trainer.fit(model)
| 28.787234
| 98
| 0.704361
|
1d69eeac521e3a69541aa0947dae8465a0d33824
| 16,746
|
py
|
Python
|
resilient_alexnet/a_to_a.py
|
maxzvyagin/resilient_alexnet
|
a492a1da93133995057dc3504312158d6c25b2b0
|
[
"MIT"
] | null | null | null |
resilient_alexnet/a_to_a.py
|
maxzvyagin/resilient_alexnet
|
a492a1da93133995057dc3504312158d6c25b2b0
|
[
"MIT"
] | null | null | null |
resilient_alexnet/a_to_a.py
|
maxzvyagin/resilient_alexnet
|
a492a1da93133995057dc3504312158d6c25b2b0
|
[
"MIT"
] | null | null | null |
import sys
from resilient_alexnet.alexnet_fashion import fashion_pytorch_alexnet, fashion_tensorflow_alexnet
from resilient_alexnet.alexnet_caltech import caltech_pytorch_alexnet, caltech_tensorflow_alexnet
from resilient_alexnet.alexnet_cinic import cinic_pytorch_alexnet, cinic_tensorflow_alexnet
from resilient_alexnet.rms_alexnet_fashion import rms_fashion_pytorch_alexnet, rms_fashion_tensorflow_alexnet
from resilient_alexnet.alexnet_caltech.caltech_pytorch_alexnet import Caltech_NP_Dataset
from resilient_alexnet.alexnet_fashion.fashion_pytorch_alexnet import Fashion_NP_Dataset
import argparse
import ray
from ray import tune
import statistics
import foolbox as fb
import tensorflow as tf
import torch
import torchvision
import tensorflow_datasets as tfds
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
import spaceray
from ray.tune.integration.wandb import wandb_mixin
import wandb
from ray.tune.integration.wandb import wandb_mixin
import pickle
# Default constants
PT_MODEL = fashion_pytorch_alexnet.fashion_pt_objective
TF_MODEL = fashion_tensorflow_alexnet.fashion_tf_objective
MODEL_TYPE = "fashion"
NUM_CLASSES = 10
TRIALS = 25
NO_FOOL = False
MNIST = True
MAX_DIFF = False
FASHION = False
DIFF_RESILIENCY = False
ONLY_CPU = False
OPTIMIZE_MODE = "max"
MAXIMIZE_CONVERGENCE = False
MINIMIZE_VARIANCE = False
MODEL_FRAMEWORK = "pt"
def found_convergence(validation_accuracy):
"""Given validation accuracy, return bool defining if convergence has been reached: <=5% change in last 10 points"""
last_ten = validation_accuracy[-10:]
diffs = []
for x in range(9):
d = last_ten[x+1] - last_ten[x]
diffs.append(abs(d))
ave_diff = statistics.mean(diffs)
if ave_diff >= .05:
return False, ave_diff
else:
return True, ave_diff
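# --- Illustrative sketch (not part of the original module) ---
# found_convergence looks only at the last 10 validation-accuracy points; the
# curve below is made up and flattens out, so it reports convergence.
def _example_convergence_check():
    curve = [0.20, 0.45, 0.62, 0.71, 0.76,
             0.80, 0.81, 0.80, 0.82, 0.81, 0.80, 0.81, 0.82, 0.81, 0.80]
    return found_convergence(curve)  # -> (True, small average difference)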
def model_attack(model, model_type, attack_type, config, num_classes=NUM_CLASSES):
print(num_classes)
global ONLY_CPU, MODEL_TYPE
if model_type == "pt":
if ONLY_CPU:
device = torch.device("cpu")
else:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
fmodel = fb.models.PyTorchModel(model, bounds=(0, 1))
# fashion
if MODEL_TYPE == "fashion":
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/fashion_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
if attack_type != "pgd":
data = DataLoader(Fashion_NP_Dataset(x_test.astype(np.float32), y_test.astype(np.float32)),
batch_size=int(config['batch_size']), shuffle=False)
else:
data = DataLoader(Fashion_NP_Dataset(x_test.astype(np.float32), y_test.astype(np.int64)),
batch_size=int(config['batch_size']), shuffle=False)
elif MODEL_TYPE == "caltech":
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/caltech_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
data = DataLoader(Caltech_NP_Dataset(x_test.astype(np.float32), y_test.astype(np.float32)),
batch_size=int(config['batch_size']), shuffle=False)
else:
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/cinic_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
if attack_type != "pgd":
data = DataLoader(Fashion_NP_Dataset(x_test.astype(np.float32), y_test.astype(np.float32)),
batch_size=int(config['batch_size']), shuffle=False)
else:
data = DataLoader(Fashion_NP_Dataset(x_test.astype(np.float32), y_test.astype(np.int64)),
batch_size=int(config['batch_size']), shuffle=False)
images, labels = [], []
for sample in data:
images.append(sample[0].to(device))
labels.append(sample[1].to(device))
# images, labels = (torch.from_numpy(images).to(device), torch.from_numpy(labels).to(device))
elif model_type == "tf":
fmodel = fb.models.TensorFlowModel(model, bounds=(0, 1))
if MODEL_TYPE == "fashion":
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/fashion_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
elif MODEL_TYPE == "caltech":
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/caltech_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
else:
f = open('/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/alexnet_datasets/cinic_splits.pkl', 'rb')
data = pickle.load(f)
(x_train, y_train), (x_val, y_val), (x_test, y_test) = data
if attack_type != "pgd":
data = tf.data.Dataset.from_tensor_slices((x_test.astype(np.float32), y_test.astype(np.float32))).batch(
config['batch_size'])
else:
data = tf.data.Dataset.from_tensor_slices((x_test.astype(np.float32), y_test.astype(np.int64))).batch(
config['batch_size'])
images, labels = [], []
for sample in data:
images.append(sample[0])
labels.append(sample[1])
else:
print("Incorrect model type in model attack. Please try again. Must be either PyTorch or TensorFlow.")
sys.exit()
# perform the attacks
if attack_type == "uniform":
attack = fb.attacks.L2AdditiveUniformNoiseAttack()
elif attack_type == "gaussian":
attack = fb.attacks.L2AdditiveGaussianNoiseAttack()
elif attack_type == "saltandpepper":
attack = fb.attacks.SaltAndPepperNoiseAttack()
elif attack_type == "boundary":
attack = fb.attacks.BoundaryAttack()
    # NOTE: Spatial no longer appears to be supported by the devs; it is unclear whether it should be used
elif attack_type == "spatial":
attack = fb.attacks.SpatialAttack()
elif attack_type == "deepfool":
attack = fb.attacks.LinfDeepFoolAttack()
elif attack_type == "pgd":
attack = fb.attacks.LinfPGD()
epsilons = [
0.0,
0.0002,
0.0005,
0.0008,
0.001,
0.0015,
0.002,
0.003,
0.01,
0.1,
0.3,
0.5,
1.0,
]
accuracy_list = []
print("Performing FoolBox Attacks for " + model_type + " with attack type " + attack_type)
for i in tqdm(range(len(images))):
raw_advs, clipped_advs, success = attack(fmodel, images[i], labels[i], epsilons=epsilons)
if model_type == "pt":
robust_accuracy = 1 - success.cpu().numpy().astype(float).flatten().mean(axis=-1)
else:
robust_accuracy = 1 - success.numpy().astype(float).flatten().mean(axis=-1)
accuracy_list.append(robust_accuracy)
return np.array(accuracy_list).mean()
@wandb_mixin
def double_train(config):
"""Definition of side by side training of pytorch and tensorflow models, plus optional resiliency testing."""
global NUM_CLASSES, DIFF_RESILIENCY, MAX_DIFF, ONLY_CPU, MAXIMIZE_CONVERGENCE, MODEL_FRAMEWORK
# print(NUM_CLASSES)
pt = False
if MODEL_FRAMEWORK == "pt":
selected_model = PT_MODEL
pt = True
else:
selected_model = TF_MODEL
if ONLY_CPU:
try:
pt_test_acc, pt_model, pt_training_history, pt_val_loss, pt_val_acc = selected_model(config, only_cpu=ONLY_CPU)
except:
print("WARNING: implementation not completed for using only CPU. Using GPU.")
pt_test_acc, pt_model, pt_training_history, pt_val_loss, pt_val_acc = selected_model(config)
else:
pt_test_acc, pt_model, pt_training_history, pt_val_loss, pt_val_acc = selected_model(config)
if pt:
pt_model.eval()
search_results = {'first_test_acc': pt_test_acc}
search_results['framework'] = MODEL_FRAMEWORK
if not NO_FOOL:
pt_attack_accs = []
for attack_type in ['gaussian', 'deepfool']:
# for attack_type in ['pgd']:
pt_acc = model_attack(pt_model, MODEL_FRAMEWORK, attack_type, config, num_classes=NUM_CLASSES)
search_results["first" + "_" + attack_type + "_" + "accuracy"] = pt_acc
pt_attack_accs.append(pt_acc)
# to avoid weird CUDA OOM errors
del pt_model
torch.cuda.empty_cache()
if ONLY_CPU:
try:
tf_test_acc, tf_model, tf_training_history, tf_val_loss, tf_val_acc = selected_model(config, only_cpu=ONLY_CPU)
except:
tf_test_acc, tf_model, tf_training_history, tf_val_loss, tf_val_acc = selected_model(config)
else:
tf_test_acc, tf_model, tf_training_history, tf_val_loss, tf_val_acc = selected_model(config)
search_results['second_test_acc'] = tf_test_acc
if pt:
tf_model.eval()
if not NO_FOOL:
tf_attack_accs = []
for attack_type in ['gaussian', 'deepfool']:
# for attack_type in ['pgd']:
tf_acc = model_attack(tf_model, MODEL_FRAMEWORK, attack_type, config, num_classes=NUM_CLASSES)
search_results["second" + "_" + attack_type + "_" + "accuracy"] = tf_acc
tf_attack_accs.append(tf_acc)
# check convergence
pt_conv, pt_ave_conv_diff = found_convergence(pt_val_acc)
tf_conv, tf_ave_conv_diff = found_convergence(tf_val_acc)
# calculated the metric required
first_results = []
second_results = []
for key, value in search_results.items():
if "first" in key:
first_results.append(value)
elif "second" in key:
second_results.append(value)
else:
pass
pt_ave = float(statistics.mean(first_results))
tf_ave = float(statistics.mean(second_results))
average_res = abs(pt_ave - tf_ave)
### log everything
search_results['first_converged_bool'] = pt_conv
search_results['first_converged_average'] = pt_ave_conv_diff
search_results['second_converged_bool'] = tf_conv
search_results['second_converged_average'] = tf_ave_conv_diff
search_results['average_res'] = average_res
search_results['second_training_loss'] = tf_training_history
search_results['second_validation_loss'] = tf_val_loss
search_results['second_validation_acc'] = tf_val_acc
search_results['first_training_loss'] = pt_training_history
search_results['first_validation_loss'] = pt_val_loss
search_results['first_validation_acc'] = pt_val_acc
    # log individual metrics to wandb
for key, value in search_results.items():
wandb.log({key: value})
# log custom training and validation curve charts to wandb
data = [[x, y] for (x, y) in zip(list(range(len(pt_training_history))), pt_training_history)]
table = wandb.Table(data=data, columns=["epochs", "training_loss"])
wandb.log({"First Training Loss": wandb.plot.line(table, "epochs", "training_loss", title="First Training Loss")})
data = [[x, y] for (x, y) in zip(list(range(len(pt_val_loss))), pt_val_loss)]
table = wandb.Table(data=data, columns=["epochs", "validation loss"])
wandb.log({"First Validation Loss": wandb.plot.line(table, "epochs", "validation loss", title="First Validation Loss")})
data = [[x, y] for (x, y) in zip(list(range(len(pt_val_acc))), pt_val_acc)]
table = wandb.Table(data=data, columns=["epochs", "validation accuracy"])
wandb.log({"First Validation Accuracy": wandb.plot.line(table, "epochs", "validation accuracy",
title="First Validation Accuracy")})
data = [[x, y] for (x, y) in zip(list(range(len(tf_training_history))), tf_training_history)]
table = wandb.Table(data=data, columns=["epochs", "training_loss"])
wandb.log({"Second Training Loss": wandb.plot.line(table, "epochs", "training_loss", title="Second Training Loss")})
data = [[x, y] for (x, y) in zip(list(range(len(tf_val_loss))), tf_val_loss)]
table = wandb.Table(data=data, columns=["epochs", "validation loss"])
wandb.log({"Second Validation Loss": wandb.plot.line(table, "epochs", "validation loss", title="Second Validation Loss")})
data = [[x, y] for (x, y) in zip(list(range(len(tf_val_acc))), tf_val_acc)]
table = wandb.Table(data=data, columns=["epochs", "validation accuracy"])
wandb.log({"Second Validation Accuracy": wandb.plot.line(table, "epochs", "validation accuracy", title="Second Validation Accuracy")})
try:
tune.report(**search_results)
except:
print("Couldn't report Tune results. Continuing.")
pass
return search_results
def bitune_parse_arguments(args):
"""Parsing arguments specifically for bi tune experiments"""
global PT_MODEL, TF_MODEL, NUM_CLASSES, NO_FOOL, MNIST, TRIALS, MAX_DIFF, FASHION, DIFF_RESILIENCY
global ONLY_CPU, OPTIMIZE_MODE, MODEL_TYPE, MAXIMIZE_CONVERGENCE, MINIMIZE_VARIANCE, MODEL_FRAMEWORK
if not args.model:
print("NOTE: Defaulting to fashion dataset model training...")
args.model = "fashion"
NUM_CLASSES = 10
else:
if args.model == "caltech":
PT_MODEL = caltech_pytorch_alexnet.caltech_pt_objective
TF_MODEL = caltech_tensorflow_alexnet.fashion_tf_objective
NUM_CLASSES = 102
MODEL_TYPE = "caltech"
elif args.model == "cinic":
PT_MODEL = cinic_pytorch_alexnet.cinic_pt_objective
TF_MODEL = cinic_tensorflow_alexnet.cinic_tf_objective
NUM_CLASSES = 10
MODEL_TYPE = "cinic"
elif args.model == "rms_fashion":
PT_MODEL = rms_fashion_pytorch_alexnet.rms_fashion_pt_objective
TF_MODEL = rms_fashion_tensorflow_alexnet.rms_fashion_tf_objective
NUM_CLASSES = 10
MODEL_TYPE = "fashion"
else:
print("\n ERROR: Unknown model type. Please try again. "
"Must be one of: mnist, alexnet_cifar100, segmentation_cityscapes, or segmentation_gis.\n")
sys.exit()
if not args.trials:
print("NOTE: Defaulting to 25 trials per scikit opt space...")
else:
TRIALS = int(args.trials)
if args.maximize_difference:
MAX_DIFF = True
print("NOTE: Training using Max Diff approach")
if args.different_resiliency:
DIFF_RESILIENCY = True
print("NOTE: Training using Min Resiliency approach")
if args.only_cpu:
ONLY_CPU = True
if args.minimize_difference:
MAX_DIFF = True
OPTIMIZE_MODE = "min"
print("NOTE: Training using Min Diff Approach")
if args.maximize_convergence:
print("NOTE: Training using Max Convergence Approach")
MAXIMIZE_CONVERGENCE = True
OPTIMIZE_MODE = "min"
if args.minimize_variance:
print("NOTE: Training using Min Variance Approach")
MINIMIZE_VARIANCE = True
OPTIMIZE_MODE = "min"
if args.framework:
MODEL_FRAMEWORK = args.framework
if __name__ == "__main__":
parser = argparse.ArgumentParser("Start bi model tuning with hyperspace and resiliency testing, "
"specify output csv file name.")
parser.add_argument("-o", "--out", required=True)
parser.add_argument("-m", "--model")
parser.add_argument("-t", "--trials")
parser.add_argument("-j", "--json")
parser.add_argument('-d', "--maximize_difference", action="store_true")
parser.add_argument('-r', '--different_resiliency', action="store_true")
parser.add_argument('-n', '--start_space')
parser.add_argument('-c', '--only_cpu', action='store_true')
parser.add_argument('-p', '--project_name', default="hyper_sensitive")
parser.add_argument('-f', '--framework', default='pt')
parser.add_argument('--minimize_difference', action="store_true")
parser.add_argument('--maximize_convergence', action='store_true')
parser.add_argument('--minimize_variance', action="store_true")
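    # Example invocation (hypothetical script name and output path; the flags are
    # the ones defined above):
    #   python bi_tune.py -o results.csv -m cinic -t 25 -n 0 -f pt --minimize_difference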
args = parser.parse_args()
bitune_parse_arguments(args)
# print(PT_MODEL)
print(OPTIMIZE_MODE)
spaceray.run_experiment(args, double_train, ray_dir="/lus/theta-fs0/projects/CVD-Mol-AI/mzvyagin/raylogs", cpu=8,
start_space=int(args.start_space), mode=OPTIMIZE_MODE, project_name=args.project_name,
group_name='bi_tune')
| 46.387812
| 138
| 0.662188
|
8d9918e5c6b272431e825dc3516eb0a97370e791
| 3,840
|
py
|
Python
|
quantecon/tests/test_gridtools.py
|
JMFU/QuantEcon.py
|
2840efe51d207b4d58d875e5112f038c75b995ef
|
[
"BSD-3-Clause"
] | 1
|
2022-03-09T14:43:35.000Z
|
2022-03-09T14:43:35.000Z
|
quantecon/tests/test_gridtools.py
|
JMFU/QuantEcon.py
|
2840efe51d207b4d58d875e5112f038c75b995ef
|
[
"BSD-3-Clause"
] | null | null | null |
quantecon/tests/test_gridtools.py
|
JMFU/QuantEcon.py
|
2840efe51d207b4d58d875e5112f038c75b995ef
|
[
"BSD-3-Clause"
] | 3
|
2018-06-14T04:42:50.000Z
|
2021-10-09T18:59:08.000Z
|
"""
Author: Pablo Winant
Filename: test_gridtools.py
Tests for gridtools.py file
"""
from quantecon.gridtools import cartesian, _repeat_1d
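# Illustrative expectation (hypothetical values): in C order the last index varies
# fastest, so cartesian([[0, 1], [10, 20]]) should give the rows
# [0, 10], [0, 20], [1, 10], [1, 20].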
def test_cartesian_C_order():
from numpy import linspace
x = linspace(0,9,10)
prod = cartesian([x,x,x])
correct = True
for i in range(999):
n = prod[i,0]*100+prod[i,1]*10+prod[i,2]
correct *= (i == n)
assert(correct)
def test_cartesian_C_order_int_float():
from numpy import arange, linspace
x_int = arange(10)
x_float = linspace(0,9,10)
prod_int = cartesian([x_int]*3)
prod_float = cartesian([x_float]*3)
assert(prod_int.dtype==x_int.dtype)
assert(prod_float.dtype==x_float.dtype)
assert( abs(prod_int-prod_float).max()==0)
def test_cartesian_F_order():
from numpy import linspace
x = linspace(0,9,10)
prod = cartesian([x,x,x], order='F')
correct = True
for i in range(999):
n = prod[i,2]*100+prod[i,1]*10+prod[i,0]
correct *= (i == n)
assert(correct)
def test_performance_C():
from numpy import linspace, column_stack, repeat, tile
import time
N_x = 1000
N_y = 7777
x = linspace(1,N_x,N_x)
y = linspace(1,N_y,N_y)
cartesian([x[:10],y[:10]]) # warmup
t1 = time.time()
for i in range(100):
prod = cartesian([x,y])
t2 = time.time()
# print(prod.shape)
    # compute the same product using numpy:
import numpy
t3 = time.time()
for i in range(100):
prod_numpy = column_stack([
repeat(x,N_y),
tile(y,N_x)
])
t4 = time.time()
print("Timings for 'cartesian' (C order)")
print("Cartesian: {}".format(t2-t1))
print("Numpy: {}".format(t4-t3))
assert(abs(prod-prod_numpy).max()==0)
def test_performance_F():
from numpy import linspace, column_stack, repeat, tile
import time
N_x = 1000
N_y = 7777
x = linspace(1,N_x,N_x)
y = linspace(1,N_y,N_y)
cartesian([x[:10],y[:10]]) # warmup
t1 = time.time()
for i in range(100):
prod = cartesian([x,y], order='F')
t2 = time.time()
# print(prod.shape)
    # compute the same product using numpy:
import numpy
t3 = time.time()
for i in range(100):
prod_numpy = column_stack([
tile(x,N_y),
repeat(y,N_x)
])
t4 = time.time()
print("Timings for 'cartesian'(Fortran order)")
print("Cartesian: {}".format(t2-t1))
print("Numpy: {}".format(t4-t3))
assert(abs(prod-prod_numpy).max()==0)
def test_mlinspace():
    from numpy import linspace
    from quantecon.cartesian import mlinspace
    grid1 = mlinspace([-1,-1],[2,3],[30,50])
    grid2 = cartesian([linspace(-1,2,30), linspace(-1,3,50)])
    # the two grids should coincide exactly
    assert(abs(grid1-grid2).max()==0)
def test_tile():
from numpy import linspace, tile, zeros
x = linspace(1,100, 100)
import time
t1 = time.time()
t_repeat = zeros(100*1000)
_repeat_1d(x,1,t_repeat)
t2 = time.time()
t3 = time.time()
t_numpy = tile(x, 1000)
t4 = time.time()
print("Timings for 'tile' operation")
print("Repeat_1d: {}".format(t2-t1))
print("Numpy: {}".format(t4-t3))
    assert(abs(t_numpy-t_repeat).max()==0)
def test_repeat():
from numpy import linspace, repeat, zeros
x = linspace(1,100 , 100)
import time
t1 = time.time()
t_repeat = zeros(100*1000)
_repeat_1d(x,1000,t_repeat)
t2 = time.time()
t3 = time.time()
t_numpy = repeat(x, 1000)
t4 = time.time()
print("Timings for 'repeat' operation")
print("Repeat_1d: {}".format(t2-t1))
print("Numpy: {}".format(t4-t3))
    assert(abs(t_numpy-t_repeat).max()==0)
if __name__ == '__main__':
test_cartesian_C_order()
test_cartesian_F_order()
test_performance_C()
test_performance_F()
test_tile()
test_repeat()
| 21.452514
| 61
| 0.600781
|
d59a6b6e80935273701c12dbac41589990d6743b
| 2,867
|
py
|
Python
|
checker/src/util.py
|
enowars/bambi-service-postit
|
8743161eb4454ed73094fde1789d77e704d5a3f1
|
[
"MIT"
] | null | null | null |
checker/src/util.py
|
enowars/bambi-service-postit
|
8743161eb4454ed73094fde1789d77e704d5a3f1
|
[
"MIT"
] | null | null | null |
checker/src/util.py
|
enowars/bambi-service-postit
|
8743161eb4454ed73094fde1789d77e704d5a3f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
import secrets
import string
import time
from base64 import b64encode
from logging import LoggerAdapter
from os import path
from typing import Any
leetconv = {
"O": "0",
"l": "1",
"I": "1",
"Z": "2",
"E": "3",
"A": "4",
"S": "5",
"G": "6",
"T": "7",
}
srcdir = path.dirname(path.abspath(__file__))
wordlist = open(f"{srcdir}/media/wordlist").read().replace(" ", "").split("\n")
names = [line for line in wordlist if line != ""]
rickroll = open(f"{srcdir}/media/rickroll.b64").read().replace("\n", "")
messages = [
"Remember: invite Paul to lan party",
"Shopping list: tomatoes and potatoes",
"This is not the flag hehe",
"🤓🤓🤓 Try Harder 🤓🤓🤓",
"Crypto makes me go 🥴🤢🤮",
"RSA, more like (R)eal (S)Bad (A)Crypto",
"🤡 The flag is in another castle! 🤡",
"🧠 solving crypto challenges with a calculator 🧠",
]
gplmsg = "You should have received a copy of the GNU \
General Public License along with this file; if not, \
write to the Free Software Foundation, Inc., 51 Franklin St, \
Fifth Floor, Boston, MA 02110-1301 USA"
def randint(low: int, high: int) -> int:
return low + secrets.randbelow(high - low + 1)
def randbool() -> bool:
return randint(0, 1) == 1
def randstr(n: int) -> str:
alphabet = string.ascii_letters + string.digits
chars = [secrets.choice(alphabet) for i in range(n)]
return "".join(chars)
def bytes2int(s: bytes) -> int:
return int.from_bytes(s, byteorder="big")
def int2bytes(i: int) -> bytes:
    assert i >= 0
    # at least one byte, so that 0 (and exact powers of 256) encode correctly
    blen = max(1, math.ceil(i.bit_length() / 8))
    return int.to_bytes(i, byteorder="big", length=blen)
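# Round-trip sketch (illustrative values): int2bytes(bytes2int(b"hi")) == b"hi"
# and int2bytes(0) == b"\x00".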
def spongebobcase(name: str) -> str:
return "".join([(c.upper() if randbool() else c) for c in name])
def leetify(name: str) -> str:
if randbool():
name = spongebobcase(name)
return "".join([(leetconv[c] if c in leetconv else c) for c in name])
def gen_username() -> bytes:
msg = ""
while len(msg) < randint(10, 30):
part = secrets.choice(names)
if randbool():
part = leetify(part)
msg += part
username = msg + randstr(30)
return username.encode()
def gen_noise() -> bytes:
selection = randint(0, len(messages) + 2)
if selection == 0:
msg = randstr(randint(30, 60))
elif selection == 1:
msg = rickroll
elif selection == 2:
msg = gplmsg
else:
msg = secrets.choice(messages)
if randbool():
msg = b64encode(msg.encode()).decode()
return msg.encode()
async def timed(promise: Any, logger: LoggerAdapter, ctx: str) -> Any:
logger.debug("START: {}".format(ctx))
start = time.time()
result = await promise
end = time.time()
logger.debug("DONE: {} (took {:.3f} seconds)".format(ctx, end - start))
return result
| 25.828829
| 79
| 0.60586
|
6b9c221879a9d3d664f5efff6a254844d70da7d8
| 1,289
|
py
|
Python
|
GoogleAppEngineLauncher.py
|
CKallemeres/google-appengine-wx-launcher
|
ae6ca9a073e2f8ec5fa09178a8148e9e5519aa8d
|
[
"Apache-2.0"
] | 2
|
2015-02-03T10:30:38.000Z
|
2016-11-24T06:44:55.000Z
|
GoogleAppEngineLauncher.py
|
CKallemeres/google-appengine-wx-launcher
|
ae6ca9a073e2f8ec5fa09178a8148e9e5519aa8d
|
[
"Apache-2.0"
] | null | null | null |
GoogleAppEngineLauncher.py
|
CKallemeres/google-appengine-wx-launcher
|
ae6ca9a073e2f8ec5fa09178a8148e9e5519aa8d
|
[
"Apache-2.0"
] | 1
|
2015-12-13T18:19:21.000Z
|
2015-12-13T18:19:21.000Z
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting the launcher."""
import sys
try:
import wx
except ImportError, inst:
print >>sys.stderr, 'wxPython is not available'
sys.exit(1)
REQUIRED_WX_VERSION = (2,8)
CURRENT_WX_VERSION = wx.VERSION[:2]
if CURRENT_WX_VERSION != REQUIRED_WX_VERSION:
print >>sys.stderr, ('wxPython version incorrect; is %d.%d, must be %d.%d' %
(CURRENT_WX_VERSION + REQUIRED_WX_VERSION))
sys.exit(2)
import launcher
if __name__ == '__main__':
# For Linux/Mac, redirect=False gives me a stack trace on the command line.
# TODO(jrg): remove when not debugging
redirect = False
app = launcher.App(redirect=redirect)
app.MainLoop()
| 29.295455
| 78
| 0.724593
|
9b68be82f9eefbca5387d273a6833de76961e2b6
| 281
|
py
|
Python
|
268. Missing Number/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
268. Missing Number/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
268. Missing Number/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
from typing import List


class Solution:
def missingNumber(self, nums: List[int]) -> int:
ideal_sum = 0
actual_sum = 0
for i in range(0, len(nums)):
ideal_sum += i
actual_sum += nums[i]
ideal_sum += len(nums)
return ideal_sum - actual_sum
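# Worked example (illustrative): for nums = [3, 0, 1] the ideal sum of 0..3 is 6
# and the actual sum is 4, so the missing number is 6 - 4 = 2.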
| 31.222222
| 52
| 0.544484
|
7f2186f8b69bbb0059f3822184d47029767e49ad
| 5,028
|
py
|
Python
|
tests/scripts/thread-cert/border_router/test_publish_meshcop_service.py
|
visuphi/openthread
|
a193a6f23a70fd2413538d251328eb027dcb5cf3
|
[
"BSD-3-Clause"
] | 5
|
2020-09-17T04:57:13.000Z
|
2021-04-26T12:41:53.000Z
|
tests/scripts/thread-cert/border_router/test_publish_meshcop_service.py
|
visuphi/openthread
|
a193a6f23a70fd2413538d251328eb027dcb5cf3
|
[
"BSD-3-Clause"
] | 7
|
2021-06-02T09:34:31.000Z
|
2022-02-10T09:33:33.000Z
|
tests/scripts/thread-cert/border_router/test_publish_meshcop_service.py
|
visuphi/openthread
|
a193a6f23a70fd2413538d251328eb027dcb5cf3
|
[
"BSD-3-Clause"
] | 11
|
2021-06-02T09:02:04.000Z
|
2022-03-12T06:26:25.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ipaddress
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies that OTBR publishes the meshcop service using a proper
# configuration.
#
# Topology:
# ----------------(eth)-----------------------------
# | | |
# BR1 BR2 HOST (mDNS Browser)
#
#
BR1 = 1
BR2 = 2
HOST = 3
class PublishMeshCopService(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR1: {
'name': 'BR_1',
'allowlist': [],
'is_otbr': True,
'version': '1.2',
},
BR2: {
'name': 'BR_2',
'allowlist': [],
'is_otbr': True,
'version': '1.2',
},
HOST: {
'name': 'Host',
'is_host': True
},
}
def test(self):
host = self.nodes[HOST]
br1 = self.nodes[BR1]
br2 = self.nodes[BR2]
br2.disable_br()
# TODO: verify the behavior when thread is disabled
host.bash('service otbr-agent stop')
host.start(start_radvd=False)
self.simulator.go(10)
br1.start()
self.simulator.go(10)
self.assertEqual('leader', br1.get_state())
self.check_meshcop_service(br1, host)
br1.disable_backbone_router()
self.simulator.go(10)
self.check_meshcop_service(br1, host)
# verify that there are two meshcop services
br2.start()
br2.disable_backbone_router()
br2.enable_br()
self.simulator.go(10)
service_instances = host.browse_mdns_services('_meshcop._udp')
self.assertEqual(len(service_instances), 2)
br1_service = self.check_meshcop_service(br1, host, 'OpenThread_BorderRouter')
for instance in service_instances:
if instance != 'OpenThread_BorderRouter':
br2_service = self.check_meshcop_service(br2, host, instance)
self.assertNotEqual(br1_service['host'], br2_service['host'])
def check_meshcop_service(self, br, host, instance_name='OpenThread_BorderRouter'):
data = host.discover_mdns_service(instance_name, '_meshcop._udp', None)
sb_data = data['txt']['sb'].encode('raw_unicode_escape')
state_bitmap = int.from_bytes(sb_data, byteorder='big')
logging.info(bin(state_bitmap))
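        # Illustrative decoding (hypothetical value): a bitmap of 0b110110001 would
        # mean connection mode 1 (PskC), interface status 2 (attached),
        # availability 1 (high), BBR enabled (bit 7) and BBR primary (bit 8),
        # matching the field checks below.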
self.assertEqual((state_bitmap & 7), 1) # connection mode = PskC
if br.get_state() == 'disabled':
self.assertEqual((state_bitmap >> 3 & 3), 0) # Thread is disabled
elif br.get_state() == 'detached':
self.assertEqual((state_bitmap >> 3 & 3), 1) # Thread is detached
else:
self.assertEqual((state_bitmap >> 3 & 3), 2) # Thread is attached
self.assertEqual((state_bitmap >> 5 & 3), 1) # high availability
self.assertEqual((state_bitmap >> 7 & 1),
br.get_backbone_router_state() != 'Disabled') # BBR is enabled or not
self.assertEqual((state_bitmap >> 8 & 1), br.get_backbone_router_state() == 'Primary') # BBR is primary or not
self.assertEqual(data['txt']['nn'], br.get_network_name())
self.assertEqual(data['txt']['rv'], '1')
self.assertIn(data['txt']['tv'], ['1.1.0', '1.1.1', '1.2.0'])
return data
if __name__ == '__main__':
unittest.main()
| 38.090909
| 119
| 0.636635
|
36997c43feccc1b026253b8db8ce58c662a8ed1d
| 6,250
|
py
|
Python
|
frappe/permissions.py
|
gangadharkadam/office_frappe
|
38ea6c9c68bf61b71b7af1d28b83d0253dd0041f
|
[
"MIT"
] | null | null | null |
frappe/permissions.py
|
gangadharkadam/office_frappe
|
38ea6c9c68bf61b71b7af1d28b83d0253dd0041f
|
[
"MIT"
] | null | null | null |
frappe/permissions.py
|
gangadharkadam/office_frappe
|
38ea6c9c68bf61b71b7af1d28b83d0253dd0041f
|
[
"MIT"
] | 1
|
2018-03-21T18:39:06.000Z
|
2018-03-21T18:39:06.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, copy
from frappe import _, msgprint
from frappe.utils import cint
rights = ("read", "write", "create", "delete", "submit", "cancel", "amend",
"print", "email", "report", "import", "export", "set_user_permissions")
def check_admin_or_system_manager(user=None):
if not user: user = frappe.session.user
if ("System Manager" not in frappe.get_roles(user)) and (user!="Administrator"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
def has_permission(doctype, ptype="read", doc=None, verbose=True, user=None):
"""check if user has permission"""
if not user: user = frappe.session.user
if frappe.is_table(doctype):
return True
meta = frappe.get_meta(doctype)
if ptype=="submit" and not cint(meta.is_submittable):
return False
if ptype=="import" and not cint(meta.allow_import):
return False
if user=="Administrator":
return True
role_permissions = get_role_permissions(meta, user=user)
if not role_permissions.get(ptype):
return False
if doc:
if isinstance(doc, basestring):
doc = frappe.get_doc(meta.name, doc)
if role_permissions["apply_user_permissions"].get(ptype):
if not user_has_permission(doc, verbose=verbose, user=user):
return False
if not has_controller_permissions(doc, ptype, user=user):
return False
return True
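# Illustrative call (hypothetical doctype and user): has_permission("ToDo", "write",
# doc="TODO-00001", user="test@example.com") returns True only when the user's roles
# grant "write" at permlevel 0 and user permissions / controller hooks also allow it.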
def get_doc_permissions(doc, verbose=False, user=None):
if not user: user = frappe.session.user
if frappe.is_table(doc.doctype):
return {"read":1, "write":1}
meta = frappe.get_meta(doc.doctype)
role_permissions = copy.deepcopy(get_role_permissions(meta, user=user))
if not cint(meta.is_submittable):
role_permissions["submit"] = 0
if not cint(meta.allow_import):
role_permissions["import"] = 0
if not user_has_permission(doc, verbose=verbose, user=user):
# no user permissions, switch off all user-level permissions
for ptype in role_permissions:
if role_permissions["apply_user_permissions"].get(ptype):
role_permissions[ptype] = 0
return role_permissions
def get_role_permissions(meta, user=None):
if not user: user = frappe.session.user
cache_key = (meta.name, user)
if not frappe.local.role_permissions.get(cache_key):
perms = frappe._dict({ "apply_user_permissions": {} })
user_roles = frappe.get_roles(user)
for p in meta.permissions:
if cint(p.permlevel)==0 and (p.role in user_roles):
for ptype in rights:
perms[ptype] = perms.get(ptype, 0) or cint(p.get(ptype))
if ptype != "set_user_permissions" and p.get(ptype):
perms["apply_user_permissions"][ptype] = perms["apply_user_permissions"].get(ptype, 1) and p.get("apply_user_permissions")
for key, value in perms.get("apply_user_permissions").items():
if not value:
del perms["apply_user_permissions"][key]
frappe.local.role_permissions[cache_key] = perms
return frappe.local.role_permissions[cache_key]
def user_has_permission(doc, verbose=True, user=None):
from frappe.defaults import get_user_permissions
user_permissions = get_user_permissions(user)
user_permissions_keys = user_permissions.keys()
def check_user_permission(d):
result = True
meta = frappe.get_meta(d.get("doctype"))
for df in meta.get_fields_to_check_permissions(user_permissions_keys):
if d.get(df.fieldname) and d.get(df.fieldname) not in user_permissions[df.options]:
result = False
if verbose:
msg = _("Not allowed to access {0} with {1} = {2}").format(df.options, _(df.label), d.get(df.fieldname))
if d.parentfield:
msg = "{doctype}, {row} #{idx}, ".format(doctype=_(d.doctype),
row=_("Row"), idx=d.idx) + msg
msgprint(msg)
return result
_user_has_permission = check_user_permission(doc)
for d in doc.get_all_children():
_user_has_permission = check_user_permission(d) and _user_has_permission
return _user_has_permission
def has_controller_permissions(doc, ptype, user=None):
if not user: user = frappe.session.user
for method in frappe.get_hooks("has_permission").get(doc.doctype, []):
if not frappe.call(frappe.get_attr(method), doc=doc, ptype=ptype, user=user):
return False
return True
def can_set_user_permissions(doctype, docname=None):
# System Manager can always set user permissions
if "System Manager" in frappe.get_roles():
return True
meta = frappe.get_meta(doctype)
# check if current user has read permission for docname
if docname and not has_permission(doctype, "read", docname):
return False
# check if current user has a role that can set permission
if get_role_permissions(meta).set_user_permissions!=1:
return False
return True
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
add_user_permission(doctype, name, user, with_message)
def add_user_permission(doctype, name, user, with_message=False):
if name not in frappe.defaults.get_user_permissions(user).get(doctype, []):
if not frappe.db.exists(doctype, name):
frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)
frappe.defaults.add_default(doctype, name, user, "User Permission")
elif with_message:
msgprint(_("Permission already set"))
def remove_user_permission(doctype, name, user, default_value_name=None):
frappe.defaults.clear_default(key=doctype, value=name, parent=user, parenttype="User Permission",
name=default_value_name)
def clear_user_permissions_for_doctype(doctype):
frappe.defaults.clear_default(parenttype="User Permission", key=doctype)
def can_import(doctype, raise_exception=False):
if not ("System Manager" in frappe.get_roles() or has_permission(doctype, "import")):
if raise_exception:
raise frappe.PermissionError("You are not allowed to import: {doctype}".format(doctype=doctype))
else:
return False
return True
def can_export(doctype, raise_exception=False):
if not ("System Manager" in frappe.get_roles() or has_permission(doctype, "export")):
if raise_exception:
raise frappe.PermissionError("You are not allowed to export: {doctype}".format(doctype=doctype))
else:
return False
return True
| 32.894737
| 128
| 0.7496
|
00f242f67158ae93e2e72316530265383bb364b3
| 1,022
|
py
|
Python
|
var/spack/repos/builtin/packages/pdf2svg/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/pdf2svg/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2019-07-30T10:12:28.000Z
|
2019-12-17T09:02:27.000Z
|
var/spack/repos/builtin/packages/pdf2svg/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5
|
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pdf2svg(AutotoolsPackage):
"""A simple PDF to SVG converter using the Poppler and Cairo libraries."""
homepage = "http://www.cityinthesky.co.uk/opensource/pdf2svg"
url = "https://github.com/dawbarton/pdf2svg/archive/v0.2.3.tar.gz"
version('0.2.3', sha256='4fb186070b3e7d33a51821e3307dce57300a062570d028feccd4e628d50dea8a')
version('0.2.2', sha256='e5f1d9b78821e44cd85379fb07f38a42f00bb2bde3743b95301ff8c0a5ae229a')
depends_on('pkgconfig@0.9.0:', type='build')
depends_on('cairo@1.2.6:')
depends_on('poppler@0.5.4:+glib')
# Note: the latest version of poppler requires glib 2.41+,
# but pdf2svg uses g_type_init, which is deprecated in glib 2.36+.
# At some point, we will need to force pdf2svg to use older
# versions of poppler and glib.
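    # Usage sketch (hypothetical spec): `spack install pdf2svg@0.2.3` resolves
    # cairo and poppler+glib through the depends_on declarations above.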
| 39.307692
| 95
| 0.73092
|
1ff5b8b8ee7884b5c4ad7b751efc86f5bd5d8d36
| 32,114
|
py
|
Python
|
tests/test_reusable_executor.py
|
mgorny/loky
|
6245bb2f7c16e283ced3d2ff25a9c9cc439a8867
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_reusable_executor.py
|
mgorny/loky
|
6245bb2f7c16e283ced3d2ff25a9c9cc439a8867
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_reusable_executor.py
|
mgorny/loky
|
6245bb2f7c16e283ced3d2ff25a9c9cc439a8867
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import gc
import ctypes
import psutil
import pytest
import warnings
import threading
from time import sleep
from multiprocessing import util, current_process
from pickle import PicklingError, UnpicklingError
from distutils.version import LooseVersion
import loky
from loky import cpu_count
from loky import get_reusable_executor
from loky.process_executor import _RemoteTraceback, TerminatedWorkerError
from loky.process_executor import BrokenProcessPool, ShutdownExecutorError
from loky.reusable_executor import _ReusablePoolExecutor
import cloudpickle
from ._executor_mixin import ReusableExecutorMixin
from .utils import TimingWrapper, id_sleep, check_python_subprocess_call
from .utils import filter_match
cloudpickle_version = LooseVersion(cloudpickle.__version__)
# Compat windows
if sys.platform == "win32":
from signal import SIGTERM as SIGKILL
libc = ctypes.cdll.msvcrt
else:
from signal import SIGKILL
from ctypes.util import find_library
libc = ctypes.CDLL(find_library("libc"))
try:
import numpy as np
except ImportError:
np = None
# Backward compat for python2 cPickle module
PICKLING_ERRORS = (PicklingError,)
try:
import cPickle
PICKLING_ERRORS += (cPickle.PicklingError,)
except ImportError:
pass
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if hasattr(mod, reg):
getattr(mod, reg).clear()
def wait_dead(worker, n_tries=1000, delay=0.001):
"""Wait for process pid to die"""
for i in range(n_tries):
if worker.exitcode is not None:
return
sleep(delay)
raise RuntimeError("Process %d failed to die for at least %0.3fs" %
(worker.pid, delay * n_tries))
def crash():
"""Induces a segfault"""
import faulthandler
faulthandler._sigsegv()
def exit():
"""Induces a sys exit with exitcode 0"""
sys.exit(0)
def c_exit(exitcode=0):
"""Induces a libc exit with exitcode 0"""
libc.exit(exitcode)
def sleep_then_check_pids_exist(arg):
"""Sleep for some time and the check if all the passed pids exist"""
time, pids = arg
sleep(time)
res = True
for p in pids:
res &= psutil.pid_exists(p)
return res
def kill_friend(pid, delay=0):
"""Function that send SIGKILL at process pid"""
sleep(delay)
try:
os.kill(pid, SIGKILL)
except (PermissionError, ProcessLookupError) as e:
if psutil.pid_exists(pid):
util.debug("Fail to kill an alive process?!?")
raise e
util.debug("process {} was already dead".format(pid))
def raise_error(etype=UnpicklingError, message=None):
"""Function that raises an Exception in process"""
raise etype(message)
def return_instance(cls):
"""Function that returns a instance of cls"""
return cls()
class SayWhenError(ValueError):
pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
def do_nothing(arg):
"""Function that return True, test passing argument"""
return True
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return exit, ()
class CExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
c_exit()
class CExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return c_exit, ()
class ErrorAtPickle(object):
"""Bad object that raises an error at pickling time."""
def __init__(self, fail=True):
self.fail = fail
def __reduce__(self):
if self.fail:
raise PicklingError("Error in pickle")
else:
return id, (42, )
class ErrorAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __init__(self, etype=UnpicklingError, message='the error message'):
self.etype = etype
self.message = message
def __reduce__(self):
return raise_error, (self.etype, self.message)
class CrashAtGCInWorker(object):
"""Bad object that triggers a segfault at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
crash()
class CExitAtGCInWorker(object):
"""Exit worker at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
c_exit()
class TestExecutorDeadLock(ReusableExecutorMixin):
crash_cases = [
        # Check problems occurring while pickling a task in the parent process
(id, (ExitAtPickle(),), PicklingError, None),
(id, (ErrorAtPickle(),), PicklingError, None),
        # Check problems occurring while unpickling a task on the workers
(id, (ExitAtUnpickle(),), BrokenProcessPool, r"SystemExit"),
(id, (CExitAtUnpickle(),), TerminatedWorkerError, r"EXIT\(0\)"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool, r"UnpicklingError"),
(id, (CrashAtUnpickle(),), TerminatedWorkerError, r"SIGSEGV"),
        # Check problems occurring during function execution on the workers
(crash, (), TerminatedWorkerError, r"SIGSEGV"),
(exit, (), SystemExit, None),
(c_exit, (), TerminatedWorkerError, r"EXIT\(0\)"),
(raise_error, (RuntimeError,), RuntimeError, None),
        # Check problems occurring while pickling a task result
        # on the workers
(return_instance, (CrashAtPickle,), TerminatedWorkerError, r"SIGSEGV"),
(return_instance, (ExitAtPickle,), SystemExit, None),
(return_instance, (CExitAtPickle,), TerminatedWorkerError,
r"EXIT\(0\)"),
(return_instance, (ErrorAtPickle,), PicklingError, None),
        # Check problems occurring while unpickling a task result in
        # the result_handler thread
(return_instance, (ExitAtUnpickle,), BrokenProcessPool, r"SystemExit"),
(return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
r"UnpicklingError"),
]
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_crashes(self, func, args, expected_err, match):
"""Test various reusable_executor crash handling"""
executor = get_reusable_executor(max_workers=2)
res = executor.submit(func, *args)
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
res.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_in_callback_submit_with_crash(self, func, args, expected_err,
match):
"""Test the recovery from callback crash"""
executor = get_reusable_executor(max_workers=2, timeout=12)
def in_callback_submit(future):
future2 = get_reusable_executor(
max_workers=2, timeout=12).submit(func, *args)
# Store the future of the job submitted in the callback to make it
# easy to introspect.
future.callback_future = future2
future.callback_done.set()
# Make sure the first submitted job last a bit to make sure that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.callback_done = threading.Event()
f.add_done_callback(in_callback_submit)
assert f.result() == 42
if not f.callback_done.wait(timeout=3):
raise AssertionError('callback not done before timeout')
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
f.callback_future.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
def test_callback_crash_on_submit(self):
"""Errors in the callback execution directly in queue manager thread.
This case can break the process executor and we want to make sure
that we can detect the issue and recover by calling
get_reusable_executor.
"""
executor = get_reusable_executor(max_workers=2)
# Make sure the first submitted job last a bit to make sure that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: exit())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.1).result() == 42
executor = get_reusable_executor(max_workers=2)
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: raise_error())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.).result() == 42
def test_deadlock_kill(self):
"""Test deadlock recovery for reusable_executor"""
executor = get_reusable_executor(max_workers=1, timeout=None)
# trigger the spawning of the worker process
executor.submit(sleep, 0.1)
worker = next(iter(executor._processes.values()))
with pytest.warns(UserWarning) as recorded_warnings:
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
os.kill(worker.pid, SIGKILL)
wait_dead(worker)
# wait for the executor to be able to detect the issue and set itself
# in broken state:
sleep(.5)
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
executor.submit(id_sleep, 42, 0.1).result()
# the get_reusable_executor factory should be able to create a new
# working instance
executor = get_reusable_executor(max_workers=2, timeout=None)
assert executor.submit(id_sleep, 42, 0.).result() == 42
@pytest.mark.parametrize("n_proc", [1, 2, 5, 13])
def test_crash_races(self, n_proc):
"""Test the race conditions in reusable_executor crash handling"""
if (sys.platform == 'win32' and sys.version_info >= (3, 8)
and n_proc > 5):
pytest.skip(
"On win32, the paging size can be too small to import numpy "
"multiple times in the sub-processes (imported when loading "
"this file). Skipping while no better solution is found. See "
"https://github.com/joblib/loky/issues/279 for more details."
)
        # Test for an external crash signal coming from a neighbor
        # with various race setups
executor = get_reusable_executor(max_workers=n_proc, timeout=None)
executor.map(id, range(n_proc)) # trigger the creation of the workers
pids = list(executor._processes.keys())
assert len(pids) == n_proc
assert None not in pids
res = executor.map(sleep_then_check_pids_exist,
[(.0001 * (j // 2), pids)
for j in range(2 * n_proc)])
assert all(list(res))
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
res = executor.map(kill_friend, pids[::-1])
list(res)
def test_imap_handle_iterable_exception(self):
        # How errors raised by the imap input generator are caught depends on
        # the Python build
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(10, 3),
chunksize=1)
# SayWhenError seen at start of problematic chunk's results
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=2)
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=4)
def test_queue_full_deadlock(self):
executor = get_reusable_executor(max_workers=1)
fs_fail = [executor.submit(do_nothing, ErrorAtPickle(True))
for i in range(100)]
fs = [executor.submit(do_nothing, ErrorAtPickle(False))
for i in range(100)]
with pytest.raises(PicklingError):
fs_fail[99].result()
assert fs[99].result()
def test_informative_error_when_fail_at_unpickle(self):
executor = get_reusable_executor(max_workers=2)
obj = ErrorAtUnpickle(RuntimeError, 'message raised in child')
f = executor.submit(id, obj)
with pytest.raises(BrokenProcessPool) as exc_info:
f.result()
assert 'RuntimeError' in str(exc_info.value.__cause__)
assert 'message raised in child' in str(exc_info.value.__cause__)
@pytest.mark.skipif(np is None, reason="requires numpy")
def test_numpy_dot_parent_and_child_no_freeze(self):
"""Test that no freeze happens in child process when numpy's thread
pool is started in the parent.
"""
a = np.random.randn(1000, 1000)
np.dot(a, a) # trigger the thread pool init in the parent process
executor = get_reusable_executor(max_workers=2)
executor.submit(np.dot, a, a).result()
executor.shutdown(wait=True)
class TestTerminateExecutor(ReusableExecutorMixin):
def test_shutdown_kill(self):
"""Test reusable_executor termination handling"""
from itertools import repeat
executor = get_reusable_executor(max_workers=5)
res1 = executor.map(id_sleep, range(100), repeat(.001))
res2 = executor.map(id_sleep, range(100), repeat(1))
assert list(res1) == list(range(100))
shutdown = TimingWrapper(executor.shutdown)
shutdown(wait=True, kill_workers=True)
assert shutdown.elapsed < 5
        # We should get an error as the executor was shut down before we fetched
        # all the results from the long running operation.
with pytest.raises(ShutdownExecutorError):
list(res2)
def test_shutdown_deadlock(self):
"""Test recovery if killed after resize call"""
# Test the executor.shutdown call do not cause deadlock
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # start the worker processes
executor.submit(kill_friend, (next(iter(executor._processes.keys())),
.0))
sleep(.01)
executor.shutdown(wait=True)
def test_kill_workers_on_new_options(self):
# submit a long running job with no timeout
executor = get_reusable_executor(max_workers=2, timeout=None)
f = executor.submit(sleep, 10000)
# change the constructor parameter while requesting not to wait
# for the long running task to complete (the workers will get
# shutdown forcibly)
executor = get_reusable_executor(max_workers=2, timeout=5,
kill_workers=True)
with pytest.raises(ShutdownExecutorError):
f.result()
f2 = executor.submit(id_sleep, 42, 0)
assert f2.result() == 42
@pytest.mark.parametrize("bad_object, match", [
(CrashAtGCInWorker, r"SIGSEGV"), (CExitAtGCInWorker, r"EXIT\(0\)")])
def test_call_item_gc_crash_or_exit(self, bad_object, match):
executor = get_reusable_executor(max_workers=1)
bad_object = bad_object()
f = executor.submit(id, bad_object)
# The worker will successfully send back its result to the master
# process before crashing so this future can always be collected:
assert f.result() is not None
# The executor should automatically detect that the worker has crashed
# when processing subsequently dispatched tasks:
with pytest.raises(TerminatedWorkerError, match=filter_match(match)):
executor.submit(gc.collect).result()
for r in executor.map(sleep, [.1] * 100):
pass
class TestResizeExecutor(ReusableExecutorMixin):
def test_reusable_executor_resize(self):
"""Test reusable_executor resizing"""
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2))
        # Decreasing the executor size should drop a single process and keep one
        # of the old ones, as it is still in good shape. The resize should not
        # occur while there are ongoing jobs.
pids = list(executor._processes.keys())
res1 = executor.submit(sleep_then_check_pids_exist, (.3, pids))
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(w) == 1
expected_msg = "Trying to resize an executor with running jobs"
assert expected_msg in str(w[0].message)
assert res1.result(), ("Resize should wait for current processes "
" to finish")
assert len(executor._processes) == 1
assert next(iter(executor._processes.keys())) in pids
        # Requesting the same number of processes should not impact the executor
        # nor kill the processes
old_pid = next(iter((executor._processes.keys())))
unchanged_executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(unchanged_executor._processes) == 1
assert unchanged_executor is executor
assert next(iter(unchanged_executor._processes.keys())) == old_pid
# Growing the executor again should add a single process and keep the
# old one as it is still in a good shape
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(executor._processes) == 2
assert old_pid in list(executor._processes.keys())
@pytest.mark.parametrize("reuse", [True, False])
@pytest.mark.parametrize("kill_workers", [True, False])
def test_reusable_executor_resize_many_times(self, kill_workers, reuse):
# Tentative non-regression test for a deadlock when shutting down
# the workers of an executor prior to resizing it.
kwargs = {
'timeout': None,
'kill_workers': kill_workers,
'reuse': reuse,
}
with warnings.catch_warnings(record=True):
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
for size in [12, 2, 1, 12, 6, 1, 8, 5]:
executor = get_reusable_executor(max_workers=size, **kwargs)
executor.map(sleep, [0.01] * 6)
# Do not wait for the tasks to complete.
executor.shutdown()
def test_kill_after_resize_call(self):
"""Test recovery if killed after resize call"""
        # Test the executor being resized before a kill arrives
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # trigger the creation of worker processes
pid = next(iter(executor._processes.keys()))
executor.submit(kill_friend, (pid, .1))
with pytest.warns(UserWarning) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
assert executor.submit(id_sleep, 42, 0.).result() == 42
executor.shutdown()
def test_resize_after_timeout(self):
with warnings.catch_warnings(record=True) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=8, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
if len(recorded_warnings) > 1:
expected_msg = 'A worker stopped'
assert expected_msg in recorded_warnings[0].message.args[0]
class TestGetReusableExecutor(ReusableExecutorMixin):
def test_invalid_process_number(self):
"""Raise error on invalid process number"""
with pytest.raises(ValueError):
get_reusable_executor(max_workers=0)
with pytest.raises(ValueError):
get_reusable_executor(max_workers=-1)
executor = get_reusable_executor()
with pytest.raises(ValueError):
executor._resize(max_workers=None)
@pytest.mark.skipif(sys.platform == "win32", reason="No fork on windows")
@pytest.mark.skipif(sys.version_info <= (3, 4),
reason="No context before 3.4")
def test_invalid_context(self):
"""Raise error on invalid context"""
with pytest.warns(UserWarning):
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context="fork")
def test_pass_start_method_name_as_context(self):
executor = get_reusable_executor(max_workers=2, context='loky')
assert executor.submit(id, 42).result() >= 0
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context='bad_start_method')
def test_interactively_defined_executor_no_main(self):
# check that the init_main_module parameter works properly
# when using -c option, we don't need the safeguard if __name__ ..
# and thus test LokyProcess without the extra argument. For running
# a script, it is necessary to use init_main_module=False.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor()
e.submit(id, 42).result()
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_reused_flag(self):
executor, _ = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert reused
executor.shutdown(kill_workers=True)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert not reused
@pytest.mark.xfail(cloudpickle_version >= LooseVersion("0.5.4") and
cloudpickle_version <= LooseVersion("0.7.0"),
reason="Known issue in cloudpickle")
# https://github.com/cloudpipe/cloudpickle/pull/240
def test_interactively_defined_nested_functions(self):
# Check that it's possible to call nested interactively defined
# functions and furthermore that changing the code interactively
# is taken into account by the single worker process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the children process:
e.submit(id, 42).result()
# Test that it's possible to call interactively defined, nested
# functions:
def inner_func(x):
return -x
def outer_func(x):
return inner_func(x)
assert e.submit(outer_func, 1).result() == outer_func(1) == -1
# Test that changes to the definition of the inner function are
# taken into account in subsequent calls to the outer function.
def inner_func(x):
return x
assert e.submit(outer_func, 1).result() == outer_func(1) == 1
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_interactively_defined_recursive_functions(self):
# Check that it's possible to call a recursive function defined
# in a closure.
        # Also check that calling several functions that stem from the same
        # factory with different closure states gives the expected results:
# the function definitions should not collapse in the single worker
# process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the children process:
e.submit(id, 42).result()
def make_func(seed):
def func(x):
if x <= 0:
return seed
return func(x - 1) + 1
return func
func = make_func(0)
assert e.submit(func, 5).result() == func(5) == 5
func = make_func(1)
assert e.submit(func, 5).result() == func(5) == 6
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_compat_with_concurrent_futures_exception(self):
        # It should be possible to use a loky process pool executor as a drop-in
# replacement for a ProcessPoolExecutor, including when catching
# exceptions:
concurrent = pytest.importorskip('concurrent')
from concurrent.futures.process import BrokenProcessPool as BPPExc
with pytest.raises(BPPExc):
get_reusable_executor(max_workers=2).submit(crash).result()
e = get_reusable_executor(max_workers=2)
f = e.submit(id, 42)
# Ensure that loky.Future are compatible with concurrent.futures
# (see #155)
assert isinstance(f, concurrent.futures.Future)
(done, running) = concurrent.futures.wait([f], timeout=15)
assert len(running) == 0
thread_configurations = [
('constant', 'clean_start'),
('constant', 'broken_start'),
('varying', 'clean_start'),
('varying', 'broken_start'),
]
@pytest.mark.parametrize("workers, executor_state", thread_configurations)
def test_reusable_executor_thread_safety(self, workers, executor_state):
if executor_state == 'clean_start':
            # Create a new shared executor and ensure that its workers are
# ready:
get_reusable_executor(reuse=False).submit(id, 42).result()
else:
# Break the shared executor before launching the threads:
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGSEGV")):
executor = get_reusable_executor(reuse=False)
executor.submit(return_instance, CrashAtPickle).result()
def helper_func(output_collector, max_workers=2, n_outer_steps=5,
n_inner_steps=10):
with warnings.catch_warnings(): # ignore resize warnings
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=max_workers)
for i in range(n_outer_steps):
results = executor.map(
lambda x: x ** 2, range(n_inner_steps))
expected_result = [x ** 2 for x in range(n_inner_steps)]
assert list(results) == expected_result
output_collector.append('ok')
if workers == 'constant':
max_workers = [2] * 10
else:
max_workers = [(i % 4) + 1 for i in range(10)]
# Use the same executor with the same number of workers concurrently
# in different threads:
output_collector = []
threads = [threading.Thread(
target=helper_func, args=(output_collector, w),
name='test_thread_%02d_max_workers_%d' % (i, w))
for i, w in enumerate(max_workers)]
with warnings.catch_warnings(record=True):
for t in threads:
t.start()
for t in threads:
t.join()
assert output_collector == ['ok'] * len(threads)
def test_reusable_executor_reuse_true(self):
executor = get_reusable_executor(max_workers=3, timeout=42)
executor.submit(id, 42).result()
assert len(executor._processes) == 3
assert executor._timeout == 42
executor2 = get_reusable_executor(reuse=True)
executor2.submit(id, 42).result()
assert len(executor2._processes) == 3
assert executor2._timeout == 42
assert executor2 is executor
executor3 = get_reusable_executor()
executor3.submit(id, 42).result()
assert len(executor3._processes) == cpu_count()
assert executor3._timeout == 10
assert executor3 is not executor
executor4 = get_reusable_executor()
assert executor4 is executor3
class TestExecutorInitializer(ReusableExecutorMixin):
def _initializer(self, x):
loky._initialized_state = x
def _test_initializer(self, delay=0):
sleep(delay)
return getattr(loky, "_initialized_state", "uninitialized")
def test_reusable_initializer(self):
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=('done',))
assert executor.submit(self._test_initializer).result() == 'done'
        # when the initializer changes, the executor is re-spawned
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=(42,))
assert executor.submit(self._test_initializer).result() == 42
        # With reuse=True, the executor uses the same initializer
executor = get_reusable_executor(max_workers=4, reuse=True)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 42
# With reuse='auto', the initializer is not used anymore
executor = get_reusable_executor(max_workers=4)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 'uninitialized'
| 38.691566
| 79
| 0.641153
|
f318e07fec7dd9ba97163d5eed51a456e10e0d9c
| 2,316
|
py
|
Python
|
facebook_business/adobjects/adruleexecutionspec.py
|
enricapq/facebook-python-business-sdk
|
49c569ac5cf812b1bcb533520c35896b0436fa4c
|
[
"CNRI-Python"
] | null | null | null |
facebook_business/adobjects/adruleexecutionspec.py
|
enricapq/facebook-python-business-sdk
|
49c569ac5cf812b1bcb533520c35896b0436fa4c
|
[
"CNRI-Python"
] | null | null | null |
facebook_business/adobjects/adruleexecutionspec.py
|
enricapq/facebook-python-business-sdk
|
49c569ac5cf812b1bcb533520c35896b0436fa4c
|
[
"CNRI-Python"
] | 1
|
2020-07-27T16:34:58.000Z
|
2020-07-27T16:34:58.000Z
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdRuleExecutionSpec(
AbstractObject,
):
def __init__(self, api=None):
super(AdRuleExecutionSpec, self).__init__()
self._isAdRuleExecutionSpec = True
self._api = api
class Field(AbstractObject.Field):
execution_options = 'execution_options'
execution_type = 'execution_type'
class ExecutionType:
change_bid = 'CHANGE_BID'
change_budget = 'CHANGE_BUDGET'
notification = 'NOTIFICATION'
pause = 'PAUSE'
ping_endpoint = 'PING_ENDPOINT'
rebalance_budget = 'REBALANCE_BUDGET'
rotate = 'ROTATE'
unpause = 'UNPAUSE'
_field_types = {
'execution_options': 'list<AdRuleExecutionOptions>',
'execution_type': 'ExecutionType',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['ExecutionType'] = AdRuleExecutionSpec.ExecutionType.__dict__.values()
return field_enum_info
| 35.630769
| 94
| 0.729275
|
88abbe347f91635197df385aa2d9642d30513489
| 397
|
py
|
Python
|
appengine/handlers/deferred.py
|
meedan/montage
|
4da0116931edc9af91f226876330645837dc9bcc
|
[
"Apache-2.0"
] | 6
|
2018-07-31T16:48:07.000Z
|
2020-02-01T03:17:51.000Z
|
appengine/handlers/deferred.py
|
meedan/montage
|
4da0116931edc9af91f226876330645837dc9bcc
|
[
"Apache-2.0"
] | 41
|
2018-08-07T16:43:07.000Z
|
2020-06-05T18:54:50.000Z
|
appengine/handlers/deferred.py
|
meedan/montage
|
4da0116931edc9af91f226876330645837dc9bcc
|
[
"Apache-2.0"
] | 1
|
2018-08-07T16:40:18.000Z
|
2018-08-07T16:40:18.000Z
|
from __future__ import absolute_import
import os
import fix_paths
from greenday_core.utils import get_settings_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE", get_settings_name())
import django
django.setup()
from deferred_manager.handler import application as deferred_manager_app
from .middleware import django_setup_teardown
application = django_setup_teardown(deferred_manager_app)
| 23.352941
| 72
| 0.86398
|
57edad9988a7149327a9f469e1acb5fb20fcbddd
| 14,267
|
py
|
Python
|
fuse/eval/metrics/libs/classification.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
fuse/eval/metrics/libs/classification.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
fuse/eval/metrics/libs/classification.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import pandas as pd
import numpy as np
from sklearn import metrics
import sklearn
import matplotlib.pyplot as plt
class MetricsLibClass:
@staticmethod
def auc_roc(pred: Sequence[Union[np.ndarray, float]],
target: Sequence[Union[np.ndarray, int]],
sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
pos_class_index: int = -1,
max_fpr: Optional[float] = None) -> float:
"""
Compute auc roc (Receiver operating characteristic) score using sklearn (one vs rest)
:param pred: prediction array per sample. Each element shape [num_classes]
:param target: target per sample. Each element is an integer in range [0 - num_classes)
:param sample_weight: Optional - weight per sample for a weighted auc. Each element is float in range [0-1]
:param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification
:param max_fpr: float > 0 and <= 1, default=None
If not ``None``, the standardized partial AUC over the range [0, max_fpr] is returned.
:return auc Receiver operating characteristic score
"""
if not isinstance(pred[0], np.ndarray):
pred = [np.array(p) for p in pred]
pos_class_index = 1
y_score = np.asarray(pred)
else:
if pos_class_index < 0:
pos_class_index = pred[0].shape[0] - 1
y_score = np.asarray(pred)[:, pos_class_index]
return metrics.roc_auc_score(y_score=y_score,
y_true=np.asarray(target) == pos_class_index,
sample_weight=sample_weight,
max_fpr=max_fpr)
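    # Usage sketch (illustrative values, not from the original file): predictions
    # are per-sample probability vectors and `pos_class_index` picks the
    # one-vs-rest positive class.
    #
    #     pred = [np.array([0.1, 0.9]), np.array([0.8, 0.2]), np.array([0.3, 0.7])]
    #     MetricsLibClass.auc_roc(pred, target=[1, 0, 1], pos_class_index=1)  # -> 1.0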
@staticmethod
def roc_curve(
pred: Sequence[Union[np.ndarray, float]],
target: Sequence[Union[np.ndarray, int]],
class_names: Sequence[str],
sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
output_filename: Optional[str] = None) -> Dict:
"""
Multi class version for roc curve
:param pred: List of arrays of shape [NUM_CLASSES]
:param target: List of arrays specifying the target class per sample
        :return: dictionary mapping each class name to its "fpr", "tpr" and "auc"; if output_filename is given, the ROC curve plot is also saved to that file
"""
# if class_names not specified assume binary classification
if class_names is None:
class_names = [None, "Positive"]
# extract info for the plot
results = {}
for cls, cls_name in enumerate(class_names):
if cls_name is None:
continue
fpr, tpr, _ = sklearn.metrics.roc_curve(target, np.array(pred)[:, cls], sample_weight=sample_weight, pos_label=cls)
auc = sklearn.metrics.auc(fpr, tpr)
results[cls_name] = {"fpr": fpr, "tpr": tpr, "auc": auc}
# display
if output_filename is not None:
for cls_name, cls_res in results.items():
plt.plot(cls_res["fpr"], cls_res["tpr"], label=f'{cls_name}(auc={cls_res["auc"]:0.2f})')
plt.title("ROC curve")
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
plt.savefig(output_filename)
plt.close()
return results
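    # Usage sketch (illustrative values): returns per-class fpr/tpr/auc; pass
    # `output_filename` to also save the matplotlib figure.
    #
    #     pred = [np.array([0.1, 0.9]), np.array([0.8, 0.2])]
    #     res = MetricsLibClass.roc_curve(pred, target=[1, 0], class_names=[None, "Positive"])
    #     res["Positive"]["auc"]  # -> 1.0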
@staticmethod
def auc_pr(pred: Sequence[Union[np.ndarray, float]],
target: Sequence[Union[np.ndarray, int]],
sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
pos_class_index: int = -1) -> float:
"""
Compute auc pr (precision-recall) score using sklearn (one vs rest)
:param pred: prediction array per sample. Each element shape [num_classes]
:param target: target per sample. Each element is an integer in range [0 - num_classes)
:param sample_weight: Optional - weight per sample for a weighted auc. Each element is float in range [0-1]
:param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification
:return auc precision recall score
"""
if not isinstance(pred[0], np.ndarray):
pred = [np.array(p) for p in pred]
pos_class_index = 1
y_score = np.asarray(pred)
else:
if pos_class_index < 0:
pos_class_index = pred[0].shape[0] - 1
y_score = np.asarray(pred)[:, pos_class_index]
precision, recall, _ = metrics.precision_recall_curve(probas_pred=y_score,
y_true=np.asarray(target) == pos_class_index,
sample_weight=sample_weight)
return metrics.auc(recall, precision)
@staticmethod
def accuracy(pred: Sequence[Union[np.ndarray, int]],
target: Sequence[Union[np.ndarray, int]],
sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None):
"""
Compute accuracy score
:param pred: class prediction. Each element is an integer in range [0 - num_classes)
:param target: the target class. Each element is an integer in range [0 - num_classes)
:param sample_weight: Optional - weight per sample for a weighted score. Each element is float in range [0-1]
:return: accuracy score
"""
pred = np.array(pred)
target = np.array(target)
return metrics.accuracy_score(target, pred, sample_weight=sample_weight)
@staticmethod
def confusion_metrics(pred: Sequence[Union[np.ndarray, int]],
target: Sequence[Union[np.ndarray, int]],
pos_class_index: int = 1,
metrics:Sequence[str] = tuple(),
sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None) -> Dict[str, float]:
"""
        Compute metrics derived from one-vs-rest confusion matrix such as 'sensitivity', 'recall', 'tpr', 'specificity', 'selectivity', 'tnr', 'precision', 'ppv', 'f1'
Assuming that there are positive cases and negative cases in targets
:param pred: class prediction. Each element is an integer in range [0 - num_classes)
:param target: the target class. Each element is an integer in range [0 - num_classes)
:param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification
        :param metrics: required metrics names, options: 'sensitivity', 'recall', 'tpr', 'specificity', 'selectivity', 'tnr', 'precision', 'ppv', 'f1'
:param sample_weight: Optional - weight per sample for a weighted score. Each element is float in range [0-1]
:return: dictionary, including the computed values for the required metrics.
format: {"tp": <>, "tn": <>, "fp": <>, "fn": <>, <required metric name>: <>}
"""
pred = np.array(pred)
target = np.array(target)
class_target_t = np.where(target == pos_class_index, 1, 0)
class_pred_t = np.where(pred == pos_class_index, 1, 0)
if sample_weight is None:
sample_weight = np.ones_like(class_target_t)
res = {}
tp = (np.logical_and(class_target_t, class_pred_t)*sample_weight).sum()
fn = (np.logical_and(class_target_t, np.logical_not(class_pred_t))*sample_weight).sum()
fp = (np.logical_and(np.logical_not(class_target_t), class_pred_t)*sample_weight).sum()
tn = (np.logical_and(np.logical_not(class_target_t), np.logical_not(class_pred_t))*sample_weight).sum()
for metric in metrics:
if metric in ['sensitivity', 'recall', 'tpr']:
res[metric] = tp / (tp + fn)
elif metric in ['specificity', 'selectivity', 'tnr']:
                res[metric] = tn / (tn + fp)
elif metric in ['precision', 'ppv']:
if tp + fp != 0:
res[metric] = tp / (tp + fp)
else:
res[metric] = 0
elif metric in ['f1']:
res[metric] = 2 * tp / (2 * tp + fp + fn)
elif metric in ["matrix"]:
res["tp"] = tp
res["fn"] = fn
res["fp"] = fp
res["tn"] = tn
else:
raise Exception(f'unknown metric {metric}')
return res
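    # Usage sketch (illustrative values): several derived metrics in one call.
    #
    #     MetricsLibClass.confusion_metrics(pred=[1, 0, 1, 1], target=[1, 0, 0, 1],
    #                                       pos_class_index=1,
    #                                       metrics=('sensitivity', 'precision', 'f1'))
    #     # -> {'sensitivity': 1.0, 'precision': 0.666..., 'f1': 0.8}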
@staticmethod
def confusion_matrix(cls_pred: Sequence[int], target :Sequence[int], class_names: Sequence[str], sample_weight : Optional[Sequence[float]] = None) -> Dict[str, pd.DataFrame]:
"""
Calculates Confusion Matrix (multi class version)
:param cls_pred: sequence of class prediction
:param target: sequence of labels
:param class_names: string name per class
:param sample_weight: optional, weight per sample.
:return: {"count": <confusion matrix>, "percent" : <confusion matrix - percent>)
Confusion matrix whose i-th row and j-th column entry indicates the number of samples with true label being i-th class and predicted label being j-th class.
"""
conf_matrix = sklearn.metrics.confusion_matrix(y_true=target, y_pred=cls_pred, sample_weight=sample_weight)
conf_matrix_count = pd.DataFrame(conf_matrix, columns=class_names, index=class_names)
conf_matrix_total = conf_matrix.sum(axis=1)
conf_matrix_count["total"] = conf_matrix_total
conf_matrix_percent = pd.DataFrame(conf_matrix / conf_matrix_total[:, None], columns=class_names, index=class_names)
return {"count": conf_matrix_count, "percent": conf_matrix_percent}
@staticmethod
def multi_class_bs(pred: np.ndarray, target: np.ndarray) -> float:
"""
Brier Score:
bs = 1/N * SUM_n SUM_c (pred_{n,c} - target_{n,c})^2
:param pred: probability score. Expected Shape [N, C]
:param target: target class (int) per sample. Expected Shape [N]
"""
# create one hot vector
target_one_hot = np.zeros_like(pred)
target_one_hot[np.arange(target_one_hot.shape[0]), target] = 1
return float(np.mean(np.sum((pred - target_one_hot) ** 2, axis=1)))
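    # Worked example (illustrative values) of the formula above: with
    # pred = [[0.8, 0.2], [0.3, 0.7]] and target = [0, 1],
    # bs = mean((0.8-1)^2 + (0.2-0)^2, (0.3-0)^2 + (0.7-1)^2) = mean(0.08, 0.18) = 0.13
    #
    #     MetricsLibClass.multi_class_bs(np.array([[0.8, 0.2], [0.3, 0.7]]),
    #                                    np.array([0, 1]))  # -> 0.13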
@staticmethod
def multi_class_bss(pred: Sequence[np.ndarray], target: Sequence[np.ndarray]) -> float:
"""
Brier Skill Score:
bss = 1 - bs / bs_{ref}
        bs_{ref} will be computed for a model that makes predictions according to the prevalence of each class in the dataset
:param pred: probability score. Expected Shape [N, C]
:param target: target class (int) per sample. Expected Shape [N]
"""
if isinstance(pred[0], np.ndarray) and pred[0].shape[0] > 1:
pred = np.array(pred)
else:
# binary case
pred = np.array(pred)
pred = np.stack((1-pred, pred), axis=-1)
target = np.array(target)
# BS
bs = MetricsLibClass.multi_class_bs(pred, target)
# no skill BS
no_skill_prediction = [(target == target_cls).sum() / target.shape[0] for target_cls in
range(pred.shape[-1])]
no_skill_predictions = np.tile(np.array(no_skill_prediction), (pred.shape[0], 1))
bs_ref = MetricsLibClass.multi_class_bs(no_skill_predictions, target)
return 1.0 - bs / bs_ref
@staticmethod
def convert_probabilities_to_class(pred: Sequence[Union[np.ndarray, float]], operation_point: Union[float, Sequence[Tuple[int, float]]]) -> np.array:
"""
convert probabilities to class prediction
:param pred: sequence of numpy arrays / floats of shape [NUM_CLASSES]
:param operation_point: list of tuples (class_idx, threshold) or empty sequence for argmax
:return: array of class predictions
"""
if isinstance(pred[0], np.ndarray) and pred[0].shape[0] > 1:
pred = np.array(pred)
else:
# binary case
pred = np.array(pred)
pred = np.stack((1-pred, pred), axis=-1)
# if no threshold specified, simply apply argmax
if operation_point is None or (isinstance(operation_point, Sequence) and len(operation_point) == 0):
return np.argmax(pred, -1)
# binary operation point
if isinstance(operation_point, float):
if pred[0].shape[0] == 2:
return np.where(pred[:, 1] > operation_point, 1, 0)
elif pred[0].shape[0] == 1:
return np.where(pred > operation_point, 1, 0)
else:
raise Exception(f"Error - got single float as an operation point for multiclass prediction")
# convert according to thresholds
output_class = np.array([-1 for x in range(len(pred))])
for thr in operation_point:
class_idx = thr[0]
class_thr = thr[1]
# argmax
            if class_idx == "argmax":
                output_class[output_class == -1] = np.argmax(pred, -1)[output_class == -1]
                continue
            # among all the samples not already predicted, set the ones that cross the threshold with this class
target_idx = np.argwhere(np.logical_and(pred[:, class_idx] > class_thr, output_class == -1))
output_class[target_idx] = class_idx
return output_class
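    # Usage sketch (illustrative values): argmax vs. an explicit binary threshold.
    #
    #     pred = [np.array([0.2, 0.8]), np.array([0.7, 0.3])]
    #     MetricsLibClass.convert_probabilities_to_class(pred, None)  # -> array([1, 0])
    #     MetricsLibClass.convert_probabilities_to_class(pred, 0.9)   # -> array([0, 0])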
| 47.085809
| 178
| 0.605103
|
a40677f39d4298201fb6d7dd372e94382ff19f56
| 1,652
|
py
|
Python
|
locallibrary/locallibrary/urls.py
|
Supernaturaal/django_local_library-
|
6ebd2d17060012b074809ce1a0a1d95a72937ad5
|
[
"Apache-2.0"
] | null | null | null |
locallibrary/locallibrary/urls.py
|
Supernaturaal/django_local_library-
|
6ebd2d17060012b074809ce1a0a1d95a72937ad5
|
[
"Apache-2.0"
] | null | null | null |
locallibrary/locallibrary/urls.py
|
Supernaturaal/django_local_library-
|
6ebd2d17060012b074809ce1a0a1d95a72937ad5
|
[
"Apache-2.0"
] | null | null | null |
"""
locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path
from django.contrib import admin
urlpatterns = [
path('admin/', admin.site.urls),
]
# Use include() to add paths from the catalog application
from django.urls import include
from django.urls import path
urlpatterns += [
path('catalog/', include('catalog.urls')),
]
# Add a URL mapping to redirect requests from the root URL to the application URL
from django.views.generic import RedirectView
urlpatterns += [
path('', RedirectView.as_view(url='/catalog/', permanent=True)),
]
# Use static() to add URL mappings to serve static files
# During development only
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#Add Django site authentication urls (for login, logout, password management)
urlpatterns += [
path('accounts/', include('django.contrib.auth.urls')),
]
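# Example of the "Function views" pattern described in the module docstring
# above (hypothetical view name, shown for illustration only):
#
#     from catalog import views
#     urlpatterns += [
#         path('books/', views.book_list, name='book-list'),
#     ]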
| 35.913043
| 94
| 0.720339
|
3cece2e8c40520ea24e8043964fb2b1113fa1a2f
| 4,234
|
py
|
Python
|
Raspberry_Pi_Animated_Gif_Player/animatedgif.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
Raspberry_Pi_Animated_Gif_Player/animatedgif.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
Raspberry_Pi_Animated_Gif_Player/animatedgif.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
import os
import time
from PIL import Image, ImageOps
# pylint: disable=too-few-public-methods
class Frame:
def __init__(self, duration=0):
self.duration = duration
self.image = None
# pylint: enable=too-few-public-methods
class AnimatedGif:
def __init__(self, display, include_delays=True, folder=None):
self._frame_count = 0
self._loop = 0
self._index = 0
self._duration = 0
self._gif_files = []
self._frames = []
self._running = True
self.display = display
self.include_delays = include_delays
if folder is not None:
self.load_files(folder)
self.run()
def advance(self):
self._index = (self._index + 1) % len(self._gif_files)
def back(self):
self._index = (self._index - 1 + len(self._gif_files)) % len(self._gif_files)
def load_files(self, folder):
gif_files = [f for f in os.listdir(folder) if f.endswith(".gif")]
gif_folder = folder
        if gif_folder[-1] != "/":
gif_folder += "/"
for gif_file in gif_files:
image = Image.open(gif_folder + gif_file)
# Only add animated Gifs
if image.is_animated:
self._gif_files.append(gif_folder + gif_file)
print("Found", self._gif_files)
if not self._gif_files:
print("No Gif files found in current folder")
exit() # pylint: disable=consider-using-sys-exit
def preload(self):
image = Image.open(self._gif_files[self._index])
print("Loading {}...".format(self._gif_files[self._index]))
if "duration" in image.info:
self._duration = image.info["duration"]
else:
self._duration = 0
if "loop" in image.info:
self._loop = image.info["loop"]
else:
self._loop = 1
self._frame_count = image.n_frames
self._frames.clear()
for frame in range(self._frame_count):
image.seek(frame)
# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
frame_object = Frame(duration=self._duration)
if "duration" in image.info:
frame_object.duration = image.info["duration"]
frame_object.image = ImageOps.pad( # pylint: disable=no-member
image.convert("RGB"),
(self._width, self._height),
method=Image.NEAREST,
color=(0, 0, 0),
centering=(0.5, 0.5),
)
self._frames.append(frame_object)
def play(self):
self.preload()
current_frame = 0
last_action = None
# Check if we have loaded any files first
if not self._gif_files:
print("There are no Gif Images loaded to Play")
return False
self.update_display(self._frames[current_frame].image)
while self._running:
action = self.get_next_value()
if action:
if not last_action:
last_action = action
if action == "click":
self.advance()
return False
elif int(action) < int(last_action):
current_frame -= 1
else:
current_frame += 1
current_frame %= self._frame_count
frame_object = self._frames[current_frame]
start_time = time.monotonic()
self.update_display(frame_object.image)
if self.include_delays:
remaining_delay = frame_object.duration / 1000 - (
time.monotonic() - start_time
)
if remaining_delay > 0:
time.sleep(remaining_delay)
last_action = action
if self._loop == 1:
return True
if self._loop > 0:
self._loop -= 1
def run(self):
while self._running:
auto_advance = self.play()
if auto_advance:
self.advance()
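# Illustrative sketch (assumption, not part of the original guide code):
# AnimatedGif expects a subclass to provide _width/_height, update_display()
# and get_next_value(). The bodies below are placeholders, not a real display API.
class ExampleGifPlayer(AnimatedGif):
    def __init__(self, display, folder=None):
        self._width = 240   # assumed panel width in pixels
        self._height = 320  # assumed panel height in pixels
        super().__init__(display, folder=folder)

    def update_display(self, image):
        # Placeholder: push the padded PIL image to the actual display here.
        self.display.image(image)

    def get_next_value(self):
        # Placeholder: return a knob/encoder reading, "click", or None.
        return None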
| 34.422764
| 85
| 0.536845
|
c4e8ed451894ee31d1f39e6a1eb56c71d5112a68
| 48,431
|
py
|
Python
|
cclib/parser/molcasparser.py
|
chemistry-scripts/cclib
|
e8e0ea9b3e9b7091f8dfc4dd52d5e5e84a1cc258
|
[
"BSD-3-Clause"
] | null | null | null |
cclib/parser/molcasparser.py
|
chemistry-scripts/cclib
|
e8e0ea9b3e9b7091f8dfc4dd52d5e5e84a1cc258
|
[
"BSD-3-Clause"
] | null | null | null |
cclib/parser/molcasparser.py
|
chemistry-scripts/cclib
|
e8e0ea9b3e9b7091f8dfc4dd52d5e5e84a1cc258
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for Molcas output files"""
import re
import string
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class Molcas(logfileparser.Logfile):
"""A Molcas log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Molcas, self).__init__(logname="Molcas", *args, **kwargs)
def __str__(self):
"""Return a string repeesentation of the object."""
return "Molcas log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Molcas("%s")' % (self.filename)
def normalisesym(self, label):
"""Normalise the symmetries used by Molcas.
The labels are standardized except for the first character being lowercase.
"""
return label[0].upper() + label[1:]
def after_parsing(self):
for element, ncore in self.core_array:
self._assign_coreelectrons_to_element(element, ncore)
if "package_version" in self.metadata:
# Use the short version as the legacy version.
self.metadata["legacy_package_version"] = self.metadata["package_version"]
# If there is both a tag and the full hash, place the tag
# first. Both are chosen to be local, since there isn't a
# distinction between development and release builds in their
# version cycle.
if "tag" in self.metadata and "revision" in self.metadata:
self.metadata["package_version"] = "{}+{}.{}".format(
self.metadata["package_version"],
self.metadata["tag"],
self.metadata["revision"]
)
elif "tag" in self.metadata:
self.metadata["package_version"] = "{}+{}".format(
self.metadata["package_version"],
self.metadata["tag"]
)
elif "revision" in self.metadata:
self.metadata["package_version"] = "{}+{}".format(
self.metadata["package_version"],
self.metadata["revision"]
)
def before_parsing(self):
# Compile the regex for extracting the element symbol from the
# atom label in the "Molecular structure info" block.
self.re_atomelement = re.compile(r'([a-zA-Z]+)\d?')
# Compile the dashes-and-or-spaces-only regex.
self.re_dashes_and_spaces = re.compile(r'^[\s-]+$')
# Molcas can do multiple calculations in one job, and each one
        # starts from the gateway module. Only parse the first.
# TODO: It would be best to parse each calculation as a separate
# ccData object and return an iterator - something for 2.x
self.gateway_module_count = 0
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if "Start Module: gateway" in line:
self.gateway_module_count += 1
if self.gateway_module_count > 1:
return
# Extract the version number and optionally the Git tag and hash.
if "version" in line:
match = re.search(r"\s{2,}version:?\s(\d*\.\d*)", line)
if match:
self.metadata["package_version"] = match.groups()[0]
if "tag" in line:
self.metadata["tag"] = line.split()[-1]
if "build" in line:
match = re.search(r"\*\s*build\s(\S*)\s*\*", line)
if match:
self.metadata["revision"] = match.groups()[0]
## This section is present when executing &GATEWAY.
# ++ Molecular structure info:
# -------------------------
# ************************************************
# **** Cartesian Coordinates / Bohr, Angstrom ****
# ************************************************
# Center Label x y z x y z
# 1 C1 0.526628 -2.582937 0.000000 0.278679 -1.366832 0.000000
# 2 C2 2.500165 -0.834760 0.000000 1.323030 -0.441736 0.000000
if line[25:63] == 'Cartesian Coordinates / Bohr, Angstrom':
if not hasattr(self, 'atomnos'):
self.atomnos = []
self.skip_lines(inputfile, ['stars', 'blank', 'header'])
line = next(inputfile)
atomelements = []
atomcoords = []
while line.strip() not in ('', '--'):
sline = line.split()
atomelement = sline[1].rstrip(string.digits).title()
atomelements.append(atomelement)
atomcoords.append(list(map(float, sline[5:])))
line = next(inputfile)
self.append_attribute('atomcoords', atomcoords)
if self.atomnos == []:
self.atomnos = [self.table.number[ae.title()] for ae in atomelements]
if not hasattr(self, 'natom'):
self.set_attribute('natom', len(self.atomnos))
## This section is present when executing &SCF.
# ++ Orbital specifications:
# -----------------------
# Symmetry species 1
# Frozen orbitals 0
# Occupied orbitals 3
# Secondary orbitals 77
# Deleted orbitals 0
# Total number of orbitals 80
# Number of basis functions 80
# --
if line[:29] == '++ Orbital specifications:':
self.skip_lines(inputfile, ['dashes', 'blank'])
line = next(inputfile)
symmetry_count = 1
while not line.startswith('--'):
if line.strip().startswith('Symmetry species'):
symmetry_count = int(line.split()[-1])
if line.strip().startswith('Total number of orbitals'):
nmos = line.split()[-symmetry_count:]
self.set_attribute('nmo', sum(map(int, nmos)))
if line.strip().startswith('Number of basis functions'):
nbasis = line.split()[-symmetry_count:]
self.set_attribute('nbasis', sum(map(int, nbasis)))
line = next(inputfile)
if line.strip().startswith(('Molecular charge', 'Total molecular charge')):
self.set_attribute('charge', int(float(line.split()[-1])))
# ++ Molecular charges:
# ------------------
# Mulliken charges per centre and basis function type
# ---------------------------------------------------
# C1
# 1s 2.0005
# 2s 2.0207
# 2px 0.0253
# 2pz 0.1147
# 2py 1.8198
# *s -0.0215
# *px 0.0005
# *pz 0.0023
# *py 0.0368
# *d2+ 0.0002
# *d1+ 0.0000
# *d0 0.0000
# *d1- 0.0000
# *d2- 0.0000
# *f3+ 0.0000
# *f2+ 0.0001
# *f1+ 0.0000
# *f0 0.0001
# *f1- 0.0001
# *f2- 0.0000
# *f3- 0.0003
# *g4+ 0.0000
# *g3+ 0.0000
# *g2+ 0.0000
# *g1+ 0.0000
# *g0 0.0000
# *g1- 0.0000
# *g2- 0.0000
# *g3- 0.0000
# *g4- 0.0000
# Total 6.0000
# N-E 0.0000
# Total electronic charge= 6.000000
# Total charge= 0.000000
#--
if line[:24] == '++ Molecular charges:':
atomcharges = []
while line[6:29] != 'Total electronic charge':
line = next(inputfile)
if line[6:9] == 'N-E':
atomcharges.extend(map(float, line.split()[1:]))
# Molcas only performs Mulliken population analysis.
self.set_attribute('atomcharges', {'mulliken': atomcharges})
# Ensure the charge printed here is identical to the
# charge printed before entering the SCF.
self.skip_line(inputfile, 'blank')
line = next(inputfile)
assert line[6:30] == 'Total charge='
if hasattr(self, 'charge'):
assert int(float(line.split()[2])) == self.charge
# This section is present when executing &SCF
# This section parses the total SCF Energy.
# *****************************************************************************************************************************
# * *
# * SCF/KS-DFT Program, Final results *
# * *
# * *
# * *
# * Final Results *
# * *
# *****************************************************************************************************************************
# :: Total SCF energy -37.6045426484
if line[:22] == ':: Total SCF energy' or line[:25] == ':: Total KS-DFT energy':
if not hasattr(self, 'scfenergies'):
self.scfenergies = []
scfenergy = float(line.split()[-1])
self.scfenergies.append(utils.convertor(scfenergy, 'hartree', 'eV'))
## Parsing the scftargets in this section
# ++ Optimization specifications:
# ----------------------------
# SCF Algorithm: Conventional
# Minimized density differences are used
# Number of density matrices in core 9
# Maximum number of NDDO SCF iterations 400
# Maximum number of HF SCF iterations 400
# Threshold for SCF energy change 0.10E-08
# Threshold for density matrix 0.10E-03
# Threshold for Fock matrix 0.15E-03
# Threshold for linear dependence 0.10E-08
# Threshold at which DIIS is turned on 0.15E+00
# Threshold at which QNR/C2DIIS is turned on 0.75E-01
# Threshold for Norm(delta) (QNR/C2DIIS) 0.20E-04
if line[:34] == '++ Optimization specifications:':
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
if line.strip().startswith('SCF'):
scftargets = []
self.skip_lines(inputfile,
['Minimized', 'Number', 'Maximum', 'Maximum'])
lines = [next(inputfile) for i in range(7)]
targets = [
'Threshold for SCF energy change',
'Threshold for density matrix',
'Threshold for Fock matrix',
'Threshold for Norm(delta)',
]
for y in targets:
scftargets.extend([float(x.split()[-1]) for x in lines if y in x])
self.append_attribute('scftargets', scftargets)
# ++ Convergence information
# SCF iterations: Energy and convergence statistics
#
# Iter Tot. SCF One-electron Two-electron Energy Max Dij or Max Fij DNorm TNorm AccCon Time
# Energy Energy Energy Change Delta Norm in Sec.
# 1 -36.83817703 -50.43096166 13.59278464 0.00E+00 0.16E+00* 0.27E+01* 0.30E+01 0.33E+02 NoneDa 0.
# 2 -36.03405202 -45.74525152 9.71119950 0.80E+00* 0.14E+00* 0.93E-02* 0.26E+01 0.43E+01 Damp 0.
# 3 -37.08936118 -48.41536598 11.32600480 -0.11E+01* 0.12E+00* 0.91E-01* 0.97E+00 0.16E+01 Damp 0.
# 4 -37.31610460 -50.54103969 13.22493509 -0.23E+00* 0.11E+00* 0.96E-01* 0.72E+00 0.27E+01 Damp 0.
# 5 -37.33596239 -49.47021484 12.13425245 -0.20E-01* 0.59E-01* 0.59E-01* 0.37E+00 0.16E+01 Damp 0.
# ...
# Convergence after 26 Macro Iterations
# --
if line[46:91] == 'iterations: Energy and convergence statistics':
self.skip_line(inputfile, 'blank')
while line.split() != ['Energy', 'Energy', 'Energy', 'Change', 'Delta', 'Norm', 'in', 'Sec.']:
line = next(inputfile)
iteration_regex = (r"^([0-9]+)" # Iter
r"( [ \-0-9]*\.[0-9]{6,9})" # Tot. SCF Energy
r"( [ \-0-9]*\.[0-9]{6,9})" # One-electron Energy
r"( [ \-0-9]*\.[0-9]{6,9})" # Two-electron Energy
r"( [ \-0-9]*\.[0-9]{2}E[\-\+][0-9]{2}\*?)" # Energy Change
r"( [ \-0-9]*\.[0-9]{2}E[\-\+][0-9]{2}\*?)" # Max Dij or Delta Norm
r"( [ \-0-9]*\.[0-9]{2}E[\-\+][0-9]{2}\*?)" # Max Fij
r"( [ \-0-9]*\.[0-9]{2}E[\-\+][0-9]{2}\*?)" # DNorm
r"( [ \-0-9]*\.[0-9]{2}E[\-\+][0-9]{2}\*?)" # TNorm
r"( [ A-Za-z0-9]*)" # AccCon
r"( [ \.0-9]*)$") # Time in Sec.
scfvalues = []
line = next(inputfile)
while not line.strip().startswith("Convergence"):
match = re.match(iteration_regex, line.strip())
if match:
cols = [g.strip() for g in match.groups()]
cols = [c.replace('*', '') for c in cols]
energy = float(cols[4])
density = float(cols[5])
fock = float(cols[6])
dnorm = float(cols[7])
scfvalues.append([energy, density, fock, dnorm])
if line.strip() == "--":
self.logger.warning('File terminated before end of last SCF!')
break
line = next(inputfile)
self.append_attribute('scfvalues', scfvalues)
# Harmonic frequencies in cm-1
#
# IR Intensities in km/mol
#
# 1 2 3 4 5 6
#
# Frequency: i60.14 i57.39 128.18 210.06 298.24 309.65
#
# Intensity: 3.177E-03 2.129E-06 4.767E-01 2.056E-01 6.983E-07 1.753E-07
# Red. mass: 2.42030 2.34024 2.68044 3.66414 2.61721 3.34904
#
# C1 x -0.00000 0.00000 0.00000 -0.05921 0.00000 -0.06807
# C1 y 0.00001 -0.00001 -0.00001 0.00889 0.00001 -0.02479
# C1 z -0.03190 0.04096 -0.03872 0.00001 -0.12398 -0.00002
# C2 x -0.00000 0.00001 0.00000 -0.06504 0.00000 -0.03487
# C2 y 0.00000 -0.00000 -0.00000 0.01045 0.00001 -0.05659
# C2 z -0.03703 -0.03449 -0.07269 0.00000 -0.07416 -0.00001
# C3 x -0.00000 0.00001 0.00000 -0.06409 -0.00001 0.05110
# C3 y -0.00000 0.00001 0.00000 0.00152 0.00000 -0.03263
# C3 z -0.03808 -0.08037 -0.07267 -0.00001 0.07305 0.00000
# ...
# H20 y 0.00245 -0.00394 0.03215 0.03444 -0.10424 -0.10517
# H20 z 0.00002 -0.00001 0.00000 -0.00000 -0.00000 0.00000
#
#
#
# ++ Thermochemistry
if line[1:29] == 'Harmonic frequencies in cm-1':
self.skip_line(inputfile, 'blank')
line = next(inputfile)
while 'Thermochemistry' not in line:
if 'Frequency:' in line:
if not hasattr(self, 'vibfreqs'):
self.vibfreqs = []
vibfreqs = [float(i.replace('i', '-')) for i in line.split()[1:]]
self.vibfreqs.extend(vibfreqs)
if 'Intensity:' in line:
if not hasattr(self, 'vibirs'):
self.vibirs = []
vibirs = map(float, line.split()[1:])
self.vibirs.extend(vibirs)
if 'Red.' in line:
if not hasattr(self, 'vibrmasses'):
self.vibrmasses = []
vibrmasses = map(float, line.split()[2:])
self.vibrmasses.extend(vibrmasses)
self.skip_line(inputfile, 'blank')
line = next(inputfile)
if not hasattr(self, 'vibdisps'):
self.vibdisps = []
disps = []
for n in range(3*self.natom):
numbers = [float(s) for s in line[17:].split()]
# The atomindex should start at 0 instead of 1.
atomindex = int(re.search(r'\d+$', line.split()[0]).group()) - 1
numbermodes = len(numbers)
if len(disps) == 0:
# Appends empty array of the following
# dimensions (numbermodes, natom, 0) to disps.
for mode in range(numbermodes):
disps.append([[] for x in range(0, self.natom)])
for mode in range(numbermodes):
disps[mode][atomindex].append(numbers[mode])
line = next(inputfile)
self.vibdisps.extend(disps)
line = next(inputfile)
## Parsing thermochemistry attributes here
# ++ Thermochemistry
#
# *********************
# * *
# * THERMOCHEMISTRY *
# * *
# *********************
#
# Mass-centered Coordinates (Angstrom):
# ***********************************************************
# ...
# *****************************************************
# Temperature = 0.00 Kelvin, Pressure = 1.00 atm
# -----------------------------------------------------
# Molecular Partition Function and Molar Entropy:
# q/V (M**-3) S(kcal/mol*K)
# Electronic 0.100000D+01 0.000
# Translational 0.100000D+01 0.000
# Rotational 0.100000D+01 2.981
# Vibrational 0.100000D+01 0.000
# TOTAL 0.100000D+01 2.981
#
# Thermal contributions to INTERNAL ENERGY:
# Electronic 0.000 kcal/mol 0.000000 au.
# Translational 0.000 kcal/mol 0.000000 au.
# Rotational 0.000 kcal/mol 0.000000 au.
# Vibrational 111.885 kcal/mol 0.178300 au.
# TOTAL 111.885 kcal/mol 0.178300 au.
#
# Thermal contributions to
# ENTHALPY 111.885 kcal/mol 0.178300 au.
# GIBBS FREE ENERGY 111.885 kcal/mol 0.178300 au.
#
# Sum of energy and thermal contributions
# INTERNAL ENERGY -382.121931 au.
# ENTHALPY -382.121931 au.
# GIBBS FREE ENERGY -382.121931 au.
# -----------------------------------------------------
# ...
# ENTHALPY -382.102619 au.
# GIBBS FREE ENERGY -382.179819 au.
# -----------------------------------------------------
# --
#
# ++ Isotopic shifts:
if line[4:19] == 'THERMOCHEMISTRY':
temperature_values = []
pressure_values = []
entropy_values = []
internal_energy_values = []
enthalpy_values = []
free_energy_values = []
while 'Isotopic' not in line:
if line[1:12] == 'Temperature':
temperature_values.append(float(line.split()[2]))
pressure_values.append(float(line.split()[6]))
if line[1:48] == 'Molecular Partition Function and Molar Entropy:':
while 'TOTAL' not in line:
line = next(inputfile)
entropy_values.append(utils.convertor(float(line.split()[2]), 'kcal/mol', 'hartree'))
if line[1:40] == 'Sum of energy and thermal contributions':
internal_energy_values.append(float(next(inputfile).split()[2]))
enthalpy_values.append(float(next(inputfile).split()[1]))
free_energy_values.append(float(next(inputfile).split()[3]))
line = next(inputfile)
# When calculations for more than one temperature value are
# performed, the values corresponding to room temperature (298.15 K)
# are returned and if no calculations are performed for 298.15 K, then
            # the values corresponding to the last temperature value are returned.
index = -1
if 298.15 in temperature_values:
index = temperature_values.index(298.15)
self.set_attribute('temperature', temperature_values[index])
if len(temperature_values) > 1:
self.logger.warning('More than 1 values of temperature found')
self.set_attribute('pressure', pressure_values[index])
if len(pressure_values) > 1:
self.logger.warning('More than 1 values of pressure found')
self.set_attribute('entropy', entropy_values[index])
if len(entropy_values) > 1:
self.logger.warning('More than 1 values of entropy found')
self.set_attribute('enthalpy', enthalpy_values[index])
if len(enthalpy_values) > 1:
self.logger.warning('More than 1 values of enthalpy found')
self.set_attribute('freeenergy', free_energy_values[index])
if len(free_energy_values) > 1:
self.logger.warning('More than 1 values of freeenergy found')
## Parsing Geometrical Optimization attributes in this section.
# ++ Slapaf input parameters:
# ------------------------
#
# Max iterations: 2000
# Convergence test a la Schlegel.
# Convergence criterion on gradient/para.<=: 0.3E-03
# Convergence criterion on step/parameter<=: 0.3E-03
# Convergence criterion on energy change <=: 0.0E+00
# Max change of an internal coordinate: 0.30E+00
# ...
# ...
# **********************************************************************************************************************
# * Energy Statistics for Geometry Optimization *
# **********************************************************************************************************************
# Energy Grad Grad Step Estimated Geom Hessian
# Iter Energy Change Norm Max Element Max Element Final Energy Update Update Index
# 1 -382.30023222 0.00000000 0.107221 0.039531 nrc047 0.085726 nrc047 -382.30533799 RS-RFO None 0
# 2 -382.30702964 -0.00679742 0.043573 0.014908 nrc001 0.068195 nrc001 -382.30871333 RS-RFO BFGS 0
# 3 -382.30805348 -0.00102384 0.014883 0.005458 nrc010 -0.020973 nrc001 -382.30822089 RS-RFO BFGS 0
# ...
# ...
# 18 -382.30823419 -0.00000136 0.001032 0.000100 nrc053 0.012319 nrc053 -382.30823452 RS-RFO BFGS 0
# 19 -382.30823198 0.00000221 0.001051 -0.000092 nrc054 0.066565 nrc053 -382.30823822 RS-RFO BFGS 0
# 20 -382.30820252 0.00002946 0.001132 -0.000167 nrc021 -0.064003 nrc053 -382.30823244 RS-RFO BFGS 0
#
# +----------------------------------+----------------------------------+
# + Cartesian Displacements + Gradient in internals +
# + Value Threshold Converged? + Value Threshold Converged? +
# +-----+----------------------------------+----------------------------------+
# + RMS + 5.7330E-02 1.2000E-03 No + 1.6508E-04 3.0000E-04 Yes +
# +-----+----------------------------------+----------------------------------+
# + Max + 1.2039E-01 1.8000E-03 No + 1.6711E-04 4.5000E-04 Yes +
# +-----+----------------------------------+----------------------------------+
if 'Convergence criterion on energy change' in line:
self.energy_threshold = float(line.split()[6])
# If energy change threshold equals zero,
# then energy change is not a criteria for convergence.
if self.energy_threshold == 0:
self.energy_threshold = numpy.inf
if 'Energy Statistics for Geometry Optimization' in line:
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.skip_lines(inputfile, ['stars', 'header'])
line = next(inputfile)
assert 'Iter Energy Change Norm' in line
# A variable keeping track of ongoing iteration.
iter_number = len(self.geovalues) + 1
# Iterate till blank line.
while line.split() != []:
for i in range(iter_number):
line = next(inputfile)
self.geovalues.append([float(line.split()[2])])
line = next(inputfile)
# Along with energy change, RMS and Max values of change in
            # Cartesian Displacement and Gradients are used as optimization
# criteria.
self.skip_lines(inputfile, ['border', 'header', 'header', 'border'])
line = next(inputfile)
assert '+ RMS +' in line
line_rms = line.split()
line = next(inputfile)
line_max = next(inputfile).split()
if not hasattr(self, 'geotargets'):
# The attribute geotargets is an array consisting of the following
# values: [Energy threshold, Max Gradient threshold, RMS Gradient threshold, \
# Max Displacements threshold, RMS Displacements threshold].
max_gradient_threshold = float(line_max[8])
rms_gradient_threshold = float(line_rms[8])
max_displacement_threshold = float(line_max[4])
rms_displacement_threshold = float(line_rms[4])
self.geotargets = [self.energy_threshold, max_gradient_threshold, rms_gradient_threshold, max_displacement_threshold, rms_displacement_threshold]
max_gradient_change = float(line_max[7])
rms_gradient_change = float(line_rms[7])
max_displacement_change = float(line_max[3])
rms_displacement_change = float(line_rms[3])
self.geovalues[iter_number - 1].extend([max_gradient_change, rms_gradient_change, max_displacement_change, rms_displacement_change])
# *********************************************************
# * Nuclear coordinates for the next iteration / Angstrom *
# *********************************************************
# ATOM X Y Z
# C1 0.235560 -1.415847 0.012012
# C2 1.313797 -0.488199 0.015149
# C3 1.087050 0.895510 0.014200
# ...
# ...
# H19 -0.021327 -4.934915 -0.029355
# H20 -1.432030 -3.721047 -0.039835
#
# --
if 'Nuclear coordinates for the next iteration / Angstrom' in line:
self.skip_lines(inputfile, ['s', 'header'])
line = next(inputfile)
atomcoords = []
while line.split() != []:
atomcoords.append([float(c) for c in line.split()[1:]])
line = next(inputfile)
if len(atomcoords) == self.natom:
self.atomcoords.append(atomcoords)
else:
self.logger.warning(
"Parsed coordinates not consistent with previous, skipping. "
"This could be due to symmetry being turned on during the job. "
"Length was %i, now found %i. New coordinates: %s"
% (len(self.atomcoords[-1]), len(atomcoords), str(atomcoords)))
# **********************************************************************************************************************
# * Energy Statistics for Geometry Optimization *
# **********************************************************************************************************************
# Energy Grad Grad Step Estimated Geom Hessian
# Iter Energy Change Norm Max Element Max Element Final Energy Update Update Index
# 1 -382.30023222 0.00000000 0.107221 0.039531 nrc047 0.085726 nrc047 -382.30533799 RS-RFO None 0
# ...
# ...
# 23 -382.30823115 -0.00000089 0.001030 0.000088 nrc053 0.000955 nrc053 -382.30823118 RS-RFO BFGS 0
#
# +----------------------------------+----------------------------------+
# + Cartesian Displacements + Gradient in internals +
# + Value Threshold Converged? + Value Threshold Converged? +
# +-----+----------------------------------+----------------------------------+
# + RMS + 7.2395E-04 1.2000E-03 Yes + 2.7516E-04 3.0000E-04 Yes +
# +-----+----------------------------------+----------------------------------+
# + Max + 1.6918E-03 1.8000E-03 Yes + 8.7768E-05 4.5000E-04 Yes +
# +-----+----------------------------------+----------------------------------+
#
# Geometry is converged in 23 iterations to a Minimum Structure
if 'Geometry is converged' in line:
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.atomcoords))
# *********************************************************
# * Nuclear coordinates of the final structure / Angstrom *
# *********************************************************
# ATOM X Y Z
# C1 0.235547 -1.415838 0.012193
# C2 1.313784 -0.488201 0.015297
# C3 1.087036 0.895508 0.014333
# ...
# ...
# H19 -0.021315 -4.934913 -0.029666
# H20 -1.431994 -3.721026 -0.041078
if 'Nuclear coordinates of the final structure / Angstrom' in line:
self.skip_lines(inputfile, ['s', 'header'])
line = next(inputfile)
atomcoords = []
while line.split() != []:
atomcoords.append([float(c) for c in line.split()[1:]])
line = next(inputfile)
if len(atomcoords) == self.natom:
self.atomcoords.append(atomcoords)
else:
self.logger.error(
'Number of atoms (%d) in parsed atom coordinates '
'is smaller than previously (%d), possibly due to '
'symmetry. Ignoring these coordinates.'
% (len(atomcoords), self.natom))
## Parsing Molecular Gradients attributes in this section.
# ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()
#
# &ALASKA
#
# only a single process is used
# available to each process: 2.0 GB of memory, 1 thread
# ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()
# ...
# ...
# **************************************************
# * *
# * Molecular gradients *
# * *
# **************************************************
#
# Irreducible representation: a
# ---------------------------------------------------------
# X Y Z
# ---------------------------------------------------------
# C1 -0.00009983 -0.00003043 0.00001004
# ...
# H20 -0.00027629 0.00010546 0.00003317
# ---------------------------------------------------
# WARNING: "Molecular gradients, after ESPF" is found for ESPF QM/MM calculations
if "Molecular gradients " in line:
if not hasattr(self, "grads"):
self.grads = []
self.skip_lines(inputfile, ['stars', 'stars', 'blank', 'header',
'dashes', 'header', 'dashes'])
grads = []
line = next(inputfile)
while len(line.split()) == 4:
tmpgrads = list(map(float, line.split()[1:]))
grads.append(tmpgrads)
line = next(inputfile)
self.append_attribute('grads', grads)
# This code here works, but QM/MM gradients are printed after QM ones.
# Maybe another attribute is needed to store them to have both.
if "Molecular gradients, after ESPF" in line:
self.skip_lines(inputfile, ['stars', 'stars', 'blank', 'header',
'dashes', 'header', 'dashes'])
grads = []
line = next(inputfile)
while len(line.split()) == 4:
tmpgrads = list(map(float, line.split()[1:]))
grads.append(tmpgrads)
line = next(inputfile)
self.grads[-1] = grads
###
# All orbitals with orbital energies smaller than E(LUMO)+0.5 are printed
#
# ++ Molecular orbitals:
# -------------------
#
# Title: RKS-DFT orbitals
#
# Molecular orbitals for symmetry species 1: a
#
# Orbital 1 2 3 4 5 6 7 8 9 10
# Energy -10.0179 -10.0179 -10.0075 -10.0075 -10.0066 -10.0066 -10.0056 -10.0055 -9.9919 -9.9919
# Occ. No. 2.0000 2.0000 2.0000 2.0000 2.0000 2.0000 2.0000 2.0000 2.0000 2.0000
#
# 1 C1 1s -0.6990 0.6989 0.0342 0.0346 0.0264 -0.0145 -0.0124 -0.0275 -0.0004 -0.0004
# 2 C1 2s -0.0319 0.0317 -0.0034 -0.0033 -0.0078 0.0034 0.0041 0.0073 -0.0002 -0.0002
# ...
# ...
# 58 H18 1s 0.2678
# 59 H19 1s -0.2473
# 60 H20 1s 0.1835
# --
if '++ Molecular orbitals:' in line:
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
# We don't currently support parsing natural orbitals or active space orbitals.
if 'Natural orbitals' not in line and "Pseudonatural" not in line:
self.skip_line(inputfile, 'b')
# Symmetry is not currently supported, so this line can have one form.
while 'Molecular orbitals for symmetry species 1: a' not in line.strip():
line = next(inputfile)
# Symmetry is not currently supported, so this line can have one form.
if line.strip() != 'Molecular orbitals for symmetry species 1: a':
return
line = next(inputfile)
moenergies = []
homos = 0
mocoeffs = []
while line[:2] != '--':
line = next(inputfile)
if line.strip().startswith('Orbital'):
orbital_index = line.split()[1:]
for i in orbital_index:
mocoeffs.append([])
if 'Energy' in line:
energies = [utils.convertor(float(x), 'hartree', 'eV') for x in line.split()[1:]]
moenergies.extend(energies)
if 'Occ. No.' in line:
for i in line.split()[2:]:
if float(i) != 0:
homos += 1
aonames = []
tokens = line.split()
if tokens and tokens[0] == '1':
while tokens and tokens[0] != '--':
aonames.append("{atom}_{orbital}".format(atom=tokens[1], orbital=tokens[2]))
info = tokens[3:]
j = 0
for i in orbital_index:
mocoeffs[int(i)-1].append(float(info[j]))
j += 1
line = next(inputfile)
tokens = line.split()
self.set_attribute('aonames', aonames)
if len(moenergies) != self.nmo:
moenergies.extend([numpy.nan for x in range(self.nmo - len(moenergies))])
self.append_attribute('moenergies', moenergies)
if not hasattr(self, 'homos'):
self.homos = []
self.homos.extend([homos-1])
while len(mocoeffs) < self.nmo:
nan_array = [numpy.nan for i in range(self.nbasis)]
mocoeffs.append(nan_array)
self.append_attribute('mocoeffs', mocoeffs)
## Parsing MP energy from the &MBPT2 module.
# Conventional algorithm used...
#
# SCF energy = -74.9644564043 a.u.
# Second-order correlation energy = -0.0364237923 a.u.
#
# Total energy = -75.0008801966 a.u.
# Reference weight ( Cref**2 ) = 0.98652
#
# :: Total MBPT2 energy -75.0008801966
#
#
# Zeroth-order energy (E0) = -36.8202538520 a.u.
#
# Shanks-type energy S1(E) = -75.0009150108 a.u.
if 'Total MBPT2 energy' in line:
mpenergies = []
mpenergies.append(utils.convertor(utils.float(line.split()[4]), 'hartree', 'eV'))
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
self.mpenergies.append(mpenergies)
# Parsing data ccenergies from &CCSDT module.
# --- Start Module: ccsdt at Thu Jul 26 14:03:23 2018 ---
#
# ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()
#
# &CCSDT
# ...
# ...
# 14 -75.01515915 -0.05070274 -0.00000029
# 15 -75.01515929 -0.05070289 -0.00000014
# 16 -75.01515936 -0.05070296 -0.00000007
# Convergence after 17 Iterations
#
#
# Total energy (diff) : -75.01515936 -0.00000007
# Correlation energy : -0.0507029554992
if 'Start Module: ccsdt' in line:
self.skip_lines(inputfile, ['b', '()', 'b'])
line = next(inputfile)
if '&CCSDT' in line:
while not line.strip().startswith('Total energy (diff)'):
line = next(inputfile)
ccenergies = utils.convertor(utils.float(line.split()[4]), 'hartree', 'eV')
if not hasattr(self, 'ccenergies'):
self.ccenergies= []
self.ccenergies.append(ccenergies)
# ++ Primitive basis info:
# ---------------------
#
#
# *****************************************************
# ******** Primitive Basis Functions (Valence) ********
# *****************************************************
#
#
# Basis set:C.AUG-CC-PVQZ.........
#
# Type
# s
# No. Exponent Contraction Coefficients
# 1 0.339800000D+05 0.000091 -0.000019 0.000000 0.000000 0.000000 0.000000
# 2 0.508900000D+04 0.000704 -0.000151 0.000000 0.000000 0.000000 0.000000
# ...
# ...
# 29 0.424000000D+00 0.000000 1.000000
#
# Number of primitives 93
# Number of basis functions 80
#
# --
if line.startswith('++ Primitive basis info:'):
self.skip_lines(inputfile, ['d', 'b', 'b', 's', 'header', 's', 'b'])
line = next(inputfile)
gbasis_array = []
while '--' not in line and '****' not in line:
if 'Basis set:' in line:
basis_element_patterns = re.findall(r'Basis set:([A-Za-z]{1,2})\.', line)
assert len(basis_element_patterns) == 1
basis_element = basis_element_patterns[0].title()
gbasis_array.append((basis_element, []))
if 'Type' in line:
line = next(inputfile)
shell_type = line.split()[0].upper()
self.skip_line(inputfile, 'headers')
line = next(inputfile)
exponents = []
coefficients = []
func_array = []
while line.split():
exponents.append(utils.float(line.split()[1]))
coefficients.append([utils.float(i) for i in line.split()[2:]])
line = next(inputfile)
for i in range(len(coefficients[0])):
func_tuple = (shell_type, [])
for iexp, exp in enumerate(exponents):
coeff = coefficients[iexp][i]
if coeff != 0:
func_tuple[1].append((exp, coeff))
gbasis_array[-1][1].append(func_tuple)
line = next(inputfile)
atomsymbols = [self.table.element[atomno] for atomno in self.atomnos]
self.gbasis = [[] for i in range(self.natom)]
for element, gbasis in gbasis_array:
mask = [element == possible_element for possible_element in atomsymbols]
indices = [i for (i, x) in enumerate(mask) if x]
for index in indices:
self.gbasis[index] = gbasis
# ++ Basis set information:
# ----------------------
# ...
# Basis set label: MO.ECP.HAY-WADT.5S6P4D.3S3P2D.14E-LANL2DZ.....
#
# Electronic valence basis set:
# ------------------
# Associated Effective Charge 14.000000 au
# Associated Actual Charge 42.000000 au
# Nuclear Model: Point charge
# ...
#
# Effective Core Potential specification:
# =======================================
#
# Label Cartesian Coordinates / Bohr
#
# MO 0.0006141610 -0.0006141610 0.0979067106
# --
if '++ Basis set information:' in line:
self.core_array = []
basis_element = None
ncore = 0
while line[:2] != '--':
if 'Basis set label' in line:
try:
basis_element = line.split()[3].split('.')[0]
basis_element = basis_element[0] + basis_element[1:].lower()
except:
self.logger.warning('Basis set label is missing!')
basis_element = ''
if 'valence basis set:' in line.lower():
self.skip_line(inputfile, 'd')
line = next(inputfile)
if 'Associated Effective Charge' in line:
effective_charge = float(line.split()[3])
actual_charge = float(next(inputfile).split()[3])
element = self.table.element[int(actual_charge)]
ncore = int(actual_charge - effective_charge)
if basis_element:
assert basis_element == element
else:
basis_element = element
if basis_element and ncore:
self.core_array.append((basis_element, ncore))
basis_element = ''
ncore = 0
line = next(inputfile)
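# Usage sketch (assumed typical cclib workflow; the file name is hypothetical):
#
#     from cclib.parser import Molcas
#     data = Molcas("molcas_calc.log").parse()
#     print(data.natom, data.scfenergies[-1])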
| 48.09434
| 161
| 0.424253
|
131c31bf7cc2e81274bbffa8af6479ad6ede2cd3
| 1,763
|
py
|
Python
|
config/urls.py
|
fabianrios/stratum
|
2481515d53e39edef4cecf056c23bc43341cac9b
|
[
"MIT"
] | 1
|
2019-10-07T15:00:38.000Z
|
2019-10-07T15:00:38.000Z
|
config/urls.py
|
fabianrios/stratum
|
2481515d53e39edef4cecf056c23bc43341cac9b
|
[
"MIT"
] | null | null | null |
config/urls.py
|
fabianrios/stratum
|
2481515d53e39edef4cecf056c23bc43341cac9b
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r"^$", TemplateView.as_view(template_name="pages/home.html"), name="home"),
url('tweet/', include('tweet.urls')),
url(
r"^about/$",
TemplateView.as_view(template_name="pages/about.html"),
name="about",
),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(
r"^users/",
include("stratum.users.urls", namespace="users"),
),
url(r"^accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(
r"^400/$",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
url(
r"^403/$",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
url(
r"^404/$",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
url(r"^500/$", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [url(r"^__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 32.648148
| 86
| 0.623369
|
98aa453690124c29eea31f69ae201c7caa4265b7
| 3,055
|
py
|
Python
|
epitran/bin/connl2ipaspace.py
|
dwijap/epitran
|
dab70c269710e73c1c7bf69c2de5d69b57a1d237
|
[
"MIT"
] | 422
|
2016-05-19T09:07:38.000Z
|
2022-03-31T15:06:36.000Z
|
epitran/bin/connl2ipaspace.py
|
dwijap/epitran
|
dab70c269710e73c1c7bf69c2de5d69b57a1d237
|
[
"MIT"
] | 80
|
2017-05-22T14:58:19.000Z
|
2022-03-28T19:23:31.000Z
|
epitran/bin/connl2ipaspace.py
|
dwijap/epitran
|
dab70c269710e73c1c7bf69c2de5d69b57a1d237
|
[
"MIT"
] | 95
|
2017-05-06T04:19:53.000Z
|
2022-03-21T10:41:42.000Z
|
#!/usr/bin/env python
import argparse
import codecs
import logging
from collections import Counter
import epitran
import panphon
import unicodecsv as csv
logging.basicConfig(level=logging.DEBUG)
def normpunc(epi, s):
def norm(c):
if c in epi.puncnorm:
return epi.puncnorm[c]
else:
return c
return ''.join(map(norm, s))
def add_record_gen(epi, ft, orth):
space = Counter()
orth = normpunc(epi, orth)
trans = epi.transliterate(orth)
while trans:
pref = ft.longest_one_seg_prefix(trans)
if pref != '':
space[pref] += 1
trans = trans[len(pref):]
else:
space[trans[0]] += 1
trans = trans[1:]
return space
def add_file_gen(epi, ft, fn):
space = Counter()
with codecs.open(fn, 'r', 'utf-8') as f:
for line in f:
fields = line.split(u'\t')
if len(fields) > 0:
orth = fields[0]
space.update(add_record_gen(epi, ft, orth))
logging.debug(u'Length of counter:\t{}'.format(len(space)))
return space
def add_file_op(epi, ft, fn):
space = Counter()
with codecs.open(fn, 'r', 'utf-8') as f:
for line in f:
fields = line.split(u'\t')
if len(fields) > 0:
orth = fields[0]
trans = epi.transliterate(orth)
while trans:
pref = ft.longest_one_seg_prefix(trans)
if pref != '':
space[pref] += 1
trans = trans[len(pref):]
else:
if trans[0] in epi.puncnorm:
space[epi.puncnorm[trans[0]]] += 1
else:
space[trans[0]] += 1
trans = trans[1:]
logging.debug(u'Length of counter:\t{}'.format(len(space)))
return space
def print_space(output, space):
pairs = enumerate(sorted(filter(lambda x: x, space.keys())))
with open(output, 'wb') as f:
writer = csv.writer(f, encoding='utf-8')
for i, char in pairs:
writer.writerow((i, char))
def main(code, op, infiles, output):
epi = epitran.Epitran(code)
ft = panphon.FeatureTable()
space = Counter()
for fn in infiles:
logging.debug(u'Scanning:\t{}'.format(fn).encode('utf-8'))
add_file = add_file_op if op else add_file_gen
space.update(add_file(epi, ft, fn))
print_space(output, space)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--op', action='store_true', help='Script uses punctuation as (parts of) letters.')
parser.add_argument('-c', '--code', help='Script code for CONNL files.')
parser.add_argument('-o', '--output', help='Output file.')
parser.add_argument('infiles', nargs='+', help='CONLL files serving as basis for segment space.')
args = parser.parse_args()
main(args.code, args.op, args.infiles, args.output)
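# Example invocation (language code and file names are illustrative): the
# positional arguments are tab-separated CoNLL-style files whose first column
# holds the orthographic form.
#
#     python connl2ipaspace.py -c hin-Deva -o space.csv corpus1.conll corpus2.conll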
| 30.247525
| 113
| 0.554501
|
57432fe98612bc035821e371d410fb7d9cc44d8c
| 621
|
py
|
Python
|
mindhome_alpha/erpnext/patches/v7_2/update_abbr_in_salary_slips.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
mindhome_alpha/erpnext/patches/v7_2/update_abbr_in_salary_slips.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | null | null | null |
mindhome_alpha/erpnext/patches/v7_2/update_abbr_in_salary_slips.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc('Payroll', 'doctype', 'Salary Slip')
if not frappe.db.has_column('Salary Detail', 'abbr'):
return
salary_details = frappe.db.sql("""select abbr, salary_component, name from `tabSalary Detail`
where abbr is null or abbr = ''""", as_dict=True)
for salary_detail in salary_details:
salary_component_abbr = frappe.get_value("Salary Component", salary_detail.salary_component, "salary_component_abbr")
frappe.db.sql("""update `tabSalary Detail` set abbr = %s where name = %s""",(salary_component_abbr, salary_detail.name))
| 44.357143
| 122
| 0.756844
|
c1ab8ccf26950940bd2b7b3a044b8d364aa5f888
| 1,517
|
py
|
Python
|
gemd/__init__.py
|
CitrineInformatics/gemd
|
f4bb7e5ff09e7ec89d277c26e61ed14f6bab0306
|
[
"Apache-2.0"
] | 3
|
2019-09-27T02:56:26.000Z
|
2019-11-11T22:21:18.000Z
|
gemd/__init__.py
|
CitrineInformatics/gemd
|
f4bb7e5ff09e7ec89d277c26e61ed14f6bab0306
|
[
"Apache-2.0"
] | 29
|
2019-09-09T23:20:41.000Z
|
2020-03-31T19:13:01.000Z
|
gemd/__init__.py
|
CitrineInformatics/gemd
|
f4bb7e5ff09e7ec89d277c26e61ed14f6bab0306
|
[
"Apache-2.0"
] | 3
|
2019-09-20T19:01:02.000Z
|
2019-11-26T02:43:45.000Z
|
"""Data concepts library."""
# flake8: noqa
from .entity import Condition, Parameter, Property, PropertyAndConditions, \
CategoricalBounds, CompositionBounds, IntegerBounds, \
MolecularStructureBounds, RealBounds, \
MaterialRun, MeasurementRun, ProcessRun, IngredientRun, \
MaterialSpec, MeasurementSpec, ProcessSpec, IngredientSpec, \
PerformedSource, \
PropertyTemplate, ConditionTemplate, ParameterTemplate, \
MaterialTemplate, MeasurementTemplate, ProcessTemplate, \
NominalReal, NormalReal, UniformReal, NominalInteger, \
UniformInteger, DiscreteCategorical, NominalCategorical, \
EmpiricalFormula, NominalComposition, InChI, Smiles, \
LinkByUID, \
FileLink
__all__ = ["Condition", "Parameter", "Property", "PropertyAndConditions",
"CategoricalBounds", "CompositionBounds", "IntegerBounds",
"MolecularStructureBounds", "RealBounds",
"MaterialRun", "MeasurementRun", "ProcessRun", "IngredientRun",
"MaterialSpec", "MeasurementSpec", "ProcessSpec", "IngredientSpec",
"PerformedSource",
"PropertyTemplate", "ConditionTemplate", "ParameterTemplate",
"MaterialTemplate", "MeasurementTemplate", "ProcessTemplate",
"NominalReal", "NormalReal", "UniformReal", "NominalInteger",
"UniformInteger", "DiscreteCategorical", "NominalCategorical",
"EmpiricalFormula", "NominalComposition", "InChI", "Smiles",
"LinkByUID",
"FileLink"
]
| 48.935484
| 78
| 0.696111
|
9e34c8039bd5c46ba55b2e4be6bc2bd94b5ee245
| 1,712
|
py
|
Python
|
setup.py
|
gilwoolee/bpo_baselines
|
2fbd39be8e79d69f2d2b23b4e5b8e6dfd372a58a
|
[
"MIT"
] | null | null | null |
setup.py
|
gilwoolee/bpo_baselines
|
2fbd39be8e79d69f2d2b23b4e5b8e6dfd372a58a
|
[
"MIT"
] | null | null | null |
setup.py
|
gilwoolee/bpo_baselines
|
2fbd39be8e79d69f2d2b23b4e5b8e6dfd372a58a
|
[
"MIT"
] | null | null | null |
import re
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
extras = {
'test': [
'filelock',
'pytest',
'pytest-forked',
'atari-py~=0.2.0'
],
'bullet': [
'pybullet'
],
'mpi': [
'mpi4py'
]
}
all_deps = []
for group_name in extras:
all_deps += extras[group_name]
extras['all'] = all_deps
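# Convenience extra: installing with the 'all' extra (e.g. `pip install -e .[all]`)
# pulls in every optional dependency group defined above.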
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym',
'scipy',
'tqdm',
'joblib',
'dill',
'progressbar2',
'cloudpickle==1.2.0',
'click',
'opencv-python'
],
extras_require=extras,
description='OpenAI baselines: high quality implementations of reinforcement learning algorithms',
author='OpenAI',
url='https://github.com/openai/baselines',
author_email='gym@openai.com',
version='0.1.5')
# ensure there is some tensorflow build with version above 2.0
import pkg_resources
tf_pkg = None
for tf_pkg_name in ['tensorflow', 'tensorflow-gpu', 'tf-nightly', 'tf-nightly-gpu']:
try:
tf_pkg = pkg_resources.get_distribution(tf_pkg_name)
except pkg_resources.DistributionNotFound:
pass
assert tf_pkg is not None, 'TensorFlow needed, of version above 2.0'
from distutils.version import LooseVersion
assert LooseVersion(re.sub(r'-?rc\d+$', '', tf_pkg.version)) >= LooseVersion('2.0.0')
| 27.174603
| 104
| 0.620911
|
97e7a787fb57792188a5d80b69459edf431981fd
| 1,240
|
py
|
Python
|
profiles_api/serializers.py
|
FHSAF/prifiles-rest-api
|
4c329d42846cff50dbf2474164cbe36b818c01c6
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
FHSAF/prifiles-rest-api
|
4c329d42846cff50dbf2474164cbe36b818c01c6
|
[
"MIT"
] | 6
|
2020-06-05T20:52:01.000Z
|
2021-09-22T18:32:22.000Z
|
profiles_api/serializers.py
|
FHSAF/prifiles-rest-api
|
4c329d42846cff50dbf2474164cbe36b818c01c6
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializers a name filed for testing our APIView"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    '''Serializes a user profile object'''
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {
'input_type': 'password'
}
}
}
def create(self, validated_data):
'''Create and return a new user'''
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
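    '''Serializes profile feed items; user_profile is read-only so it cannot be set from the request payload.'''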
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on', )
extra_kwargs = {
'user_profile': {
'read_only': True,
}
}
| 28.181818
| 70
| 0.579839
|
34ba5685b23e23a18bd727e9804d7ba5f6aad1d9
| 1,654
|
py
|
Python
|
services/users/project/tests/test_config.py
|
Wirya2700/testdriven-app
|
fc98c15f04e7c5e22b276d96bfbd6756201b3bfc
|
[
"MIT"
] | 1
|
2021-09-16T06:31:18.000Z
|
2021-09-16T06:31:18.000Z
|
services/users/project/tests/test_config.py
|
Wirya2700/testdriven-app
|
fc98c15f04e7c5e22b276d96bfbd6756201b3bfc
|
[
"MIT"
] | null | null | null |
services/users/project/tests/test_config.py
|
Wirya2700/testdriven-app
|
fc98c15f04e7c5e22b276d96bfbd6756201b3bfc
|
[
"MIT"
] | null | null | null |
# services/users/project/tests/test_config.py
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from project import create_app
app = create_app()
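# A single shared app instance; each TestCase below loads a different config class into it.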
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('project.config.DevelopmentConfig')
return app
def test_app_is_development(self):
self.assertTrue(app.config['SECRET_KEY'] == 'my_precious')
self.assertFalse(current_app is None)
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] ==
os.environ.get('DATABASE_URL')
)
self.assertTrue(app.config['DEBUG_TB_ENABLED'])
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('project.config.TestingConfig')
return app
def test_app_is_testing(self):
self.assertTrue(app.config['SECRET_KEY'] == 'my_precious')
self.assertTrue(app.config['TESTING'])
self.assertFalse(app.config['PRESERVE_CONTEXT_ON_EXCEPTION'])
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] ==
os.environ.get('DATABASE_TEST_URL')
)
self.assertFalse(app.config['DEBUG_TB_ENABLED'])
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('project.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertTrue(app.config['SECRET_KEY'] == 'my_precious')
self.assertFalse(app.config['TESTING'])
self.assertFalse(app.config['DEBUG_TB_ENABLED'])
if __name__ == '__main__':
unittest.main()
| 28.033898
| 69
| 0.685611
|
457fac15e3b24a406ac1ea5a69f7ebb6dc44c8b0
| 4,173
|
py
|
Python
|
thzspider/scripts/archive/searchrfidvianame.py
|
jiangtianyu2009/softcake
|
a7696185e6cc4366af8f4c98f8b7053593b199dd
|
[
"MIT"
] | 1
|
2020-10-15T16:09:13.000Z
|
2020-10-15T16:09:13.000Z
|
thzspider/scripts/archive/searchrfidvianame.py
|
jiangtianyu2009/softcake
|
a7696185e6cc4366af8f4c98f8b7053593b199dd
|
[
"MIT"
] | null | null | null |
thzspider/scripts/archive/searchrfidvianame.py
|
jiangtianyu2009/softcake
|
a7696185e6cc4366af8f4c98f8b7053593b199dd
|
[
"MIT"
] | null | null | null |
from scrapinghub import ScrapinghubClient
apikey = '11befd9da9304fecb83dfa114d1926e9'
client = ScrapinghubClient(apikey)
project = client.get_project(252342)
javjob = project.jobs.list(spider='javname', state='finished')[0]
print(javjob['key'])
lastjob = project.jobs.get(javjob['key'])
namelist = []
namelist.append('秋山祥子')
namelist.append('天海つばさ')
namelist.append('友田彩也香')
namelist.append('麻倉憂')
namelist.append('瑠川リナ')
namelist.append('このは')
namelist.append('羽月希')
namelist.append('椎名みくる')
namelist.append('上原結衣')
namelist.append('早乙女らぶ')
namelist.append('葵司')
namelist.append('SARAH')
namelist.append('佐藤遥希')
namelist.append('小島みなみ')
namelist.append('上原亚衣')
namelist.append('若菜亜衣')
namelist.append('南梨央奈')
namelist.append('鶴田加奈')
namelist.append('丘咲エミリ')
namelist.append('愛内希')
namelist.append('初美沙希')
namelist.append('木村つな')
namelist.append('西野翔')
namelist.append('稲葉ゆい')
namelist.append('Rio')
namelist.append('つぼみ')
namelist.append('大橋未久')
namelist.append('板垣あずさ')
namelist.append('希志あいの')
namelist.append('瀬名あゆむ')
namelist.append('石原莉奈')
namelist.append('波多野结衣')
namelist.append('水菜丽')
namelist.append('黒木いちか')
namelist.append('希崎ジェシカ')
namelist.append('藤井シェリー')
namelist.append('藤崎セシル')
namelist.append('吉泽明步')
namelist.append('岩佐あゆみ')
namelist.append('初芽里奈')
namelist.append('みなともも')
namelist.append('绀野光')
namelist.append('樱由罗')
namelist.append('佳苗るか')
namelist.append('尾上若葉')
namelist.append('麻生希')
namelist.append('あべみかこ')
namelist.append('泷泽萝拉')
namelist.append('浜崎真緒')
namelist.append('Abigaile Johnson')
namelist.append('桜瀬奈')
namelist.append('きみと歩美')
namelist.append('古川伊织')
namelist.append('德田重男')
namelist.append('吉川あいみ')
namelist.append('入江愛美')
namelist.append('蒼乃かな')
namelist.append('保坂えり')
namelist.append('茜あずさ')
namelist.append('有本紗世')
namelist.append('白石茉莉奈')
namelist.append('饭冈加奈子')
namelist.append('葉山めい')
namelist.append('伊東紅')
namelist.append('希島あいり')
namelist.append('川村まや')
namelist.append('青井いちご')
namelist.append('逢坂はるな')
namelist.append('橋本涼')
namelist.append('裕木まゆ')
namelist.append('渡辺もも')
namelist.append('佐々木玲奈')
namelist.append('宮崎あや')
namelist.append('土屋あさみ')
namelist.append('柚月あい')
namelist.append('桃谷繪里香')
namelist.append('小川めるる')
namelist.append('天使もえ')
namelist.append('西田カリナ')
namelist.append('佐倉絆')
namelist.append('小波風')
namelist.append('ステイシー')
namelist.append('早乙女ゆい')
namelist.append('なごみ')
namelist.append('Gina Gerson')
namelist.append('みなみ愛星')
namelist.append('彩乃なな')
namelist.append('谷田部和沙')
namelist.append('市川まさみ')
namelist.append('葉山美空')
namelist.append('宮内栞')
namelist.append('長谷川るい')
namelist.append('青島かえで')
namelist.append('逢月はるな')
namelist.append('みなみもえ')
namelist.append('三上悠亜')
namelist.append('木下麻季')
namelist.append('橋本怜奈')
namelist.append('園田みおん')
namelist.append('長谷川モニカ')
namelist.append('跡美しゅり')
namelist.append('白桃心奈')
namelist.append('皆野あい')
namelist.append('アメリア・イヤハート')
namelist.append('桃乃木かな')
namelist.append('RION')
namelist.append('椎名そら')
namelist.append('向井藍')
namelist.append('橋本ありな')
namelist.append('井浦沙織')
namelist.append('南真菜果')
namelist.append('あおいれな')
namelist.append('琴羽雫')
namelist.append('来栖みさ')
namelist.append('斉藤みゆ')
namelist.append('今宮いずみ')
namelist.append('緒沢くるみ')
namelist.append('水稀みり')
namelist.append('宮沢ゆかり')
namelist.append('高桥圣子')
namelist.append('姫川ゆうな')
namelist.append('戸田真琴')
namelist.append('栄川乃亜')
namelist.append('もりの小鳥')
namelist.append('佐々木ゆう')
namelist.append('星乃ゆづき')
namelist.append('真田美樹')
namelist.append('久野せいな')
namelist.append('凰かなめ')
namelist.append('桃園みらい')
namelist.append('熊倉しょうこ')
namelist.append('雛形くるみ')
namelist.append('早乙女夏菜')
namelist.append('白咲はる')
namelist.append('椎奈さら')
namelist.append('月野ゆりあ')
namelist.append('きみかわ結衣')
namelist.append('明里つむぎ')
namelist.append('今井パオラ')
namelist.append('君色花音')
namelist.append('北川レイラ')
namelist.append('水トさくら')
namelist.append('一乃瀬るりあ')
namelist.append('ひなた澪')
namelist.append('桜空もも')
namelist.append('双葉良香')
namelist.append('藤波さとり')
namelist.append('小鳥遊みやび')
namelist.append('宝生リリー')
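# For each name, filter the finished crawl's items on the `name` field and print the first match's link.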
for actorname in namelist:
filters = [("name", "=", [actorname])]
print(lastjob.items.list(count=1, filter=filters))
for item in lastjob.items.iter(count=1, filter=filters):
print(item['href'])
| 24.839286
| 65
| 0.75006
|
079c658930d40362445de830785dc6b7a4987715
| 10,754
|
py
|
Python
|
pyrender/mesh.py
|
KailinLi/pyrender
|
cd943dac32ea943b464b0e37262367c593bbd1c9
|
[
"MIT"
] | null | null | null |
pyrender/mesh.py
|
KailinLi/pyrender
|
cd943dac32ea943b464b0e37262367c593bbd1c9
|
[
"MIT"
] | null | null | null |
pyrender/mesh.py
|
KailinLi/pyrender
|
cd943dac32ea943b464b0e37262367c593bbd1c9
|
[
"MIT"
] | null | null | null |
"""Meshes, conforming to the glTF 2.0 standards as specified in
https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-mesh
Author: Matthew Matl
"""
import copy
import numpy as np
import trimesh
from .constants import GLTF
from .material import MetallicRoughnessMaterial
from .primitive import Primitive
class Mesh(object):
"""A set of primitives to be rendered.
Parameters
----------
name : str
The user-defined name of this object.
primitives : list of :class:`Primitive`
The primitives associated with this mesh.
weights : (k,) float
Array of weights to be applied to the Morph Targets.
is_visible : bool
If False, the mesh will not be rendered.
"""
def __init__(self, primitives, name=None, weights=None, is_visible=True):
self.primitives = primitives
self.name = name
self.weights = weights
self.is_visible = is_visible
self._bounds = None
@property
def name(self):
"""str : The user-defined name of this object.
"""
return self._name
@name.setter
def name(self, value):
if value is not None:
value = str(value)
self._name = value
@property
def primitives(self):
"""list of :class:`Primitive` : The primitives associated
with this mesh.
"""
return self._primitives
@primitives.setter
def primitives(self, value):
self._primitives = value
@property
def weights(self):
"""(k,) float : Weights to be applied to morph targets.
"""
return self._weights
@weights.setter
def weights(self, value):
self._weights = value
@property
def is_visible(self):
"""bool : Whether the mesh is visible.
"""
return self._is_visible
@is_visible.setter
def is_visible(self, value):
self._is_visible = value
@property
def bounds(self):
"""(2,3) float : The axis-aligned bounds of the mesh.
"""
if self._bounds is None:
bounds = np.array([[np.infty, np.infty, np.infty], [-np.infty, -np.infty, -np.infty]])
for p in self.primitives:
bounds[0] = np.minimum(bounds[0], p.bounds[0])
bounds[1] = np.maximum(bounds[1], p.bounds[1])
self._bounds = bounds
return self._bounds
@property
def centroid(self):
"""(3,) float : The centroid of the mesh's axis-aligned bounding box
(AABB).
"""
return np.mean(self.bounds, axis=0)
@property
def extents(self):
"""(3,) float : The lengths of the axes of the mesh's AABB.
"""
return np.diff(self.bounds, axis=0).reshape(-1)
@property
def scale(self):
"""(3,) float : The length of the diagonal of the mesh's AABB.
"""
return np.linalg.norm(self.extents)
@property
def is_transparent(self):
"""bool : If True, the mesh is partially-transparent.
"""
for p in self.primitives:
if p.is_transparent:
return True
return False
@staticmethod
def from_points(points, colors=None, normals=None, is_visible=True, poses=None):
"""Create a Mesh from a set of points.
Parameters
----------
points : (n,3) float
The point positions.
colors : (n,3) or (n,4) float, optional
RGB or RGBA colors for each point.
        normals : (n,3) float, optional
The normal vectors for each point.
is_visible : bool
If False, the points will not be rendered.
poses : (x,4,4)
Array of 4x4 transformation matrices for instancing this object.
Returns
-------
mesh : :class:`Mesh`
The created mesh.
"""
primitive = Primitive(positions=points, normals=normals, color_0=colors, mode=GLTF.POINTS, poses=poses)
mesh = Mesh(primitives=[primitive], is_visible=is_visible)
return mesh
@staticmethod
def from_trimesh(mesh, material=None, is_visible=True, poses=None, wireframe=False, smooth=True):
"""Create a Mesh from a :class:`~trimesh.base.Trimesh`.
Parameters
----------
mesh : :class:`~trimesh.base.Trimesh` or list of them
A triangular mesh or a list of meshes.
material : :class:`Material`
The material of the object. Overrides any mesh material.
If not specified and the mesh has no material, a default material
will be used.
is_visible : bool
If False, the mesh will not be rendered.
poses : (n,4,4) float
Array of 4x4 transformation matrices for instancing this object.
wireframe : bool
If `True`, the mesh will be rendered as a wireframe object
smooth : bool
If `True`, the mesh will be rendered with interpolated vertex
normals. Otherwise, the mesh edges will stay sharp.
Returns
-------
mesh : :class:`Mesh`
The created mesh.
"""
if isinstance(mesh, (list, tuple, set, np.ndarray)):
meshes = list(mesh)
elif isinstance(mesh, trimesh.Trimesh):
meshes = [mesh]
else:
raise TypeError("Expected a Trimesh or a list, got a {}".format(type(mesh)))
primitives = []
for m in meshes:
positions = None
normals = None
indices = None
# Compute positions, normals, and indices
if smooth:
positions = m.vertices.copy()
normals = m.vertex_normals.copy()
indices = m.faces.copy()
else:
positions = m.vertices[m.faces].reshape((3 * len(m.faces), 3))
normals = np.repeat(m.face_normals, 3, axis=0)
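                # Flat shading: duplicate the three vertices of every face and give each
                # copy that face's normal; indices stay None so vertices are drawn in order.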
# Compute colors, texture coords, and material properties
color_0, texcoord_0, primitive_material = Mesh._get_trimesh_props(m, smooth=smooth, material=material)
# Override if material is given.
if material is not None:
# primitive_material = copy.copy(material)
primitive_material = copy.deepcopy(material) # TODO
if primitive_material is None:
# Replace material with default if needed
primitive_material = MetallicRoughnessMaterial(
alphaMode="BLEND", baseColorFactor=[0.3, 0.3, 0.3, 1.0], metallicFactor=0.2, roughnessFactor=0.8
)
primitive_material.wireframe = wireframe
# Create the primitive
primitives.append(
Primitive(
positions=positions,
normals=normals,
texcoord_0=texcoord_0,
color_0=color_0,
indices=indices,
material=primitive_material,
mode=GLTF.TRIANGLES,
poses=poses,
)
)
return Mesh(primitives=primitives, is_visible=is_visible)
@staticmethod
def _get_trimesh_props(mesh, smooth=False, material=None):
"""Gets the vertex colors, texture coordinates, and material properties
from a :class:`~trimesh.base.Trimesh`.
"""
colors = None
texcoords = None
# If the trimesh visual is undefined, return none for both
if not mesh.visual.defined:
return colors, texcoords, material
# Process vertex colors
if material is None:
if mesh.visual.kind == "vertex":
vc = mesh.visual.vertex_colors.copy()
if smooth:
colors = vc
else:
colors = vc[mesh.faces].reshape((3 * len(mesh.faces), vc.shape[1]))
material = MetallicRoughnessMaterial(
alphaMode="BLEND", baseColorFactor=[1.0, 1.0, 1.0, 1.0], metallicFactor=0.2, roughnessFactor=0.8
)
# Process face colors
elif mesh.visual.kind == "face":
if smooth:
raise ValueError("Cannot use face colors with a smooth mesh")
else:
colors = np.repeat(mesh.visual.face_colors, 3, axis=0)
material = MetallicRoughnessMaterial(
alphaMode="BLEND", baseColorFactor=[1.0, 1.0, 1.0, 1.0], metallicFactor=0.2, roughnessFactor=0.8
)
# Process texture colors
if mesh.visual.kind == "texture":
# Configure UV coordinates
if mesh.visual.uv is not None:
uv = mesh.visual.uv.copy()
if smooth:
texcoords = uv
else:
texcoords = uv[mesh.faces].reshape((3 * len(mesh.faces), uv.shape[1]))
if material is None:
# Configure mesh material
mat = mesh.visual.material
if isinstance(mat, trimesh.visual.texture.PBRMaterial):
material = MetallicRoughnessMaterial(
normalTexture=mat.normalTexture,
occlusionTexture=mat.occlusionTexture,
emissiveTexture=mat.emissiveTexture,
emissiveFactor=mat.emissiveFactor,
alphaMode="BLEND",
baseColorFactor=mat.baseColorFactor,
baseColorTexture=mat.baseColorTexture,
metallicFactor=mat.metallicFactor,
roughnessFactor=mat.roughnessFactor,
metallicRoughnessTexture=mat.metallicRoughnessTexture,
doubleSided=mat.doubleSided,
alphaCutoff=mat.alphaCutoff,
)
elif isinstance(mat, trimesh.visual.texture.SimpleMaterial):
glossiness = mat.kwargs.get("Ns", 1.0)
if isinstance(glossiness, list):
glossiness = float(glossiness[0])
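                    # Appears to be a heuristic mapping from the OBJ/Blinn-Phong specular
                    # exponent (Ns) to a PBR roughness factor in (0, 1].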
roughness = (2 / (glossiness + 2)) ** (1.0 / 4.0)
material = MetallicRoughnessMaterial(
alphaMode="BLEND",
roughnessFactor=roughness,
baseColorFactor=mat.diffuse,
baseColorTexture=mat.image,
)
elif isinstance(mat, MetallicRoughnessMaterial):
material = mat
return colors, texcoords, material
| 34.915584
| 116
| 0.553654
|
8df2de09156a5a83e1c901cf4151fdbc995731dc
| 2,523
|
py
|
Python
|
scripts/txt_replace.py
|
chargio/koku-metrics-operator
|
dc79ca3a8680c47230f6dd51c6e2e9868f2025b8
|
[
"Apache-2.0"
] | 5
|
2021-01-21T15:31:28.000Z
|
2021-12-19T04:18:44.000Z
|
scripts/txt_replace.py
|
chargio/koku-metrics-operator
|
dc79ca3a8680c47230f6dd51c6e2e9868f2025b8
|
[
"Apache-2.0"
] | 86
|
2020-12-03T22:58:32.000Z
|
2022-01-25T21:38:56.000Z
|
scripts/txt_replace.py
|
chargio/koku-metrics-operator
|
dc79ca3a8680c47230f6dd51c6e2e9868f2025b8
|
[
"Apache-2.0"
] | 5
|
2021-03-05T09:04:20.000Z
|
2022-02-19T19:20:29.000Z
|
#!/usr/bin/env python3
import sys
from datetime import datetime
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, name, path, remove
def check_version(v_tup):
new, old, _ = v_tup
if new == old:
print("expect new and previous versions to differ:\n\tnew version: %s\n\told version:" % new, old)
exit()
split = new.split(".")
if len(split) != 3:
print("expect version format: X.Y.Z\nactual version format: %s" % new)
exit()
for value in split:
try:
int(value)
except ValueError:
print("expect version format: X.Y.Z\nactual version format: %s" % split)
exit()
def replace(file_path, pattern, subst):
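    """Replace every occurrence of ``pattern`` with ``subst`` in ``file_path``,
    writing through a temporary file and preserving the original permission bits."""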
fh, abs_path = mkstemp()
with fdopen(fh,'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
copymode(file_path, abs_path)
remove(file_path)
move(abs_path, file_path)
def fix_csv(version_tuple):
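    """Fill the generated ClusterServiceVersion with the build timestamp, container image,
    description from docs/csv-description.md, the ``replaces`` chain and relatedImages."""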
version, previous, sha = version_tuple
# get the operator description from docs
docs = open("docs/csv-description.md")
description = " ".join(docs.readlines())
# all the replacements that will be made in the CSV
replacements = {
"0001-01-01T00:00:00Z": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
"INSERT-CONTAINER-IMAGE": f"{sha}",
"INSERT-DESCRIPTION": "|-\n " + description,
"name: Red Hat": f"name: Red Hat\n replaces: koku-metrics-operator.v{previous}",
"type: AllNamespaces": f"type: AllNamespaces\n relatedImages:\n - name: koku-metrics-operator\n image: {sha}"
}
filename = f"koku-metrics-operator/{version}/manifests/koku-metrics-operator.clusterserviceversion.yaml"
for k,v in replacements.items():
replace(filename, k, v)
def fix_dockerfile(version_tuple):
version, *_ = version_tuple
replacements = {
"bundle/manifests": "manifests",
"bundle/metadata": "metadata",
}
filename = f"koku-metrics-operator/{version}/Dockerfile"
for k,v in replacements.items():
replace(filename, k, v)
if __name__ == "__main__":
nargs = len(sys.argv)
if nargs != 4:
print("usage: %s VERSION PREVIOUS_VERSION IMAGE_SHA" % path.basename(sys.argv[0]))
exit()
version_tuple = sys.argv[1:]
check_version(version_tuple)
fix_csv(version_tuple)
fix_dockerfile(version_tuple)
| 32.766234
| 126
| 0.640507
|
45525d4c07f28d75ab2b5376dcee9f9d80ff418e
| 8,732
|
py
|
Python
|
yt/frontends/flash/fields.py
|
Xarthisius/yt
|
321643c3abff64a6f132d98d0747f3558f7552a3
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-15T08:17:43.000Z
|
2021-09-15T08:17:43.000Z
|
yt/frontends/flash/fields.py
|
Xarthisius/yt
|
321643c3abff64a6f132d98d0747f3558f7552a3
|
[
"BSD-3-Clause-Clear"
] | 31
|
2017-04-19T21:07:18.000Z
|
2017-04-20T01:08:43.000Z
|
yt/frontends/flash/fields.py
|
stonnes/yt
|
aad3cfa3b4ebab7838352ab467275a27c26ff363
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-04-21T07:01:51.000Z
|
2021-04-21T07:01:51.000Z
|
from yt.fields.field_info_container import FieldInfoContainer
# Common fields in FLASH: (Thanks to John ZuHone for this list)
#
# dens gas mass density (g/cc) --
# eint internal energy (ergs/g) --
# ener total energy (ergs/g), with 0.5*v^2 --
# gamc gamma defined as ratio of specific heats, no units
# game gamma defined as in , no units
# gpol gravitational potential from the last timestep (ergs/g)
# gpot gravitational potential from the current timestep (ergs/g)
# grac gravitational acceleration from the current timestep (cm s^-2)
# pden particle mass density (usually dark matter) (g/cc)
# pres pressure (erg/cc)
# temp temperature (K) --
# velx velocity x (cm/s) --
# vely velocity y (cm/s) --
# velz velocity z (cm/s) --
b_units = "code_magnetic"
pres_units = "code_mass/(code_length*code_time**2)"
en_units = "code_mass * (code_length/code_time)**2"
rho_units = "code_mass / code_length**3"
class FLASHFieldInfo(FieldInfoContainer):
known_other_fields = (
("velx", ("code_length/code_time", ["velocity_x"], None)),
("vely", ("code_length/code_time", ["velocity_y"], None)),
("velz", ("code_length/code_time", ["velocity_z"], None)),
("dens", ("code_mass/code_length**3", ["density"], None)),
("temp", ("code_temperature", ["temperature"], None)),
("pres", (pres_units, ["pressure"], None)),
("gpot", ("code_length**2/code_time**2", ["gravitational_potential"], None)),
("gpol", ("code_length**2/code_time**2", [], None)),
("tion", ("code_temperature", [], None)),
("tele", ("code_temperature", [], None)),
("trad", ("code_temperature", [], None)),
("pion", (pres_units, [], None)),
("pele", (pres_units, [], "Electron Pressure, P_e")),
("prad", (pres_units, [], "Radiation Pressure")),
("eion", (en_units, [], "Ion Internal Specific Energy")),
("eele", (en_units, [], "Electron Internal Specific Energy")),
("erad", (en_units, [], "Radiation Internal Specific Energy")),
("pden", (rho_units, [], "Particle Mass Density")),
("depo", ("code_length**2/code_time**2", [], None)),
("ye", ("", [], "Y_e")),
("magp", (pres_units, [], None)),
("divb", ("code_magnetic/code_length", [], None)),
("game", ("", [], r"\gamma_e\ \rm{(ratio\ of\ specific\ heats)}")),
("gamc", ("", [], r"\gamma_c\ \rm{(ratio\ of\ specific\ heats)}")),
("flam", ("", [], None)),
("absr", ("", [], "Absorption Coefficient")),
("emis", ("", [], "Emissivity")),
("cond", ("", [], "Conductivity")),
("dfcf", ("", [], "Diffusion Equation Scalar")),
("fllm", ("", [], "Flux Limit")),
("pipe", ("", [], "P_i/P_e")),
("tite", ("", [], "T_i/T_e")),
("dbgs", ("", [], "Debug for Shocks")),
("cham", ("", [], "Chamber Material Fraction")),
("targ", ("", [], "Target Material Fraction")),
("sumy", ("", [], None)),
("mgdc", ("", [], "Emission Minus Absorption Diffusion Terms")),
("magx", (b_units, [], "B_x")),
("magy", (b_units, [], "B_y")),
("magz", (b_units, [], "B_z")),
)
known_particle_fields = (
("particle_posx", ("code_length", ["particle_position_x"], None)),
("particle_posy", ("code_length", ["particle_position_y"], None)),
("particle_posz", ("code_length", ["particle_position_z"], None)),
("particle_velx", ("code_length/code_time", ["particle_velocity_x"], None)),
("particle_vely", ("code_length/code_time", ["particle_velocity_y"], None)),
("particle_velz", ("code_length/code_time", ["particle_velocity_z"], None)),
("particle_tag", ("", ["particle_index"], None)),
("particle_mass", ("code_mass", ["particle_mass"], None)),
(
"particle_gpot",
("code_length**2/code_time**2", ["particle_gravitational_potential"], None),
),
)
def setup_fluid_fields(self):
from yt.fields.magnetic_field import setup_magnetic_field_aliases
unit_system = self.ds.unit_system
# Adopt FLASH 4.6 value for Na
Na = self.ds.quan(6.022140857e23, "g**-1")
for i in range(1, 1000):
self.add_output_field(
("flash", f"r{i:03}"),
sampling_type="cell",
units="",
display_name=f"Energy Group {i}",
)
# Add energy fields
def ekin(data):
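            # Specific kinetic energy 0.5*v**2, summing only the velocity components
            # that exist for this dataset's dimensionality.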
ek = data["flash", "velx"] ** 2
if data.ds.dimensionality >= 2:
ek += data["flash", "vely"] ** 2
if data.ds.dimensionality == 3:
ek += data["flash", "velz"] ** 2
return 0.5 * ek
if ("flash", "ener") in self.field_list:
self.add_output_field(
("flash", "ener"),
sampling_type="cell",
units="code_length**2/code_time**2",
)
self.alias(
("gas", "specific_total_energy"),
("flash", "ener"),
units=unit_system["specific_energy"],
)
else:
def _ener(field, data):
ener = data["flash", "eint"] + ekin(data)
try:
ener += data["flash", "magp"] / data["flash", "dens"]
except Exception:
pass
return ener
self.add_field(
("gas", "specific_total_energy"),
sampling_type="cell",
function=_ener,
units=unit_system["specific_energy"],
)
if ("flash", "eint") in self.field_list:
self.add_output_field(
("flash", "eint"),
sampling_type="cell",
units="code_length**2/code_time**2",
)
self.alias(
("gas", "specific_thermal_energy"),
("flash", "eint"),
units=unit_system["specific_energy"],
)
else:
def _eint(field, data):
eint = data["flash", "ener"] - ekin(data)
try:
eint -= data["flash", "magp"] / data["flash", "dens"]
except Exception:
pass
return eint
self.add_field(
("gas", "specific_thermal_energy"),
sampling_type="cell",
function=_eint,
units=unit_system["specific_energy"],
)
## Derived FLASH Fields
if ("flash", "abar") in self.field_list:
self.alias(("gas", "mean_molecular_weight"), ("flash", "abar"))
elif ("flash", "sumy") in self.field_list:
def _abar(field, data):
return 1.0 / data["flash", "sumy"]
self.add_field(
("gas", "mean_molecular_weight"),
sampling_type="cell",
function=_abar,
units="",
)
elif "eos_singlespeciesa" in self.ds.parameters:
def _abar(field, data):
return data.ds.parameters["eos_singlespeciesa"] * data["index", "ones"]
self.add_field(
("gas", "mean_molecular_weight"),
sampling_type="cell",
function=_abar,
units="",
)
if ("flash", "sumy") in self.field_list:
def _nele(field, data):
return data["flash", "dens"] * data["flash", "ye"] * Na
self.add_field(
("gas", "El_number_density"),
sampling_type="cell",
function=_nele,
units=unit_system["number_density"],
)
def _nion(field, data):
return data["flash", "dens"] * data["flash", "sumy"] * Na
self.add_field(
("gas", "ion_number_density"),
sampling_type="cell",
function=_nion,
units=unit_system["number_density"],
)
def _number_density(field, data):
return (
data["gas", "El_number_density"] + data["gas", "ion_number_density"]
)
else:
def _number_density(field, data):
return data["flash", "dens"] * Na / data["gas", "mean_molecular_weight"]
self.add_field(
("gas", "number_density"),
sampling_type="cell",
function=_number_density,
units=unit_system["number_density"],
)
setup_magnetic_field_aliases(self, "flash", [f"mag{ax}" for ax in "xyz"])
| 37.800866
| 88
| 0.504695
|
4fd2a7ef8ef9bd847bb3d52b19cc90472bd773db
| 234
|
py
|
Python
|
uaber-api/uaber/settings/base.py
|
lahim/UAber
|
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
|
[
"MIT"
] | 1
|
2022-03-03T14:55:15.000Z
|
2022-03-03T14:55:15.000Z
|
uaber-api/uaber/settings/base.py
|
lahim/Code4Ukraine
|
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
|
[
"MIT"
] | null | null | null |
uaber-api/uaber/settings/base.py
|
lahim/Code4Ukraine
|
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
|
[
"MIT"
] | null | null | null |
CORS_ALLOW_ORIGINS = '*' # fixme!
CORS_ALLOW_METHODS = ['GET', 'POST', 'PATCH', 'DELETE']
CORS_ALLOW_HEADERS = ['*'] # fixme!
DATABASE = {
'uri': 'mongodb://localhost:27017',
'max_pool_size': 10,
'db_name': 'uaberdb',
}
| 23.4
| 54
| 0.606838
|
2620a583dd36b2960742917eefd154f1a303b496
| 5,037
|
py
|
Python
|
docker_compose_wait.py
|
kbkk/docker-compose-wait
|
46f5aa5ba89024f96529c75dc676e1fb3b23c278
|
[
"MIT"
] | null | null | null |
docker_compose_wait.py
|
kbkk/docker-compose-wait
|
46f5aa5ba89024f96529c75dc676e1fb3b23c278
|
[
"MIT"
] | null | null | null |
docker_compose_wait.py
|
kbkk/docker-compose-wait
|
46f5aa5ba89024f96529c75dc676e1fb3b23c278
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import division, absolute_import, print_function, unicode_literals
import subprocess
import re
import time
import sys
import argparse
import yaml
from timeparse import timeparse
def call(args):
return '\n'.join(subprocess.check_output(args).decode().splitlines())
def get_all_statuses():
return [tuple(x.split(",")) for x in call(["docker", "ps", "--all", "--format", "{{.ID}},{{.Status}}"]).splitlines()]
def get_statuses_for_ids(ids):
status_list = get_all_statuses()
statuses = {}
for id in ids:
status = None
for s in status_list:
if id.find(s[0]) == 0:
status = s[1]
break
if status is None:
status = "removed"
statuses[id] = status
return statuses
def convert_status(s):
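    """Collapse a raw ``docker ps`` status string (e.g. "Up 5 seconds (healthy)") into
    one of: 'starting', 'healthy', 'unhealthy', 'up' or 'down'."""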
res = re.search(r"^([^\s]+)[^\(]*(?:\((.*)\).*)?$", s)
if res is None:
raise Exception("Unknown status format %s" % s)
if res.group(1) == "Up":
if res.group(2) == "health: starting":
return "starting"
elif res.group(2) == "healthy":
return "healthy"
elif res.group(2) == "unhealthy":
return "unhealthy"
elif res.group(2) is None:
return "up"
else:
raise Exception("Unknown status format %s" % s)
else:
return "down"
def get_converted_statuses(ids):
return dict([(k, convert_status(v)) for k, v in get_statuses_for_ids(ids).items()])
def get_docker_compose_args(args):
nargs = []
for f in args.file:
nargs += ['-f', f]
if args.project_name:
nargs += ['-p', args.project_name]
return nargs
def get_services_ids(dc_args):
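    """Map every service named in the compose config to its container id, skipping
    services that have no container yet."""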
services_names = yaml.safe_load(call(["docker-compose"] + dc_args + ["config"]))["services"].keys()
services = {}
for name in services_names:
id = call(["docker-compose"] + dc_args + ["ps", '-q', name]).strip()
if id == '':
continue
services[name] = id
return services
def get_services_statuses(services_with_ids):
statuses_by_id = get_converted_statuses(services_with_ids.values())
return list([({'name': k, 'id': v}, statuses_by_id[v]) for k, v in services_with_ids.items()])
def print_healthcheck_log_for_service_id(service_id):
print(subprocess.check_output(["docker", "inspect", "--format", "\"{{json .State.Health }}\"", service_id]))
def main():
parser = argparse.ArgumentParser(
description='Wait until all services in a docker-compose file are healthy. Options are forwarded to docker-compose.',
usage='docker-compose-wait.py [options]'
)
parser.add_argument('-f', '--file', action='append', default=[],
help='Specify an alternate compose file (default: docker-compose.yml)')
parser.add_argument('-p', '--project-name',
help='Specify an alternate project name (default: directory name)')
parser.add_argument('-w', '--wait', action='store_true',
help='Wait for all the processes to stabilize before exit (default behavior is to exit '
+ 'as soon as any of the processes is unhealthy)')
parser.add_argument('-t', '--timeout', default=None,
help='Max amount of time during which this command will run (expressed using the '
+ 'same format than in docker-compose.yml files, example: 5s, 10m,... ). If there is a '
+ 'timeout this command will exit returning 1. (default: wait for an infinite amount of time)')
parser.add_argument('-l', '--log-print', action='store_true',
help='Whether to print docker healthcheck output for unhealthy services')
args = parser.parse_args()
dc_args = get_docker_compose_args(args)
start_time = time.time()
timeout = timeparse(args.timeout) if args.timeout is not None else None
services_ids = get_services_ids(dc_args)
up_statuses = set(['healthy', 'up'])
down_statuses = set(['down', 'unhealthy', 'removed'])
stabilized_statuses = up_statuses | down_statuses
while True:
statuses = get_services_statuses(services_ids)
        if args.wait and any([v not in stabilized_statuses for k, v in statuses]):
            # Some services have not stabilized yet: skip the up/down checks but still
            # honour the timeout check and the one-second sleep below, instead of
            # busy-looping with `continue`.
            pass
        elif all([v in up_statuses for k, v in statuses]):
print("All processes up and running")
exit(0)
elif any([v in down_statuses for k, v in statuses]):
print("Some processes failed:")
for k, v in [(k, v) for k, v in statuses if v in down_statuses]:
print("%s is %s" % (k['name'], v))
if args.log_print:
print_healthcheck_log_for_service_id(k['id'])
exit(-1)
if args.timeout is not None and time.time() > start_time + timeout:
print("Timeout")
exit(1)
time.sleep(1)
if __name__ == "__main__":
# execute only if run as a script
main()
| 35.978571
| 125
| 0.604129
|
7ba2942d9d5dcdc95adaff2ce6168207eb23d2dd
| 358,370
|
py
|
Python
|
Cython/Compiler/Nodes.py
|
arigo/cython
|
c4a0aa5969258afcc22d6a76732b5c4a2aa696b0
|
[
"Apache-2.0"
] | null | null | null |
Cython/Compiler/Nodes.py
|
arigo/cython
|
c4a0aa5969258afcc22d6a76732b5c4a2aa696b0
|
[
"Apache-2.0"
] | null | null | null |
Cython/Compiler/Nodes.py
|
arigo/cython
|
c4a0aa5969258afcc22d6a76732b5c4a2aa696b0
|
[
"Apache-2.0"
] | 1
|
2020-09-09T16:10:27.000Z
|
2020-09-09T16:10:27.000Z
|
#
# Parse tree nodes
#
from __future__ import absolute_import
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from ..Utils import add_metaclass
if sys.version_info[0] >= 3:
_py_int_types = int
else:
_py_int_types = (int, long)
def relative_position(pos):
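    """Return a (file, line) pair for a source position."""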
return (pos[0].get_filenametable_entry(), pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
def _analyse_signature_annotation(annotation, env):
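    # Interpret a signature annotation as a type declaration. A dict literal such as
    # {'type': ..., 'ctype': ...} lets the user pick between a Python-level and a
    # C-level type; otherwise the annotation itself is analysed as a type.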
base_type = None
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
else:
warning(annotation.pos, "Unknown type declaration found in signature annotation")
return base_type, arg_type
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
# no code written => undo writing marker
code.buffer.stream.truncate(pristine)
else:
marker = marker.replace('->', '<-', 1)
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
methods = set(['analyse_types',
'analyse_expressions',
'analyse_target_types'])
def __new__(cls, name, bases, attrs):
from types import FunctionType
def check(name, func):
def call(*args, **kwargs):
retval = func(*args, **kwargs)
if retval is None:
print('%s %s %s' % (name, args, kwargs))
return retval
return call
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType) and mname in cls.methods:
attrs[mname] = check(mname, m)
return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
#return add_metaclass(CheckAnalysers)(cls)
return cls
@_with_metaclass
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
is_wrapper = False # is a DefNode wrapper for a C function
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
value = getattr(result, attrname)
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
#
# There are 3 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
# Make symbol table entries for all declarations at the current
# level, both explicit (def, cdef, etc.) and implicit (assignment
# to an otherwise undeclared name).
#
# (1) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" % \
self.__class__.__name__)
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" % \
self.__class__.__name__)
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
def end_pos(self):
try:
return self._end_pos
except AttributeError:
pos = self.pos
if not self.child_attrs:
self._end_pos = pos
return pos
for attr in self.child_attrs:
child = getattr(self, attr)
# Sometimes lists, sometimes nodes
if child is None:
pass
elif isinstance(child, list):
for c in child:
pos = max(pos, c.end_pos())
else:
pos = max(pos, child.end_pos())
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
"""Debug helper method that returns a recursive string representation of this node.
"""
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
else:
indent = " " * level
res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
for key, value in attrs:
res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
res += "%s>" % indent
return res
def dump_pos(self, mark_column=False, marker='(#)'):
"""Debug helper method that returns the source code context of this node as a string.
"""
if not self.pos:
return u''
source_desc, line, col = self.pos
contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore')
# line numbers start at 1
lines = contents[max(0, line-3):line]
current = lines[-1]
if mark_column:
current = current[:col] + marker + current[col:]
lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n'
lines += contents[line:line+2]
return u'"%s":%d:%d\n%s\n' % (
source_desc.get_escaped_description(), line, col, u''.join(lines))
class CompilerDirectivesNode(Node):
"""
Sets compiler directives for the children nodes
"""
# directives {string:value} A dictionary holding the right value for
# *all* possible directives.
# body Node
child_attrs = ["body"]
def analyse_declarations(self, env):
old = env.directives
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
self.body = self.body.analyse_expressions(env)
env.directives = old
return self
def generate_function_definitions(self, env, code):
env_old = env.directives
code_old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
def generate_execution_code(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_execution_code(code)
code.globalstate.directives = old
def annotate(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.annotate(code)
code.globalstate.directives = old
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
def generate_cached_builtins_decls(self, env, code):
entries = env.global_scope().undeclared_cached_builtins
for entry in entries:
code.globalstate.add_cached_builtin_decl(entry)
del entries[:]
def generate_lambda_definitions(self, env, code):
for node in env.lambda_defs:
node.generate_function_definitions(env, code)
class StatListNode(Node):
# stats a list of StatNode
child_attrs = ["stats"]
@staticmethod
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
return node # No node-specific analysis needed
def analyse_declarations(self, env):
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
#print "StatListNode.analyse_expressions" ###
self.stats = [stat.analyse_expressions(env)
for stat in self.stats]
return self
def generate_function_definitions(self, env, code):
#print "StatListNode.generate_function_definitions" ###
for stat in self.stats:
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
#print "StatListNode.generate_execution_code" ###
for stat in self.stats:
code.mark_pos(stat.pos)
stat.generate_execution_code(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class StatNode(Node):
#
# Code generation for statements is split into the following subphases:
#
# (1) generate_function_definitions
# Emit C code for the definitions of any structs,
# unions, enums and functions defined in the current
# scope-block.
#
# (2) generate_execution_code
# Emit C code for executable statements.
#
def generate_function_definitions(self, env, code):
pass
def generate_execution_code(self, code):
raise InternalError("generate_execution_code not implemented for %s" % \
self.__class__.__name__)
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
child_attrs = ["body"]
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class CDeclaratorNode(Node):
# Part of a C declaration.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns (name, type) pair where name is the
# CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
# for which this is a base
child_attrs = []
calling_convention = ""
def analyse_templates(self):
# Only C++ functions have templates.
return None
class CNameDeclaratorNode(CDeclaratorNode):
# name string The Cython name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
child_attrs = ['default']
default = None
def analyse(self, base_type, env, nonempty=0):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
else:
self.name = base_type.declaration_code("", for_display=1, pyrex=1)
base_type = py_object_type
if base_type.is_fused and env.fused_to_specific:
base_type = base_type.specialize(env.fused_to_specific)
self.type = base_type
return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse_templates(self):
return self.base.analyse_templates()
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos, "Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
return self.base.analyse(ptr_type, env, nonempty=nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse_templates(self):
return self.base.analyse_templates()
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
return self.base.analyse(ref_type, env, nonempty=nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# dimension ExprNode
child_attrs = ["base", "dimension"]
def analyse(self, base_type, env, nonempty=0):
if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
args = self.dimension.args
else:
args = self.dimension,
values = [v.analyse_as_type(env) for v in args]
if None in values:
ix = values.index(None)
error(args[ix].pos, "Template parameter not a type")
base_type = error_type
else:
base_type = base_type.specialize_here(self.pos, values)
return self.base.analyse(base_type, env, nonempty=nonempty)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
error(self.dimension.pos, "Array dimension not integer")
size = self.dimension.get_constant_c_result_code()
if size is not None:
try:
size = int(size)
except ValueError:
# runtime constant?
pass
else:
size = None
if not base_type.is_complete():
error(self.pos, "Array element type '%s' is incomplete" % base_type)
if base_type.is_pyobject:
error(self.pos, "Array element cannot be a Python object")
if base_type.is_cfunction:
error(self.pos, "Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
return self.base.analyse(array_type, env, nonempty=nonempty)
class CFuncDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# args [CArgDeclNode]
# templates [TemplatePlaceholderType]
# has_varargs boolean
# exception_value ConstNode
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# is_const_method boolean Whether this is a const method
child_attrs = ["base", "args", "exception_value"]
overridable = 0
optional_arg_count = 0
is_const_method = 0
templates = None
def analyse_templates(self):
if isinstance(self.base, CArrayDeclaratorNode):
from .ExprNodes import TupleNode, NameNode
template_node = self.base.dimension
if isinstance(template_node, TupleNode):
template_nodes = template_node.args
elif isinstance(template_node, NameNode):
template_nodes = [template_node]
else:
error(template_node.pos, "Template arguments must be a list of names")
return None
self.templates = []
for template in template_nodes:
if isinstance(template, NameNode):
self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
else:
error(template.pos, "Template arguments must be a list of names")
self.base = self.base.base
return self.templates
else:
return None
def analyse(self, return_type, env, nonempty=0, directive_locals=None):
if directive_locals is None:
directive_locals = {}
if nonempty:
nonempty -= 1
func_type_args = []
for i, arg_node in enumerate(self.args):
name_declarator, type = arg_node.analyse(
env, nonempty=nonempty,
is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives))
name = name_declarator.name
if name in directive_locals:
type_node = directive_locals[name]
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(self.base.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
type = other_type
if name_declarator.cname:
error(self.pos, "Function argument cannot have C name specification")
if i == 0 and env.is_c_class_scope and type.is_unspecified:
# fix the type of self
type = env.parent_type
# Turn *[] argument into **
if type.is_array:
type = PyrexTypes.c_ptr_type(type.base_type)
# Catch attempted C-style func(void) decl
if type.is_void:
error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
func_type_args.append(
PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
if arg_node.default:
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
exc_val = None
exc_check = 0
if self.exception_check == '+':
env.add_include_file('ios') # for std::ios_base::failure
env.add_include_file('new') # for std::bad_alloc
env.add_include_file('stdexcept')
env.add_include_file('typeinfo') # for std::bad_cast
if (return_type.is_pyobject
and (self.exception_value or self.exception_check)
and self.exception_check != '+'):
error(self.pos, "Exception clause not allowed for function returning Python object")
else:
if self.exception_value:
self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
exc_val_type = self.exception_value.type
if (not exc_val_type.is_error
and not exc_val_type.is_pyobject
and not (exc_val_type.is_cfunction
and not exc_val_type.return_type.is_pyobject
and not exc_val_type.args)):
error(self.exception_value.pos,
"Exception value must be a Python exception or cdef function with no arguments.")
exc_val = self.exception_value
else:
self.exception_value = self.exception_value.coerce_to(
return_type, env).analyse_const_expression(env)
exc_val = self.exception_value.get_constant_c_result_code()
if exc_val is None:
raise InternalError(
"get_constant_c_result_code not implemented for %s" %
self.exception_value.__class__.__name__)
if not return_type.assignable_from(self.exception_value.type):
error(self.exception_value.pos,
"Exception value incompatible with function return type")
exc_check = self.exception_check
if return_type.is_cfunction:
error(self.pos, "Function cannot return a function")
func_type = PyrexTypes.CFuncType(
return_type, func_type_args, self.has_varargs,
optional_arg_count=self.optional_arg_count,
exception_value=exc_val, exception_check=exc_check,
calling_convention=self.base.calling_convention,
nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable,
is_const_method=self.is_const_method,
templates=self.templates)
if self.optional_arg_count:
if func_type.is_fused:
# This is a bit of a hack... When we need to create specialized CFuncTypes
# on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
# struct
def declare_opt_arg_struct(func_type, fused_cname):
self.declare_optional_arg_struct(func_type, env, fused_cname)
func_type.declare_opt_arg_struct = declare_opt_arg_struct
else:
self.declare_optional_arg_struct(func_type, env)
callspec = env.directives['callspec']
if callspec:
current = func_type.calling_convention
if current and current != callspec:
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
return self.base.analyse(func_type, env)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
"""
Declares the optional argument struct (the struct used to hold the
values for optional arguments). For fused cdef functions, this is
deferred as analyse_declarations is called only once (on the fused
cdef function).
"""
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=1)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
if fused_cname is not None:
struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)
op_args_struct = env.global_scope().declare_struct_or_union(
name=struct_cname,
kind='struct',
scope=scope,
typedef_flag=0,
pos=self.pos,
cname=struct_cname)
op_args_struct.defined_in_pxd = 1
op_args_struct.used = 1
func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
class CConstDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
const = PyrexTypes.c_const_type(base_type)
return self.base.analyse(const, env, nonempty=nonempty)
class CArgDeclNode(Node):
# Item in a function declaration argument list.
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# not_none boolean Tagged with 'not None'
# or_none boolean Tagged with 'or None'
# accept_none boolean Resolved boolean for not_none/or_none
# default ExprNode or None
# default_value PyObjectConst constant for default value
# annotation ExprNode or None Py3 function arg annotation
# is_self_arg boolean Is the "self" arg of an extension type method
# is_type_arg boolean Is the "class" arg of an extension type classmethod
# is_kw_only boolean Is a keyword-only argument
# is_dynamic boolean Non-literal arg stored inside CyFunction
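#
# Illustrative argument declarations covered by this node (example only):
#     def f(self, int n, obj not None, double scale=1.0): ...
#     cdef g(char *name, list items=None): ...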
child_attrs = ["base_type", "declarator", "default", "annotation"]
is_self_arg = 0
is_type_arg = 0
is_generic = 1
kw_only = 0
not_none = 0
or_none = 0
type = None
name_declarator = None
default_value = None
annotation = None
is_dynamic = 0
def analyse(self, env, nonempty=0, is_self_arg=False):
if is_self_arg:
self.base_type.is_self_arg = self.is_self_arg = True
if self.type is None:
# The parser may misinterpret names as types. We fix that here.
if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
if nonempty:
if self.base_type.is_basic_c_type:
# char, short, long called "int"
type = self.base_type.analyse(env, could_be_name=True)
arg_name = type.empty_declaration_code()
else:
arg_name = self.base_type.name
self.declarator.name = EncodedString(arg_name)
self.base_type.name = None
self.base_type.is_basic_c_type = False
could_be_name = True
else:
could_be_name = False
self.base_type.is_arg = True
base_type = self.base_type.analyse(env, could_be_name=could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
# The parser is unable to resolve the ambiguity of [] as part of the
# type (e.g. in buffers) or empty declarator (as with arrays).
# This only arises for empty multi-dimensional arrays.
if (base_type.is_array
and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
declarator = declarator.base
declarator.base = self.base_type.array_declarator
base_type = base_type.base_type
# inject type declaration from annotations
if self.annotation and env.directives['annotation_typing'] and self.base_type.name is None:
arg_type = self.inject_type_from_annotations(env)
if arg_type is not None:
base_type = arg_type
return self.declarator.analyse(base_type, env, nonempty=nonempty)
else:
return self.name_declarator, self.type
def inject_type_from_annotations(self, env):
annotation = self.annotation
if not annotation:
return None
base_type, arg_type = _analyse_signature_annotation(annotation, env)
if base_type is not None:
self.base_type = base_type
return arg_type
def calculate_default_value_code(self, code):
if self.default_value is None:
if self.default:
if self.default.is_literal:
# will not output any code, just assign the result_code
self.default.generate_evaluation_code(code)
return self.type.cast_code(self.default.result())
self.default_value = code.get_argument_default_const(self.type)
return self.default_value
def annotate(self, code):
if self.default:
self.default.annotate(code)
def generate_assignment_code(self, code, target=None, overloaded_assignment=False):
default = self.default
if default is None or default.is_literal:
return
if target is None:
target = self.calculate_default_value_code(code)
default.generate_evaluation_code(code)
default.make_owned_reference(code)
result = default.result() if overloaded_assignment else default.result_as(self.type)
code.putln("%s = %s;" % (target, result))
if self.type.is_pyobject:
code.put_giveref(default.result())
default.generate_post_assignment_code(code)
default.free_temps(code)
class CBaseTypeNode(Node):
# Abstract base class for C base type nodes.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns the type.
def analyse_as_type(self, env):
return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
# type type
child_attrs = []
def analyse(self, env, could_be_name=False):
return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
# name string
# module_path [string] Qualifying name components
# is_basic_c_type boolean
# signed boolean
# longness integer
# complex boolean
# is_self_arg boolean Is self argument of C method
# ##is_type_arg boolean Is type argument of class method
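#
# Illustrative base types resolved here (example only):
#     cdef unsigned long long counter    # basic C type with signed/longness
#     cdef object value                  # py_object_type
#     cdef somemod.SomeExtType inst      # module_path = ['somemod']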
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
module_path = []
is_basic_c_type = False
complex = False
def analyse(self, env, could_be_name=False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
type = None
if self.is_basic_c_type:
type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
if not type:
error(self.pos, "Unrecognised type modifier combination")
elif self.name == "object" and not self.module_path:
type = py_object_type
elif self.name is None:
if self.is_self_arg and env.is_c_class_scope:
#print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
else:
if self.module_path:
# Maybe it's a nested C++ class.
scope = env
for item in self.module_path:
entry = scope.lookup(item)
if entry is not None and entry.is_cpp_class:
scope = entry.type.scope
else:
scope = None
break
if scope is None:
# Maybe it's a cimport.
scope = env.find_imported_module(self.module_path, self.pos)
if scope:
scope.fused_to_specific = env.fused_to_specific
else:
scope = env
if scope:
if scope.is_c_class_scope:
scope = scope.global_scope()
type = scope.lookup_type(self.name)
if type is not None:
pass
elif could_be_name:
if self.is_self_arg and env.is_c_class_scope:
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
self.arg_name = EncodedString(self.name)
else:
if self.templates:
if self.name not in self.templates:
error(self.pos, "'%s' is not a type identifier" % self.name)
type = PyrexTypes.TemplatePlaceholderType(self.name)
else:
error(self.pos, "'%s' is not a type identifier" % self.name)
if self.complex:
if not type.is_numeric or type.is_complex:
error(self.pos, "can only complexify c numeric types")
type = PyrexTypes.CComplexType(type)
type.create_declaration_utility_code(env)
elif type is Builtin.complex_type:
# Special case: optimise builtin complex type into C's
# double complex. The parser cannot do this (as for the
# normal scalar types) as the user may have redeclared the
# 'complex' type. Testing for the exact type here works.
type = PyrexTypes.c_double_complex_type
type.create_declaration_utility_code(env)
self.complex = True
if type:
return type
else:
return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):
name = 'memoryview'
child_attrs = ['base_type_node', 'axes']
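# Typical declarations producing this node (illustrative):
#     cdef double[:, ::1] image    # 2D slice, C-contiguous in the last dimension
#     cdef int[::1] counts         # 1D contiguous slice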
def analyse(self, env, could_be_name=False):
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
from . import MemoryView
try:
axes_specs = MemoryView.get_axes_specs(env, self.axes)
except CompileError as e:
error(e.position, e.message_only)
self.type = PyrexTypes.ErrorType()
return self.type
if not MemoryView.validate_axes(self.pos, axes_specs):
self.type = error_type
else:
self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
self.type.validate_memslice_dtype(self.pos)
self.use_memview_utilities(env)
return self.type
def use_memview_utilities(self, env):
from . import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
# For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
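#
# Example (illustrative; names are placeholders):
#     cdef extern from "lib.h":
#         cdef cppclass Outer:
#             cppclass Inner:
#                 pass
#     cdef Outer.Inner node    # 'Inner' is looked up in Outer's scope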
child_attrs = ['base_type']
def analyse(self, env, could_be_name=None):
base_type = self.base_type.analyse(env)
if base_type is PyrexTypes.error_type:
return PyrexTypes.error_type
if not base_type.is_cpp_class:
error(self.pos, "'%s' is not a valid type scope" % base_type)
return PyrexTypes.error_type
type_entry = base_type.scope.lookup_here(self.name)
if not type_entry or not type_entry.is_type:
error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
return PyrexTypes.error_type
return type_entry.type
class TemplatedTypeNode(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
# keyword_args DictNode Keyword arguments
# base_type_node CBaseTypeNode
# After analysis:
# type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
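# Illustrative forms analysed here (example only):
#     vector[int]                          # templated C++ class
#     np.ndarray[np.float64_t, ndim=2]     # Python buffer type with options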
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
dtype_node = None
name = None
def analyse(self, env, could_be_name=False, base_type=None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
if base_type.is_cpp_class and base_type.is_template_type():
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
error(self.pos, "c++ templates cannot take keyword arguments")
self.type = PyrexTypes.error_type
else:
template_types = []
for template_node in self.positional_args:
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
elif base_type.is_pyobject:
# Buffer
from . import Buffer
options = Buffer.analyse_buffer_options(
self.pos,
env,
self.positional_args,
self.keyword_args,
base_type.buffer_defaults)
if sys.version_info[0] < 3:
# Py 2.x enforces byte strings as keyword arguments ...
options = dict([(name.encode('ASCII'), value)
for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options)
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
# It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
self.array_declarator = CArrayDeclaratorNode(
self.pos,
base=empty_declarator,
dimension=dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
if self.type.is_fused and env.fused_to_specific:
self.type = self.type.specialize(env.fused_to_specific)
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name=False):
base = self.base_type.analyse(env, could_be_name)
_, type = self.declarator.analyse(base, env)
return type
class CTupleBaseTypeNode(CBaseTypeNode):
# components [CBaseTypeNode]
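# Example (illustrative):
#     cdef (int, double) pair = (1, 2.0)   # C tuple of unboxed components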
child_attrs = ["components"]
def analyse(self, env, could_be_name=False):
component_types = []
for c in self.components:
type = c.analyse(env)
if type.is_pyobject:
error(c.pos, "Tuple types can't (yet) contain Python objects.")
return error_type
component_types.append(type)
entry = env.declare_tuple_type(self.pos, component_types)
entry.used = True
return entry.type
class FusedTypeNode(CBaseTypeNode):
"""
Represents a fused type in a ctypedef statement:
ctypedef cython.fused_type(int, long, long long) integral
name str name of this fused type
types [CSimpleBaseTypeNode] is the list of types to be fused
"""
child_attrs = []
def analyse_declarations(self, env):
type = self.analyse(env)
entry = env.declare_typedef(self.name, type, self.pos)
# Omit the typedef declaration that self.declarator would produce
entry.in_cinclude = True
def analyse(self, env, could_be_name=False):
types = []
for type_node in self.types:
type = type_node.analyse_as_type(env)
if not type:
error(type_node.pos, "Not a type")
continue
if type in types:
error(type_node.pos, "Type specified multiple times")
else:
types.append(type)
# if len(self.types) == 1:
# return types[0]
return PyrexTypes.FusedType(types, name=self.name)
class CConstTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
child_attrs = ["base_type"]
def analyse(self, env, could_be_name=False):
base = self.base_type.analyse(env, could_be_name)
if base.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
return PyrexTypes.c_const_type(base)
class CVarDefNode(StatNode):
# C variable definition or forward/extern function declaration.
#
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarators [CDeclaratorNode]
# in_pxd boolean
# api boolean
# overridable boolean whether it is a cpdef
# modifiers ['inline']
# decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
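#
# Illustrative declarations handled here (hypothetical names, example only):
#     cdef public double threshold       # variable definition
#     int seed(unsigned int)             # extern function declaration, e.g.
#                                        # inside a 'cdef extern from' block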
child_attrs = ["base_type", "declarators"]
decorators = None
directive_locals = None
def analyse_declarations(self, env, dest_scope=None):
if self.directive_locals is None:
self.directive_locals = {}
if not dest_scope:
dest_scope = env
self.dest_scope = dest_scope
if self.declarators:
templates = self.declarators[0].analyse_templates()
else:
templates = None
if templates is not None:
if self.visibility != 'extern':
error(self.pos, "Only extern functions allowed")
if len(self.declarators) > 1:
error(self.declarators[1].pos, "Can't multiply declare template types")
env = TemplateScope('func_template', env)
env.directives = env.outer_scope.directives
for template_param in templates:
env.declare_type(template_param.name, template_param, self.pos)
base_type = self.base_type.analyse(env)
if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
env.is_module_scope):
error(self.pos, "Fused types not allowed here")
return error_type
self.entry = None
visibility = self.visibility
for declarator in self.declarators:
if (len(self.declarators) > 1
and not isinstance(declarator, CNameDeclaratorNode)
and env.directives['warn.multiple_declarators']):
warning(
declarator.pos,
"Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). "
"Each pointer declaration should be on its own line.", 1)
create_extern_wrapper = (self.overridable
and self.visibility == 'extern'
and env.is_module_scope)
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
else:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos, "Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
error(declarator.pos, "Python object cannot be declared extern")
name = name_declarator.name
cname = name_declarator.cname
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
self.entry = dest_scope.declare_cfunction(
name, type, declarator.pos,
cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
api=self.api, modifiers=self.modifiers, overridable=self.overridable)
if self.entry is not None:
self.entry.directive_locals = copy.copy(self.directive_locals)
if create_extern_wrapper:
self.entry.type.create_to_py_utility_code(env)
self.entry.create_wrapper = True
else:
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(
name, type, declarator.pos,
cname=cname, visibility=visibility, in_pxd=self.in_pxd,
api=self.api, is_cdef=1)
if Options.docstrings:
self.entry.doc = embed_position(self.pos, self.doc)
class CStructOrUnionDefNode(StatNode):
# name string
# cname string or None
# kind "struct" or "union"
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
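#
# Example (illustrative):
#     cdef struct Point:
#         double x
#         double y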
child_attrs = ["attributes"]
def declare(self, env, scope=None):
self.entry = env.declare_struct_or_union(
self.name, self.kind, scope, self.typedef_flag, self.pos,
self.cname, visibility=self.visibility, api=self.api,
packed=self.packed)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = StructOrUnionScope(self.name)
self.declare(env, scope)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(env, scope)
if self.visibility != 'extern':
for attr in scope.var_entries:
type = attr.type
while type.is_array:
type = type.base_type
if type == self.entry.type:
error(attr.pos, "Struct cannot contain itself as a member.")
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class CppClassNode(CStructOrUnionDefNode, BlockNode):
# name string
# cname string or None
# visibility "extern"
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# base_classes [CBaseTypeNode]
# templates [(string, bool)] or None
# decorators [DecoratorNode] or None
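#
# Example (illustrative):
#     cdef extern from "<vector>" namespace "std":
#         cdef cppclass vector[T]:
#             void push_back(T&)
#             size_t size()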
decorators = None
def declare(self, env):
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
num_optional_templates = sum(not required for _, required in self.templates)
if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]):
error(self.pos, "Required template parameters must precede optional template parameters.")
self.entry = env.declare_cpp_class(
self.name, None, self.pos, self.cname,
base_classes=[], visibility=self.visibility, templates=template_types)
def analyse_declarations(self, env):
if self.templates is None:
template_types = template_names = None
else:
template_names = [template_name for template_name, _ in self.templates]
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
scope = None
if self.attributes is not None:
scope = CppClassScope(self.name, env, templates=template_names)
def base_ok(base_class):
if base_class.is_cpp_class or base_class.is_struct:
return True
else:
error(self.pos, "Base class '%s' not a struct or class." % base_class)
base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
self.entry = env.declare_cpp_class(
self.name, scope, self.pos,
self.cname, base_class_types, visibility=self.visibility, templates=template_types)
if self.entry is None:
return
self.entry.is_cpp_class = 1
if scope is not None:
scope.type = self.entry.type
defined_funcs = []
def func_attributes(attributes):
for attr in attributes:
if isinstance(attr, CFuncDefNode):
yield attr
elif isinstance(attr, CompilerDirectivesNode):
for sub_attr in func_attributes(attr.body.stats):
yield sub_attr
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
declare = getattr(attr, 'declare', None)
if declare:
attr.declare(scope)
attr.analyse_declarations(scope)
for func in func_attributes(self.attributes):
defined_funcs.append(func)
if self.templates is not None:
func.template_declaration = "template <typename %s>" % ", typename ".join(template_names)
self.body = StatListNode(self.pos, stats=defined_funcs)
self.scope = scope
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(self.entry.type.scope)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(self.entry.type.scope, code)
def generate_execution_code(self, code):
self.body.generate_execution_code(code)
def annotate(self, code):
self.body.annotate(code)
class CEnumDefNode(StatNode):
# name string or None
# cname string or None
# items [CEnumDefItemNode]
# typedef_flag boolean
# visibility "public" or "private" or "extern"
# api boolean
# in_pxd boolean
# create_wrapper boolean
# entry Entry
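#
# Example (illustrative):
#     cdef enum Color:
#         RED = 1
#         GREEN
#         BLUE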
child_attrs = ["items"]
def declare(self, env):
self.entry = env.declare_enum(
self.name, self.pos,
cname=self.cname, typedef_flag=self.typedef_flag,
visibility=self.visibility, api=self.api,
create_wrapper=self.create_wrapper)
def analyse_declarations(self, env):
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for item in self.items:
item.analyse_declarations(env, self.entry)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.visibility == 'public' or self.api:
code.mark_pos(self.pos)
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
for item in self.entry.enum_values:
code.putln("%s = PyInt_FromLong(%s); %s" % (
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
Naming.moddict_cname,
item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
# name string
# cname string or None
# value ExprNode or None
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
if self.value:
self.value = self.value.analyse_const_expression(env)
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value = self.value.analyse_const_expression(env)
entry = env.declare_const(
self.name, enum_entry.type,
self.value, self.pos, cname=self.cname,
visibility=enum_entry.visibility, api=enum_entry.api,
create_wrapper=enum_entry.create_wrapper and enum_entry.name is None)
enum_entry.enum_values.append(entry)
if enum_entry.name:
enum_entry.type.values.append(entry.name)
class CTypeDefNode(StatNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# visibility "public" or "private"
# api boolean
# in_pxd boolean
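#
# Examples (illustrative):
#     ctypedef unsigned long culong_t
#     ctypedef double (*binop_t)(double, double)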
child_attrs = ["base_type", "declarator"]
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
name = name_declarator.name
cname = name_declarator.cname
entry = env.declare_typedef(
name, type, self.pos,
cname=cname, visibility=self.visibility, api=self.api)
if type.is_fused:
entry.in_cinclude = True
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FuncDefNode(StatNode, BlockNode):
# Base class for function definition nodes.
#
# return_type PyrexType
# #filename string C name of filename string const
# entry Symtab.Entry
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# pymethdef_required boolean Force Python method struct generation
# directive_locals { string : ExprNode } locals defined by cython.locals(...)
# directive_returns [ExprNode] type defined by cython.returns(...)
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
#
# is_async_def boolean is a Coroutine function
#
# has_fused_arguments boolean
# Whether this cdef function has fused parameters. This is needed
# by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
# with fused argument types with a FusedCFuncDefNode
py_func = None
needs_closure = False
needs_outer_scope = False
pymethdef_required = False
is_generator = False
is_generator_body = False
is_async_def = False
modifiers = []
has_fused_arguments = False
star_arg = None
starstar_arg = None
is_cyfunction = False
code_object = None
def analyse_default_values(self, env):
default_seen = 0
for arg in self.args:
if arg.default:
default_seen = 1
if arg.is_generic:
arg.default = arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, env)
else:
error(arg.pos, "This argument cannot have a default value")
arg.default = None
elif arg.kw_only:
default_seen = 1
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
def align_argument_type(self, env, arg):
# @cython.locals()
directive_locals = self.directive_locals
orig_type = arg.type
if arg.name in directive_locals:
type_node = directive_locals[arg.name]
other_type = type_node.analyse_as_type(env)
elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']:
type_node = arg.annotation
other_type = arg.inject_type_from_annotations(env)
if other_type is None:
return arg
else:
return arg
if other_type is None:
error(type_node.pos, "Not a type")
elif orig_type is not py_object_type and not orig_type.same_as(other_type):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
arg.type = other_type
return arg
def need_gil_acquisition(self, lenv):
return 0
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if self.needs_closure:
lenv = ClosureScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env,
scope_name=self.entry.cname)
else:
lenv = LocalScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env)
lenv.return_type = self.return_type
type = self.entry.type
if type.is_cfunction:
lenv.nogil = type.nogil and not type.with_gil
self.local_scope = lenv
lenv.directives = env.directives
return lenv
def generate_function_body(self, env, code):
self.body.generate_execution_code(code)
def generate_function_definitions(self, env, code):
from . import Buffer
if self.return_type.is_memoryviewslice:
from . import MemoryView
lenv = self.local_scope
if lenv.is_closure_scope and not lenv.is_passthrough:
outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
Naming.outer_scope_cname)
else:
outer_scope_cname = Naming.outer_scope_cname
lenv.mangle_closure_cnames(outer_scope_cname)
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# generate lambda function definitions
self.generate_lambda_definitions(lenv, code)
is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
self.entry.scope.is_c_class_scope)
is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
self.entry.scope.is_c_class_scope)
is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
if is_buffer_slot:
if 'cython_unused' not in self.modifiers:
self.modifiers = self.modifiers + ['cython_unused']
preprocessor_guard = self.get_preprocessor_guard()
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Profile", "Profile.c"))
# Generate C code for header and body of function
code.enter_cfunc_scope(lenv)
code.return_from_error_cleanup_label = code.new_label()
code.funcstate.gil_owned = not lenv.nogil
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
if preprocessor_guard:
code.putln(preprocessor_guard)
with_pymethdef = (self.needs_assignment_synthesis(env, code) or
self.pymethdef_required)
if self.py_func:
self.py_func.generate_function_header(
code, with_pymethdef=with_pymethdef, proto_only=True)
self.generate_function_header(code, with_pymethdef=with_pymethdef)
# ----- Local variable declarations
# Find function scope
cenv = env
while cenv.is_py_class_scope or cenv.is_c_class_scope:
cenv = cenv.outer_scope
if self.needs_closure:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
elif self.needs_outer_scope:
if lenv.is_passthrough:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
code.putln(";")
self.generate_argument_declarations(lenv, code)
for entry in lenv.var_entries:
if not (entry.in_closure or entry.is_arg):
code.put_var_declaration(entry)
# Initialize the return variable __pyx_r
init = ""
if not self.return_type.is_void:
if self.return_type.is_pyobject:
init = " = NULL"
elif self.return_type.is_memoryviewslice:
init = ' = ' + MemoryView.memslice_entry_init
code.putln("%s%s;" % (
self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
self.generate_keyword_list(code)
# ----- Extern library function declarations
lenv.generate_library_function_declarations(code)
# ----- GIL acquisition
acquire_gil = self.acquire_gil
# See if we need to acquire the GIL for variable declarations, or for
# refnanny only
# Closures are not currently possible for cdef nogil functions,
# but check them anyway
have_object_args = self.needs_closure or self.needs_outer_scope
for arg in lenv.arg_entries:
if arg.type.is_pyobject:
have_object_args = True
break
used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]
acquire_gil_for_var_decls_only = (
lenv.nogil and lenv.has_with_gil_block and
(have_object_args or used_buffer_entries))
acquire_gil_for_refnanny_only = (
lenv.nogil and lenv.has_with_gil_block and not
acquire_gil_for_var_decls_only)
use_refnanny = not lenv.nogil or lenv.has_with_gil_block
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
code.funcstate.gil_owned = True
elif lenv.nogil and lenv.has_with_gil_block:
code.declare_gilstate()
if profile or linetrace:
tempvardecl_code.put_trace_declarations()
code_object = self.code_object.calculate_result_code(code) if self.code_object else None
code.put_trace_frame_init(code_object)
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
code.put_setup_refcount_context(
self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)
# ----- Automatic lead-ins for certain special functions
if is_getbuffer_slot:
self.getbuffer_init(code)
# ----- Create closure scope object
if self.needs_closure:
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
if not slot_func_cname:
slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
code.putln("%s = (%s)%s(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.empty_declaration_code(),
slot_func_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
# Scope unconditionally DECREFed on return.
code.putln("%s = %s;" % (
Naming.cur_scope_cname,
lenv.scope_class.type.cast_code("Py_None")))
code.put_incref("Py_None", py_object_type)
code.putln(code.error_goto(self.pos))
code.putln("} else {")
code.put_gotref(Naming.cur_scope_cname)
code.putln("}")
# Note that it is unsafe to decref the scope at this point.
if self.needs_outer_scope:
if self.is_cyfunction:
code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
else:
code.putln("%s = (%s) %s;" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
if lenv.is_passthrough:
code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
elif self.needs_closure:
# inner closures own a reference to their outer parent
code.put_incref(outer_scope_cname, cenv.scope_class.type)
code.put_giveref(outer_scope_cname)
# ----- Trace function call
if profile or linetrace:
# this looks a bit late, but if we don't get here due to a
# fatal error beforehand, it's not really worth tracing
if self.is_wrapper:
trace_name = self.entry.name + " (wrapper)"
else:
trace_name = self.entry.name
code.put_trace_call(
trace_name, self.pos, nogil=not code.funcstate.gil_owned)
code.funcstate.can_trace = True
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
# If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
is_cdef = isinstance(self, CFuncDefNode)
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
code.put_var_incref(entry)
# Note: defaults are always incref-ed. For def functions, we
# acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments.
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
for entry in lenv.var_entries:
if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure:
if entry.xdecref_cleanup:
code.put_var_xincref(entry)
else:
code.put_var_incref(entry)
# ----- Initialise local buffer auxiliary variables
for entry in lenv.var_entries + lenv.arg_entries:
if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
Buffer.put_init_vars(entry, code)
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
# ----- Acquire buffer arguments
for entry in lenv.arg_entries:
if entry.type.is_buffer:
Buffer.put_acquire_arg_buffer(entry, code, self.pos)
if acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
# -------------------------
# ----- Function body -----
# -------------------------
self.generate_function_body(env, code)
code.mark_pos(self.pos, trace=False)
code.putln("")
code.putln("/* function exit code */")
# ----- Default return value
if not self.body.is_terminator:
if self.return_type.is_pyobject:
#if self.return_type.is_extension_type:
# lhs = "(PyObject *)%s" % Naming.retval_cname
#else:
lhs = Naming.retval_cname
code.put_init_to_py_none(lhs, self.return_type)
else:
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type, have_gil=not lenv.nogil)
# Clean up buffers -- this calls a Python function
# so need to save and restore error state
buffers_present = len(used_buffer_entries) > 0
#memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice]
if buffers_present:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_PyThreadState_declare")
code.putln("__Pyx_PyThreadState_assign")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
if self.return_type.is_memoryviewslice:
MemoryView.put_init_entry(Naming.retval_cname, code)
err_val = Naming.retval_cname
else:
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
# TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
if lenv.nogil and not lenv.has_with_gil_block:
code.putln("{")
code.put_ensure_gil()
code.put_add_traceback(self.entry.qualified_name)
if lenv.nogil and not lenv.has_with_gil_block:
code.put_release_ensured_gil()
code.putln("}")
else:
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
code.put_unraisable(self.entry.qualified_name, lenv.nogil)
default_retval = self.return_type.default_value
if err_val is None and default_retval:
err_val = default_retval
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
# If we are using the non-error cleanup section we should
# jump past it if we have an error. The if-test below determines
# whether this section is used.
if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
code.put_goto(code.return_from_error_cleanup_label)
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
if is_getbuffer_slot:
self.getbuffer_normal_cleanup(code)
if self.return_type.is_memoryviewslice:
# See if our return value is uninitialized on non-error return
# from . import MemoryView
# MemoryView.err_if_nogil_initialized_check(self.pos, env)
cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname))
code.putln(
'if (%s) {' % cond)
if env.nogil:
code.put_ensure_gil()
code.putln(
'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
if env.nogil:
code.put_release_ensured_gil()
code.putln(
'}')
# ----- Return cleanup for both error and no-error return
code.put_label(code.return_from_error_cleanup_label)
for entry in lenv.var_entries:
if not entry.used or entry.in_closure:
continue
if entry.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil)
elif entry.type.is_pyobject:
if not entry.is_arg or len(entry.cf_assignments) > 1:
if entry.xdecref_cleanup:
code.put_var_xdecref(entry)
else:
code.put_var_decref(entry)
# Decref any increfed args
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
code.put_var_decref(entry)
elif (entry.type.is_memoryviewslice and
(not is_cdef or len(entry.cf_assignments) > 1)):
# decref slices of def functions and acquired slices from cdef
# functions, but not borrowed slices from cdef functions.
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
default_retval = self.return_type.default_value
err_val = self.error_value()
if err_val is None and default_retval:
err_val = default_retval # FIXME: why is err_val not used?
if self.return_type.is_pyobject:
code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
# We follow CPython here and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
if profile or linetrace:
code.funcstate.can_trace = False
if self.return_type.is_pyobject:
code.put_trace_return(
Naming.retval_cname, nogil=not code.funcstate.gil_owned)
else:
code.put_trace_return(
"Py_None", nogil=not code.funcstate.gil_owned)
if not lenv.nogil:
# GIL holding function
code.put_finish_refcount_context()
if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
# release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Python version
code.exit_cfunc_scope()
if self.py_func:
self.py_func.generate_function_definitions(env, code)
self.generate_wrapper_functions(code)
def declare_argument(self, env, arg):
if arg.type.is_void:
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
code.globalstate.use_utility_code(
UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
typeptr_cname = arg.type.typeptr_cname
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
arg.type.is_builtin_type and arg.type.require_exact,
code.error_goto(arg.pos)))
else:
error(arg.pos, "Cannot test type of extern C class without type object name specification")
def generate_arg_none_check(self, arg, code):
# Generate None check for one argument.
if arg.type.is_memoryviewslice:
cname = "%s.memview" % arg.entry.cname
else:
cname = arg.entry.cname
code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
max(200, len(arg.name)), arg.name,
code.error_goto(arg.pos)))
code.putln('}')
def generate_wrapper_functions(self, code):
pass
def generate_execution_code(self, code):
code.mark_pos(self.pos)
# Evaluate and store argument default values
for arg in self.args:
if not arg.is_dynamic:
arg.generate_assignment_code(code)
#
# Special code for the __getbuffer__ function
#
def getbuffer_init(self, code):
info = self.local_scope.arg_entries[1].cname
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following block should be removed when this bug is fixed.
code.putln("if (%s != NULL) {" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
code.putln("}")
def getbuffer_error_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj != NULL) {"
% (info, info))
code.put_gotref("%s->obj" % info)
code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
% (info, info))
code.putln("}")
def getbuffer_normal_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
code.put_gotref("Py_None")
code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
return None
name = self.entry.name
slot = TypeSlots.method_name_to_slot.get(name)
if not slot:
return None
if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
return None
if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
return None
return slot.preprocessor_guard_code()
class CFuncDefNode(FuncDefNode):
# C function definition.
#
# modifiers ['inline']
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# cfunc_declarator the CFuncDeclarator of this function
# (this is also available through declarator or a
# base thereof)
# body StatListNode
# api boolean
# decorators [DecoratorNode] list of decorators
#
# with_gil boolean Acquire GIL around body
# type CFuncType
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
# template_declaration String or None Used for c++ class methods
# is_const_method whether this is a const method
# is_static_method whether this is a static method
# is_c_class_method whether this is a cclass method
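#
# Example (illustrative): a cpdef function with an optional argument,
#     cpdef double clip(double x, double lo=0.0, double hi=1.0):
#         return lo if x < lo else (hi if x > hi else x)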
child_attrs = ["base_type", "declarator", "body", "py_func_stat"]
inline_in_pxd = False
decorators = None
directive_locals = None
directive_returns = None
override = None
template_declaration = None
is_const_method = False
py_func_stat = None
def unqualified_name(self):
return self.entry.name
@property
def code_object(self):
# share the CodeObject with the cpdef wrapper (if available)
return self.py_func.code_object if self.py_func else None
def analyse_declarations(self, env):
self.is_c_class_method = env.is_c_class_scope
if self.directive_locals is None:
self.directive_locals = {}
self.directive_locals.update(env.directives['locals'])
if self.directive_returns is not None:
base_type = self.directive_returns.analyse_as_type(env)
if base_type is None:
error(self.directive_returns.pos, "Not a type")
base_type = PyrexTypes.error_type
else:
base_type = self.base_type.analyse(env)
self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod')
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None),
directive_locals=self.directive_locals)
else:
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None))
if not type.is_cfunction:
error(self.pos, "Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
self.type = type
type.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
self.cfunc_declarator = declarator
self.args = declarator.args
opt_arg_count = self.cfunc_declarator.optional_arg_count
if (self.visibility == 'public' or self.api) and opt_arg_count:
error(self.cfunc_declarator.pos,
"Function with optional arguments may not be declared public or api")
if type.exception_check == '+' and self.visibility != 'extern':
warning(self.cfunc_declarator.pos,
"Only extern functions can throw C++ exceptions.")
for formal_arg, type_arg in zip(self.args, type.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
formal_arg.cname = type_arg.cname
self._validate_type_visibility(type_arg.type, type_arg.pos, env)
if type_arg.type.is_fused:
self.has_fused_arguments = True
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
elif 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
self._validate_type_visibility(type.return_type, self.pos, env)
name = name_declarator.name
cname = name_declarator.cname
type.is_const_method = self.is_const_method
type.is_static_method = self.is_static_method
self.entry = env.declare_cfunction(
name, type, self.pos,
cname=cname, visibility=self.visibility, api=self.api,
defining=self.body is not None, modifiers=self.modifiers,
overridable=self.overridable)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos, "Function cannot return an array")
if self.return_type.is_cpp_class:
self.return_type.check_nullary_constructor(self.pos, "used as a return value")
if self.overridable and not env.is_module_scope and not self.is_static_method:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
self.declare_cpdef_wrapper(env)
self.create_local_scope(env)
def declare_cpdef_wrapper(self, env):
if self.overridable:
if self.is_static_method:
# TODO(robertwb): Finish this up, perhaps via more function refactoring.
error(self.pos, "static cpdef methods not yet supported")
name = self.entry.name
py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
if self.is_static_method:
from .ExprNodes import NameNode
decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))]
decorators[0].decorator.analyse_types(env)
else:
decorators = []
self.py_func = DefNode(pos=self.pos,
name=self.entry.name,
args=self.args,
star_arg=None,
starstar_arg=None,
doc=self.doc,
body=py_func_body,
decorators=decorators,
is_wrapper=1)
self.py_func.is_module_scope = env.is_module_scope
self.py_func.analyse_declarations(env)
self.py_func.entry.is_overridable = True
self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
self.py_func.type = PyrexTypes.py_object_type
self.entry.as_variable = self.py_func.entry
self.entry.used = self.entry.as_variable.used = True
# Reset the scope entry to the cfunction above
env.entries[name] = self.entry
if (not self.entry.is_final_cmethod and
(not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func=self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
def _validate_type_visibility(self, type, pos, env):
"""
Ensure that types used in cdef functions are public or api, or
defined in a C header.
"""
public_or_api = (self.visibility == 'public' or self.api)
entry = getattr(type, 'entry', None)
if public_or_api and entry and env.is_module_scope:
if not (entry.visibility in ('public', 'extern') or
entry.api or entry.in_cinclude):
error(pos, "Function declared public or api may not have private types")
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
from . import ExprNodes
args = self.type.args
if omit_optional_args:
args = args[:len(args) - self.type.optional_arg_count]
arg_names = [arg.name for arg in args]
if is_module_scope:
cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
call_arg_names = arg_names
skip_dispatch = Options.lookup_module_cpdef
elif self.type.is_static_method:
class_entry = self.entry.scope.parent_type.entry
class_node = ExprNodes.NameNode(self.pos, name=class_entry.name)
class_node.entry = class_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name)
# Calling static c(p)def methods on an instance is disallowed.
# TODO(robertwb): Support by passing self to check for override?
skip_dispatch = True
else:
type_entry = self.type.args[0].type.entry
type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name)
type_arg.entry = type_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name)
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(
self.pos,
function=cfunc,
args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names],
wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
error(arg.pos, "Missing argument name")
self.declare_argument(env, arg)
def need_gil_acquisition(self, lenv):
return self.type.with_gil
def nogil_check(self, env):
type = self.type
with_gil = type.with_gil
if type.nogil and not with_gil:
if type.return_type.is_pyobject:
error(self.pos,
"Function with Python return type cannot be declared nogil")
for entry in self.local_scope.var_entries:
if entry.type.is_pyobject and not entry.in_with_gil_block:
error(self.pos, "Function declared nogil has Python locals or temporaries")
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
if self.py_func_stat is not None:
# this will also analyse the default values and the function name assignment
self.py_func_stat = self.py_func_stat.analyse_expressions(env)
elif self.py_func is not None:
# this will also analyse the default values
self.py_func = self.py_func.analyse_expressions(env)
else:
self.analyse_default_values(env)
self.analyse_annotations(env)
self.acquire_gil = self.need_gil_acquisition(self.local_scope)
return self
def needs_assignment_synthesis(self, env, code=None):
return False
def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None):
scope = self.local_scope
arg_decls = []
type = self.type
for arg in type.args[:len(type.args)-type.optional_arg_count]:
arg_decl = arg.declaration_code()
entry = scope.lookup(arg.name)
if not entry.cf_used:
arg_decl = 'CYTHON_UNUSED %s' % arg_decl
arg_decls.append(arg_decl)
if with_dispatch and self.overridable:
dispatch_arg = PyrexTypes.c_int_type.declaration_code(
Naming.skip_dispatch_cname)
if self.override:
arg_decls.append(dispatch_arg)
else:
arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
if type.optional_arg_count and with_opt_args:
arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
if type.has_varargs:
arg_decls.append("...")
if not arg_decls:
arg_decls = ["void"]
if cname is None:
cname = self.entry.func_cname
entity = type.function_header_code(cname, ', '.join(arg_decls))
if self.entry.visibility == 'private' and '::' not in cname:
storage_class = "static "
else:
storage_class = ""
dll_linkage = None
modifiers = code.build_function_modifiers(self.entry.func_modifiers)
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
needs_proto = self.is_c_class_method
if self.template_declaration:
if needs_proto:
code.globalstate.parts['module_declarations'].putln(self.template_declaration)
code.putln(self.template_declaration)
if needs_proto:
code.globalstate.parts['module_declarations'].putln(
"%s%s%s; /* proto*/" % (storage_class, modifiers, header))
code.putln("%s%s%s {" % (storage_class, modifiers, header))
def generate_argument_declarations(self, env, code):
scope = self.local_scope
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
result = arg.calculate_default_value_code(code)
code.putln('%s = %s;' % (
arg.type.declaration_code(arg.cname), result))
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
i = 0
used = 0
scope = self.local_scope
if self.type.optional_arg_count:
code.putln('if (%s) {' % Naming.optional_args_cname)
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
code.putln('if (%s->%sn > %s) {' %
(Naming.optional_args_cname,
Naming.pyrex_prefix, i))
declarator = arg.declarator
while not hasattr(declarator, 'name'):
declarator = declarator.base
code.putln('%s = %s->%s;' %
(arg.cname, Naming.optional_args_cname,
self.type.opt_arg_cname(declarator.name)))
used += 1
i += 1
for _ in range(used):
code.putln('}')
code.putln('}')
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure and not arg.default:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(scope.lookup_here(arg.name))
def generate_argument_conversion_code(self, code):
pass
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
for arg in self.type.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif arg.type.is_pyobject and not arg.accept_none:
self.generate_arg_none_check(arg, code)
def generate_execution_code(self, code):
super(CFuncDefNode, self).generate_execution_code(code)
if self.py_func_stat:
self.py_func_stat.generate_execution_code(code)
def error_value(self):
if self.return_type.is_pyobject:
return "0"
else:
#return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
# wrappers to put in the slots here.
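# Each superseded entry keeps its old C signature; we emit a thin
# forwarding function (cname derived from the base cname plus a
# "wrap_<k>" suffix) that calls the current implementation, passing 0 /
# NULL for the dispatch flag and optional-args struct when the old
# signature did not have them.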
k = 0
entry = self.entry
func_type = entry.type
while entry.prev_entry is not None:
k += 1
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
self.generate_function_header(
code, 0,
with_dispatch=entry.type.is_overridable,
with_opt_args=entry.type.optional_arg_count,
cname=entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
args = self.type.args
arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
if entry.type.is_overridable:
arglist.append(Naming.skip_dispatch_cname)
elif func_type.is_overridable:
arglist.append('0')
if entry.type.optional_arg_count:
arglist.append(Naming.optional_args_cname)
elif func_type.optional_arg_count:
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
# for * and ** arguments).
#
# name string
# entry Symtab.Entry
# annotation ExprNode or None Py3 argument annotation
child_attrs = []
is_self_arg = False
is_type_arg = False
def generate_function_definitions(self, env, code):
self.entry.generate_function_definitions(env, code)
class DecoratorNode(Node):
# A decorator
#
# decorator NameNode or CallNode or AttributeNode
child_attrs = ['decorator']
class DefNode(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# lambda_name string the internal name of a lambda 'function'
# decorators [DecoratorNode] list of decorators
# args [CArgDeclNode] formal arguments
# doc EncodedString or None
# body StatListNode
# return_type_annotation
# ExprNode or None the Py3 return type annotation
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# fused_py_func DefNode The original fused cpdef DefNode
# (in case this is a specialization)
# specialized_cpdefs [DefNode] list of specialized cpdef DefNodes
# py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
#
# decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
no_assignment_synthesis = 0
decorators = None
return_type_annotation = None
entry = None
acquire_gil = 0
self_in_stararg = 0
py_cfunc_node = None
requires_classobj = False
defaults_struct = None # Dynamic kwrds structure name
doc = None
fused_py_func = False
specialized_cpdefs = None
py_wrapper = None
py_wrapper_required = True
func_cname = None
defaults_getter = None
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
k = rk = r = 0
for arg in self.args:
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, modifiers=None):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
args=cfunc_args,
has_varargs=False,
exception_value=None,
exception_check=False,
nogil=False,
with_gil=False,
is_overridable=overridable)
cfunc = CVarDefNode(self.pos, type=cfunc_type)
else:
if scope is None:
scope = cfunc.scope
cfunc_type = cfunc.type
if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
error(self.pos, "wrong number of arguments")
error(cfunc.pos, "previous declaration here")
for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
name_declarator, type = formal_arg.analyse(scope, nonempty=1,
is_self_arg=(i == 0 and scope.is_c_class_scope))
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
from . import ExprNodes
if cfunc_type.exception_value is None:
exception_value = None
else:
exception_value = ExprNodes.ConstNode(
self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args=self.args,
has_varargs=False,
exception_check=cfunc_type.exception_check,
exception_value=exception_value,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil)
return CFuncDefNode(self.pos,
modifiers=modifiers or [],
base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator=declarator,
body=self.body,
doc=self.doc,
overridable=cfunc_type.is_overridable,
type=cfunc_type,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil,
visibility='private',
api=False,
directive_locals=getattr(cfunc, 'directive_locals', {}),
directive_returns=returns)
def is_cdef_func_compatible(self):
"""Determines if the function's signature is compatible with a
cdef function. This can be used before calling
.as_cfunction() to see if that will be successful.
"""
if self.needs_closure:
return False
if self.star_arg or self.starstar_arg:
return False
return True
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
for decorator in self.decorators:
func = decorator.decorator
if func.is_name:
self.is_classmethod |= func.name == 'classmethod'
self.is_staticmethod |= func.name == 'staticmethod'
if self.is_classmethod and env.lookup_here('classmethod'):
# classmethod() was overridden - not much we can do here ...
self.is_classmethod = False
if self.is_staticmethod and env.lookup_here('staticmethod'):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
if self.name == '__new__' and env.is_py_class_scope:
self.is_staticmethod = 1
self.analyse_argument_types(env)
if self.name == '<lambda>':
self.declare_lambda_function(env)
else:
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
_, return_type = _analyse_signature_annotation(self.return_type_annotation, env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
self.create_local_scope(env)
self.py_wrapper = DefNodeWrapper(
self.pos,
target=self,
name=self.entry.name,
args=self.args,
star_arg=self.star_arg,
starstar_arg=self.starstar_arg,
return_type=self.return_type)
self.py_wrapper.analyse_declarations(env)
def analyse_argument_types(self, env):
self.directive_locals = env.directives['locals']
allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
f2s = env.fused_to_specific
env.fused_to_specific = None
for arg in self.args:
if hasattr(arg, 'name'):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
arg.type = type
if type.is_fused:
self.has_fused_arguments = True
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos, "Python function argument cannot have C name specification")
arg.type = arg.type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
if arg.or_none:
arg.accept_none = True
elif arg.not_none:
arg.accept_none = False
elif (arg.type.is_extension_type or arg.type.is_builtin_type
or arg.type.is_buffer or arg.type.is_memoryviewslice):
if arg.default and arg.default.constant_result is None:
# special case: def func(MyType obj = None)
arg.accept_none = True
else:
# default depends on compiler directive
arg.accept_none = allow_none_for_extension_args
else:
# probably just a plain 'object'
arg.accept_none = True
else:
arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
# Use the simpler calling signature for zero- and one-argument functions.
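# (pyfunction_noargs / pyfunction_onearg presumably correspond to the
# METH_NOARGS / METH_O calling conventions, which avoid tuple and
# keyword unpacking altogether.)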
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
elif len(self.args) == 1:
if self.args[0].default is None and not self.args[0].kw_only:
self.entry.signature = TypeSlots.pyfunction_onearg
elif self.entry.signature is TypeSlots.pymethod_signature:
if len(self.args) == 1:
self.entry.signature = TypeSlots.unaryfunc
elif len(self.args) == 2:
if self.args[1].default is None and not self.args[1].kw_only:
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
nfixed = sig.num_fixed_args()
if (sig is TypeSlots.pymethod_signature and nfixed == 1
and len(self.args) == 0 and self.star_arg):
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
nfixed = 0
if self.is_staticmethod and env.is_c_class_scope:
nfixed = 0
self.self_in_stararg = True # FIXME: why for staticmethods?
self.entry.signature = sig = copy.copy(sig)
sig.fixed_arg_format = "*"
sig.is_staticmethod = True
sig.has_generic_args = True
if ((self.is_classmethod or self.is_staticmethod) and
self.has_fused_arguments and env.is_c_class_scope):
del self.decorator_indirection.stats[:]
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
arg.hdr_type = arg.type = Builtin.type_type
else:
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
if arg.needs_conversion:
arg.hdr_cname = Naming.arg_prefix + arg.name
else:
arg.hdr_cname = Naming.var_prefix + arg.name
if nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.num_fixed_args()
if sig.has_generic_args:
expected_str += " or more"
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
else:
desc = "Method"
error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.lookup_here(name)
if entry:
if entry.is_final_cmethod and not env.parent_type.is_final_type:
error(self.pos, "Only final types can have final Python (def/cpdef) methods")
if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper:
warning(self.pos, "Overriding cdef method with def method.", 5)
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = Naming.funcdoc_prefix + prefix + name
if entry.is_special:
if entry.name in TypeSlots.invisible or not entry.doc or (
entry.name in '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
else:
entry.doc = None
def declare_lambda_function(self, env):
entry = env.declare_lambda_function(self.lambda_name, self.pos)
entry.doc = None
self.entry = entry
self.entry.pyfunc_cname = entry.cname
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.is_arg = 1
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
if env.directives['infer_types'] != False:
type = PyrexTypes.unspecified_type
else:
type = py_object_type
entry = env.declare_var(arg.name, type, arg.pos)
entry.is_arg = 1
entry.used = 1
entry.init = "0"
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
self.analyse_annotations(env)
if self.return_type_annotation:
self.return_type_annotation = self.return_type_annotation.analyse_types(env)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
decorator.decorator = decorator.decorator.analyse_expressions(env)
self.py_wrapper.prepare_argument_coercion(env)
return self
def needs_assignment_synthesis(self, env, code=None):
if self.is_staticmethod:
return True
if self.specialized_cpdefs or self.entry.is_fused_specialized:
return False
if self.no_assignment_synthesis:
return False
if self.entry.is_special:
return False
if self.entry.is_anonymous:
return True
if env.is_module_scope or env.is_c_class_scope:
if code is None:
return self.local_scope.directives['binding']
else:
return code.globalstate.directives['binding']
return env.is_py_class_scope or env.is_closure_scope
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return self.entry.signature.exception_check
def generate_function_definitions(self, env, code):
if self.defaults_getter:
# the defaults getter must never live in class scopes; it is always a module function
self.defaults_getter.generate_function_definitions(env.global_scope(), code)
# Before closure cnames are mangled
if self.py_wrapper_required:
# func_cname might be modified by @cname
self.py_wrapper.func_cname = self.entry.func_cname
self.py_wrapper.generate_function_definitions(env, code)
FuncDefNode.generate_function_definitions(self, env, code)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
if proto_only:
if self.py_wrapper_required:
self.py_wrapper.generate_function_header(
code, with_pymethdef, True)
return
arg_code_list = []
if self.entry.signature.has_dummy_arg:
self_arg = 'PyObject *%s' % Naming.self_cname
if not self.needs_outer_scope:
self_arg = 'CYTHON_UNUSED ' + self_arg
arg_code_list.append(self_arg)
def arg_decl_code(arg):
entry = arg.entry
if entry.in_closure:
cname = entry.original_cname
else:
cname = entry.cname
decl = entry.type.declaration_code(cname)
if not entry.cf_used:
decl = 'CYTHON_UNUSED ' + decl
return decl
for arg in self.args:
arg_code_list.append(arg_decl_code(arg))
if self.star_arg:
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
arg_code = ', '.join(arg_code_list)
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
preprocessor_guard = self.get_preprocessor_guard()
if preprocessor_guard:
decls_code.putln(preprocessor_guard)
decls_code.putln(
"static %s(%s); /* proto */" % (dc, arg_code))
if preprocessor_guard:
decls_code.putln("#endif")
code.putln("static %s(%s) {" % (dc, arg_code))
def generate_argument_declarations(self, env, code):
pass
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(arg.entry)
for arg in self.star_arg, self.starstar_arg:
if arg:
put_into_closure(arg.entry)
def generate_argument_type_tests(self, code):
pass
class DefNodeWrapper(FuncDefNode):
# DefNode python wrapper code generator
defnode = None
target = None # Target DefNode
def __init__(self, *args, **kwargs):
FuncDefNode.__init__(self, *args, **kwargs)
self.num_kwonly_args = self.target.num_kwonly_args
self.num_required_kw_args = self.target.num_required_kw_args
self.num_required_args = self.target.num_required_args
self.self_in_stararg = self.target.self_in_stararg
self.signature = None
def analyse_declarations(self, env):
target_entry = self.target.entry
name = self.name
prefix = env.next_id(env.scope_prefix)
target_entry.func_cname = Naming.pywrap_prefix + prefix + name
target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
self.signature = target_entry.signature
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
# all utility code here, simply because we cannot easily distinguish
# different code types.
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
elif arg.hdr_type and not arg.hdr_type.is_pyobject:
if not arg.hdr_type.create_to_py_utility_code(env):
pass # will fail later
if self.starstar_arg and not self.starstar_arg.entry.cf_used:
# we will set the kwargs argument to NULL instead of a new dict
# and must therefore correct the control flow state
entry = self.starstar_arg.entry
entry.xdecref_cleanup = 1
for ass in entry.cf_assignments:
if not ass.is_arg and ass.lhs.is_name:
ass.lhs.cf_maybe_null = True
def signature_has_nongeneric_args(self):
argcount = len(self.args)
if argcount == 0 or (
argcount == 1 and (self.args[0].is_self_arg or
self.args[0].is_type_arg)):
return 0
return 1
def signature_has_generic_args(self):
return self.signature.has_generic_args
def generate_function_body(self, code):
args = []
if self.signature.has_dummy_arg:
args.append(Naming.self_cname)
for arg in self.args:
if arg.hdr_type and not (arg.type.is_memoryviewslice or
arg.type.is_struct or
arg.type.is_complex):
args.append(arg.type.cast_code(arg.entry.cname))
else:
args.append(arg.entry.cname)
if self.star_arg:
args.append(self.star_arg.entry.cname)
if self.starstar_arg:
args.append(self.starstar_arg.entry.cname)
args = ', '.join(args)
if not self.return_type.is_void:
code.put('%s = ' % Naming.retval_cname)
code.putln('%s(%s);' % (
self.target.entry.pyfunc_cname, args))
def generate_function_definitions(self, env, code):
lenv = self.target.local_scope
# Generate C code for header and body of function
code.mark_pos(self.pos)
code.putln("")
code.putln("/* Python wrapper */")
preprocessor_guard = self.target.get_preprocessor_guard()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.enter_cfunc_scope(lenv)
code.return_from_error_cleanup_label = code.new_label()
with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
self.target.pymethdef_required)
self.generate_function_header(code, with_pymethdef)
self.generate_argument_declarations(lenv, code)
tempvardecl_code = code.insertion_point()
if self.return_type.is_pyobject:
retval_init = ' = 0'
else:
retval_init = ''
if not self.return_type.is_void:
code.putln('%s%s;' % (
self.return_type.declaration_code(Naming.retval_cname),
retval_init))
code.put_declare_refcount_context()
code.put_setup_refcount_context('%s (wrapper)' % self.name)
self.generate_argument_parsing_code(lenv, code)
self.generate_argument_type_tests(code)
self.generate_function_body(code)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# ----- Error cleanup
if code.error_label in code.labels_used:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
err_val = self.error_value()
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in lenv.var_entries:
if entry.is_arg and entry.type.is_pyobject:
code.put_var_decref(entry)
code.put_finish_refcount_context()
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln('}')
code.exit_cfunc_scope()
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
arg_code_list = []
sig = self.signature
if sig.has_dummy_arg or self.self_in_stararg:
arg_code = "PyObject *%s" % Naming.self_cname
if not sig.has_dummy_arg:
arg_code = 'CYTHON_UNUSED ' + arg_code
arg_code_list.append(arg_code)
for arg in self.args:
if not arg.is_generic:
if arg.is_self_arg or arg.is_type_arg:
arg_code_list.append("PyObject *%s" % arg.hdr_cname)
else:
arg_code_list.append(
arg.hdr_type.declaration_code(arg.hdr_cname))
entry = self.target.entry
if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if entry.scope.is_c_class_scope and entry.name == "__ipow__":
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if sig.has_generic_args:
arg_code_list.append(
"PyObject *%s, PyObject *%s" % (
Naming.args_cname, Naming.kwds_cname))
arg_code = ", ".join(arg_code_list)
# Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
mf = ""
if (entry.name in ("__getbuffer__", "__releasebuffer__")
and entry.scope.is_c_class_scope):
mf = "CYTHON_UNUSED "
with_pymethdef = False
dc = self.return_type.declaration_code(entry.func_cname)
header = "static %s%s(%s)" % (mf, dc, arg_code)
code.putln("%s; /*proto*/" % header)
if proto_only:
if self.target.fused_py_func:
# If we are the specialized version of the cpdef, we still
# want the prototype for the "fused cpdef", in case we're
# checking to see if our method was overridden in Python
self.target.fused_py_func.generate_function_header(
code, with_pymethdef, proto_only=True)
return
if (Options.docstrings and entry.doc and
not self.target.fused_py_func and
not entry.scope.is_property_scope and
(not entry.is_special or entry.wrapperbase_cname)):
# h_code = code.globalstate['h_code']
docstr = entry.doc
if docstr.is_unicode:
docstr = docstr.as_utf8_string()
code.putln(
'static char %s[] = %s;' % (
entry.doc_cname,
docstr.as_c_string_literal()))
if entry.is_special:
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln(
"struct wrapperbase %s;" % entry.wrapperbase_cname)
code.putln('#endif')
if with_pymethdef or self.target.fused_py_func:
code.put(
"static PyMethodDef %s = " % entry.pymethdef_cname)
code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
for arg in self.args:
if arg.is_generic:
if arg.needs_conversion:
code.putln("PyObject *%s = 0;" % arg.hdr_cname)
else:
code.put_var_declaration(arg.entry)
for entry in env.var_entries:
if entry.is_arg:
code.put_var_declaration(entry)
def generate_argument_parsing_code(self, env, code):
# Generate fast equivalent of PyArg_ParseTuple call for
# generic arguments, if any, including args/kwargs
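# Three cases are handled below:
#  - no generic arguments at all: only per-argument conversion runs,
#  - only */** arguments (no named generic args): take the direct
#    stararg copy path,
#  - otherwise: full tuple-and-keyword parsing, wrapped in a local error
#    label so the star/starstar targets can be cleaned up right here.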
old_error_label = code.new_error_label()
our_error_label = code.error_label
end_label = code.new_label("argument_unpacking_done")
has_kwonly_args = self.num_kwonly_args > 0
has_star_or_kw_args = self.star_arg is not None \
or self.starstar_arg is not None or has_kwonly_args
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
if not self.signature_has_generic_args():
if has_star_or_kw_args:
error(self.pos, "This method cannot have * or keyword arguments")
self.generate_argument_conversion_code(code)
elif not self.signature_has_nongeneric_args():
# func(*args) or func(**kw) or func(*args, **kw)
self.generate_stararg_copy_code(code)
else:
self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)
code.error_label = old_error_label
if code.label_used(our_error_label):
if not code.label_used(end_label):
code.put_goto(end_label)
code.put_label(our_error_label)
if has_star_or_kw_args:
self.generate_arg_decref(self.star_arg, code)
if self.starstar_arg:
if self.starstar_arg.entry.xdecref_cleanup:
code.put_var_xdecref_clear(self.starstar_arg.entry)
else:
code.put_var_decref_clear(self.starstar_arg.entry)
code.put_add_traceback(self.target.entry.qualified_name)
code.put_finish_refcount_context()
code.putln("return %s;" % self.error_value())
if code.label_used(end_label):
code.put_label(end_label)
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref_clear(arg.entry)
def generate_arg_decref(self, arg, code):
if arg:
code.put_var_decref_clear(arg.entry)
def generate_stararg_copy_code(self, code):
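# Fast path for signatures whose only generic arguments are *args and/or
# **kwargs: the incoming args tuple and kwargs dict are validated and
# then bound (or copied) directly to the star/starstar targets, possibly
# inserting 'self' as the first tuple item for bound methods.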
if not self.star_arg:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
Naming.args_cname)
code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
self.name, Naming.args_cname, self.error_value()))
code.putln("}")
if self.starstar_arg:
if self.star_arg or not self.starstar_arg.entry.cf_used:
kwarg_check = "unlikely(%s)" % Naming.kwds_cname
else:
kwarg_check = "%s" % Naming.kwds_cname
else:
kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
Naming.kwds_cname, Naming.kwds_cname)
code.globalstate.use_utility_code(
UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
code.putln(
"if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
kwarg_check, Naming.kwds_cname, self.name,
bool(self.starstar_arg), self.error_value()))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references):
code.putln("if (%s) {" % kwarg_check)
code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
code.putln("} else {")
code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,))
code.putln("}")
self.starstar_arg.entry.xdecref_cleanup = 1
else:
code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
Naming.kwds_cname))
code.putln("if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname, self.error_value()))
self.starstar_arg.entry.xdecref_cleanup = 0
code.put_gotref(self.starstar_arg.entry.cname)
if self.self_in_stararg and not self.target.is_staticmethod:
# need to create a new tuple with 'self' inserted as first item
code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
self.star_arg.entry.cname,
Naming.args_cname,
self.star_arg.entry.cname))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
code.putln("{")
code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type)
code.putln("return %s;" % self.error_value())
code.putln("}")
else:
code.putln("return %s;" % self.error_value())
code.put_gotref(self.star_arg.entry.cname)
code.put_incref(Naming.self_cname, py_object_type)
code.put_giveref(Naming.self_cname)
code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
self.star_arg.entry.cname, Naming.self_cname))
temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
temp, temp, Naming.args_cname, temp))
code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
Naming.args_cname, temp))
code.put_incref("item", py_object_type)
code.put_giveref("item")
code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
self.star_arg.entry.cname, temp))
code.putln("}")
code.funcstate.release_temp(temp)
self.star_arg.entry.xdecref_cleanup = 0
elif self.star_arg:
code.put_incref(Naming.args_cname, py_object_type)
code.putln("%s = %s;" % (
self.star_arg.entry.cname,
Naming.args_cname))
self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
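# Top-level driver for unpacking a def function's arguments:
#  - split the signature into positional and (required/optional)
#    keyword-only arguments,
#  - emit the interned keyword-name table and the borrowed 'values'
#    array,
#  - generate one branch for calls that pass keyword arguments and a
#    leaner branch for purely positional calls,
#  - then convert and assign every collected value to its target.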
argtuple_error_label = code.new_label("argtuple_error")
positional_args = []
required_kw_only_args = []
optional_kw_only_args = []
for arg in args:
if arg.is_generic:
if arg.default:
if not arg.is_self_arg and not arg.is_type_arg:
if arg.kw_only:
optional_kw_only_args.append(arg)
else:
positional_args.append(arg)
elif arg.kw_only:
required_kw_only_args.append(arg)
elif not arg.is_self_arg and not arg.is_type_arg:
positional_args.append(arg)
# sort required kw-only args before optional ones to avoid special
# cases in the unpacking code
kw_only_args = required_kw_only_args + optional_kw_only_args
min_positional_args = self.num_required_args - self.num_required_kw_args
if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
min_positional_args -= 1
max_positional_args = len(positional_args)
has_fixed_positional_count = not self.star_arg and \
min_positional_args == max_positional_args
has_kw_only_args = bool(kw_only_args)
if self.num_required_kw_args:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
if self.starstar_arg or self.star_arg:
self.generate_stararg_init_code(max_positional_args, code)
code.putln('{')
all_args = tuple(positional_args) + tuple(kw_only_args)
code.putln("static PyObject **%s[] = {%s,0};" % (
Naming.pykwdlist_cname,
','.join(['&%s' % code.intern_identifier(arg.name)
for arg in all_args])))
# Before being converted and assigned to the target variables,
# borrowed references to all unpacked argument values are
# collected into a local PyObject* array called "values",
# regardless of whether they were taken from default arguments,
# positional arguments or keyword arguments. Note that
# C-typed default arguments are handled at conversion time,
# so their array value is NULL in the end if no argument
# was passed for them.
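# (Illustrative example, not taken from real input: for a hypothetical
# "def f(a, b=1, *, c=2)" the values[] array would have three slots, with
# slots 1 and 2 preloaded with the Python defaults of 'b' and 'c' and
# any received arguments overwriting them.)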
self.generate_argument_values_setup_code(all_args, code)
# --- optimised code when we receive keyword arguments
code.putln("if (%s(%s)) {" % (
(self.num_required_kw_args > 0) and "likely" or "unlikely",
Naming.kwds_cname))
self.generate_keyword_unpacking_code(
min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code)
# --- optimised code when we do not receive any keyword arguments
if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
# Python raises arg tuple related errors first, so we must
# check the length here
if min_positional_args == max_positional_args and not self.star_arg:
compare = '!='
else:
compare = '<'
code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
Naming.args_cname, compare, min_positional_args))
code.put_goto(argtuple_error_label)
if self.num_required_kw_args:
# pure error case: keywords required but not passed
if max_positional_args > min_positional_args and not self.star_arg:
code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname, max_positional_args))
code.put_goto(argtuple_error_label)
code.putln('} else {')
for i, arg in enumerate(kw_only_args):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
code.putln(code.error_goto(self.pos))
break
else:
# optimised tuple unpacking code
code.putln('} else {')
if min_positional_args == max_positional_args:
# parse the exact number of positional arguments from
# the args tuple
for i, arg in enumerate(positional_args):
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
else:
# parse the positional arguments from the variable length
# args tuple and reject illegal argument tuple sizes
code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
if self.star_arg:
code.putln('default:')
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
if min_positional_args:
for i in range(min_positional_args-1, -1, -1):
code.putln('case %2d:' % i)
code.put_goto(argtuple_error_label)
else:
code.put('default: ')
code.put_goto(argtuple_error_label)
code.putln('}')
code.putln('}') # end of the conditional unpacking blocks
# Convert arg values to their final type and assign them.
# Also inject non-Python default arguments, which cannot
# live in the values[] array.
for i, arg in enumerate(all_args):
self.generate_arg_assignment(arg, "values[%d]" % i, code)
code.putln('}') # end of the whole argument unpacking block
if code.label_used(argtuple_error_label):
code.put_goto(success_label)
code.put_label(argtuple_error_label)
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args,
Naming.args_cname))
code.putln(code.error_goto(self.pos))
def generate_arg_assignment(self, arg, item, code):
if arg.type.is_pyobject:
# Python default arguments were already stored in 'item' at the very beginning
if arg.is_generic:
item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
func = arg.type.from_py_function
if func:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
rhs = "%s(%s)" % (func, item)
if arg.type.is_enum:
rhs = arg.type.cast_code(rhs)
code.putln("%s = %s; %s" % (
arg.entry.cname,
rhs,
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
code.putln('}')
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
def generate_stararg_init_code(self, max_positional_args, code):
if self.starstar_arg:
self.starstar_arg.entry.xdecref_cleanup = 0
code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
self.starstar_arg.entry.cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
if self.star_arg:
self.star_arg.entry.xdecref_cleanup = 0
code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname,
max_positional_args))
code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
self.star_arg.entry.cname, Naming.args_cname,
max_positional_args, Naming.args_cname))
code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
if self.starstar_arg:
code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
code.put_finish_refcount_context()
code.putln('return %s;' % self.error_value())
code.putln('}')
code.put_gotref(self.star_arg.entry.cname)
code.putln('} else {')
code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
code.put_incref(Naming.empty_tuple, py_object_type)
code.putln('}')
def generate_argument_values_setup_code(self, args, code):
max_args = len(args)
# the 'values' array collects borrowed references to arguments
# before doing any type coercion etc.
code.putln("PyObject* values[%d] = {%s};" % (
max_args, ','.join('0'*max_args)))
if self.target.defaults_struct:
code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
self.target.defaults_struct, Naming.dynamic_args_cname,
self.target.defaults_struct, Naming.self_cname))
# assign borrowed Python default values to the values array,
# so that they can be overwritten by received arguments below
for i, arg in enumerate(args):
if arg.default and arg.type.is_pyobject:
default_value = arg.calculate_default_value_code(code)
code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code):
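# Unpacking strategy: first copy whatever was passed positionally via a
# switch on PyTuple_GET_SIZE(args), then try a PyDict_GetItem lookup on
# the interned name of every argument that may still need a value, and
# finally hand any remaining keywords to __Pyx_ParseOptionalKeywords(),
# which fills the **kwargs dict or reports unexpected keywords.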
code.putln('Py_ssize_t kw_args;')
code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
# copy the values from the args tuple and check that it's not too long
code.putln('switch (pos_args) {')
if self.star_arg:
code.putln('default:')
for i in range(max_positional_args-1, -1, -1):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
code.put_goto(argtuple_error_label)
code.putln('}')
# The code above is very often (but not always) the same as
# the optimised non-kwargs tuple unpacking code, so we keep
# the code block above at the very top, before the following
# 'external' PyDict_Size() call, to make it easy for the C
# compiler to merge the two separate tuple unpacking
# implementations into one when they turn out to be identical.
# If we received kwargs, fill up the positional/required
# arguments with values from the kw dict
code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
if self.num_required_args or max_positional_args > 0:
last_required_arg = -1
for i, arg in enumerate(all_args):
if not arg.default:
last_required_arg = i
if last_required_arg < max_positional_args:
last_required_arg = max_positional_args-1
if max_positional_args > 0:
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
if arg.kw_only:
# optional kw-only args are handled separately below
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
# special case: we know arg 0 is missing
code.put('else ')
code.put_goto(argtuple_error_label)
else:
# print the correct number of values (args or
# kwargs) that were passed into positional
# arguments up to this point
code.putln('else {')
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args, i))
code.putln(code.error_goto(self.pos))
code.putln('}')
elif arg.kw_only:
code.putln('else {')
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
code.putln('}')
if max_positional_args > 0:
code.putln('}')
if has_kw_only_args:
# unpack optional keyword-only arguments separately because
# checking for interned strings in a dict is faster than iterating
self.generate_optional_kwonly_args_unpacking_code(all_args, code)
code.putln('if (unlikely(kw_args > 0)) {')
# non-positional/-required kw args left in dict: default args,
# kw-only args, **kwargs or error
#
# This is sort of a catch-all: except for checking required
# arguments, this will always do the right thing for unpacking
# keyword arguments, so that we can concentrate on optimising
# common cases above.
if max_positional_args == 0:
pos_arg_count = "0"
elif self.star_arg:
code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
max_positional_args, max_positional_args))
pos_arg_count = "used_pos_args"
else:
pos_arg_count = "pos_args"
code.globalstate.use_utility_code(
UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
Naming.kwds_cname,
Naming.pykwdlist_cname,
self.starstar_arg and self.starstar_arg.entry.cname or '0',
pos_arg_count,
self.name,
code.error_goto(self.pos)))
code.putln('}')
def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
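# Optional keyword-only arguments are fetched one by one through
# PyDict_GetItem on their interned names (cheaper than iterating the
# kwargs dict, per the note above); anything still left in kw_args falls
# through to the generic __Pyx_ParseOptionalKeywords() handling.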
optional_args = []
first_optional_arg = -1
for i, arg in enumerate(all_args):
if not arg.kw_only or not arg.default:
continue
if not optional_args:
first_optional_arg = i
optional_args.append(arg.name)
if optional_args:
if len(optional_args) > 1:
# if we receive more than the named kwargs, we either have **kwargs
# (in which case we must iterate anyway) or it's an error (which we
# also handle during iteration) => skip this part if there are more
code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
not self.starstar_arg and 'likely' or '',
len(optional_args)))
code.putln('Py_ssize_t index;')
# not unrolling the loop here reduces the C code overhead
code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
first_optional_arg, first_optional_arg + len(optional_args)))
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
code.putln('}')
code.putln('}')
def generate_argument_conversion_code(self, code):
# Generate code to convert arguments from signature type to
# declared type, if needed. Also copies signature arguments
# into closure fields.
for arg in self.args:
if arg.needs_conversion:
self.generate_arg_conversion(arg, code)
def generate_arg_conversion(self, arg, code):
# Generate conversion code for one argument.
old_type = arg.hdr_type
new_type = arg.type
if old_type.is_pyobject:
if arg.default:
code.putln("if (%s) {" % arg.hdr_cname)
else:
code.putln("assert(%s); {" % arg.hdr_cname)
self.generate_arg_conversion_from_pyobject(arg, code)
code.putln("}")
elif new_type.is_pyobject:
self.generate_arg_conversion_to_pyobject(arg, code)
else:
if new_type.assignable_from(old_type):
code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname))
else:
error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
if func:
lhs = arg.entry.cname
rhs = "%s(%s)" % (func, arg.hdr_cname)
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
if func:
code.putln("%s = %s(%s); %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
code.error_goto_if_null(arg.entry.cname, arg.pos)))
code.put_var_gotref(arg.entry)
else:
error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type)
def generate_argument_type_tests(self, code):
# Generate type tests for args whose signature
# type is PyObject * and whose declared type is
# a subtype thereof.
for arg in self.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif not arg.accept_none and (arg.type.is_pyobject or
arg.type.is_buffer or
arg.type.is_memoryviewslice):
self.generate_arg_none_check(arg, code)
def error_value(self):
return self.signature.error_value
class GeneratorDefNode(DefNode):
# Generator function node that creates a new generator instance when called.
#
# gbody GeneratorBodyDefNode the function implementing the generator
#
is_generator = True
is_coroutine = False
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
def __init__(self, pos, **kwargs):
# XXX: doesn't actually need a body
kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True)
super(GeneratorDefNode, self).__init__(pos, **kwargs)
def analyse_declarations(self, env):
super(GeneratorDefNode, self).analyse_declarations(env)
self.gbody.local_scope = self.local_scope
self.gbody.analyse_declarations(env)
def generate_function_body(self, env, code):
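# Calling the generator def function never runs the body: it allocates a
# __pyx_CoroutineObject (Generator or Coroutine) that bundles the
# separately generated body function with the closure scope, name,
# qualified name and module name, and returns that object.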
body_cname = self.gbody.entry.func_cname
name = code.intern_identifier(self.name)
qualname = code.intern_identifier(self.qualname)
module_name = code.intern_identifier(self.module_name)
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
'(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s, %s); %s' % (
'Coroutine' if self.is_coroutine else 'Generator',
body_cname, Naming.cur_scope_cname, name, qualname, module_name,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
classobj_cname = 'gen->classobj'
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
classobj_cname, Naming.self_cname))
code.put_incref(classobj_cname, py_object_type)
code.put_giveref(classobj_cname)
code.put_finish_refcount_context()
code.putln('return (PyObject *) gen;')
code.putln('}')
def generate_function_definitions(self, env, code):
env.use_utility_code(UtilityCode.load_cached(
'Coroutine' if self.is_coroutine else 'Generator', "Coroutine.c"))
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class AsyncDefNode(GeneratorDefNode):
is_coroutine = True
class GeneratorBodyDefNode(DefNode):
# Main code body of a generator implemented as a DefNode.
#
is_generator_body = True
is_inlined = False
inlined_comprehension_type = None # container type for inlined comprehensions
def __init__(self, pos=None, name=None, body=None):
super(GeneratorBodyDefNode, self).__init__(
pos=pos, body=body, name=name, doc=None,
args=[], star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
name = env.next_id('generator')
cname = Naming.genbody_prefix + prefix + name
entry = env.declare_var(None, py_object_type, self.pos,
cname=cname, visibility='private')
entry.func_cname = cname
entry.qualified_name = EncodedString(self.name)
self.entry = entry
def analyse_declarations(self, env):
self.analyse_argument_types(env)
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
header = "static PyObject *%s(__pyx_CoroutineObject *%s, PyObject *%s)" % (
self.entry.func_cname,
Naming.generator_cname,
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
else:
code.putln('%s /* generator body */\n{' % header)
def generate_function_definitions(self, env, code):
lenv = self.local_scope
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# Generate C code for header and body of function
code.enter_cfunc_scope(lenv)
code.return_from_error_cleanup_label = code.new_label()
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
self.generate_function_header(code)
closure_init_code = code.insertion_point()
# ----- Local variables
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
resume_code = code.insertion_point()
first_run_label = code.new_label('first_run')
code.use_label(first_run_label)
code.put_label(first_run_label)
code.putln('%s' %
(code.error_goto_if_null(Naming.sent_value_cname, self.pos)))
# ----- prepare target container for inlined comprehension
if self.is_inlined and self.inlined_comprehension_type is not None:
target_type = self.inlined_comprehension_type
if target_type is Builtin.list_type:
comp_init = 'PyList_New(0)'
elif target_type is Builtin.set_type:
comp_init = 'PySet_New(NULL)'
elif target_type is Builtin.dict_type:
comp_init = 'PyDict_New()'
else:
raise InternalError(
"invalid type of inlined comprehension: %s" % target_type)
code.putln("%s = %s; %s" % (
Naming.retval_cname, comp_init,
code.error_goto_if_null(Naming.retval_cname, self.pos)))
code.put_gotref(Naming.retval_cname)
# ----- Function body
self.generate_function_body(env, code)
# ----- Closure initialization
if lenv.scope_class.type.scope.entries:
closure_init_code.putln('%s = %s;' % (
lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
lenv.scope_class.type.cast_code('%s->closure' %
Naming.generator_cname)))
# FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
code.putln('PyErr_SetNone(PyExc_StopIteration);')
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
if self.is_inlined and self.inlined_comprehension_type is not None:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
if Future.generator_stop in env.global_scope().context.future_directives:
# PEP 479: turn accidental StopIteration exceptions into a RuntimeError
code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
code.putln("if (unlikely(PyErr_ExceptionMatches(PyExc_StopIteration))) "
"__Pyx_Generator_Replace_StopIteration();")
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
# ----- Non-error return cleanup
code.put_label(code.return_label)
if self.is_inlined:
code.put_xgiveref(Naming.retval_cname)
else:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
code.putln('%s->resume_label = -1;' % Naming.generator_cname)
# clean up as early as possible to help breaking any reference cycles
code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
code.put_finish_refcount_context()
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
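# The resume switch dispatches on the saved resume_label: 0 jumps to the
# first-run label, every yield point gets its own case, and any other
# value returns NULL so CPython reports the usual error for an exhausted
# or misused generator.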
resume_code.putln("switch (%s->resume_label) {" % (
Naming.generator_cname))
resume_code.putln("case 0: goto %s;" % first_run_label)
for i, label in code.yield_labels:
resume_code.putln("case %d: goto %s;" % (i, label))
resume_code.putln("default: /* CPython raises the right error here */")
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;")
resume_code.putln("}")
code.exit_cfunc_scope()
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
# is overridden.
#
# py_func
#
# args
# func_temp
# body
child_attrs = ['body']
body = None
def analyse_expressions(self, env):
self.args = env.arg_entries
if self.py_func.is_module_scope:
first_arg = 0
else:
first_arg = 1
from . import ExprNodes
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_node = ExprNodes.SimpleCallNode(
self.pos, function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name)
for arg in self.args[first_arg:]])
if env.return_type.is_void or env.return_type.is_returncode:
self.body = StatListNode(self.pos, stats=[
ExprStatNode(self.pos, expr=call_node),
ReturnStatNode(self.pos, value=None)])
else:
self.body = ReturnStatNode(self.pos, value=call_node)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code):
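# cpdef dispatch check: unless the caller already set the skip-dispatch
# flag, fetch the method attribute from the instance and compare its
# PyCFunction pointer against our own wrapper; if they differ (or it is
# not a builtin function/method at all), the method was overridden in
# Python and the fetched Python-level callable is invoked instead of
# continuing with the cdef implementation.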
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
else:
self_arg = "((PyObject *)%s)" % self.args[0].cname
code.putln("/* Check if called by wrapper */")
code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
code.putln("/* Check if overridden in Python */")
if self.py_func.is_module_scope:
code.putln("else {")
else:
code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.func_node.set_cname(func_node_temp)
# need to get attribute manually--scope would return cdef method
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
err = code.error_goto_if_null(func_node_temp, self.pos)
code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname, err))
code.put_gotref(func_node_temp)
is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % (
func_node_temp, self.py_func.entry.func_cname)
code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
self.body.generate_execution_code(code)
code.putln("}")
code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(func_node_temp)
code.putln("}")
class ClassDefNode(StatNode, BlockNode):
pass
class PyClassDefNode(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
# doc string or None
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
#
# The following subnodes are constructed internally:
#
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
"target", "class_cell", "decorators"]
decorators = None
class_result = None
is_py3_style_class = False # Python3 style class (kwargs)
metaclass = None
mkw = None
def __init__(self, pos, name, bases, doc, body, decorators=None,
keyword_args=None, force_py3_semantics=False):
StatNode.__init__(self, pos)
self.name = name
self.doc = doc
self.body = body
self.decorators = decorators
self.bases = bases
from . import ExprNodes
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.StringNode(pos, value=doc)
else:
doc_node = None
allow_py2_metaclass = not force_py3_semantics
if keyword_args:
allow_py2_metaclass = False
self.is_py3_style_class = True
if keyword_args.is_dict_literal:
if keyword_args.key_value_pairs:
for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
if item.key.value == 'metaclass':
if self.metaclass is not None:
error(item.pos, "keyword argument 'metaclass' passed multiple times")
# special case: we already know the metaclass,
# so we don't need to do the "build kwargs,
# find metaclass" dance at runtime
self.metaclass = item.value
del keyword_args.key_value_pairs[i]
self.mkw = keyword_args
else:
assert self.metaclass is not None
else:
# MergedDictNode
self.mkw = ExprNodes.ProxyNode(keyword_args)
if force_py3_semantics or self.bases or self.mkw or self.metaclass:
if self.metaclass is None:
if keyword_args and not keyword_args.is_dict_literal:
# **kwargs may contain 'metaclass' arg
mkdict = self.mkw
else:
mkdict = None
if (not mkdict and
self.bases.is_sequence_constructor and
not self.bases.args):
pass # no base classes => no inherited metaclass
else:
self.metaclass = ExprNodes.PyClassMetaclassNode(
pos, mkw=mkdict, bases=self.bases)
needs_metaclass_calculation = False
else:
needs_metaclass_calculation = True
self.dict = ExprNodes.PyClassNamespaceNode(
pos, name=name, doc=doc_node,
metaclass=self.metaclass, bases=self.bases, mkw=self.mkw)
self.classobj = ExprNodes.Py3ClassNode(
pos, name=name,
bases=self.bases, dict=self.dict, doc=doc_node,
metaclass=self.metaclass, mkw=self.mkw,
calculate_metaclass=needs_metaclass_calculation,
allow_py2_metaclass=allow_py2_metaclass)
else:
# no bases, no metaclass => old style class creation
self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
self.classobj = ExprNodes.ClassNode(
pos, name=name,
bases=bases, dict=self.dict, doc=doc_node)
self.target = ExprNodes.NameNode(pos, name=name)
self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
bases = self.classobj.bases.args
if len(bases) == 0:
base_class_name = None
base_class_module = None
elif len(bases) == 1:
base = bases[0]
path = []
from .ExprNodes import AttributeNode, NameNode
while isinstance(base, AttributeNode):
path.insert(0, base.attribute)
base = base.obj
if isinstance(base, NameNode):
path.insert(0, base.name)
base_class_name = path[-1]
if len(path) > 1:
base_class_module = u'.'.join(path[:-1])
else:
base_class_module = None
else:
error(self.classobj.bases.args.pos, "Invalid base class")
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
class_name=self.name,
base_class_module=base_class_module,
base_class_name=base_class_name,
decorators=self.decorators,
body=self.body,
in_pxd=False,
doc=self.doc)
def create_scope(self, env):
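        # Create the PyClassScope for the class body, using the nearest
        # enclosing non-class scope as its outer scope.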
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv)
return cenv
def analyse_declarations(self, env):
class_result = self.classobj
if self.decorators:
from .ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function=decorator.decorator,
args=[class_result])
self.decorators = None
self.class_result = class_result
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
def analyse_expressions(self, env):
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.metaclass:
self.metaclass = self.metaclass.analyse_expressions(env)
if self.mkw:
self.mkw = self.mkw.analyse_expressions(env)
self.dict = self.dict.analyse_expressions(env)
self.class_result = self.class_result.analyse_expressions(env)
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
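        # Evaluate the bases, keyword arguments and metaclass, build the class
        # namespace dict, execute the class body inside that namespace, then
        # create the class object and assign it to the target name before
        # disposing of all temporaries.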
code.mark_pos(self.pos)
code.pyclass_stack.append(self)
cenv = self.scope
if self.bases:
self.bases.generate_evaluation_code(code)
if self.mkw:
self.mkw.generate_evaluation_code(code)
if self.metaclass:
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
self.class_cell.generate_evaluation_code(code)
self.body.generate_execution_code(code)
self.class_result.generate_evaluation_code(code)
self.class_cell.generate_injection_code(
code, self.class_result.result())
self.class_cell.generate_disposal_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
self.target.generate_assignment_code(self.class_result, code)
self.dict.generate_disposal_code(code)
self.dict.free_temps(code)
if self.metaclass:
self.metaclass.generate_disposal_code(code)
self.metaclass.free_temps(code)
if self.mkw:
self.mkw.generate_disposal_code(code)
self.mkw.free_temps(code)
if self.bases:
self.bases.generate_disposal_code(code)
self.bases.free_temps(code)
code.pyclass_stack.pop()
class CClassDefNode(ClassDefNode):
# An extension type definition.
#
# visibility 'private' or 'public' or 'extern'
# typedef_flag boolean
# api boolean
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
# base_class_module string or None Module containing the base class
# base_class_name string or None Name of the base class
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
# decorators [DecoratorNode] list of decorators or None
# doc string or None
# body StatNode or None
# entry Symtab.Entry
# base_type PyExtensionType or None
# buffer_defaults_node DictNode or None Declares defaults for a buffer
# buffer_defaults_pos
child_attrs = ["body"]
buffer_defaults_node = None
buffer_defaults_pos = None
typedef_flag = False
api = False
objstruct_name = None
typeobj_name = None
decorators = None
shadow = False
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
from . import Buffer
if self.buffer_defaults_node:
self._buffer_defaults = Buffer.analyse_buffer_options(
self.buffer_defaults_pos,
env, [], self.buffer_defaults_node,
need_complete=False)
else:
self._buffer_defaults = None
return self._buffer_defaults
def declare(self, env):
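        # Pre-declare the extension type entry in its home scope without
        # analysing the class body (defining=0, implementing=0).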
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return None
else:
home_scope = env
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=0,
implementing=0,
module_name=self.module_name,
base_type=None,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
def analyse_declarations(self, env):
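        # Resolve the base class (and, for extern classes, the defining module),
        # declare the extension type entry in its home scope, then analyse the
        # class body and mark the type as defined (.pxd) or implemented (.pyx).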
#print "CClassDefNode.analyse_declarations:", self.class_name
#print "...visibility =", self.visibility
#print "...module_name =", self.module_name
if env.in_cinclude and not self.objstruct_name:
error(self.pos, "Object struct name specification required for C class defined in 'extern from' block")
if self.decorators:
error(self.pos, "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
self.base_type = None
# Now that module imports are cached, we need to
# import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
if module.name == self.module_name:
self.module = module
if self.module is None:
self.module = ModuleScope(self.module_name, None, env.context)
self.module.has_extern_class = 1
env.add_imported_module(self.module)
if self.base_class_name:
if self.base_class_module:
base_class_scope = env.find_module(self.base_class_module, self.pos)
else:
base_class_scope = env
if self.base_class_name == 'object':
# extension classes are special and don't need to inherit from object
if base_class_scope is None or base_class_scope.lookup('object') is None:
self.base_class_name = None
self.base_class_module = None
base_class_scope = None
if base_class_scope:
base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
if base_class_entry:
if not base_class_entry.is_type:
error(self.pos, "'%s' is not a type name" % self.base_class_name)
elif not base_class_entry.type.is_extension_type and \
not (base_class_entry.type.is_builtin_type and
base_class_entry.type.objstruct_cname):
error(self.pos, "'%s' is not an extension type" % self.base_class_name)
elif not base_class_entry.type.is_complete():
error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
base_class_entry.type.is_final_type:
error(self.pos, "Base class '%s' of type '%s' is final" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.is_builtin_type and \
base_class_entry.type.name in ('tuple', 'str', 'bytes'):
error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_class_entry.type.name)
else:
self.base_type = base_class_entry.type
if env.directives.get('freelist', 0) > 0:
warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
# To properly initialize inherited attributes, the base type must
# be analysed before this type.
self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
return
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return
else:
home_scope = env
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=has_body and self.in_pxd,
implementing=has_body and not self.in_pxd,
module_name=self.module_name,
base_type=self.base_type,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
if home_scope is not env and self.visibility == 'extern':
env.add_imported_entry(self.class_name, self.entry, self.pos)
self.scope = scope = self.entry.type.scope
if scope is not None:
scope.directives = env.directives
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body:
self.body.analyse_declarations(scope)
dict_entry = self.scope.lookup_here("__dict__")
if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented):
dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter")
self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos)
if self.in_pxd:
scope.defined = 1
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
thunk()
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
return self
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
def annotate(self, code):
if self.body:
self.body.annotate(code)
class PropertyNode(StatNode):
# Definition of a property in an extension type.
#
# name string
# doc EncodedString or None Doc string
# entry Symtab.Entry
# body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
self.entry = env.declare_property(self.name, self.doc, self.pos)
self.entry.scope.directives = env.directives
self.body.analyse_declarations(self.entry.scope)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class GlobalNode(StatNode):
# Global variable declaration.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_global(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class NonlocalNode(StatNode):
# Nonlocal variable declaration via the 'nonlocal' keyword.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_nonlocal(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class ExprStatNode(StatNode):
# Expression used as a statement.
#
# expr ExprNode
child_attrs = ["expr"]
def analyse_declarations(self, env):
from . import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
func = self.expr.function.as_cython_attribute()
if func == u'declare':
args, kwds = self.expr.explicit_args_kwds()
if len(args):
error(self.expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
env.declare_var(var.value, type, var.pos, is_cdef=True)
self.__class__ = PassStatNode
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
return self
def nogil_check(self, env):
if self.expr.type.is_pyobject and self.expr.is_temp:
self.gil_error()
gil_message = "Discarding owned Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
code.putln("%s;" % self.expr.result())
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class AssignmentNode(StatNode):
# Abstract base class for assignment nodes.
#
# The analyse_expressions and generate_execution_code
# phases of assignments are split into two sub-phases
# each, to enable all the right hand sides of a
# parallel assignment to be evaluated before assigning
# to any of the left hand sides.
def analyse_expressions(self, env):
node = self.analyse_types(env)
if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
if node.rhs.type.is_ptr and node.rhs.is_ephemeral():
error(self.pos, "Storing unsafe C derivative of temporary Python reference")
return node
# def analyse_expressions(self, env):
# self.analyse_expressions_1(env)
# self.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
#
# a = b
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
# is_overloaded_assignment bool Is this assignment done via an overloaded operator=
# exception_check
# exception_value
child_attrs = ["lhs", "rhs"]
first = False
is_overloaded_assignment = False
declaration_only = False
def analyse_declarations(self, env):
from . import ExprNodes
# handle declarations of the form x = cython.foo()
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
if func_name in ['declare', 'typedef']:
if len(args) > 2:
error(args[2].pos, "Invalid positional argument.")
return
if kwds is not None:
kwdict = kwds.compile_time_value(None)
if func_name == 'typedef' or 'visibility' not in kwdict:
error(kwds.pos, "Invalid keyword argument.")
return
visibility = kwdict['visibility']
else:
visibility = 'private'
type = args[0].analyse_as_type(env)
if type is None:
error(args[0].pos, "Unknown type")
return
lhs = self.lhs
if func_name == 'declare':
if isinstance(lhs, ExprNodes.NameNode):
vars = [(lhs.name, lhs.pos)]
elif isinstance(lhs, ExprNodes.TupleNode):
vars = [(var.name, var.pos) for var in lhs.args]
else:
error(lhs.pos, "Invalid declaration")
return
for var, pos in vars:
env.declare_var(var, type, pos, is_cdef=True, visibility=visibility)
if len(args) == 2:
# we have a value
self.rhs = args[1]
else:
self.declaration_only = True
else:
self.declaration_only = True
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
error(self.rhs.pos, "Struct or union members must be given by name.")
return
members = []
for member, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
members.append((member.value, type, member.pos))
if len(members) < len(kwds.key_value_pairs):
return
if not isinstance(self.lhs, ExprNodes.NameNode):
error(self.lhs.pos, "Invalid declaration.")
name = self.lhs.name
scope = StructOrUnionScope(name)
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
elif func_name == 'fused_type':
# dtype = cython.fused_type(...)
self.declaration_only = True
if kwds:
error(self.rhs.function.pos,
"fused_type does not take keyword arguments")
fusednode = FusedTypeNode(self.rhs.pos,
name=self.lhs.name, types=args)
fusednode.analyse_declarations(env)
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
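        # Analyse both sides, first trying to unroll fixed-size tuple <->
        # C array/ctuple assignments into element-wise assignments; otherwise
        # coerce the RHS to the LHS type (or use an overloaded C++ operator=).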
from . import ExprNodes
self.rhs = self.rhs.analyse_types(env)
unrolled_assignment = self.unroll_rhs(env)
if unrolled_assignment:
return unrolled_assignment
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
unrolled_assignment = self.unroll_lhs(env)
if unrolled_assignment:
return unrolled_assignment
if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode):
self.lhs.analyse_broadcast_operation(self.rhs)
self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs)
elif self.lhs.type.is_array:
if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
# cannot assign to C array, only to its full slice
self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
self.lhs = self.lhs.analyse_target_types(env)
if self.lhs.type.is_cpp_class:
op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
if op:
rhs = self.rhs
self.is_overloaded_assignment = True
self.exception_check = op.type.exception_check
self.exception_value = op.type.exception_value
if self.exception_check == '+' and self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp or rhs.is_attribute or (
not rhs.is_name and not rhs.is_literal and
rhs.type.is_pyobject):
# things like (cdef) attribute access are not safe (traverses pointers)
rhs = rhs.coerce_to_temp(env)
elif rhs.type.is_pyobject:
rhs = rhs.coerce_to_simple(env)
self.rhs = rhs
return self
def unroll(self, node, target_size, env):
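        # Try to unroll an assignment involving a C array, pointer slice or
        # ctuple whose length is known at compile time. Returns a tuple
        # (check_node, temp_refs, element_exprs), or None if the size cannot
        # be determined or does not match target_size.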
from . import ExprNodes, UtilNodes
base = node
start_node = stop_node = step_node = check_node = None
if node.type.is_ctuple:
slice_size = node.type.size
elif node.type.is_ptr or node.type.is_array:
while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop):
base = node = node.base
if isinstance(node, ExprNodes.SliceIndexNode):
base = node.base
start_node = node.start
if start_node:
start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
stop_node = node.stop
if stop_node:
stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
else:
if node.type.is_array and node.type.size:
stop_node = ExprNodes.IntNode(
self.pos, value=str(node.type.size),
constant_result=(node.type.size if isinstance(node.type.size, _py_int_types)
else ExprNodes.constant_value_not_set))
else:
error(self.pos, "C array iteration requires known end index")
return
step_node = None #node.step
if step_node:
step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
# TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here.
def get_const(node, none_value):
if node is None:
return none_value
elif node.has_constant_result():
return node.constant_result
else:
raise ValueError("Not a constant.")
try:
slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1)
except ValueError:
error(self.pos, "C array assignment currently requires known endpoints")
return
elif node.type.is_array:
slice_size = node.type.size
if not isinstance(slice_size, _py_int_types):
return # might still work when coercing to Python
else:
return
else:
return
if slice_size != target_size:
error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
return
items = []
base = UtilNodes.LetRefNode(base)
refs = [base]
if start_node and not start_node.is_literal:
start_node = UtilNodes.LetRefNode(start_node)
refs.append(start_node)
if stop_node and not stop_node.is_literal:
stop_node = UtilNodes.LetRefNode(stop_node)
refs.append(stop_node)
if step_node and not step_node.is_literal:
step_node = UtilNodes.LetRefNode(step_node)
refs.append(step_node)
for ix in range(target_size):
ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type)
if step_node is not None:
if step_node.has_constant_result():
step_value = ix_node.constant_result * step_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value)
else:
ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node)
if start_node is not None:
if start_node.has_constant_result() and ix_node.has_constant_result():
index_value = ix_node.constant_result + start_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value)
else:
ix_node = ExprNodes.AddNode(
self.pos, operator='+', operand1=start_node, operand2=ix_node)
items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env)))
return check_node, refs, items
def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env):
from . import UtilNodes
assignments = []
for lhs, rhs in zip(lhs_list, rhs_list):
assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first))
node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env)
if check_node:
node = StatListNode(pos=self.pos, stats=[check_node, node])
for ref in refs[::-1]:
node = UtilNodes.LetNode(ref, node)
return node
def unroll_rhs(self, env):
from . import ExprNodes
if not isinstance(self.lhs, ExprNodes.TupleNode):
return
if any(arg.is_starred for arg in self.lhs.args):
return
unrolled = self.unroll(self.rhs, len(self.lhs.args), env)
if not unrolled:
return
check_node, refs, rhs = unrolled
return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env)
def unroll_lhs(self, env):
if self.lhs.type.is_ctuple:
# Handled directly.
return
from . import ExprNodes
if not isinstance(self.rhs, ExprNodes.TupleNode):
return
unrolled = self.unroll(self.lhs, len(self.rhs.args), env)
if not unrolled:
return
check_node, refs, lhs = unrolled
return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env)
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
if self.is_overloaded_assignment:
self.lhs.generate_assignment_code(
self.rhs,
code,
overloaded_assignment=self.is_overloaded_assignment,
exception_check=self.exception_check,
exception_value=self.exception_value)
else:
self.lhs.generate_assignment_code(self.rhs, code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class CascadedAssignmentNode(AssignmentNode):
# An assignment with multiple left hand sides:
#
# a = b = c
#
# lhs_list [ExprNode] Left hand sides
# rhs ExprNode Right hand sides
#
# Used internally:
#
# coerced_values [ExprNode] RHS coerced to all distinct LHS types
# cloned_values [ExprNode] cloned RHS value for each LHS
# assignment_overloads [Bool] If each assignment uses a C++ operator=
child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
cloned_values = None
coerced_values = None
assignment_overloads = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
from .ExprNodes import CloneNode, ProxyNode
# collect distinct types used on the LHS
lhs_types = set()
for i, lhs in enumerate(self.lhs_list):
lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
lhs.gil_assignment_check(env)
lhs_types.add(lhs.type)
rhs = self.rhs.analyse_types(env)
# common special case: only one type needed on the LHS => coerce only once
if len(lhs_types) == 1:
# Avoid coercion for overloaded assignment operators.
if next(iter(lhs_types)).is_cpp_class:
op = env.lookup_operator('=', [lhs, self.rhs])
if not op:
rhs = rhs.coerce_to(lhs_types.pop(), env)
else:
rhs = rhs.coerce_to(lhs_types.pop(), env)
if not rhs.is_name and not rhs.is_literal and (
use_temp or rhs.is_attribute or rhs.type.is_pyobject):
rhs = rhs.coerce_to_temp(env)
else:
rhs = rhs.coerce_to_simple(env)
self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs
# clone RHS and coerce it to all distinct LHS types
self.coerced_values = []
coerced_values = {}
self.assignment_overloads = []
for lhs in self.lhs_list:
overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
self.assignment_overloads.append(overloaded)
if lhs.type not in coerced_values and lhs.type != rhs.type:
rhs = CloneNode(self.rhs)
if not overloaded:
rhs = rhs.coerce_to(lhs.type, env)
self.coerced_values.append(rhs)
coerced_values[lhs.type] = rhs
# clone coerced values for all LHS assignments
self.cloned_values = []
for lhs in self.lhs_list:
rhs = coerced_values.get(lhs.type, self.rhs)
self.cloned_values.append(CloneNode(rhs))
return self
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
# prepare all coercions
for rhs in self.coerced_values:
rhs.generate_evaluation_code(code)
# assign clones to LHS
for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
rhs.generate_evaluation_code(code)
lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
# dispose of coerced values and original RHS
for rhs_value in self.coerced_values:
rhs_value.generate_disposal_code(code)
rhs_value.free_temps(code)
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
for rhs in self.coerced_values:
rhs.annotate(code)
for lhs, rhs in zip(self.lhs_list, self.cloned_values):
lhs.annotate(code)
rhs.annotate(code)
self.rhs.annotate(code)
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
#
# a, b, c = d, e, f
#
# This has been rearranged by the parser into
#
# a = d ; b = e ; c = f
#
# but we must evaluate all the right hand sides
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
self.stats = [stat.analyse_types(env, use_temp=1)
for stat in self.stats]
return self
# def analyse_expressions(self, env):
# for stat in self.stats:
# stat.analyse_expressions_1(env, use_temp=1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
for stat in self.stats:
stat.generate_assignment_code(code)
def generate_function_definitions(self, env, code):
for stat in self.stats:
stat.generate_function_definitions(env, code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class InPlaceAssignmentNode(AssignmentNode):
    #  An in-place arithmetic assignment:
#
# a += b
# a -= b
# ...
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# operator char one of "+-*/%^&|"
#
# This code is a bit tricky because in order to obey Python
# semantics the sub-expressions (e.g. indices) of the lhs must
# not be evaluated twice. So we must re-use the values calculated
# in evaluation phase for the assignment phase as well.
# Fortunately, the type of the lhs node is fairly constrained
# (it must be a NameNode, AttributeNode, or IndexNode).
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env):
self.rhs = self.rhs.analyse_types(env)
self.lhs = self.lhs.analyse_target_types(env)
# When assigning to a fully indexed buffer or memoryview, coerce the rhs
if self.lhs.is_memview_index or self.lhs.is_buffer_access:
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
elif self.lhs.type.is_string and self.operator in '+-':
# use pointer arithmetic for char* LHS instead of string concat
self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
lhs, rhs = self.lhs, self.rhs
rhs.generate_evaluation_code(code)
lhs.generate_subexpr_evaluation_code(code)
c_op = self.operator
if c_op == "//":
c_op = "/"
elif c_op == "**":
error(self.pos, "No C inplace power operator")
if lhs.is_buffer_access or lhs.is_memview_index:
if lhs.type.is_pyobject:
error(self.pos, "In-place operators not allowed on object buffers in this release.")
if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']:
error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
lhs.generate_buffer_setitem_code(rhs, code, c_op)
elif lhs.is_memview_slice:
error(self.pos, "Inplace operators not supported on memoryview slices")
else:
# C++
# TODO: make sure overload is declared
code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result()))
lhs.generate_subexpr_disposal_code(code)
lhs.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
def create_binop_node(self):
from . import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
class PrintStatNode(StatNode):
# print statement
#
# arg_tuple TupleNode
# stream ExprNode or None (stdout)
# append_newline boolean
child_attrs = ["arg_tuple", "stream"]
def analyse_expressions(self, env):
if self.stream:
stream = self.stream.analyse_expressions(env)
self.stream = stream.coerce_to_pyobject(env)
arg_tuple = self.arg_tuple.analyse_expressions(env)
self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
env.use_utility_code(printing_utility_code)
if len(self.arg_tuple.args) == 1 and self.append_newline:
env.use_utility_code(printing_one_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python print statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.stream:
self.stream.generate_evaluation_code(code)
stream_result = self.stream.py_result()
else:
stream_result = '0'
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
arg.py_result(),
code.error_goto(self.pos)))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
self.arg_tuple.generate_evaluation_code(code)
code.putln(
"if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
stream_result,
self.arg_tuple.py_result(),
self.append_newline,
code.error_goto(self.pos)))
self.arg_tuple.generate_disposal_code(code)
self.arg_tuple.free_temps(code)
if self.stream:
self.stream.generate_disposal_code(code)
self.stream.free_temps(code)
def generate_function_definitions(self, env, code):
if self.stream:
self.stream.generate_function_definitions(env, code)
self.arg_tuple.generate_function_definitions(env, code)
def annotate(self, code):
if self.stream:
self.stream.annotate(code)
self.arg_tuple.annotate(code)
class ExecStatNode(StatNode):
# exec statement
#
# args [ExprNode]
child_attrs = ["args"]
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = arg.analyse_expressions(env)
arg = arg.coerce_to_pyobject(env)
self.args[i] = arg
env.use_utility_code(Builtin.pyexec_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python exec statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
args = []
for arg in self.args:
arg.generate_evaluation_code(code)
args.append(arg.py_result())
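        # pad with NULL ("0") so that __Pyx_PyExec3() always gets exactly
        # three arguments: code, globals, locals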
args = tuple(args + ['0', '0'][:3-len(args)])
temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args))
for arg in self.args:
arg.generate_disposal_code(code)
arg.free_temps(code)
code.putln(
code.error_goto_if_null(temp_result, self.pos))
code.put_gotref(temp_result)
code.put_decref_clear(temp_result, py_object_type)
code.funcstate.release_temp(temp_result)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class DelStatNode(StatNode):
# del statement
#
# args [ExprNode]
child_attrs = ["args"]
ignore_nonexisting = False
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_expression(env, None)
if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice):
if arg.is_name and arg.entry.is_cglobal:
error(arg.pos, "Deletion of global C variable")
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
self.cpp_check(env)
elif arg.type.is_cpp_class:
error(arg.pos, "Deletion of non-heap C++ object")
elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
pass # del ba[i]
else:
error(arg.pos, "Deletion of non-Python, non-C++ object")
#arg.release_target_temp(env)
return self
def nogil_check(self, env):
for arg in self.args:
if arg.type.is_pyobject:
self.gil_error()
gil_message = "Deleting Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for arg in self.args:
if (arg.type.is_pyobject or
arg.type.is_memoryviewslice or
arg.is_subscript and arg.base.type is Builtin.bytearray_type):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
arg.generate_result_code(code)
code.putln("delete %s;" % arg.result())
# else error reported earlier
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class PassStatNode(StatNode):
# pass statement
child_attrs = []
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class IndirectionNode(StatListNode):
"""
This adds an indirection so that the node can be shared and a subtree can
be removed at any time by clearing self.stats.
"""
def __init__(self, stats):
super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
class BreakStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not code.break_label:
error(self.pos, "break statement not inside loop")
else:
code.put_goto(code.break_label)
class ContinueStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if not code.continue_label:
error(self.pos, "continue statement not inside loop")
return
code.mark_pos(self.pos)
code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
# return statement
#
# value ExprNode or None
# return_type PyrexType
# in_generator return inside of generator => raise StopIteration
child_attrs = ["value"]
is_terminator = True
in_generator = False
# Whether we are in a parallel section
in_parallel = False
def analyse_expressions(self, env):
return_type = env.return_type
self.return_type = return_type
if not return_type:
error(self.pos, "Return not inside a function body")
return self
if self.value:
self.value = self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos, "Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
else:
if (not return_type.is_void
and not return_type.is_pyobject
and not return_type.is_returncode):
error(self.pos, "Return value required")
return self
def nogil_check(self, env):
if self.return_type.is_pyobject:
self.gil_error()
gil_message = "Returning Python object"
def generate_execution_code(self, code):
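        # Store the return value in the function's retval variable (with special
        # handling for memoryview slices and returns inside generators), clear
        # any owned temporaries, and jump to the common return label.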
code.mark_pos(self.pos)
if not self.return_type:
# error reported earlier
return
if self.return_type.is_pyobject:
code.put_xdecref(Naming.retval_cname,
self.return_type)
if self.value:
self.value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
lhs_pos=self.value.pos,
rhs=self.value,
code=code,
have_gil=self.in_nogil_context)
elif self.in_generator:
# return value == raise StopIteration(value), but uncatchable
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
self.value.py_result()))
self.value.generate_disposal_code(code)
else:
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
self.value.result_as(self.return_type)))
self.value.generate_post_assignment_code(code)
self.value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
elif self.return_type.is_returncode:
self.put_return(code, self.return_type.default_value)
for cname, type in code.funcstate.temps_holding_reference():
code.put_decref_clear(cname, type)
code.put_goto(code.return_label)
def put_return(self, code, value):
if self.in_parallel:
code.putln_openmp("#pragma omp critical(__pyx_returning)")
code.putln("%s = %s;" % (Naming.retval_cname, value))
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
if self.value:
self.value.annotate(code)
class RaiseStatNode(StatNode):
# raise statement
#
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
def analyse_expressions(self, env):
if self.exc_type:
exc_type = self.exc_type.analyse_types(env)
self.exc_type = exc_type.coerce_to_pyobject(env)
if self.exc_value:
exc_value = self.exc_value.analyse_types(env)
self.exc_value = exc_value.coerce_to_pyobject(env)
if self.exc_tb:
exc_tb = self.exc_tb.analyse_types(env)
self.exc_tb = exc_tb.coerce_to_pyobject(env)
if self.cause:
cause = self.cause.analyse_types(env)
self.cause = cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
self.builtin_exc_name = None
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
from . import ExprNodes
if (isinstance(exc, ExprNodes.SimpleCallNode) and
not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
self.exc_type = None # has a separate implementation
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
else:
type_code = "0"
if self.exc_value:
self.exc_value.generate_evaluation_code(code)
value_code = self.exc_value.py_result()
else:
value_code = "0"
if self.exc_tb:
self.exc_tb.generate_evaluation_code(code)
tb_code = self.exc_tb.py_result()
else:
tb_code = "0"
if self.cause:
self.cause.generate_evaluation_code(code)
cause_code = self.cause.py_result()
else:
cause_code = "0"
code.globalstate.use_utility_code(raise_utility_code)
code.putln(
"__Pyx_Raise(%s, %s, %s, %s);" % (
type_code,
value_code,
tb_code,
cause_code))
for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
if obj:
obj.generate_disposal_code(code)
obj.free_temps(code)
code.putln(
code.error_goto(self.pos))
def generate_function_definitions(self, env, code):
if self.exc_type is not None:
self.exc_type.generate_function_definitions(env, code)
if self.exc_value is not None:
self.exc_value.generate_function_definitions(env, code)
if self.exc_tb is not None:
self.exc_tb.generate_function_definitions(env, code)
if self.cause is not None:
self.cause.generate_function_definitions(env, code)
def annotate(self, code):
if self.exc_type:
self.exc_type.annotate(code)
if self.exc_value:
self.exc_value.annotate(code)
if self.exc_tb:
self.exc_tb.annotate(code)
if self.cause:
self.cause.annotate(code)
class ReraiseStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
vars = code.funcstate.exc_vars
if vars:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.put_giveref(vars[0])
code.put_giveref(vars[1])
# fresh exceptions may not have a traceback yet (-> finally!)
code.put_xgiveref(vars[2])
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars))
for varname in vars:
code.put("%s = 0; " % varname)
code.putln()
code.putln(code.error_goto(self.pos))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
self.cond = self.cond.analyse_boolean_expression(env)
if self.value:
value = self.value.analyse_types(env)
if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
# prevent tuple values from being interpreted as argument value tuples
from .ExprNodes import TupleNode
value = TupleNode(value.pos, args=[value], slow=True)
self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
else:
self.value = value.coerce_to_pyobject(env)
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
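        # Assertions are compiled out entirely when CYTHON_WITHOUT_ASSERTIONS
        # is defined, and skipped at runtime when Python runs in optimized
        # mode (Py_OptimizeFlag set).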
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
code.putln("if (unlikely(!Py_OptimizeFlag)) {")
code.mark_pos(self.pos)
self.cond.generate_evaluation_code(code)
code.putln(
"if (unlikely(!%s)) {" % self.cond.result())
if self.value:
self.value.generate_evaluation_code(code)
code.putln(
"PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result())
self.value.generate_disposal_code(code)
self.value.free_temps(code)
else:
code.putln(
"PyErr_SetNone(PyExc_AssertionError);")
code.putln(
code.error_goto(self.pos))
code.putln(
"}")
self.cond.generate_disposal_code(code)
self.cond.free_temps(code)
code.putln(
"}")
code.putln("#endif")
def generate_function_definitions(self, env, code):
self.cond.generate_function_definitions(env, code)
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
self.cond.annotate(code)
if self.value:
self.value.annotate(code)
class IfStatNode(StatNode):
# if statement
#
# if_clauses [IfClauseNode]
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
def analyse_declarations(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses]
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
if not self.else_clause:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(end_label)
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
for if_clause in self.if_clauses:
if_clause.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class IfClauseNode(Node):
# if or elif clause in an if statement
#
# condition ExprNode
# body StatNode
child_attrs = ["condition", "body"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("if (%s) {" % self.condition.result())
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
if not (is_last or self.body.is_terminator):
code.put_goto(end_label)
code.putln("}")
def generate_function_definitions(self, env, code):
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
for cond in self.conditions:
code.mark_pos(cond.pos)
cond.generate_evaluation_code(code)
code.putln("case %s:" % cond.result())
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln("break;")
def generate_function_definitions(self, env, code):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
self.body.annotate(code)
class SwitchStatNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
child_attrs = ['test', 'cases', 'else_clause']
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("switch (%s) {" % self.test.result())
for case in self.cases:
case.generate_execution_code(code)
if self.else_clause is not None:
code.putln("default:")
self.else_clause.generate_execution_code(code)
code.putln("break;")
else:
            # Always generate a default clause to prevent C compiler warnings
            # about unmatched enum values (the user did not ask for the switch
            # statement to be generated, so they should not be bothered by it).
code.putln("default: break;")
code.putln("}")
def generate_function_definitions(self, env, code):
self.test.generate_function_definitions(env, code)
for case in self.cases:
case.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.test.annotate(code)
for case in self.cases:
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
class LoopNode(object):
pass
class WhileStatNode(LoopNode, StatNode):
# while statement
#
# condition ExprNode
# body StatNode
# else_clause StatNode
child_attrs = ["condition", "body", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
if self.condition:
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
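        # Emit 'while (1)' with an explicit conditional break so that arbitrary
        # evaluation code for the loop condition can be generated inside the
        # loop body.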
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
code.putln(
"while (1) {")
if self.condition:
self.condition.generate_evaluation_code(code)
self.condition.generate_disposal_code(code)
code.putln(
"if (!%s) break;" % self.condition.result())
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
def generate_function_definitions(self, env, code):
if self.condition:
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
if self.condition:
self.condition.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class DictIterationNextNode(Node):
# Helper node for calling PyDict_Next() inside of a WhileStatNode
# and checking the dictionary size for changes. Created in
# Optimize.py.
child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
'key_target', 'value_target', 'tuple_target', 'is_dict_flag']
coerced_key_var = key_ref = None
coerced_value_var = value_ref = None
coerced_tuple_var = tuple_ref = None
def __init__(self, dict_obj, expected_size, pos_index_var,
key_target, value_target, tuple_target, is_dict_flag):
Node.__init__(
self, dict_obj.pos,
dict_obj=dict_obj,
expected_size=expected_size,
pos_index_var=pos_index_var,
key_target=key_target,
value_target=value_target,
tuple_target=tuple_target,
is_dict_flag=is_dict_flag,
is_temp=True,
type=PyrexTypes.c_bint_type)
def analyse_expressions(self, env):
from . import ExprNodes
self.dict_obj = self.dict_obj.analyse_types(env)
self.expected_size = self.expected_size.analyse_types(env)
if self.pos_index_var:
self.pos_index_var = self.pos_index_var.analyse_types(env)
if self.key_target:
self.key_target = self.key_target.analyse_target_types(env)
self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
if self.value_target:
self.value_target = self.value_target.analyse_target_types(env)
self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
if self.tuple_target:
self.tuple_target = self.tuple_target.analyse_target_types(env)
self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
self.is_dict_flag = self.is_dict_flag.analyse_types(env)
return self
def generate_function_definitions(self, env, code):
self.dict_obj.generate_function_definitions(env, code)
def generate_execution_code(self, code):
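        # Call __Pyx_dict_iter_next() with the addresses of only the requested
        # result temps (NULL for unused targets), then coerce and assign the
        # results to their target expressions.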
code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
self.dict_obj.generate_evaluation_code(code)
assignments = []
temp_addresses = []
for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
(self.value_ref, self.coerced_value_var, self.value_target),
(self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
if target is None:
addr = 'NULL'
else:
assignments.append((var, result, target))
var.allocate(code)
addr = '&%s' % var.result()
temp_addresses.append(addr)
result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
result_temp,
self.dict_obj.py_result(),
self.expected_size.result(),
self.pos_index_var.result(),
temp_addresses[0],
temp_addresses[1],
temp_addresses[2],
self.is_dict_flag.result()
))
code.putln("if (unlikely(%s == 0)) break;" % result_temp)
code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
code.funcstate.release_temp(result_temp)
# evaluate all coercions before the assignments
for var, result, target in assignments:
code.put_gotref(var.result())
for var, result, target in assignments:
result.generate_evaluation_code(code)
for var, result, target in assignments:
target.generate_assignment_code(result, code)
var.release(code)
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
return AsyncForStatNode(pos, **kw)
else:
return ForInStatNode(pos, **kw)
else:
return ForFromStatNode(pos, **kw)
class _ForInStatNode(LoopNode, StatNode):
# Base class of 'for-in' statements.
#
# target ExprNode
# iterator IteratorNode | AIterAwaitExprNode(AsyncIteratorNode)
# body StatNode
# else_clause StatNode
# item NextNode | AwaitExprNode(AsyncNextNode)
# is_async boolean true for 'async for' statements
child_attrs = ["target", "item", "iterator", "body", "else_clause"]
item = None
is_async = False
def _create_item_node(self):
raise NotImplementedError("must be implemented by subclasses")
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
self._create_item_node()
def analyse_expressions(self, env):
self.target = self.target.analyse_target_types(env)
self.iterator = self.iterator.analyse_expressions(env)
self._create_item_node() # must rewrap self.item after analysis
self.item = self.item.analyse_expressions(env)
if (not self.is_async and
(self.iterator.type.is_ptr or self.iterator.type.is_array) and
self.target.type.assignable_from(self.iterator.type)):
# C array slice optimization.
pass
else:
self.item = self.item.coerce_to(self.target.type, env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
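        # Evaluate the iterator once, then loop: the item node fetches the next
        # value (and jumps to the break label on exhaustion), the value is
        # assigned to the target, and the body is executed.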
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
self.iterator.generate_evaluation_code(code)
code.putln("for (;;) {")
self.item.generate_evaluation_code(code)
self.target.generate_assignment_code(self.item, code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
# in nested loops, the 'else' block can contain a
# 'continue' statement for the outer loop, but we may need
# to generate cleanup code before taking that path, so we
# intercept it here
orig_continue_label = code.continue_label
code.continue_label = code.new_label('outer_continue')
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
if code.label_used(code.continue_label):
code.put_goto(break_label)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
self.iterator.generate_disposal_code(code)
code.put_goto(orig_continue_label)
code.set_loop_labels(old_loop_labels)
code.mark_pos(self.pos)
if code.label_used(break_label):
code.put_label(break_label)
self.iterator.generate_disposal_code(code)
self.iterator.free_temps(code)
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.iterator.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.iterator.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
self.item.annotate(code)
class ForInStatNode(_ForInStatNode):
# 'for' statement
is_async = False
def _create_item_node(self):
from .ExprNodes import NextNode
self.item = NextNode(self.iterator)
class AsyncForStatNode(_ForInStatNode):
# 'async for' statement
#
# iterator AIterAwaitExprNode(AsyncIteratorNode)
# item AwaitIterNextExprNode(AsyncIteratorNode)
is_async = True
def __init__(self, pos, iterator, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
kw['iterator'] = ExprNodes.AIterAwaitExprNode(iterator.pos, arg=iterator)
kw['item'] = ExprNodes.AwaitIterNextExprNode(iterator.pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
from . import ExprNodes
self.item.arg = ExprNodes.AsyncNextNode(self.iterator)
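# Rough desugaring for orientation (assumed, not emitted verbatim):
#     async for x in it:
#         body
# behaves like awaiting it.__aiter__() once and then awaiting __anext__() on
# each iteration until StopAsyncIteration is raised, which is why both the
# iterator and the item lookup above are wrapped in Await*ExprNodes.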
class ForFromStatNode(LoopNode, StatNode):
# for name from expr rel name rel expr
#
# target NameNode
# bound1 ExprNode
# relation1 string
# relation2 string
# bound2 ExprNode
# step ExprNode or None
# body StatNode
# else_clause StatNode or None
#
# Used internally:
#
# from_range bool
# is_py_target bool
# loopvar_node ExprNode (usually a NameNode or temp node)
# py_loopvar_node PyTempNode or None
child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
is_py_target = False
loopvar_node = None
py_loopvar_node = None
from_range = False
gil_message = "For-loop using object bounds or target"
def nogil_check(self, env):
for x in (self.target, self.bound1, self.bound2):
if x.type.is_pyobject:
self.gil_error()
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.target = self.target.analyse_target_types(env)
self.bound1 = self.bound1.analyse_types(env)
self.bound2 = self.bound2.analyse_types(env)
if self.step is not None:
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statement. "
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
if self.target.type.is_numeric:
loop_type = self.target.type
else:
loop_type = PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
if self.step is not None and not self.step.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
self.bound1 = self.bound1.coerce_to(loop_type, env)
self.bound2 = self.bound2.coerce_to(loop_type, env)
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
target_type = self.target.type
if not (target_type.is_pyobject or target_type.is_numeric):
error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
if target_type.is_numeric:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
self.loopvar_node = self.target
self.py_loopvar_node = None
else:
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
self.py_loopvar_node = \
ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.bound1.generate_evaluation_code(code)
self.bound2.generate_evaluation_code(code)
offset, incop = self.relation_table[self.relation1]
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
incop = "%s=%s" % (incop[0], step)
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
if from_range:
loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
else:
loopvar_name = self.loopvar_node.result()
if self.target.type.is_int and not self.target.type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
if not self.step:
step = 1
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
loopvar_name, self.relation2, self.bound2.result(), step,
loopvar_name, incop))
else:
code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % (
loopvar_name,
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
if self.py_loopvar_node:
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
elif from_range:
code.putln("%s = %s;" % (
self.target.result(), loopvar_name))
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
            # This mess is to make for..from loops with Python targets behave
            # exactly like those with C targets with regard to re-assignment
            # of the loop variable.
if self.target.entry.is_pyglobal:
# We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
if self.target.entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
lookup_func = '__Pyx_GetModuleGlobalName(%s)'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % (
self.target.entry.scope.namespace_cname)
code.putln("%s = %s; %s" % (
target_node.result(),
lookup_func % interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(
self.loopvar_node.type, target_node, self.target.entry.scope)
from_py_node.temp_code = loopvar_name
from_py_node.generate_result_code(code)
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
            # This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop variable is a Python object.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
self.bound1.generate_disposal_code(code)
self.bound1.free_temps(code)
self.bound2.generate_disposal_code(code)
self.bound2.free_temps(code)
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.release(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.release(code)
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
'<' : ("+1", "++"),
'>=': ("", "--"),
'>' : ("-1", "--"),
}
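    # Example of how the table is used (illustrative): a loop written as
    #     for i from 0 <= i < n:
    # has relation1 '<=' (offset "", increment "++") and relation2 '<', so the
    # generated header is roughly 'for (i = 0; i < n; i++) {', whereas
    #     for i from n > i >= 0:
    # starts at 'n-1' and decrements.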
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.bound1.generate_function_definitions(env, code)
self.bound2.generate_function_definitions(env, code)
if self.step is not None:
self.step.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
self.bound2.annotate(code)
if self.step:
self.step.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class WithStatNode(StatNode):
"""
Represents a Python with statement.
Implemented by the WithTransform as follows:
MGR = EXPR
EXIT = MGR.__exit__
VALUE = MGR.__enter__()
EXC = True
try:
try:
TARGET = VALUE # optional
BODY
except:
EXC = False
if not EXIT(*EXCINFO):
raise
finally:
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = None
"""
# manager The with statement manager object
# target ExprNode the target lhs of the __enter__() call
# body StatNode
# enter_call ExprNode the call to the __enter__() method
# exit_var String the cname of the __exit__() method reference
child_attrs = ["manager", "enter_call", "target", "body"]
enter_call = None
target_temp = None
def analyse_declarations(self, env):
self.manager.analyse_declarations(env)
self.enter_call.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.manager = self.manager.analyse_types(env)
self.enter_call = self.enter_call.analyse_types(env)
if self.target:
# set up target_temp before descending into body (which uses it)
from .ExprNodes import TempNode
self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.manager.generate_function_definitions(env, code)
self.enter_call.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.putln("/*with:*/ {")
self.manager.generate_evaluation_code(code)
self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
self.exit_var,
self.manager.py_result(),
code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
code.error_goto_if_null(self.exit_var, self.pos),
))
code.put_gotref(self.exit_var)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
intermediate_error_label = code.error_label
self.enter_call.generate_evaluation_code(code)
if self.target:
# The temp result will be cleaned up by the WithTargetAssignmentStatNode
# after assigning its result to the target of the 'with' statement.
self.target_temp.allocate(code)
self.enter_call.make_owned_reference(code)
code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
self.enter_call.generate_post_assignment_code(code)
else:
self.enter_call.generate_disposal_code(code)
self.enter_call.free_temps(code)
self.manager.generate_disposal_code(code)
self.manager.free_temps(code)
code.error_label = old_error_label
self.body.generate_execution_code(code)
if code.label_used(intermediate_error_label):
step_over_label = code.new_label()
code.put_goto(step_over_label)
code.put_label(intermediate_error_label)
code.put_decref_clear(self.exit_var, py_object_type)
code.put_goto(old_error_label)
code.put_label(step_over_label)
code.funcstate.release_temp(self.exit_var)
code.putln('}')
class WithTargetAssignmentStatNode(AssignmentNode):
# The target assignment of the 'with' statement value (return
# value of the __enter__() call).
#
    # This is a special-cased assignment that properly cleans up the RHS.
#
# lhs ExprNode the assignment target
# rhs ExprNode a (coerced) TempNode for the rhs (from WithStatNode)
# with_node WithStatNode the surrounding with-statement
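    # For example (illustrative), in 'with open(name) as f:' this node
    # performs the 'f = <enter result>' assignment and then releases the temp
    # that held the __enter__() result.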
child_attrs = ["rhs", "lhs"]
with_node = None
rhs = None
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_expressions(self, env):
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env)
return self
def generate_execution_code(self, code):
self.rhs.generate_evaluation_code(code)
self.lhs.generate_assignment_code(self.rhs, code)
self.with_node.target_temp.release(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class TryExceptStatNode(StatNode):
# try .. except statement
#
# body StatNode
# except_clauses [ExceptClauseNode]
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
for except_clause in self.except_clauses:
except_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
default_clause_seen = 0
for i, except_clause in enumerate(self.except_clauses):
except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
if default_clause_seen:
error(except_clause.pos, "default 'except:' must be last")
if not except_clause.pattern:
default_clause_seen = 1
self.has_default_clause = default_clause_seen
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
nogil_check = Node.gil_error
gil_message = "Try-except statement"
def generate_execution_code(self, code):
old_return_label = code.return_label
old_break_label = code.break_label
old_continue_label = code.continue_label
old_error_label = code.new_error_label()
our_error_label = code.error_label
except_end_label = code.new_label('exception_handled')
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
try_break_label = code.new_label('try_break')
try_continue_label = code.new_label('try_continue')
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
for _ in range(3)]
code.mark_pos(self.pos)
code.putln("{")
save_exc = code.insertion_point()
code.putln(
"/*try:*/ {")
code.return_label = try_return_label
code.break_label = try_break_label
code.continue_label = try_continue_label
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln(
"}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
can_raise = code.label_used(our_error_label)
if can_raise:
# inject code before the try block to save away the exception state
code.globalstate.use_utility_code(reset_exception_utility_code)
save_exc.putln("__Pyx_PyThreadState_declare")
save_exc.putln("__Pyx_PyThreadState_assign")
save_exc.putln("__Pyx_ExceptionSave(%s);" % (
', '.join(['&%s' % var for var in exc_save_vars])))
for var in exc_save_vars:
save_exc.put_xgotref(var)
def restore_saved_exception():
for name in exc_save_vars:
code.put_xgiveref(name)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
save_exc.putln("if (%s); else {/*mark used*/}" % '||'.join(exc_save_vars))
def restore_saved_exception():
pass
code.error_label = except_error_label
code.return_label = except_return_label
normal_case_terminates = self.body.is_terminator
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln(
"/*else:*/ {")
self.else_clause.generate_execution_code(code)
code.putln(
"}")
if not normal_case_terminates:
normal_case_terminates = self.else_clause.is_terminator
if can_raise:
if not normal_case_terminates:
for var in exc_save_vars:
code.put_xdecref_clear(var, py_object_type)
code.put_goto(try_end_label)
code.put_label(our_error_label)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
for temp_name, temp_type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, temp_type)
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
if not self.has_default_clause:
code.put_goto(except_error_label)
for exit_label, old_label in [(except_error_label, old_error_label),
(try_break_label, old_break_label),
(try_continue_label, old_continue_label),
(try_return_label, old_return_label),
(except_return_label, old_return_label)]:
if code.label_used(exit_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(exit_label)
code.mark_pos(self.pos, trace=False)
if can_raise:
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
restore_saved_exception()
code.put_goto(old_label)
if code.label_used(except_end_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(except_end_label)
if can_raise:
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
restore_saved_exception()
if code.label_used(try_end_label):
code.put_label(try_end_label)
code.putln("}")
for cname in exc_save_vars:
code.funcstate.release_temp(cname)
code.return_label = old_return_label
code.break_label = old_break_label
code.continue_label = old_continue_label
code.error_label = old_error_label
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
for except_clause in self.except_clauses:
except_clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
for except_node in self.except_clauses:
except_node.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class ExceptClauseNode(Node):
# Part of try ... except statement.
#
# pattern [ExprNode]
# target ExprNode or None
# body StatNode
# excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!)
# match_flag string result of exception match
# exc_value ExcValueNode used internally
# function_name string qualified name of enclosing function
# exc_vars (string * 3) local exception variables
# is_except_as bool Py3-style "except ... as xyz"
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
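    # For example (illustrative), 'except (TypeError, ValueError) as exc:'
    # tests both exception types, binds the target 'exc' and sets
    # is_except_as=True; a bare 'except:' has no pattern and acts as the
    # default clause.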
child_attrs = ["pattern", "target", "body", "exc_value"]
exc_value = None
excinfo_target = None
is_except_as = False
def analyse_declarations(self, env):
if self.target:
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.function_name = env.qualified_name
if self.pattern:
# normalise/unpack self.pattern into a list
for i, pattern in enumerate(self.pattern):
pattern = pattern.analyse_expressions(env)
self.pattern[i] = pattern.coerce_to_pyobject(env)
if self.target:
from . import ExprNodes
self.exc_value = ExprNodes.ExcValueNode(self.pos)
self.target = self.target.analyse_target_expression(env, self.exc_value)
self.body = self.body.analyse_expressions(env)
return self
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
if self.pattern:
code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
exc_tests.append("__Pyx_PyErr_ExceptionMatches(%s)" % pattern.py_result())
match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln(
"%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
code.putln(
"if (%s) {" %
match_flag)
code.funcstate.release_temp(match_flag)
else:
code.putln("/*except:*/ {")
if (not getattr(self.body, 'stats', True)
and self.excinfo_target is None
and self.target is None):
# most simple case: no exception variable, empty body (pass)
# => reset the exception state, done
code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
code.putln("__Pyx_ErrRestore(0,0,0);")
code.put_goto(end_label)
code.putln("}")
return
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
# there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
exc_args, code.error_goto(self.pos)))
for x in exc_vars:
code.put_gotref(x)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
self.target.generate_assignment_code(self.exc_value, code)
if self.excinfo_target is not None:
for tempvar, node in zip(exc_vars, self.excinfo_target.args):
node.set_var(tempvar)
old_break_label, old_continue_label = code.break_label, code.continue_label
code.break_label = code.new_label('except_break')
code.continue_label = code.new_label('except_continue')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
if not self.body.is_terminator:
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(end_label)
for new_label, old_label in [(code.break_label, old_break_label),
(code.continue_label, old_continue_label)]:
if code.label_used(new_label):
code.put_label(new_label)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(old_label)
code.break_label = old_break_label
code.continue_label = old_continue_label
for temp in exc_vars:
code.funcstate.release_temp(temp)
code.putln(
"}")
def generate_function_definitions(self, env, code):
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
pattern.annotate(code)
if self.target:
self.target.annotate(code)
self.body.annotate(code)
class TryFinallyStatNode(StatNode):
# try ... finally statement
#
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
#
# Each of the continue, break, return and error gotos runs
# into its own deep-copy of the finally block code.
# In addition, if we're doing an error, we save the
# exception on entry to the finally block and restore
# it on exit.
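    # Sketch of the generated structure (simplified): the body runs inside a
    # /*try:*/ block; normal fall-through executes one copy of the finally
    # code and jumps past the rest, while every used exit label
    # (break/continue/return/error) runs its own deep-copied finally code
    # before being forwarded to the corresponding outer label.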
child_attrs = ["body", "finally_clause", "finally_except_clause"]
preserve_exception = 1
# handle exception case, in addition to return/break/continue
handle_error_case = True
func_return_type = None
finally_except_clause = None
is_try_finally_in_nogil = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_except_clause = copy.deepcopy(self.finally_clause)
self.finally_except_clause.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
self.finally_clause = self.finally_clause.analyse_expressions(env)
self.finally_except_clause = self.finally_except_clause.analyse_expressions(env)
if env.return_type and not env.return_type.is_void:
self.func_return_type = env.return_type
return self
nogil_check = Node.gil_error
gil_message = "Try-finally statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_error_label = code.error_label
old_labels = code.all_new_labels()
new_labels = code.get_all_labels()
new_error_label = code.error_label
if not self.handle_error_case:
code.error_label = old_error_label
catch_label = code.new_label()
code.putln("/*try:*/ {")
was_in_try_finally = code.funcstate.in_try_finally
code.funcstate.in_try_finally = 1
self.body.generate_execution_code(code)
code.funcstate.in_try_finally = was_in_try_finally
code.putln("}")
code.set_all_labels(old_labels)
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.mark_pos(self.finally_clause.pos)
code.putln("/*finally:*/ {")
def fresh_finally_clause(_next=[self.finally_clause]):
# generate the original subtree once and always keep a fresh copy
node = _next[0]
node_copy = copy.deepcopy(node)
if node is self.finally_clause:
_next[0] = node_copy
else:
node = node_copy
return node
preserve_error = self.preserve_exception and code.label_used(new_error_label)
needs_success_cleanup = not self.finally_clause.is_terminator
if not self.body.is_terminator:
code.putln('/*normal exit:*/{')
fresh_finally_clause().generate_execution_code(code)
if not self.finally_clause.is_terminator:
code.put_goto(catch_label)
code.putln('}')
if preserve_error:
code.putln('/*exception exit:*/{')
code.putln("__Pyx_PyThreadState_declare")
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
exc_lineno_cnames = tuple([
code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
for _ in range(2)])
exc_filename_cname = code.funcstate.allocate_temp(
PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
manage_ref=False)
else:
exc_lineno_cnames = exc_filename_cname = None
exc_vars = tuple([
code.funcstate.allocate_temp(py_object_type, manage_ref=False)
for _ in range(6)])
code.put_label(new_error_label)
self.put_error_catcher(
code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
finally_old_labels = code.all_new_labels()
code.putln('{')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars[:3]
self.finally_except_clause.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
code.putln('}')
if needs_success_cleanup:
self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
if exc_lineno_cnames:
for cname in exc_lineno_cnames:
code.funcstate.release_temp(cname)
if exc_filename_cname:
code.funcstate.release_temp(exc_filename_cname)
code.put_goto(old_error_label)
for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
if not code.label_used(new_label):
continue
code.put_label(new_label)
self.put_error_cleaner(code, exc_vars)
code.put_goto(old_label)
for cname in exc_vars:
code.funcstate.release_temp(cname)
code.putln('}')
code.set_all_labels(old_labels)
return_label = code.return_label
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
code.put('%s: ' % new_label)
code.putln('{')
ret_temp = None
if old_label == return_label and not self.finally_clause.is_terminator:
# store away return value for later reuse
if (self.func_return_type and
not self.is_try_finally_in_nogil and
not isinstance(self.finally_clause, GILExitNode)):
ret_temp = code.funcstate.allocate_temp(
self.func_return_type, manage_ref=False)
code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % Naming.retval_cname)
fresh_finally_clause().generate_execution_code(code)
if ret_temp:
code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % ret_temp)
code.funcstate.release_temp(ret_temp)
ret_temp = None
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
# End finally
code.put_label(catch_label)
code.putln(
"}")
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
self.finally_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign")
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3)"
" __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
code.putln("if ((PY_MAJOR_VERSION < 3) ||"
# if __Pyx_GetException() fails in Py3,
# store the newly raised exception instead
" unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
"__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
for var in exc_vars:
code.put_xgotref(var)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
exc_lineno_cnames[0], Naming.lineno_cname,
exc_lineno_cnames[1], Naming.clineno_cname,
exc_filename_cname, Naming.filename_cname))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xgiveref(var)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
Naming.clineno_cname, exc_lineno_cnames[1],
Naming.filename_cname, exc_filename_cname))
def put_error_cleaner(self, code, exc_vars):
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xdecref_clear(var, py_object_type)
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])
def annotate(self, code):
self.body.annotate(code)
self.finally_clause.annotate(code)
class NogilTryFinallyStatNode(TryFinallyStatNode):
"""
A try/finally statement that may be used in nogil code sections.
"""
preserve_exception = False
nogil_check = None
class GILStatNode(NogilTryFinallyStatNode):
# 'with gil' or 'with nogil' statement
#
# state string 'gil' or 'nogil'
state_temp = None
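    # Typical usage at the Cython level (illustrative):
    #     with nogil:
    #         c_work()           # GIL released around the block
    #         with gil:
    #             py_callback()  # GIL temporarily re-acquired
    # The GILExitNode used as the finally clause restores the previous GIL
    # state even on error paths.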
def __init__(self, pos, state, body):
self.state = state
self.create_state_temp_if_needed(pos, state, body)
TryFinallyStatNode.__init__(
self, pos,
body=body,
finally_clause=GILExitNode(
pos, state=state, state_temp=self.state_temp))
def create_state_temp_if_needed(self, pos, state, body):
from .ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(body)
if not collector.yields and not collector.awaits:
return
if state == 'gil':
temp_type = PyrexTypes.c_gilstate_type
else:
temp_type = PyrexTypes.c_threadstate_ptr_type
from . import ExprNodes
self.state_temp = ExprNodes.TempNode(pos, temp_type)
def analyse_declarations(self, env):
env._in_with_gil_block = (self.state == 'gil')
if self.state == 'gil':
env.has_with_gil_block = True
return super(GILStatNode, self).analyse_declarations(env)
def analyse_expressions(self, env):
env.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
was_nogil = env.nogil
env.nogil = self.state == 'nogil'
node = TryFinallyStatNode.analyse_expressions(self, env)
env.nogil = was_nogil
return node
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.begin_block()
if self.state_temp:
self.state_temp.allocate(code)
variable = self.state_temp.result()
else:
variable = None
old_gil_config = code.funcstate.gil_owned
if self.state == 'gil':
code.put_ensure_gil(variable=variable)
code.funcstate.gil_owned = True
else:
code.put_release_gil(variable=variable)
code.funcstate.gil_owned = False
TryFinallyStatNode.generate_execution_code(self, code)
if self.state_temp:
self.state_temp.release(code)
code.funcstate.gil_owned = old_gil_config
code.end_block()
class GILExitNode(StatNode):
"""
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
"""
child_attrs = []
state_temp = None
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.state_temp:
variable = self.state_temp.result()
else:
variable = None
if self.state == 'gil':
code.put_release_ensured_gil(variable)
else:
code.put_acquire_gil(variable)
class EnsureGILNode(GILExitNode):
"""
Ensure the GIL in nogil functions for cleanup before returning.
"""
def generate_execution_code(self, code):
code.put_ensure_gil(declare_gilstate=False)
def cython_view_utility_code():
from . import MemoryView
return MemoryView.view_utility_code
utility_code_for_cimports = {
# utility code (or inlining c) in a pxd (or pyx) file.
# TODO: Consider a generic user-level mechanism for importing
'cpython.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"),
'cpython.array.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"),
'cython.view' : cython_view_utility_code,
}
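# For example (illustrative), 'from cpython.array cimport array' is covered by
# the 'cpython.array'/'cpython.array.array' entries and pulls in the
# "ArrayAPI" utility code, while 'cimport cython.view' loads the memoryview
# utility code.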
utility_code_for_imports = {
# utility code used when special modules are imported.
# TODO: Consider a generic user-level mechanism for importing
'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"),
'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"),
}
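# For example (illustrative), compiled code that performs 'import asyncio' at
# run time is accompanied by a call to __Pyx_patch_asyncio from Coroutine.c so
# that Cython's coroutine/generator types are recognised by asyncio;
# 'import inspect' is patched analogously.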
class CImportStatNode(StatNode):
# cimport statement
#
# module_name string Qualified name of module being imported
# as_name string or None Name specified in "as" clause, if any
# is_absolute bool True for absolute imports, False otherwise
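    # e.g. 'cimport numpy as np' gives module_name 'numpy' and as_name 'np'
    # (illustrative)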
child_attrs = []
is_absolute = False
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(
self.module_name, self.pos, relative_level=0 if self.is_absolute else -1)
if "." in self.module_name:
names = [EncodedString(name) for name in self.module_name.split(".")]
top_name = names[0]
top_module_scope = env.context.find_submodule(top_name)
module_scope = top_module_scope
for name in names[1:]:
submodule_scope = module_scope.find_submodule(name)
module_scope.declare_module(name, submodule_scope, self.pos)
module_scope = submodule_scope
if self.as_name:
env.declare_module(self.as_name, module_scope, self.pos)
else:
env.add_imported_module(module_scope)
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
env.declare_module(name, module_scope, self.pos)
if self.module_name in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[self.module_name]())
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# relative_level int or None Relative import: number of dots before module_name
# imported_names [(pos, name, as_name, kind)] Names to be imported
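    # e.g. 'from libc.math cimport sin as c_sin' gives module_name
    # 'libc.math' and imported_names [(pos, 'sin', 'c_sin', None)]
    # (illustrative; the kind slot is only set for 'struct'/'union'/'class'
    # cimports).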
child_attrs = []
module_name = None
relative_level = None
imported_names = None
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
if self.relative_level and self.relative_level > env.qualified_name.count('.'):
error(self.pos, "relative cimport beyond main package is not allowed")
return
module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level)
module_name = module_scope.qualified_name
env.add_imported_module(module_scope)
for pos, name, as_name, kind in self.imported_names:
if name == "*":
for local_name, entry in list(module_scope.entries.items()):
env.add_imported_entry(local_name, entry, pos)
else:
entry = module_scope.lookup(name)
if entry:
if kind and not self.declaration_matches(entry, kind):
entry.redeclared(pos)
entry.used = 1
else:
if kind == 'struct' or kind == 'union':
entry = module_scope.declare_struct_or_union(
name, kind=kind, scope=None, typedef_flag=0, pos=pos)
elif kind == 'class':
entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name)
else:
submodule_scope = env.context.find_module(
name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
if submodule_scope.parent_module is module_scope:
env.declare_module(as_name or name, submodule_scope, self.pos)
else:
error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now
if module_name in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[module_name]())
for _, name, _, _ in self.imported_names:
fqname = '%s.%s' % (module_name, name)
if fqname in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[fqname]())
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
type = entry.type
if kind == 'class':
if not type.is_extension_type:
return 0
else:
if not type.is_struct_or_union:
return 0
if kind != type.kind:
return 0
return 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromImportStatNode(StatNode):
# from ... import statement
#
# module ImportNode
# items [(string, NameNode)]
# interned_items [(string, NameNode, ExprNode)]
# item PyTempNode used internally
# import_star boolean used internally
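    # e.g. 'from os.path import join, sep' gives items
    # [('join', <NameNode>), ('sep', <NameNode>)], while 'from mod import *'
    # sets import_star instead (illustrative).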
child_attrs = ["module"]
import_star = 0
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
if not env.is_module_scope:
error(self.pos, "import * only allowed at module level")
return
env.has_import_star = 1
self.import_star = 1
else:
target.analyse_target_declaration(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.module = self.module.analyse_expressions(env)
self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
self.interned_items = []
for name, target in self.items:
if name == '*':
for _, entry in env.entries.items():
if not entry.is_type and entry.type.is_extension_type:
env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
break
else:
entry = env.lookup(target.name)
# check whether or not entry is already cimported
if (entry.is_type and entry.type.name == name
and hasattr(entry.type, 'module_name')):
if entry.type.module_name == self.module.module_name.value:
# cimported with absolute name
continue
try:
# cimported with relative name
module = env.find_module(self.module.module_name.value, pos=self.pos,
relative_level=self.module.level)
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
pass
target = target.analyse_target_expression(env, None) # FIXME?
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.module.generate_evaluation_code(code)
if self.import_star:
code.putln(
'if (%s(%s) < 0) %s;' % (
Naming.import_star,
self.module.py_result(),
code.error_goto(self.pos)))
item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.item.set_cname(item_temp)
if self.interned_items:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
for name, target, coerced_item in self.interned_items:
code.putln(
'%s = __Pyx_ImportFrom(%s, %s); %s' % (
item_temp,
self.module.py_result(),
code.intern_identifier(name),
code.error_goto_if_null(item_temp, self.pos)))
code.put_gotref(item_temp)
if coerced_item is None:
target.generate_assignment_code(self.item, code)
else:
coerced_item.allocate_temp_result(code)
coerced_item.generate_result_code(code)
target.generate_assignment_code(coerced_item, code)
code.put_decref_clear(item_temp, py_object_type)
code.funcstate.release_temp(item_temp)
self.module.generate_disposal_code(code)
self.module.free_temps(code)
class ParallelNode(Node):
"""
Base class for cython.parallel constructs.
"""
nogil_check = None
class ParallelStatNode(StatNode, ParallelNode):
"""
Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
assignments to variables in this parallel section
parent parent ParallelStatNode or None
is_parallel indicates whether this node is OpenMP parallel
(true for #pragma omp parallel for and
#pragma omp parallel)
is_parallel is true for:
#pragma omp parallel
#pragma omp parallel for
sections, but NOT for
#pragma omp for
We need this to determine the sharing attributes.
privatization_insertion_point a code insertion point used to make temps
private (esp. the "nsteps" temp)
args tuple the arguments passed to the parallel construct
kwargs DictNode the keyword arguments passed to the parallel
construct (replaced by its compile time value)
"""
child_attrs = ['body', 'num_threads']
body = None
is_prange = False
is_nested_prange = False
error_label_used = False
num_threads = None
chunksize = None
parallel_exc = (
Naming.parallel_exc_type,
Naming.parallel_exc_value,
Naming.parallel_exc_tb,
)
parallel_pos_info = (
Naming.parallel_filename,
Naming.parallel_lineno,
Naming.parallel_clineno,
)
pos_info = (
Naming.filename_cname,
Naming.lineno_cname,
Naming.clineno_cname,
)
critical_section_counter = 0
def __init__(self, pos, **kwargs):
super(ParallelStatNode, self).__init__(pos, **kwargs)
# All assignments in this scope
self.assignments = kwargs.get('assignments') or {}
# All seen closure cnames and their temporary cnames
self.seen_closure_vars = set()
# Dict of variables that should be declared (first|last|)private or
# reduction { Entry: (op, lastprivate) }.
# If op is not None, it's a reduction.
self.privates = {}
# [NameNode]
self.assigned_nodes = []
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
for dictitem in self.kwargs.key_value_pairs:
if dictitem.key.value == 'num_threads':
self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
self.chunksize = dictitem.value
else:
pairs.append(dictitem)
self.kwargs.key_value_pairs = pairs
try:
self.kwargs = self.kwargs.compile_time_value(env)
except Exception as e:
error(self.kwargs.pos, "Only compile-time values may be "
"supplied as keyword arguments")
else:
self.kwargs = {}
for kw, val in self.kwargs.items():
if kw not in self.valid_keyword_arguments:
error(self.pos, "Invalid keyword argument: %s" % kw)
else:
setattr(self, kw, val)
def analyse_expressions(self, env):
if self.num_threads:
self.num_threads = self.num_threads.analyse_expressions(env)
if self.chunksize:
self.chunksize = self.chunksize.analyse_expressions(env)
self.body = self.body.analyse_expressions(env)
self.analyse_sharing_attributes(env)
if self.num_threads is not None:
if self.parent and self.parent.num_threads is not None and not self.parent.is_prange:
error(self.pos, "num_threads already declared in outer section")
elif self.parent and not self.parent.is_prange:
error(self.pos, "num_threads must be declared in the parent parallel section")
elif (self.num_threads.type.is_int and
self.num_threads.is_literal and
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
if not self.num_threads.is_simple():
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
def analyse_sharing_attributes(self, env):
"""
Analyse the privates for this block and set them in self.privates.
This should be called in a post-order fashion during the
analyse_expressions phase
"""
for entry, (pos, op) in self.assignments.items():
if self.is_prange and not self.is_parallel:
# closely nested prange in a with parallel block, disallow
# assigning to privates in the with parallel block (we
# consider it too implicit and magicky for users)
if entry in self.parent.assignments:
error(pos, "Cannot assign to private of outer parallel block")
continue
if not self.is_prange and op:
                # Again possible, but considered too magicky
error(pos, "Reductions not allowed for parallel blocks")
continue
# By default all variables should have the same values as if
# executed sequentially
lastprivate = True
self.propagate_var_privatization(entry, pos, op, lastprivate)
def propagate_var_privatization(self, entry, pos, op, lastprivate):
"""
Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.
If we are a prange, we propagate our sharing attributes outwards to
other pranges. If we are a prange in parallel block and the parallel
block does not determine the variable private, we propagate to the
parent of the parent. Recursion stops at parallel blocks, as they have
no concept of lastprivate or reduction.
So the following cases propagate:
sum is a reduction for all loops:
for i in prange(n):
for j in prange(n):
for k in prange(n):
sum += i * j * k
sum is a reduction for both loops, local_var is private to the
parallel with block:
for i in prange(n):
with parallel:
local_var = ... # private to the parallel
for j in prange(n):
sum += i * j
Nested with parallel blocks are disallowed, because they wouldn't
allow you to propagate lastprivates or reductions:
#pragma omp parallel for lastprivate(i)
for i in prange(n):
sum = 0
#pragma omp parallel private(j, sum)
with parallel:
#pragma omp parallel
with parallel:
#pragma omp for lastprivate(j) reduction(+:sum)
for j in prange(n):
sum += i
                    # sum and j are well-defined here (still inside the inner 'with parallel' block)
                # sum and j are undefined here (after leaving the inner 'with parallel' block)
            # sum and j are undefined here (after the outer 'with parallel' block, inside the prange)
"""
self.privates[entry] = (op, lastprivate)
if entry.type.is_memoryviewslice:
error(pos, "Memoryview slices can only be shared in parallel sections")
return
if self.is_prange:
if not self.is_parallel and entry not in self.parent.assignments:
# Parent is a parallel with block
parent = self.parent.parent
else:
parent = self.parent
# We don't need to propagate privates, only reductions and
# lastprivates
if parent and (op or lastprivate):
parent.propagate_var_privatization(entry, pos, op, lastprivate)
def _allocate_closure_temp(self, code, entry):
"""
        Helper function that allocates a temporary for a closure variable that
is assigned to.
"""
if self.parent:
return self.parent._allocate_closure_temp(code, entry)
if entry.cname in self.seen_closure_vars:
return entry.cname
cname = code.funcstate.allocate_temp(entry.type, True)
# Add both the actual cname and the temp cname, as the actual cname
# will be replaced with the temp cname on the entry
self.seen_closure_vars.add(entry.cname)
self.seen_closure_vars.add(cname)
self.modified_entries.append((entry, entry.cname))
code.putln("%s = %s;" % (cname, entry.cname))
entry.cname = cname
def initialize_privates_to_nan(self, code, exclude=None):
first = True
for entry, (op, lastprivate) in sorted(self.privates.items()):
if not op and (not exclude or entry != exclude):
invalid_value = entry.type.invalid_value()
if invalid_value:
if first:
code.putln("/* Initialize private variables to "
"invalid values */")
first = False
code.putln("%s = %s;" % (entry.cname,
entry.type.cast_code(invalid_value)))
def evaluate_before_block(self, code, expr):
c = self.begin_of_parallel_control_block_point_after_decls
        # we need to set the owner to ourselves temporarily, as
        # allocate_temp may otherwise generate a comment in the middle of
        # our pragma when DebugFlags.debug_temp_code_comments is in effect
owner = c.funcstate.owner
c.funcstate.owner = c
expr.generate_evaluation_code(c)
c.funcstate.owner = owner
return expr.result()
def put_num_threads(self, code):
"""
        Write self.num_threads, if set, as the num_threads clause of the
        OpenMP directive
"""
if self.num_threads is not None:
code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads))
def declare_closure_privates(self, code):
"""
        If a variable is in a scope object, we need to allocate a temp, use
        the temp inside the parallel section, and assign its value back to
        the variable in the scope object after the parallel section. This
        kind of copying should be done only in the outermost parallel
        section.
"""
self.modified_entries = []
for entry in sorted(self.assignments):
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
def privatize_temps(self, code, exclude_temps=()):
"""
        Make any used temporaries private. Before the relevant code block,
        code.start_collecting_temps() should have been called.
"""
if self.is_parallel:
c = self.privatization_insertion_point
self.temps = temps = code.funcstate.stop_collecting_temps()
privates, firstprivates = [], []
for temp, type in sorted(temps):
if type.is_pyobject or type.is_memoryviewslice:
firstprivates.append(temp)
else:
privates.append(temp)
if privates:
c.put(" private(%s)" % ", ".join(privates))
if firstprivates:
c.put(" firstprivate(%s)" % ", ".join(firstprivates))
if self.breaking_label_used:
shared_vars = [Naming.parallel_why]
if self.error_label_used:
shared_vars.extend(self.parallel_exc)
c.put(" private(%s, %s, %s)" % self.pos_info)
c.put(" shared(%s)" % ', '.join(shared_vars))
def cleanup_temps(self, code):
# Now clean up any memoryview slice and object temporaries
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
if type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(temp, have_gil=False)
elif type.is_pyobject:
code.put_xdecref(temp, type)
code.putln("%s = NULL;" % temp)
def setup_parallel_control_flow_block(self, code):
"""
Sets up a block that surrounds the parallel block to determine
how the parallel section was exited. Any kind of return is
trapped (break, continue, return, exceptions). This is the idea:
{
int why = 0;
#pragma omp parallel
{
return # -> goto new_return_label;
goto end_parallel;
new_return_label:
why = 3;
goto end_parallel;
end_parallel:;
#pragma omp flush(why) # we need to flush for every iteration
}
if (why == 3)
goto old_return_label;
}
"""
self.old_loop_labels = code.new_loop_labels()
self.old_error_label = code.new_error_label()
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
self.undef_builtin_expect_apple_gcc_bug(code)
def begin_parallel_block(self, code):
"""
Each OpenMP thread in a parallel section that contains a with gil block
must have the thread-state initialized. The call to
        PyGILState_Release() then deallocates our threadstate. If we didn't
        do this, each 'with gil' block would allocate and deallocate its own
        thread state, losing any exception information before it could be
        saved when leaving the parallel section.
"""
self.begin_of_parallel_block = code.insertion_point()
def end_parallel_block(self, code):
"""
To ensure all OpenMP threads have thread states, we ensure the GIL
in each thread (which creates a thread state if it doesn't exist),
after which we release the GIL.
On exit, reacquire the GIL and release the thread state.
If compiled without OpenMP support (at the C level), then we still have
to acquire the GIL to decref any object temporaries.
"""
if self.error_label_used:
begin_code = self.begin_of_parallel_block
end_code = code
begin_code.putln("#ifdef _OPENMP")
begin_code.put_ensure_gil(declare_gilstate=True)
begin_code.putln("Py_BEGIN_ALLOW_THREADS")
begin_code.putln("#endif /* _OPENMP */")
end_code.putln("#ifdef _OPENMP")
end_code.putln("Py_END_ALLOW_THREADS")
end_code.putln("#else")
end_code.put_safe("{\n")
end_code.put_ensure_gil()
end_code.putln("#endif /* _OPENMP */")
self.cleanup_temps(end_code)
end_code.put_release_ensured_gil()
end_code.putln("#ifndef _OPENMP")
end_code.put_safe("}\n")
end_code.putln("#endif /* _OPENMP */")
def trap_parallel_exit(self, code, should_flush=False):
"""
Trap any kind of return inside a parallel construct. 'should_flush'
indicates whether the variable should be flushed, which is needed by
prange to skip the loop. It also indicates whether we need to register
a continue (we need this for parallel blocks, but not for prange
loops, as it is a direct jump there).
It uses the same mechanism as try/finally:
1 continue
2 break
3 return
4 error
"""
save_lastprivates_label = code.new_label()
dont_return_label = code.new_label()
self.any_label_used = False
self.breaking_label_used = False
self.error_label_used = False
self.parallel_private_temps = []
all_labels = code.get_all_labels()
# Figure this out before starting to generate any code
for label in all_labels:
if code.label_used(label):
self.breaking_label_used = (self.breaking_label_used or
label != code.continue_label)
self.any_label_used = True
if self.any_label_used:
code.put_goto(dont_return_label)
for i, label in enumerate(all_labels):
if not code.label_used(label):
continue
is_continue_label = label == code.continue_label
code.put_label(label)
if not (should_flush and is_continue_label):
if label == code.error_label:
self.error_label_used = True
self.fetch_parallel_exception(code)
code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
if (self.breaking_label_used and self.is_prange and not
is_continue_label):
code.put_goto(save_lastprivates_label)
else:
code.put_goto(dont_return_label)
if self.any_label_used:
if self.is_prange and self.breaking_label_used:
# Don't rely on lastprivate, save our lastprivates
code.put_label(save_lastprivates_label)
self.save_parallel_vars(code)
code.put_label(dont_return_label)
if should_flush and self.breaking_label_used:
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
def save_parallel_vars(self, code):
"""
The following shenanigans are instated when we break, return or
propagate errors from a prange. In this case we cannot rely on
lastprivate() to do its job, as no iterations may have executed yet
in the last thread, leaving the values undefined. It is most likely
that the breaking thread has well-defined values of the lastprivate
variables, so we keep those values.
"""
section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter
code.putln_openmp("#pragma omp critical(%s)" % section_name)
ParallelStatNode.critical_section_counter += 1
code.begin_block() # begin critical section
c = self.begin_of_parallel_control_block_point
temp_count = 0
for entry, (op, lastprivate) in sorted(self.privates.items()):
if not lastprivate or entry.type.is_pyobject:
continue
type_decl = entry.type.empty_declaration_code()
temp_cname = "__pyx_parallel_temp%d" % temp_count
private_cname = entry.cname
temp_count += 1
invalid_value = entry.type.invalid_value()
if invalid_value:
init = ' = ' + invalid_value
else:
init = ''
# Declare the parallel private in the outer block
c.putln("%s %s%s;" % (type_decl, temp_cname, init))
# Initialize before escaping
code.putln("%s = %s;" % (temp_cname, private_cname))
self.parallel_private_temps.append((temp_cname, private_cname))
code.end_block() # end critical section
def fetch_parallel_exception(self, code):
"""
As each OpenMP thread may raise an exception, we need to fetch that
exception from the threadstate and save it for after the parallel
section where it can be re-raised in the master thread.
Although it would seem that __pyx_filename, __pyx_lineno and
__pyx_clineno are only assigned to under exception conditions (i.e.,
when we have the GIL), and thus should be allowed to be shared without
any race condition, they are in fact subject to the same race
conditions that they were previously when they were global variables
and functions were allowed to release the GIL:
thread A thread B
acquire
set lineno
release
acquire
set lineno
release
acquire
fetch exception
release
skip the fetch
deallocate threadstate deallocate threadstate
"""
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
code.putln(
"if (!%s) {" % Naming.parallel_exc_type)
code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc)
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.funcstate.uses_error_indicator = True
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_gotref(Naming.parallel_exc_type)
code.putln(
"}")
code.put_release_ensured_gil()
code.end_block()
def restore_parallel_exception(self, code):
"Re-raise a parallel exception"
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.put_giveref(Naming.parallel_exc_type)
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_release_ensured_gil()
code.end_block()
def restore_labels(self, code):
"""
        Restore all old labels. Call this before the 'else' clause of for
loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
def end_parallel_control_flow_block(
self, code, break_=False, continue_=False, return_=False):
"""
This ends the parallel control flow block and based on how the parallel
section was exited, takes the corresponding action. The break_ and
continue_ parameters indicate whether these should be propagated
outwards:
for i in prange(...):
with cython.parallel.parallel():
continue
        Here the continue should be trapped in the parallel block, and propagated to
the for loop.
"""
c = self.begin_of_parallel_control_block_point
# Firstly, always prefer errors over returning, continue or break
if self.error_label_used:
c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info)
c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc)
code.putln(
"if (%s) {" % Naming.parallel_exc_type)
code.putln("/* This may have been overridden by a continue, "
"break or return in another thread. Prefer the error. */")
code.putln("%s = 4;" % Naming.parallel_why)
code.putln(
"}")
if continue_:
any_label_used = self.any_label_used
else:
any_label_used = self.breaking_label_used
if any_label_used:
# __pyx_parallel_why is used, declare and initialize
c.putln("int %s;" % Naming.parallel_why)
c.putln("%s = 0;" % Naming.parallel_why)
code.putln(
"if (%s) {" % Naming.parallel_why)
for temp_cname, private_cname in self.parallel_private_temps:
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
if continue_:
code.put(" case 1: ")
code.put_goto(code.continue_label)
if break_:
code.put(" case 2: ")
code.put_goto(code.break_label)
if return_:
code.put(" case 3: ")
code.put_goto(code.return_label)
if self.error_label_used:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(" case 4:")
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
code.putln("}") # end switch
code.putln(
"}") # end if
code.end_block() # end parallel control flow block
self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
have_expect_condition = "(defined(__GNUC__) && " \
"(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
def undef_builtin_expect_apple_gcc_bug(self, code):
"""
A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
"""
if not self.parent:
code.undef_builtin_expect(self.redef_condition)
def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
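# --- Illustrative sketch (not part of Cython itself): the "why did we leave the
# --- parallel section" bookkeeping described in trap_parallel_exit() above.  OpenMP
# --- forbids jumping out of a parallel region, so the generated C records the exit
# --- reason as a small integer (1 continue, 2 break, 3 return, 4 error) and performs
# --- the real jump only after the region has ended, with a saved exception taking
# --- priority.  The helper name below is made up for the example.
def _dispatch_parallel_why(why, exception_saved=False):
    """Return the action the generated code would take after the parallel region."""
    if exception_saved:
        why = 4  # an error in any thread overrides continue/break/return
    return {0: "fall through",
            1: "continue",
            2: "break",
            3: "return",
            4: "re-raise saved exception"}[why]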
class ParallelWithBlockNode(ParallelStatNode):
"""
This node represents a 'with cython.parallel.parallel():' block
"""
valid_keyword_arguments = ['num_threads']
num_threads = None
def analyse_declarations(self, env):
super(ParallelWithBlockNode, self).analyse_declarations(env)
if self.args:
error(self.pos, "cython.parallel.parallel() does not take "
"positional arguments")
def generate_execution_code(self, code):
self.declare_closure_privates(code)
self.setup_parallel_control_flow_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp parallel ")
if self.privates:
privates = [e.cname for e in self.privates
if not e.type.is_pyobject]
code.put('private(%s)' % ', '.join(sorted(privates)))
self.privatization_insertion_point = code.insertion_point()
self.put_num_threads(code)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # parallel block
self.begin_parallel_block(code)
self.initialize_privates_to_nan(code)
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code)
self.privatize_temps(code)
self.end_parallel_block(code)
code.end_block() # end parallel block
continue_ = code.label_used(code.continue_label)
break_ = code.label_used(code.break_label)
return_ = code.label_used(code.return_label)
self.restore_labels(code)
self.end_parallel_control_flow_block(code, break_=break_,
continue_=continue_,
return_=return_)
self.release_closure_privates(code)
class ParallelRangeNode(ParallelStatNode):
"""
This node represents a 'for i in cython.parallel.prange():' construct.
target NameNode the target iteration variable
else_clause Node or None the else clause of this loop
"""
child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
'chunksize']
body = target = else_clause = args = None
start = stop = step = None
is_prange = True
nogil = None
schedule = None
valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
def __init__(self, pos, **kwds):
super(ParallelRangeNode, self).__init__(pos, **kwds)
# Pretend to be a ForInStatNode for control flow analysis
self.iterator = PassStatNode(pos)
def analyse_declarations(self, env):
super(ParallelRangeNode, self).analyse_declarations(env)
self.target.analyse_target_declaration(env)
if self.else_clause is not None:
self.else_clause.analyse_declarations(env)
if not self.args or len(self.args) > 3:
error(self.pos, "Invalid number of positional arguments to prange")
return
if len(self.args) == 1:
self.stop, = self.args
elif len(self.args) == 2:
self.start, self.stop = self.args
else:
self.start, self.stop, self.step = self.args
if hasattr(self.schedule, 'decode'):
self.schedule = self.schedule.decode('ascii')
if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,))
def analyse_expressions(self, env):
was_nogil = env.nogil
if self.nogil:
env.nogil = True
if self.target is None:
error(self.pos, "prange() can only be used as part of a for loop")
return self
self.target = self.target.analyse_target_types(env)
if not self.target.type.is_numeric:
# Not a valid type, assume one for now anyway
if not self.target.type.is_pyobject:
# nogil_check will catch the is_pyobject case
error(self.target.pos,
"Must be of numeric type, not %s" % self.target.type)
self.index_type = PyrexTypes.c_py_ssize_t_type
else:
self.index_type = self.target.type
if not self.index_type.signed:
warning(self.target.pos,
"Unsigned index type not allowed before OpenMP 3.0",
level=2)
# Setup start, stop and step, allocating temps if needed
self.names = 'start', 'stop', 'step'
start_stop_step = self.start, self.stop, self.step
for node, name in zip(start_stop_step, self.names):
if node is not None:
node.analyse_types(env)
if not node.type.is_numeric:
error(node.pos, "%s argument must be numeric" % name)
continue
if not node.is_literal:
node = node.coerce_to_temp(env)
setattr(self, name, node)
# As we range from 0 to nsteps, computing the index along the
# way, we need a fitting type for 'i' and 'nsteps'
self.index_type = PyrexTypes.widest_numeric_type(
self.index_type, node.type)
if self.else_clause is not None:
self.else_clause = self.else_clause.analyse_expressions(env)
# Although not actually an assignment in this scope, it should be
# treated as such to ensure it is unpacked if a closure temp, and to
# ensure lastprivate behaviour and propagation. If the target index is
# not a NameNode, it won't have an entry, and an error was issued by
# ParallelRangeTransform
if hasattr(self.target, 'entry'):
self.assignments[self.target.entry] = self.target.pos, None
node = super(ParallelRangeNode, self).analyse_expressions(env)
if node.chunksize:
if not node.schedule:
error(node.chunksize.pos,
"Must provide schedule with chunksize")
elif node.schedule == 'runtime':
error(node.chunksize.pos,
"Chunksize not valid for the schedule runtime")
elif (node.chunksize.type.is_int and
node.chunksize.is_literal and
node.chunksize.compile_time_value(env) <= 0):
error(node.chunksize.pos, "Chunksize must not be negative")
node.chunksize = node.chunksize.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
if node.nogil:
env.nogil = was_nogil
node.is_nested_prange = node.parent and node.parent.is_prange
if node.is_nested_prange:
parent = node
while parent.parent and parent.parent.is_prange:
parent = parent.parent
parent.assignments.update(node.assignments)
parent.privates.update(node.privates)
parent.assigned_nodes.extend(node.assigned_nodes)
return node
def nogil_check(self, env):
names = 'start', 'stop', 'step', 'target'
nodes = self.start, self.stop, self.step, self.target
for name, node in zip(names, nodes):
if node is not None and node.type.is_pyobject:
error(node.pos, "%s may not be a Python object "
"as we don't have the GIL" % name)
def generate_execution_code(self, code):
"""
Generate code in the following steps
1) copy any closure variables determined thread-private
into temporaries
2) allocate temps for start, stop and step
3) generate a loop that calculates the total number of steps,
which then computes the target iteration variable for every step:
for i in prange(start, stop, step):
...
becomes
nsteps = (stop - start) / step;
i = start;
#pragma omp parallel for lastprivate(i)
for (temp = 0; temp < nsteps; temp++) {
i = start + step * temp;
...
}
Note that accumulation of 'i' would have a data dependency
between iterations.
Also, you can't do this
for (i = start; i < stop; i += step)
...
as the '<' operator should become '>' for descending loops.
'for i from x < i < y:' does not suffer from this problem
as the relational operator is known at compile time!
4) release our temps and write back any private closure variables
"""
self.declare_closure_privates(code)
# This can only be a NameNode
target_index_cname = self.target.entry.cname
# This will be used as the dict to format our code strings, holding
# the start, stop , step, temps and target cnames
fmt_dict = {
'target': target_index_cname,
'target_type': self.target.type.empty_declaration_code()
}
# Setup start, stop and step, allocating temps if needed
start_stop_step = self.start, self.stop, self.step
defaults = '0', '0', '1'
for node, name, default in zip(start_stop_step, self.names, defaults):
if node is None:
result = default
elif node.is_literal:
result = node.get_constant_c_result_code()
else:
node.generate_evaluation_code(code)
result = node.result()
fmt_dict[name] = result
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
code.putln("if (%(step)s == 0) abort();" % fmt_dict)
self.setup_parallel_control_flow_block(code) # parallel control flow block
self.control_flow_var_code_point = code.insertion_point()
# Note: nsteps is private in an outer scope if present
code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict)
# The target iteration variable might not be initialized, do it only if
# we are executing at least 1 iteration, otherwise we should leave the
# target unaffected. The target iteration variable is firstprivate to
# shut up compiler warnings caused by lastprivate, as the compiler
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
code.begin_block() # if block
self.generate_loop(code, fmt_dict)
code.end_block() # end if block
self.restore_labels(code)
if self.else_clause:
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
code.end_block() # end else block
# ------ cleanup ------
self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
for temp in start_stop_step + (self.chunksize, self.num_threads):
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
code.funcstate.release_temp(fmt_dict['i'])
code.funcstate.release_temp(fmt_dict['nsteps'])
self.release_closure_privates(code)
def generate_loop(self, code, fmt_dict):
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
if not self.is_parallel:
code.put("#pragma omp for")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
code.put("#pragma omp for")
for entry, (op, lastprivate) in sorted(self.privates.items()):
# Don't declare the index variable as a reduction
if op and op in "+*-&^|" and entry != self.target.entry:
if entry.type.is_pyobject:
error(self.pos, "Python objects cannot be reductions")
else:
#code.put(" reduction(%s:%s)" % (op, entry.cname))
# This is the only way reductions + nesting works in gcc4.5
reduction_codepoint.put(
" reduction(%s:%s)" % (op, entry.cname))
else:
if entry == self.target.entry:
code.put(" firstprivate(%s)" % entry.cname)
code.put(" lastprivate(%s)" % entry.cname)
continue
if not entry.type.is_pyobject:
if lastprivate:
private = 'lastprivate'
else:
private = 'private'
code.put(" %s(%s)" % (private, entry.cname))
if self.schedule:
if self.chunksize:
chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize)
else:
chunksize = ""
code.put(" schedule(%s%s)" % (self.schedule, chunksize))
self.put_num_threads(reduction_codepoint)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
code.begin_block() # for loop block
guard_around_body_codepoint = code.insertion_point()
# Start if guard block around the body. This may be unnecessary, but
# at least it doesn't spoil indentation
code.begin_block()
code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict)
self.initialize_privates_to_nan(code, exclude=self.target.entry)
if self.is_parallel:
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code, should_flush=True)
self.privatize_temps(code)
if self.breaking_label_used:
# Put a guard around the loop body in case return, break or
# exceptions might be used
guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
code.end_block() # end guard around loop body
code.end_block() # end for loop block
if self.is_parallel:
# Release the GIL and deallocate the thread state
self.end_parallel_block(code)
code.end_block() # pragma omp parallel end block
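# --- Illustrative sketch (not part of Cython itself): the step-count formula described
# --- in the generate_execution_code() docstring above, redone in plain Python.  The
# --- generated C computes nsteps once and recovers each loop index as
# --- start + step * temp, so no iteration depends on the previous one.  The helper
# --- name below is made up for the example.
def _prange_index_sketch(start, stop, step):
    assert step != 0
    sign = step // abs(step)
    nsteps = (stop - start + step - sign) // step
    return [start + step * temp for temp in range(max(nsteps, 0))]

# e.g. _prange_index_sketch(0, 10, 3) == list(range(0, 10, 3)) == [0, 3, 6, 9]
# and  _prange_index_sketch(10, 0, -3) == list(range(10, 0, -3)) == [10, 7, 4, 1]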
class CnameDecoratorNode(StatNode):
"""
This node is for the cname decorator in CythonUtilityCode:
@cname('the_cname')
cdef func(...):
...
In case of a cdef class the cname specifies the objstruct_cname.
node the node to which the cname decorator is applied
cname the cname the node should get
"""
child_attrs = ['node']
def analyse_declarations(self, env):
self.node.analyse_declarations(env)
node = self.node
if isinstance(node, CompilerDirectivesNode):
node = node.body.stats[0]
self.is_function = isinstance(node, FuncDefNode)
is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode))
e = node.entry
if self.is_function:
e.cname = self.cname
e.func_cname = self.cname
e.used = True
if e.pyfunc_cname and '.' in e.pyfunc_cname:
e.pyfunc_cname = self.mangle(e.pyfunc_cname)
elif is_struct_or_enum:
e.cname = e.type.cname = self.cname
else:
scope = node.scope
e.cname = self.cname
e.type.objstruct_cname = self.cname + '_obj'
e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
e.type.typeptr_cname = self.cname + '_type'
e.type.scope.namespace_cname = e.type.typeptr_cname
e.as_variable.cname = e.type.typeptr_cname
scope.scope_prefix = self.cname + "_"
for name, entry in scope.entries.items():
if entry.func_cname:
entry.func_cname = self.mangle(entry.cname)
if entry.pyfunc_cname:
entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)
def mangle(self, cname):
if '.' in cname:
# remove __pyx_base from func_cname
cname = cname.split('.')[-1]
return '%s_%s' % (self.cname, cname)
def analyse_expressions(self, env):
self.node = self.node.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
"Ensure a prototype for every @cname method in the right place"
if self.is_function and env.is_c_class_scope:
# method in cdef class, generate a prototype in the header
h_code = code.globalstate['utility_code_proto']
if isinstance(self.node, DefNode):
self.node.generate_function_header(
h_code, with_pymethdef=False, proto_only=True)
else:
from . import ModuleNode
entry = self.node.entry
cname = entry.cname
entry.cname = entry.func_cname
ModuleNode.generate_cfunction_declaration(
entry,
env.global_scope(),
h_code,
definition=True)
entry.cname = cname
self.node.generate_function_definitions(env, code)
def generate_execution_code(self, code):
self.node.generate_execution_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
if Options.gcc_branch_hints:
branch_prediction_macros = """
/* Test for GCC > 2.95 */
#if defined(__GNUC__) \
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
branch_prediction_macros = """
#define likely(x) (x)
#define unlikely(x) (x)
"""
#------------------------------------------------------------------------------------
printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")
#------------------------------------------------------------------------------------
# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*. When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()
restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")
#------------------------------------------------------------------------------------
get_exception_tuple_utility_code = UtilityCode(
proto="""
static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
# NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro
impl = """
static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) {
PyObject *type = NULL, *value = NULL, *tb = NULL;
if (__Pyx_GetException(&type, &value, &tb) == 0) {
PyObject* exc_info = PyTuple_New(3);
if (exc_info) {
Py_INCREF(type);
Py_INCREF(value);
Py_INCREF(tb);
PyTuple_SET_ITEM(exc_info, 0, type);
PyTuple_SET_ITEM(exc_info, 1, value);
PyTuple_SET_ITEM(exc_info, 2, tb);
return exc_info;
}
}
return NULL;
}
""",
requires=[get_exception_utility_code])
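# --- Illustrative sketch (not part of Cython itself): __Pyx_GetExceptionTuple above
# --- packs the pending exception into a (type, value, traceback) 3-tuple, the same
# --- shape that sys.exc_info() exposes at the Python level.  The helper name below is
# --- made up for the example.
def _exception_tuple_sketch():
    import sys
    try:
        raise ValueError("example")
    except ValueError:
        exc_info = sys.exc_info()
    assert exc_info[0] is ValueError
    return exc_info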
| 40.590101
| 120
| 0.597723
|
1eb60500d28469e2ff7287e03bc9f616b11e7c0c
| 9,318
|
py
|
Python
|
docs/conf.py
|
graysham/fluentcms-jumbotron
|
4c28475a13cc3b4c190acca313d73641a2c533f2
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
graysham/fluentcms-jumbotron
|
4c28475a13cc3b4c190acca313d73641a2c533f2
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
graysham/fluentcms-jumbotron
|
4c28475a13cc3b4c190acca313d73641a2c533f2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# fluentcms-jumbotron documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 20 14:26:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fluentcms-jumbotron'
copyright = u'2016, Diederik van der Boor'
author = u'Diederik van der Boor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fluentcms-jumbotrondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fluentcms-jumbotron.tex', u'fluentcms-jumbotron Documentation',
u'Diederik van der Boor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fluentcms-jumbotron', u'fluentcms-jumbotron Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fluentcms-jumbotron', u'fluentcms-jumbotron Documentation',
author, 'fluentcms-jumbotron', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.809859
| 81
| 0.71979
|
8d23d62b5dc7b99f8d5522af95ce66a2fd33a6fc
| 680
|
py
|
Python
|
floydwarshall.py
|
priyanshu5055/cpp
|
16310ae8a824cd08b74f47bb4aec300b9b8b0f50
|
[
"MIT"
] | 1
|
2021-12-17T06:12:51.000Z
|
2021-12-17T06:12:51.000Z
|
floydwarshall.py
|
AneriSonani09/cpp
|
898fd0719c6ca501837661869070c939d07b595a
|
[
"MIT"
] | null | null | null |
floydwarshall.py
|
AneriSonani09/cpp
|
898fd0719c6ca501837661869070c939d07b595a
|
[
"MIT"
] | 5
|
2020-10-01T04:12:34.000Z
|
2021-10-22T04:49:45.000Z
|
V = 4
INF = 99999
def floydWarshall(graph):
    dist = [list(row) for row in graph]  # copy the graph; a bare map() is not subscriptable in Python 3
for k in range(V):
for i in range(V):
for j in range(V):
dist[i][j] = min(dist[i][j] ,
dist[i][k]+ dist[k][j]
)
printSolution(dist)
def printSolution(dist):
print("Following matrix shows the shortest distances between every pair of vertices" )
for i in range(V):
for j in range(V):
            if(dist[i][j] == INF):
                print("%7s" % ("INF"), end=" ")
            else:
                print("%7d\t" % (dist[i][j]), end=" ")
if j == V-1:
print("")
graph = [[0,5,INF,10],
[INF,0,3,INF],
[INF, INF, 0, 1],
[INF, INF, INF, 0]
]
floydWarshall(graph)
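# --- Illustrative self-check (not part of the original script): for the sample graph
# --- above, the all-pairs shortest distances the algorithm should print are
# ---       0    5    8    9
# ---     INF    0    3    4
# ---     INF  INF    0    1
# ---     INF  INF  INF    0
# --- e.g. dist 0 -> 3 improves from the direct edge 10 to 0 -> 1 -> 2 -> 3 = 5 + 3 + 1 = 9.
def _floyd_warshall_check():
    dist = [list(row) for row in graph]
    for k in range(V):
        for i in range(V):
            for j in range(V):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    assert dist[0] == [0, 5, 8, 9] and dist[1][3] == 4
    return dist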
| 19.428571
| 87
| 0.532353
|
0a4e97b0bf7dbd4ee50cff7db1ccb596ac1d1625
| 2,336
|
py
|
Python
|
freyr_app/core/management/commands/download_models.py
|
blanchefort/freyrmonitoring
|
5bf10ba86d3f88390f91106426dd964289f5aee6
|
[
"MIT"
] | 2
|
2021-06-01T20:27:14.000Z
|
2021-10-01T23:24:45.000Z
|
freyr_app/core/management/commands/download_models.py
|
blanchefort/freyrmonitoring
|
5bf10ba86d3f88390f91106426dd964289f5aee6
|
[
"MIT"
] | null | null | null |
freyr_app/core/management/commands/download_models.py
|
blanchefort/freyrmonitoring
|
5bf10ba86d3f88390f91106426dd964289f5aee6
|
[
"MIT"
] | null | null | null |
"""Команда скачивания новых версий ML-моделей
$python manage.py download_models --full
"""
import logging
from django.core.management.base import BaseCommand
from core.models import categories, Category
from ._download_funcs import (geography,
download_freyr_model,
download_kaldi,
recreate_dirs,
ML_NAMES)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = 'Command for downloading new versions of the ML models'
def handle(self, *args, **options):
if options['loyalty']:
download_freyr_model(name='article_sentiment')
download_freyr_model(name='article_theme')
if options['categories']:
download_freyr_model(name='gov_categories')
if options['appeal']:
download_freyr_model(name='article_appeal')
if options['comments']:
download_freyr_model(name='comment_sentiment')
if options['stopwords']:
download_freyr_model(name='addenda')
if options['kaldi']:
download_kaldi()
if options['geography']:
geography()
if options['full']:
recreate_dirs()
if Category.objects.all().count() == 0:
for cat in categories:
Category.objects.create(name=cat)
for name in ML_NAMES:
download_freyr_model(name)
geography()
download_kaldi()
def add_arguments(self, parser):
commands = (
            ('--full', 'Full update of all system models',),
            ('--loyalty', 'Models for the loyalty index',),
            ('--categories', 'Model for classifying post categories',),
            ('--appeal', 'Model for detecting citizen appeals',),
            ('--comments', 'Model for determining comment sentiment',),
            ('--stopwords', 'Stop words',),
            ('--kaldi', 'Speech recognition model',),
            ('--geography', 'Datasets for working with geography',),
)
for command, help in commands:
parser.add_argument(
command,
action='store_true',
default=False,
help=help
)
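# --- Illustrative sketch (not part of this command): every flag above is a plain
# --- store_true switch, so handle() simply receives a dict of booleans; e.g.
# --- `python manage.py download_models --loyalty --kaldi` yields roughly
# --- {'full': False, 'loyalty': True, ..., 'kaldi': True, 'geography': False}.
# --- The helper name below is made up for the example.
def _example_options(*enabled_flags):
    flags = ('full', 'loyalty', 'categories', 'appeal',
             'comments', 'stopwords', 'kaldi', 'geography')
    return {flag: flag in enabled_flags for flag in flags}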
| 35.393939
| 79
| 0.56464
|
4c1a67022c237566bd29841e7e9cf4d6b10544e3
| 1,154
|
py
|
Python
|
main.py
|
minexixi/BlackRGB
|
03767e7f96a3298a07a40e2098298288180eeb78
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
minexixi/BlackRGB
|
03767e7f96a3298a07a40e2098298288180eeb78
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
minexixi/BlackRGB
|
03767e7f96a3298a07a40e2098298288180eeb78
|
[
"Apache-2.0"
] | null | null | null |
import os,argparse
from PIL import Image
def Color2RGB(Cname, mB):
    C = Image.open(Cname).convert('RGBA')  # force 4 channels so getpixel() yields (R, G, B, A)
    w, h = C.size
nR = Image.new('RGBA', C.size, 'Black')
nG = Image.new('RGBA', C.size, 'Black')
nB = Image.new('RGBA', C.size, 'Black')
for x in range(w):
for y in range(h):
            PR, PG, PB, PA = C.getpixel((x, y))
            nR.putpixel((x, y), (PR, PR, PR, 255))
            nG.putpixel((x, y), (PG, PG, PG, 255))
            nB.putpixel((x, y), (PB, PB, PB, 255))
Nname = Cname.split('.')
del Nname[-1]
Nn = '.'.join(Nname)
nR.save(Nn + '_R.png')
nG.save(Nn + '_G.png')
nB.save(Nn + '_B.png')
    if mB == '1':
        nM = Image.new('RGBA', (3 * w, h), 'black')
        nM.paste(nR, (0, 0))
        nM.paste(nG, (w + 1, 0))
        nM.paste(nB, (2 * w + 2, 0))
        nM.save(Nn + '_Mix.png')
    elif mB == '2':
        nM = Image.new('RGBA', (w, 3 * h), 'black')
        nM.paste(nR, (0, 0))
        nM.paste(nG, (0, h + 1))
        nM.paste(nB, (0, 2 * h + 2))
        nM.save(Nn + '_Mix.png')
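# --- Illustrative alternative (not part of the original script): Pillow can also split
# --- the channels directly, which avoids the per-pixel loop in Color2RGB above.  The
# --- helper name below is made up; it assumes the same "<name>_R/G/B.png" naming.
def _split_channels_sketch(Cname):
    img = Image.open(Cname).convert('RGB')
    stem = Cname.rsplit('.', 1)[0]
    for channel, suffix in zip(img.split(), ('_R', '_G', '_B')):
        channel.save(stem + suffix + '.png')  # each channel is a single-band greyscale image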
def RGB2Color(Rname, Gname, Bname):
    R = Image.open(Rname)
    G = Image.open(Gname)
    B = Image.open(Bname)
| 29.589744
| 51
| 0.47487
|
421144ba783bf47f99c877e56ccb3330c355e1da
| 4,156
|
py
|
Python
|
examples/simple.py
|
Mortafix/AutoConv-Telegram-Python
|
9274539f5622b416f4a5187289038f8d8649864e
|
[
"MIT"
] | 4
|
2020-12-27T09:52:23.000Z
|
2022-01-20T13:40:06.000Z
|
examples/simple.py
|
Mortafix/AutoConv-Telegram-Python
|
9274539f5622b416f4a5187289038f8d8649864e
|
[
"MIT"
] | 4
|
2021-03-06T18:06:03.000Z
|
2021-11-03T10:59:09.000Z
|
examples/simple.py
|
Mortafix/AutoConv-Telegram-Python
|
9274539f5622b416f4a5187289038f8d8649864e
|
[
"MIT"
] | 2
|
2020-11-14T17:35:14.000Z
|
2021-09-21T17:37:21.000Z
|
import logging
from datetime import datetime
from autoconv.autoconv_handler import AutoConvHandler
from autoconv.conversation import Conversation
from autoconv.state import State
from telegram.ext import (CallbackQueryHandler, CommandHandler,
ConversationHandler, Filters, MessageHandler,
Updater)
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# --------------------------------- Simple commands -----------------------------------
def error(update, context):
logger.warning('Update "%s" caused error "%s"', update, context.error)
def start(update, context):
update.message.reply_text(
f"Welcome *{update.message.from_user.first_name}*!\n\nTry /example.",
parse_mode="Markdown",
)
def handle_text(update, context):
update.message.delete()
# --------------------------------- Example AutoConv ----------------------------------
STATE = range(1)
# ---- FUNCS
def comment_type(data):
return "" if data == "0" else data
def recap(tdata):
if (ma := 18 - tdata.sdata.get("age")) > 0 and tdata.sdata.get(
"consent"
) == "Abort":
return f"Come back when you'll have your *parents consent* or in *{ma}* years."
return "\n".join([f"{k.title()}: *{v}*" for k, v in tdata.sdata.items()])
def add_timestamp(text):
return f"({datetime.now():%H:%M})\n\n{text}"
# ---- STATES
name = State("name", "Enter your *name*.", data_type=str, back_button=False)
name.add_text()
gender = State("gender", "Select your *gender*", back_button="< Another back")
gender.add_keyboard(["Male", "Female", "Other"])
age = State("age", "Enter your <b>age</b>", parse_mode="html")
age.add_text(r"\d{1,2}", "Enter a *valid* age")
underage = State("consent", "Drop the _responsibility_ on your parents?")
underage.add_keyboard(["Yes", "Abort"])
comment = State(
"comment", "Do you want to enter additional comment?", data_type=comment_type
)
comment.add_keyboard(["Nope"])
comment.add_text()
end = State("end", "@@@", back_button=False)
end.add_action(recap)
# ---- CONVERSATION
conv = Conversation(name, end_state=end)
conv.set_defaults(
params={"parse_mode": "Markdown", "disable_web_page_preview": True},
func=add_timestamp,
back_button="< Back",
)
conv.add_routes(name, default=gender)
conv.add_routes(gender, default=age, back=name)
conv.add_routes(
age, routes={i: underage for i in range(19)}, default=comment, back=gender
)
conv.add_routes(underage, routes={0: comment, 1: end}, back=age)
conv.add_routes(comment, default=end)
conv.add_routes(end)
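# --- Illustrative note (not part of the library): the add_routes() calls above wire the
# --- states into the flow below; any answer without an explicit key follows "default".
# --- The dict is only documentation, it is not consumed by AutoConv.
_EXAMPLE_STATE_FLOW = {
    'name':    {'default': 'gender'},
    'gender':  {'default': 'age', 'back': 'name'},
    'age':     {'0..18': 'consent', 'default': 'comment', 'back': 'gender'},
    'consent': {0: 'comment', 1: 'end', 'back': 'age'},
    'comment': {'default': 'end'},
}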
# ---- HANDLER
autoconv = AutoConvHandler(conv, STATE)
def autoconv_command(update, context):
return autoconv.manage_conversation(update, context)
# MAIN --------------------------------------------------------------------------------
def main():
"""Bot instance"""
updater = Updater("BOT-TOKEN")
dp = updater.dispatcher
# -----------------------------------------------------------------------
# commands
cmd_start = CommandHandler("start", start)
# conversations
autoconv = ConversationHandler(
entry_points=[CommandHandler("example", autoconv_command)],
states={
STATE: [
MessageHandler(Filters.text, autoconv_command),
CallbackQueryHandler(autoconv_command),
]
},
fallbacks=[CommandHandler("start", start)],
name="example-conversation",
)
# -----------------------------------------------------------------------
# handlers - commands and conversations
dp.add_handler(cmd_start)
dp.add_handler(autoconv)
# handlers - no command
dp.add_handler(MessageHandler(Filters.all, handle_text))
# handlers - error
dp.add_error_handler(error)
# ----------------------------------------------------------------------
updater.start_polling()
print("Bot started!")
# Run the bot until you press Ctrl-C
updater.idle()
if __name__ == "__main__":
main()
| 28.272109
| 87
| 0.595765
|
0c391acb692f08fc760f82b52fd1ab3679f55ee8
| 3,120
|
py
|
Python
|
7-assets/past-student-repos/Python-master/compression/lempel_ziv_decompress.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Python-master/compression/lempel_ziv_decompress.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Python-master/compression/lempel_ziv_decompress.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
"""
One of the several implementations of Lempel-Ziv-Welch decompression algorithm
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
"""
import math
import sys
def read_file_binary(file_path: str) -> str:
"""
Reads given file as bytes and returns them as a long string
"""
result = ""
try:
with open(file_path, "rb") as binary_file:
data = binary_file.read()
for dat in data:
curr_byte = f"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible")
sys.exit()
def decompress_data(data_bits: str) -> str:
"""
Decompresses given data_bits using Lempel-Ziv-Welch compression algorithm
and returns the result as a string
"""
lexicon = {"0": "0", "1": "1"}
result, curr_string = "", ""
index = len(lexicon)
for i in range(len(data_bits)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
last_match_id = lexicon[curr_string]
result += last_match_id
lexicon[curr_string] = last_match_id + "0"
if math.log2(index).is_integer():
newLex = {}
for curr_key in list(lexicon):
newLex["0" + curr_key] = lexicon.pop(curr_key)
lexicon = newLex
lexicon[bin(index)[2:]] = last_match_id + "1"
index += 1
curr_string = ""
return result
def write_file_binary(file_path: str, to_write: str) -> None:
"""
Writes given to_write string (should only consist of 0's and 1's) as bytes in the
file
"""
byte_length = 8
try:
with open(file_path, "wb") as opened_file:
result_byte_array = [
to_write[i : i + byte_length]
for i in range(0, len(to_write), byte_length)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append("10000000")
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
except OSError:
print("File not accessible")
sys.exit()
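# --- Illustrative sketch (not part of the module): the byte <-> bit-string round trip
# --- used by read_file_binary()/write_file_binary() above, shown on an in-memory value.
# --- The helper name below is made up for the example.
def _bit_string_round_trip_sketch(data=b"AB"):
    bits = "".join(f"{byte:08b}" for byte in data)  # b"AB" -> "0100000101000010"
    rebuilt = bytes(int(bits[i:i + 8], 2) for i in range(0, len(bits), 8))
    assert rebuilt == data
    return bits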
def remove_prefix(data_bits: str) -> str:
"""
Removes size prefix, that compressed file should have
Returns the result
"""
counter = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
data_bits = data_bits[counter:]
data_bits = data_bits[counter + 1 :]
return data_bits
def decompress(source_path: str, destination_path: str) -> None:
"""
Reads source file, decompresses it and writes the result in destination file
"""
data_bits = read_file_binary(source_path)
data_bits = remove_prefix(data_bits)
decompressed = decompress_data(data_bits)
write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
| 27.857143
| 85
| 0.590705
|
dcd959fb071ec0418e03060a7ff4244fd093e600
| 438
|
py
|
Python
|
openarticlegauge/tests/plugins/test_workflow/test_13_3/test_13_3.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | 1
|
2016-04-07T18:29:27.000Z
|
2016-04-07T18:29:27.000Z
|
openarticlegauge/tests/plugins/test_workflow/test_13_3/test_13_3.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:53:09.000Z
|
2022-03-01T01:46:14.000Z
|
openarticlegauge/tests/plugins/test_workflow/test_13_3/test_13_3.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | null | null | null |
from openarticlegauge import plugin
class mock_licence_plugin_error(plugin.Plugin):
_short_name="mock"
def capabilities(self):
return {
"type_detect_verify" : False,
"canonicalise" : [],
"detect_provider" : [],
"license_detect" : True
}
def supports(self, provider):
return True
def license_detect(self, record):
raise Exception("oh dear!")
| 27.375
| 47
| 0.598174
|
faadced976f17c690169fd01bbc46f76df168ca7
| 5,310
|
py
|
Python
|
logger/writers/udp_writer.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
logger/writers/udp_writer.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
logger/writers/udp_writer.py
|
timburbank/openrvdas
|
ba77d3958075abd21ff94a396e4a97879962ac0c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
import json
import ipaddress
import logging
import socket
import struct
import sys
from os.path import dirname, realpath; sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.utils.formats import Text
from logger.utils.das_record import DASRecord
from logger.writers.network_writer import NetworkWriter
################################################################################
class UDPWriter(NetworkWriter):
"""Write UDP packets to network."""
def __init__(self, port, destination='', interface='',
ttl=3, num_retry=2, eol=''):
"""
Write text records to a network socket.
```
port Port to which packets should be sent
destination If specified, either multicast group or unicast IP addr
interface If specified, the network interface to send from
ttl For multicast, how many network hops to allow
num_retry Number of times to retry if write fails.
eol If specified, an end of line string to append to record
before sending.
```
"""
self.ttl = ttl
self.num_retry = num_retry
self.eol = eol
self.target_str = 'interface: %s, destination: %s, port: %d' % (interface, destination, port)
if interface and destination:
ipaddress.ip_address(interface) # throw a ValueError if bad addr
ipaddress.ip_address(destination)
# At the moment, we don't know how to do both interface and
# multicast/unicast. If they've specified both, then complain
# and ignore the interface part.
logging.warning('UDPWriter doesn\'t yet support specifying both '
'interface and destination. Ignoring interface '
'specification.')
# If they've specified the interface we're supposed to be sending
# via, then we have to do a little legerdemain: we're going to
# connect to the broadcast address of the specified interface as
# our destination. The broadcast address is just the normal
# address with the last tuple replaced by ".255".
elif interface:
if interface == '0.0.0.0': # local network
destination = '255.255.255.255'
elif interface in ['<broadcast>', 'None']:
destination = '<broadcast>'
else:
# Change interface's lowest tuple to 'broadcast' value (255)
ipaddress.ip_address(interface)
destination = interface[:interface.rfind('.')] + '.255'
# If we've been given a destination, make sure it's a valid IP
elif destination:
ipaddress.ip_address(destination)
# If no destination, it's a broadcast; set flag allowing broadcast and
# set dest to special string
else:
destination = '<broadcast>'
self.destination = destination
self.port = port
# Try opening the socket
self.socket = self._open_socket()
############################
def _open_socket(self):
"""Try to open and return the network socket.
"""
udp_socket = socket.socket(family=socket.AF_INET,
type=socket.SOCK_DGRAM,
proto=socket.IPPROTO_UDP)
udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
try: # Raspbian doesn't recognize SO_REUSEPORT
udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, True)
except AttributeError:
logging.warning('Unable to set socket REUSEPORT; may be unsupported')
# Set the time-to-live for messages, in case of multicast
udp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
struct.pack('b', self.ttl))
udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
try:
udp_socket.connect((self.destination, self.port))
return udp_socket
except OSError as e:
logging.warning('Unable to connect to %s:%d', self.destination, self.port)
return None
############################
def write(self, record):
"""Write the record to the network."""
# If we don't have a record, there's nothing to do
if not record: return
# If record is not a string, try converting to JSON. If we don't know
# how, throw a hail Mary and force it into str format
if not type(record) is str:
if type(record) in [int, float, bool, list, dict]:
record = json.dumps(record)
elif type(record) is DASRecord:
record = record.as_json()
else:
record = str(record)
if self.eol:
record += self.eol
# If socket isn't connected, try reconnecting. If we can't
# reconnect, complain and return without writing.
if not self.socket:
self.socket = self._open_socket()
if not self.socket:
logging.error('Unable to write record to %s:%d',
self.destination, self.port)
return
num_tries = bytes_sent = 0
rec_len = len(record)
while num_tries < self.num_retry and bytes_sent < rec_len:
try:
bytes_sent = self.socket.send(record.encode('utf-8'))
except ConnectionRefusedError as e:
logging.error('ERROR: %s: %s', self.target_str, str(e))
num_tries += 1
logging.debug('UDPWriter.write() wrote %d/%d bytes after %d tries',
bytes_sent, rec_len, num_tries)
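# --- Illustrative sketch (not part of this class): the interface-to-broadcast
# --- derivation performed in __init__ above, shown standalone.  It swaps the last
# --- octet of the interface address for 255, or falls back to the special
# --- '<broadcast>' destination.  The helper name below is made up for the example.
def _derive_broadcast_destination(interface):
    import ipaddress
    if interface == '0.0.0.0':  # "any" local interface
        return '255.255.255.255'
    if interface in ['<broadcast>', 'None']:
        return '<broadcast>'
    ipaddress.ip_address(interface)  # raises ValueError for a malformed address
    return interface[:interface.rfind('.')] + '.255'

# e.g. _derive_broadcast_destination('192.168.1.17') == '192.168.1.255'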
| 36.122449
| 101
| 0.64049
|
d837d94694df636dfe7b9df2a1451c965365c8bd
| 28,964
|
py
|
Python
|
saveDataWorker.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | null | null | null |
saveDataWorker.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | null | null | null |
saveDataWorker.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | 1
|
2021-12-03T16:18:57.000Z
|
2021-12-03T16:18:57.000Z
|
import tables
import logging
import os
import json
import numpy as np
from datetime import datetime
from PyQt5.QtCore import QObject, QThread, QTimer, pyqtSignal, pyqtSlot
logging.basicConfig(format="%(message)s", level=logging.INFO)
'''
Example trial info dictionary created by the bpod.
{
'Bpod start timestamp': 4.344831,
'Trial start timestamp': 28.050931,
'Trial end timestamp': 55.769934,
'States timestamps': {
'WaitForOdor': [(0, 5.0)],
'WaitForSniff': [(5.0, 5.1)],
'WaitForResponse': [(5.1, 9.7189)],
'Punish': [(9.7189, 9.719)],
'ITIdelay': [(9.719, 27.719)],
'Reward': [(nan, nan)],
'NoLick': [(nan, nan)]
},
'Events timestamps': {
'Tup': [
5.0,
5.1,
9.719,
27.719
],
'Port1In': [
9.7189,
10.2883,
10.5192,
],
'Port1Out': [
10.1076,
10.393,
10.6725,
]
}
}
'''
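# --- Illustrative sketch (not part of this module): SaveDataWorker below builds its
# --- HDF5 tables from plain description dictionaries rather than tables.IsDescription
# --- subclasses.  A minimal standalone version of that pattern looks like this; the
# --- file name and column names are made up for the example.
def _pytables_dict_description_sketch(path="example.h5"):
    desc = {
        "trialNum": tables.UInt16Col(pos=0),
        "odor": tables.StringCol(32, pos=1),
        "responseTime": tables.Float32Col(pos=2),
    }
    with tables.open_file(path, mode="w") as h5f:
        table = h5f.create_table(where="/", name="trials", description=desc, title="Example Trials")
        row = table.row
        row["trialNum"] = 1
        row["odor"] = "pinene"
        row["responseTime"] = 0.42
        row.append()
        table.flush()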
class SaveDataWorker(QObject):
analogDataSignal = pyqtSignal(np.ndarray)
finished = pyqtSignal()
def __init__(self,
                 mouseNum, rigLetter, protocolFile, olfaConfigFile, shuffleMultiplier,
                 itiMin, itiMax, leftWaterValveDuration, rightWaterValveDuration,
                 analogInSettings, analogInModule=None, bpod=None
):
super(SaveDataWorker, self).__init__()
# QObject.__init__(self) # super(...).__init() does this for you in the line above.
dateTimeString = datetime.now().strftime("%Y-%m-%d_%H%M%S")
fileName = f"results/Mouse_{mouseNum}_Rig_{rigLetter}_{dateTimeString}.h5"
if not os.path.isdir('results'):
os.mkdir('results')
self.h5file = tables.open_file(filename=fileName, mode='w', title=f"Mouse {mouseNum} Experiment Data")
# File attributes for future reference.
self.h5file.root._v_attrs.mouseNum = mouseNum
self.h5file.root._v_attrs.rig = rigLetter
self.h5file.root._v_attrs.date = dateTimeString
self.h5file.root._v_attrs.protocolFile = protocolFile
self.h5file.root._v_attrs.olfaConfigFile = olfaConfigFile
self.h5file.root._v_attrs.shuffleMultiplier = shuffleMultiplier
self.h5file.root._v_attrs.itiMax = itiMax
self.h5file.root._v_attrs.itiMin = itiMin
self.h5file.root._v_attrs.leftWaterValveDuration = leftWaterValveDuration
self.h5file.root._v_attrs.rightWaterValveDuration = rightWaterValveDuration
self.eventsGroup = self.h5file.create_group(where='/', name='event_times', title='Event Timestamps Per Trial')
self.trialsTable = None # Make it None for now because have to wait for completion of first trial to get the infoDict with data on the trial. Once that comes, make a description dictionary using the infoDict and then use that description dict to create the trialsTable.
self.trialsTableDescDict = {} # Description for the trialsTable (using this instead of making a class definition and subclassing tables.IsDescription).
self.statesTable = None # Make it None because have to wait for completion of first trial to get names of all the states. Afterwhich, make description dictionary and then table.
self.statesTableDescDict = {} # Description for the states table (using this instead of making a class definition and subclassing tables.IsDescription).
self.eventsTable = None # Same thing here, and also because I do not want to create the eventsTable if no input events even occurred.
self.eventsTableDescDict = {} # Description for the events table instead of making a class definition and subclassing tables.IsDescription.
self.keepRunning = True
self.newData = False
self.trialNum = 1
self.infoDict = {}
self.adc = analogInModule
self.bpod = bpod
if olfaConfigFile:
with open(olfaConfigFile, 'r') as configFile:
self.olfaConfigDict = json.load(configFile)
self.nOlfas = len(self.olfaConfigDict['Olfactometers'])
# Make the description dict for the vials table.
self.vialsTableDescDict = {}
pos = 0
self.vialsTableDescDict["olfa"] = tables.UInt8Col(pos=pos)
pos += 1
self.vialsTableDescDict["vial"] = tables.UInt8Col(pos=pos)
pos += 1
self.vialsTableDescDict["odor"] = tables.StringCol(32, pos=pos)
pos += 1
self.vialsTableDescDict["conc"] = tables.Float32Col(pos=pos)
# Make the vials table using the description dict above.
self.vialsTable = self.h5file.create_table(where=self.h5file.root, name='vials', description=self.vialsTableDescDict, title='Vial Details')
self.vialsRow = self.vialsTable.row
# Write to the vials table.
olfaIndex = 0
for olfaDict in self.olfaConfigDict['Olfactometers']:
for vialNum, vialInfo in olfaDict['Vials'].items():
if not (vialInfo['odor'] == 'dummy'):
self.vialsRow['olfa'] = olfaIndex
self.vialsRow['vial'] = int(vialNum)
self.vialsRow['odor'] = vialInfo['odor']
self.vialsRow['conc'] = vialInfo['conc']
self.vialsRow.append()
olfaIndex += 1
self.vialsTable.flush()
if self.adc is not None:
self.bpod = None # Avoid using the bpod in case it was also given as a parameter.
self.analogSettings = analogInSettings
self.rangeLimits = {'-10V:10V': [-10.0, 10.0], '-5V:5V': [-5.0, 5.0], '-2.5V:2.5V': [-2.5, 2.5],'0V:10V': [0.0, 10.0]}
self.maxVoltages = [self.rangeLimits[x][1] for x in self.analogSettings['inputRanges']] # Make a list of integers for the max voltage of each channel's input range. analogSettings['inputRanges'] returns a list of strings that are used as keys in self.rangeLimits.
self.minVoltages = [self.rangeLimits[x][0] for x in self.analogSettings['inputRanges']] # Make a list of integers for the min voltage of each channel's input range.
self.samplingPeriod = 1 / (self.analogSettings['samplingRate'])
self.analogDataBufferSize = 5 # Size of buffer to send to the streamingWorker for plotting the analog data. Larger the buffer, the thicker the line gets and steps become more visible as buffers get sent before the previous buffer is completely plotted.
self.analogDataBuffer = np.zeros(shape=self.analogDataBufferSize, dtype='float32')
self.saveVoltages = False
self.counter = 0
self.previousTimer = 0
self.t_start = 0
self.bpodTime = 0
self.voltsGroup = self.h5file.create_group(where='/', name='voltages', title='Voltages Per Trial')
# Make the description dict for the setting table.
self.voltsSettingsDescDict = {}
pos = 0
self.voltsSettingsDescDict['samplingRate'] = tables.UInt16Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['inputRange'] = tables.StringCol(10, pos=pos)
pos += 1
self.voltsSettingsDescDict['thresholdVoltage'] = tables.Float32Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['resetVoltage'] = tables.Float32Col(pos=pos)
# Make the settings table using the description dict above.
self.voltsSettingsTable = self.h5file.create_table(where='/voltages', name='settings', description=self.voltsSettingsDescDict, title='Analog Input Settings')
self.voltsSettingsRow = self.voltsSettingsTable.row
# Write to the settings table.
for i in range(self.analogSettings['nActiveChannels']): # Each active channel will have a row.
self.voltsSettingsRow['samplingRate'] = self.analogSettings['samplingRate'] # sampling rate is global for all channels.
self.voltsSettingsRow['inputRange'] = self.analogSettings['inputRanges'][i]
self.voltsSettingsRow['thresholdVoltage'] = self.analogSettings['thresholdVoltages'][i]
self.voltsSettingsRow['resetVoltage'] = self.analogSettings['resetVoltages'][i]
self.voltsSettingsRow.append()
self.voltsSettingsTable.flush()
# Make the description dict for the volts table.
self.voltsTableDescDict = {} # Description dictionary for the volts table instead of making a class definition and subclassing tables.IsDescription.
pos = 0
# self.voltsTableDescDict['prefix'] = tables.UInt8Col(pos=pos)
# pos += 1
# self.voltsTableDescDict['syncByte'] = tables.UInt8Col(pos=pos)
# pos += 1
self.voltsTableDescDict['bpodTime'] = tables.Float32Col(pos=pos)
pos += 1
for i in range(self.analogSettings['nActiveChannels']):
self.voltsTableDescDict[f'voltageCh{i}'] = tables.Float32Col(pos=pos) # make a column for each channel used.
pos += 1
# Make the volts table using the description dict above.
self.voltsTable = self.h5file.create_table(where='/voltages', name=f'trial_{self.trialNum:03d}', description=self.voltsTableDescDict, title=f'Trial {self.trialNum} Voltage Data')
self.voltsRow = self.voltsTable.row
elif self.bpod is not None:
self.channelIndices = self.bpod.hardware.analog_input_channels # list of channel indices of channels configured for analog input.
if (self.channelIndices is not None) and (len(self.channelIndices) > 0):
# This means it is a list of at least one flex channel number that is configured for analog input.
self.nChannels = len(self.channelIndices)
self.thresholds_1 = self.bpod.hardware.analog_input_thresholds_1
self.thresholds_2 = self.bpod.hardware.analog_input_thresholds_2
self.polarities_1 = self.bpod.hardware.analog_input_threshold_polarity_1
self.polarities_2 = self.bpod.hardware.analog_input_threshold_polarity_2
self.maxVoltages = [5] * self.nChannels # Make a list of integers for the max voltage of each channel's input range.
self.minVoltages = [0] * self.nChannels # Make a list of integers for the min voltage of each channel's input range.
                self.samplingPeriod = self.bpod.hardware.analog_input_sampling_interval * 0.0001  # Multiply by the state machine's timer period of 100 microseconds.
self.bpodTime = 0
self.voltsGroup = self.h5file.create_group(where='/', name='voltages', title='Voltages Per Trial')
# Make the description dict for the setting table.
self.voltsSettingsDescDict = {}
pos = 0
self.voltsSettingsDescDict['samplingRate'] = tables.UInt16Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['inputRange'] = tables.StringCol(10, pos=pos)
pos += 1
self.voltsSettingsDescDict['thresholdVoltage_1'] = tables.Float32Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['thresholdVoltage_2'] = tables.Float32Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['thresholdPolarity_1'] = tables.UInt8Col(pos=pos)
pos += 1
self.voltsSettingsDescDict['thresholdPolarity_2'] = tables.UInt8Col(pos=pos)
# Make the settings table using the description dict above.
self.voltsSettingsTable = self.h5file.create_table(where='/voltages', name='settings', description=self.voltsSettingsDescDict, title='Analog Input Settings')
self.voltsSettingsRow = self.voltsSettingsTable.row
# Write to the settings table.
for i in range(self.nChannels): # Each analog input channel will have a row.
self.voltsSettingsRow['samplingRate'] = 1 / self.samplingPeriod # sampling rate is global for all channels.
self.voltsSettingsRow['inputRange'] = "0V:5V" # global for all flex channels.
self.voltsSettingsRow['thresholdVoltage_1'] = (self.thresholds_1[self.channelIndices[i]] / 4095) * self.maxVoltages[i] # Convert to voltage
                    self.voltsSettingsRow['thresholdVoltage_2'] = (self.thresholds_2[self.channelIndices[i]] / 4095) * self.maxVoltages[i]  # Convert to voltage using the channel's max range (minVoltages is always 0 here, so scaling by it would zero the threshold).
self.voltsSettingsRow['thresholdPolarity_1'] = self.polarities_1[self.channelIndices[i]]
self.voltsSettingsRow['thresholdPolarity_2'] = self.polarities_2[self.channelIndices[i]]
self.voltsSettingsRow.append()
self.voltsSettingsTable.flush()
# Make the description dict for the volts table.
self.voltsTableDescDict = {} # Description dictionary for the volts table instead of making a class definition and subclassing tables.IsDescription.
pos = 0
self.voltsTableDescDict['trialNum'] = tables.UInt8Col(pos=pos)
pos += 1
self.voltsTableDescDict['bpodTime'] = tables.Float32Col(pos=pos)
pos += 1
for i in range(self.nChannels):
self.voltsTableDescDict[f'voltageCh{self.channelIndices[i]}'] = tables.Float32Col(pos=pos) # make a column for each channel used.
pos += 1
# Make the volts table using the description dict above.
self.voltsTable = self.h5file.create_table(where='/voltages', name=f'trial_{self.trialNum:03d}', description=self.voltsTableDescDict, title=f'Trial {self.trialNum} Voltage Data')
self.voltsRow = self.voltsTable.row
else:
self.bpod = None # Make it None to indicate to other functions below that there is no analog input.
def receiveInfoDict(self, infoDict):
self.newData = True
self.infoDict = infoDict
def saveStatesTimestamps(self):
# Define the description for the states table using a dictionary of the states timestamps. Then create the states table (only once).
if self.statesTable is None:
pos = 0
for k, v in self.infoDict['States timestamps'].items():
keyString = k + '_start'
self.statesTableDescDict[keyString] = tables.Float32Col(pos=pos)
pos += 1
keyString = k + '_end'
self.statesTableDescDict[keyString] = tables.Float32Col(pos=pos)
pos += 1
self.statesTable = self.h5file.create_table(where='/', name='state_times', description=self.statesTableDescDict, title='States Timestamps')
self.statesRow = self.statesTable.row
# Fill in column values for the states timestamps for the current row since statesTable has now been created.
for k, v in self.infoDict['States timestamps'].items():
keyString = k + '_start'
self.statesRow[keyString] = v[0][0]
keyString = k + '_end'
self.statesRow[keyString] = v[0][1]
# only one row per end of trial data so append what was written and flush to disk.
self.statesRow.append()
self.statesTable.flush()
def saveEventsTimestamps(self):
# Create the eventsTableDescDict (and then the eventsTable) every trial because it is not always the same every trial. Some events happen one trial but not in another.
pos = 0
eventCounters = []
for event, eventList in self.infoDict['Events timestamps'].items():
self.eventsTableDescDict[event] = tables.Float32Col(dflt=np.nan, pos=pos)
eventCounters.append(len(eventList)) # Store the lengths of each eventList because will need to find the longest list below by finding the max.
pos += 1
self.eventsTable = self.h5file.create_table(where='/event_times', name=f'trial_{self.trialNum:03d}', description=self.eventsTableDescDict, title=f'Trial {self.trialNum} Event Timestamps')
self.eventsRow = self.eventsTable.row
# Use the length of the longest list to make an index to take one element from each eventList and add it to the row. If an IndexError happens because an eventList is shorter
# than the longest eventList, then add np.nan to that row.
for i in range(max(eventCounters)):
for event, eventsList in self.infoDict['Events timestamps'].items():
try:
self.eventsRow[event] = eventsList[i]
except IndexError:
self.eventsRow[event] = np.nan
self.eventsRow.append()
self.eventsTable.flush()
def saveTrialData(self):
        # If it's None, that means the first trial's data just came, so make the description dict and then create the trialsTable using that description dict. This only happens once.
if self.trialsTable is None:
pos = 0
# self.trialsTableDescDict['trialNum'] = tables.UInt16Col(pos=pos)
# pos += 1
# self.trialsTableDescDict['correctResponse'] = tables.StringCol(8, pos=pos)
# pos += 1
self.trialsTableDescDict['responseResult'] = tables.StringCol(7, pos=pos)
pos += 1
# self.trialsTableDescDict['itiDuration'] = tables.UInt8Col(pos=pos)
# pos += 1
# self.trialsTableDescDict['bpodStartTime'] = tables.Float32Col(pos=pos)
# pos += 1
self.trialsTableDescDict['trialStartTime'] = tables.Float32Col(pos=pos)
pos += 1
self.trialsTableDescDict['trialEndTime'] = tables.Float32Col(pos=pos)
pos += 1
# self.trialsTableDescDict['totalTrialTime'] = tables.Float32Col(pos=pos)
# pos += 1
# Loop through the olfactometers used to save each one's parameters for each stimulus in their own column.
stimIndex = 0
for stimDict in self.infoDict['stimList']:
for olfaName in stimDict['olfas'].keys():
self.trialsTableDescDict[f'odor{stimIndex}_{olfaName}_vial'] = tables.UInt8Col(pos=pos)
pos += 1
# self.trialsTableDescDict[f'odor{stimIndex}_{olfaName}_name'] = tables.StringCol(32, pos=pos) # Size of strings added to the column does not need to exactly match the size given during initialization.
# pos += 1
# self.trialsTableDescDict[f'odor{stimIndex}_{olfaName}_conc'] = tables.Float32Col(pos=pos)
# pos += 1
self.trialsTableDescDict[f'odor{stimIndex}_{olfaName}_flow'] = tables.UInt8Col(pos=pos) # This is assuming that only flowrates between 1 to 100 will be used.
pos += 1
stimIndex += 1
self.trialsTable = self.h5file.create_table(where='/', name='trial_data', description=self.trialsTableDescDict, title='Trial Data')
self.trialRow = self.trialsTable.row
self.h5file.root._v_attrs.bpodStartTime = self.infoDict['Bpod start timestamp'] # Save the bpod start time as an attribute instead of in the table because it remains the same for every trial. So save it when the first trial's data comes.
# Fill in the column values for the row now that the trialsTable has been created.
# self.trialRow['trialNum'] = self.infoDict['currentTrialNum']
# self.trialRow['correctResponse'] = self.infoDict['correctResponse']
self.trialRow['responseResult'] = self.infoDict['responseResult']
# self.trialRow['itiDuration'] = self.infoDict['currentITI']
# self.trialRow['bpodStartTime'] = self.infoDict['Bpod start timestamp']
self.trialRow['trialStartTime'] = self.infoDict['Trial start timestamp']
self.trialRow['trialEndTime'] = self.infoDict['Trial end timestamp']
# self.trialRow['totalTrialTime'] = self.trialRow['trialEndTime'] - self.trialRow['trialStartTime']
stimIndex = 0
for stimDict in self.infoDict['stimList']: # Loop again to save the data to the columns.
for olfaName, olfaValues in stimDict['olfas'].items():
self.trialRow[f'odor{stimIndex}_{olfaName}_vial'] = int(olfaValues['vialNum'])
# self.trialRow[f'odor{stimIndex}_{olfaName}_name'] = olfaValues['odor']
# self.trialRow[f'odor{stimIndex}_{olfaName}_conc'] = olfaValues['vialconc']
self.trialRow[f'odor{stimIndex}_{olfaName}_flow'] = olfaValues['mfc_1_flow']
stimIndex += 1
self.trialRow.append()
self.trialsTable.flush()
def saveAnalogDataFromModule(self):
analogData = self.adc.getSampleFromUSB()
# Uses the computer's clock to make the timestamps for the samples and period in between each sample.
# currentTimer = time.perf_counter()
# period = currentTimer - self.previousTimer
# elapsed = currentTimer - self.t_start
# self.previousTimer = currentTimer
if analogData is not None:
prefix = analogData[0][0]
syncByte = analogData[0][1]
samples = analogData[1]
voltages = [0] * len(samples)
if (prefix == 35): # 35 is the decimal value for the ascii char '#' which is the prefix for when the syncByte is received. Otherwise the prefix is 'R' and the syncByte will be zero.
if (syncByte == 1):
self.saveVoltages = True # Start saving to h5 file when syncByte value of 1 is received.
elif (syncByte == 2):
self.saveVoltages = False # Stop saving to h5 file when syncByte value of 2 is received.
# convert decimal bit value to voltage. The length of samples indicates how many channels are streaming to USB.
for i in range(len(samples)):
if (self.minVoltages[i] == 0): # This is when input voltage range is 0V to 10V.
voltages[i] = ((samples[i] * self.maxVoltages[i]) / 8192)
else:
if samples[i] >= 4096:
samples[i] -= 4096
voltages[i] = (samples[i] * self.maxVoltages[i]) / 4096
elif samples[i] < 4096:
voltages[i] = ((samples[i] * self.maxVoltages[i]) / 4096) - self.maxVoltages[i]
if self.saveVoltages:
# self.voltsRow['computerTime'] = elapsed
# self.voltsRow['computerPeriod'] = period
# self.voltsRow['prefix'] = prefix
# self.voltsRow['syncByte'] = syncByte
self.voltsRow['bpodTime'] = self.bpodTime
self.bpodTime += self.samplingPeriod
for i in range(len(voltages)):
self.voltsRow[f'voltageCh{i}'] = voltages[i]
self.voltsRow.append()
# fill buffer and send it when full using the signal.
if self.counter < self.analogDataBufferSize:
self.analogDataBuffer[self.counter] = voltages[0] # Need to use element, not list
self.counter += 1
else:
# self.voltsTable.flush() # Write to the file whenever the buffer gets full instead of waiting for the end of trial dict to come from the protocolWorker thread.
self.analogDataSignal.emit(self.analogDataBuffer)
self.counter = 0
self.analogDataBuffer[self.counter] = voltages[0]
self.counter += 1
def saveAnalogDataFromBpod(self):
analogData = self.bpod.read_analog_input()
if len(analogData) > 0:
# convert decimal bit value to voltage. The length of samples indicates how many channels are streaming to USB.
            nSamples = len(analogData) // (self.nChannels + 1)  # Add one to account for the trial number that is included with every sample.
            voltages = [[] for _ in range(self.nChannels)]  # make an independent sublist for each channel ([[]] * n would alias one shared list)
ind = 0
for s in range(nSamples):
trialNum = analogData[ind]
ind += 1
if trialNum == self.trialNum: # Note that self.trialNum starts at 1 and is incremented each time a new info dict is received.
self.voltsRow['trialNum'] = trialNum
self.voltsRow['bpodTime'] = self.bpodTime
self.bpodTime += self.samplingPeriod
for i in range(self.nChannels):
samp = (analogData[ind] / 4095) * self.maxVoltages[i]
ind += 1
voltages[i].append(samp)
self.voltsRow[f'voltageCh{self.channelIndices[i]}'] = voltages[i][-1] # The most recently appended value is the current ind.
self.voltsRow.append()
else:
# Skip over this sample. Instead of breaking out of the for loop, run the for loop below to increment ind, just in case analogData
# is more than one sample, in which case the next sample could be the correct trialNum.
for i in range(self.nChannels):
ind += 1
self.analogDataSignal.emit(np.array(voltages[0], dtype='float32')) # StreamingWorker is currently only capable of plotting one channel.
def run(self):
# self.t_start = time.perf_counter()
while self.keepRunning:
if self.newData:
self.newData = False # reset
if not (self.infoDict == {}):
self.saveTrialData()
self.saveEventsTimestamps()
self.saveStatesTimestamps()
self.trialNum += 1 # increment trial number.
if (self.adc is not None) or (self.bpod is not None):
# The trial data above comes at the end of a trial, so write the voltages to the disk, and create a new table for the next trial's voltages
self.voltsTable.flush()
self.saveVoltages = False # reset for the next trial.
self.bpodTime = 0 # reset timestamps for samples back to zero.
# Re-iterate through the volts table row by row to update the bpodTime so it corresponds to the bpod's trial start time instead of starting it at zero.
# self.bpodTime = self.infoDict['Trial start timestamp']
# for voltsRow in self.voltsTable.iterrows():
# voltsRow['bpodTime'] = self.bpodTime
# self.bpodTime += self.samplingPeriod
# voltsRow.update()
# self.voltsTable.flush()
self.voltsTable = self.h5file.create_table(where='/voltages', name=f'trial_{self.trialNum:03d}', description=self.voltsTableDescDict, title=f'Trial {self.trialNum} Voltage Data')
self.voltsRow = self.voltsTable.row
else:
# Empty dict means to discard the trial and repeat it.
if (self.adc is not None) or (self.bpod is not None):
self.saveVoltages = False
self.bpodTime = 0
                        self.voltsTable.remove()  # Delete the current table and create a new empty one below.
self.voltsTable = self.h5file.create_table(where='/voltages', name=f'trial_{self.trialNum:03d}', description=self.voltsTableDescDict, title=f'Trial {self.trialNum} Voltage Data')
self.voltsRow = self.voltsTable.row
elif self.adc is not None:
self.saveAnalogDataFromModule()
elif self.bpod is not None:
self.saveAnalogDataFromBpod()
else:
QThread.sleep(1) # Need this or else entire application will become severely unresponsive.
if (self.adc is not None) or (self.bpod is not None):
self.voltsTable.flush()
self.h5file.close()
logging.info("h5 file closed")
self.finished.emit()
def stopRunning(self):
self.keepRunning = False
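# Illustrative sketch, not part of the original class: standalone helpers mirroring the
# sample-to-voltage conversions performed in saveAnalogDataFromModule and saveAnalogDataFromBpod.
# Assumptions drawn from the code above: Bpod flex channels report 12-bit samples over 0-5 V; the
# analog input module uses a 13-bit scale for unipolar ranges and folds the sign at 4096 for bipolar ranges.
def flex_sample_to_volts(sample, v_max=5.0):
    # Bpod flex channel: 12-bit sample mapped onto 0 V .. v_max.
    return (sample / 4095) * v_max

def module_sample_to_volts(sample, v_min, v_max):
    # Analog input module sample, per the branch logic in saveAnalogDataFromModule.
    if v_min == 0:  # unipolar range, e.g. 0V:10V
        return (sample * v_max) / 8192
    if sample >= 4096:  # bipolar range, upper half of the scale (positive voltages)
        return ((sample - 4096) * v_max) / 4096
    return (sample * v_max) / 4096 - v_max  # bipolar range, lower half of the scale (negative voltages)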
| 58.395161
| 278
| 0.610482
|
be6623a9ea9767d5eb20a9cf0c394244be98c206
| 7,380
|
py
|
Python
|
invar-example/target/generated-sources/example/python/TestXyzTestRefer.py
|
struqt/invar
|
4547a6de593839ae68e19bc108918fb0d2530d5e
|
[
"MIT"
] | 7
|
2016-08-26T05:10:20.000Z
|
2017-08-09T14:28:56.000Z
|
invar-example/target/generated-sources/example/python/TestXyzTestRefer.py
|
struqt/invar
|
4547a6de593839ae68e19bc108918fb0d2530d5e
|
[
"MIT"
] | null | null | null |
invar-example/target/generated-sources/example/python/TestXyzTestRefer.py
|
struqt/invar
|
4547a6de593839ae68e19bc108918fb0d2530d5e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===------------------------------* Python *------------------------------===
# THIS FILE IS GENERATED BY INVAR. DO NOT EDIT !!!
# ===------------------------------------------------------------------------===
from TestAbcCustom import Custom
from TestAbcGender import Gender
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from InvarCodec import DataWriter
from InvarCodec import DataReader
class TestRefer(object):
"""引用类型测试"""
CRC32_ = 0xBBD63AFD
SIZE_ = 60
__slots__ = (
'_numberi08',
'_numberi16',
'_numberi32',
'_numberi64',
'_numberu08',
'_numberu16',
'_numberu32',
'_numberu64',
'_numberSingle',
'_numberDouble',
'_boolValue',
'_stringValue',
'_enumValue',
'_other',
'_self',
'_listI08',
'_dictI08')
#__slots__
def __init__(self):
self._numberi08 = -1
self._numberi16 = -1
self._numberi32 = -1
self._numberi64 = -1
self._numberu08 = 0
self._numberu16 = 0
self._numberu32 = 0
self._numberu64 = 0
self._numberSingle = 0.0
self._numberDouble = 0.00
self._boolValue = False
self._stringValue = ''
self._enumValue = Gender.NONE
self._other = Custom()
self._self = None
self._listI08 = []
self._dictI08 = {}
#def __init__
def __str__(self):
s = StringIO()
s.write(u'{')
s.write(u' ')
s.write(u'TestRefer')
s.write(u',')
s.write(u' ')
s.write(u'numberi08')
s.write(u':')
s.write(unicode(self._numberi08))
s.write(u',')
s.write(u' ')
s.write(u'numberi16')
s.write(u':')
s.write(unicode(self._numberi16))
s.write(u',')
s.write(u' ')
s.write(u'numberi32')
s.write(u':')
s.write(unicode(self._numberi32))
s.write(u',')
s.write(u' ')
s.write(u'numberi64')
s.write(u':')
s.write(unicode(self._numberi64))
s.write(u',')
s.write(u' ')
s.write(u'numberu08')
s.write(u':')
s.write(unicode(self._numberu08))
s.write(u',')
s.write(u' ')
s.write(u'numberu16')
s.write(u':')
s.write(unicode(self._numberu16))
s.write(u',')
s.write(u' ')
s.write(u'numberu32')
s.write(u':')
s.write(unicode(self._numberu32))
s.write(u',')
s.write(u' ')
s.write(u'numberu64')
s.write(u':')
s.write(unicode(self._numberu64))
s.write(u',')
s.write(u' ')
s.write(u'numberSingle')
s.write(u':')
s.write(unicode(self._numberSingle))
s.write(u',')
s.write(u' ')
s.write(u'numberDouble')
s.write(u':')
s.write(unicode(self._numberDouble))
s.write(u',')
s.write(u' ')
s.write(u'boolValue')
s.write(u':')
s.write(unicode(self._boolValue))
s.write(u',')
s.write(u' ')
s.write(u'stringValue')
s.write(u':')
s.write(u'"')
s.write(self._stringValue)
s.write(u'"')
s.write(u',')
s.write(u' ')
s.write(u'enumValue')
s.write(u':')
s.write(unicode(self._enumValue))
s.write(u',')
s.write(u' ')
s.write(u'other')
s.write(u':')
s.write(u'<')
s.write(u'Custom')
s.write(u'>')
s.write(u',')
s.write(u' ')
s.write(u'self')
s.write(u':')
if self._self is None:
s.write(u'null')
else:
s.write(u'<')
s.write(u'TestRefer')
s.write(u'>')
s.write(u',')
s.write(u' ')
s.write(u'listI08')
s.write(u':')
s.write(u'(')
s.write(str(len(self._listI08)))
s.write(u')')
s.write(u',')
s.write(u' ')
s.write(u'dictI08')
s.write(u':')
s.write(u'[')
s.write(str(len(self._dictI08)))
s.write(u']')
s.write(u' ')
s.write(u'}')
result = s.getvalue()
s.close()
return result
#def __str__
def __len__(self):
size = TestRefer.SIZE_
size += len(self._stringValue)
size += len(self._other)
if self._self is not None:
size += len(self._self)
if len(self._listI08) > 0:
size += len(self._listI08) * 1
if len(self._dictI08) > 0:
size += len(self._dictI08) * 2
return size
#def __len__
    def read(self, r):
self._numberi08 = r.readInt8()
self._numberi16 = r.readInt16()
self._numberi32 = r.readInt32()
self._numberi64 = r.readInt64()
self._numberu08 = r.readUInt8()
self._numberu16 = r.readUInt16()
self._numberu32 = r.readUInt32()
self._numberu64 = r.readUInt64()
self._numberSingle = r.readSingle()
self._numberDouble = r.readDouble()
self._boolValue = r.readBoolean()
self._stringValue = r.readString()
self._enumValue = r.readInt32()
self._other.read(r)
selfExists = r.readInt8()
if 0x01 == selfExists:
if self._self == None:
self._self = TestRefer()
self._self.read(r)
elif 0x00 == selfExists:
self._self = None
else:
raise InvarError(497, 'Protoc read error: The value of \'selfExists\' is invalid.')
lenListI08 = r.readUInt32()
num = 0
while num < lenListI08:
num += 1
n1 = r.readInt8()
self._listI08.append(n1)
lenDictI08 = r.readUInt32()
num = 0
while num < lenDictI08:
num += 1
k1 = r.readInt8()
v1 = r.readInt8()
self._dictI08[k1] = v1
#def read
    def write(self, w):
w.writeInt8(self._numberi08)
w.writeInt16(self._numberi16)
w.writeInt32(self._numberi32)
w.writeInt64(self._numberi64)
w.writeUInt8(self._numberu08)
w.writeUInt16(self._numberu16)
w.writeUInt32(self._numberu32)
w.writeUInt64(self._numberu64)
w.writeFloat(self._numberSingle)
w.writeDouble(self._numberDouble)
w.writeBool(self._boolValue)
w.writeString(self._stringValue)
w.writeInt32(self._enumValue)
self._other.write(w)
if self._self != None:
w.writeUInt8(0x01)
self._self.write(w)
else:
w.writeUInt8(0x00)
w.writeUInt32(len(self._listI08))
for n1 in self._listI08:
w.writeInt8(n1)
w.writeUInt32(len(self._dictI08))
for (k1,v1) in self._dictI08.items():
w.writeInt8(k1)
w.writeInt8(v1)
#def write
#class TestRefer
if '__main__' == __name__:
print('dir(TestRefer()) =>\n' + '\n'.join(dir(TestRefer())))
print('TestRefer.__doc__ => ' + TestRefer.__doc__)
print('TestRefer.__len__ => ' + str(len(TestRefer())))
print('TestRefer.__str__ => ' + str(TestRefer()))
| 28.384615
| 95
| 0.499187
|
ca65e093659c860b114ea8a3534c62099ab8bde5
| 626
|
py
|
Python
|
pyramid_sendgrid_webhooks/parser.py
|
GoodRx/pyramid-sendgrid-webhooks
|
5746b59c7b5a90c6a87c3e0114d760011fc13436
|
[
"MIT"
] | 5
|
2016-07-11T19:34:36.000Z
|
2021-11-06T17:00:39.000Z
|
pyramid_sendgrid_webhooks/parser.py
|
GoodRx/pyramid-sendgrid-webhooks
|
5746b59c7b5a90c6a87c3e0114d760011fc13436
|
[
"MIT"
] | 1
|
2019-07-09T15:59:04.000Z
|
2019-07-09T15:59:04.000Z
|
pyramid_sendgrid_webhooks/parser.py
|
GoodRx/pyramid-sendgrid-webhooks
|
5746b59c7b5a90c6a87c3e0114d760011fc13436
|
[
"MIT"
] | 4
|
2016-01-09T10:17:51.000Z
|
2021-11-06T17:00:32.000Z
|
# -*- coding: utf-8 -*-
"""
Parses webhook events from request
"""
from . import events
from . import errors
def parse_event_data(request, event_data):
""" Returns a single BaseWebhookEvent instance """
event_type = event_data['event']
try:
event_cls = events.event_mapping[event_type]
except KeyError:
raise errors.UnknownEventError(event_type)
return event_cls(request, event_data)
def webhooks_from_request(request):
"""
Generates a sequence of BaseWebhookEvent instances
"""
for event_data in request.json_body:
yield parse_event_data(request, event_data)
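# Illustrative usage sketch, not part of the module above: wiring webhooks_from_request into a
# Pyramid view. The route name and view name are hypothetical; only webhooks_from_request and the
# event objects come from this package.
from pyramid.view import view_config

@view_config(route_name='sendgrid_webhooks', request_method='POST', renderer='json')
def sendgrid_webhook_view(request):
    handled = 0
    for event in webhooks_from_request(request):
        # Each event is the BaseWebhookEvent subclass mapped from its 'event' field.
        handled += 1
    return {'handled': handled}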
| 23.185185
| 54
| 0.704473
|
ccda05aec1fd2aaf56d604469b2a3a545404270a
| 2,907
|
py
|
Python
|
federatedml/logistic_regression/test/logistic_regression_test.py
|
chenlongzhen/FATE-0.1
|
5a1f316676e77dca8311bb74a26a7623c4a97b86
|
[
"Apache-2.0"
] | 1
|
2019-02-25T13:43:24.000Z
|
2019-02-25T13:43:24.000Z
|
federatedml/logistic_regression/test/logistic_regression_test.py
|
crownpku/FATE
|
38fe6cea0dca3841b59c3d04cb04f556803e2e29
|
[
"Apache-2.0"
] | null | null | null |
federatedml/logistic_regression/test/logistic_regression_test.py
|
crownpku/FATE
|
38fe6cea0dca3841b59c3d04cb04f556803e2e29
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from federatedml.logistic_regression.base_logistic_regression import BaseLogisticRegression
from federatedml.param import LogisticParam
from arch.api import eggroll
from federatedml.feature import Instance
import numpy as np
class TestHomoLRGuest(unittest.TestCase):
def setUp(self):
# use default setting
eggroll.init("123")
logistic_param = LogisticParam()
self.model = BaseLogisticRegression(logistic_param)
self.data_instance = self.__prepare_data()
def __prepare_data(self, data_num=1000, feature_num=100):
final_result = []
for i in range(data_num):
tmp = i * np.ones(feature_num)
inst = Instance(inst_id=i, features=tmp, label=0)
tmp = (i, inst)
final_result.append(tmp)
table = eggroll.parallelize(final_result,
include_key=True,
partition=3)
return table
def test_save_load_model(self):
n_iter_ = 10
coef_ = [1., 0.2, 3.]
intercept_ = 0.3
classes_ = 2
model_table = "test_lr_table"
        model_namespace = "test_model_namespace"
self.model.save_model(model_table=model_table, model_namespace=model_namespace)
self.model.n_iter_ = n_iter_
self.model.coef_ = coef_
self.model.intercept_ = intercept_
self.model.classes_ = classes_
# self.model.load_model(model_table=model_table, model_namespace=model_namespace)
# Load model should change the value and make them not equal.
#self.assertNotEqual(self.model.n_iter_, n_iter_)
#self.assertNotEqual(self.model.coef_, coef_)
#self.assertNotEqual(self.model.intercept_, intercept_)
#self.assertNotEqual(self.model.classes_, classes_)
self.model.save_model(model_table=model_table, model_namespace=model_namespace)
self.model.load_model(model_table=model_table, model_namespace=model_namespace)
self.assertEqual(self.model.n_iter_, n_iter_)
self.assertEqual(self.model.coef_, coef_)
self.assertEqual(self.model.intercept_, intercept_)
self.assertEqual(self.model.classes_, classes_)
if __name__ == '__main__':
unittest.main()
| 37.269231
| 91
| 0.689026
|
18d214bee86fa42e41b3e88646b1ee8bba51d43f
| 914
|
py
|
Python
|
tadpole/template/app/lib/database/custom_types/email.py
|
echoyuanliang/pine
|
22175e6aea0ca9b02d6542677b27a690c1501c9c
|
[
"MIT"
] | 2
|
2017-12-02T07:02:31.000Z
|
2020-10-13T02:20:18.000Z
|
tadpole/template/app/lib/database/custom_types/email.py
|
echoyuanliang/pine
|
22175e6aea0ca9b02d6542677b27a690c1501c9c
|
[
"MIT"
] | null | null | null |
tadpole/template/app/lib/database/custom_types/email.py
|
echoyuanliang/pine
|
22175e6aea0ca9b02d6542677b27a690c1501c9c
|
[
"MIT"
] | 1
|
2018-04-23T04:59:38.000Z
|
2018-04-23T04:59:38.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
create at 2017/11/6 by allen
"""
import re
import sqlalchemy as sa
from sqlalchemy import types
from app.lib.database.utils import CaseInsensitiveComparator
class EmailType(types.TypeDecorator):
impl = sa.Unicode
comparator_factory = CaseInsensitiveComparator
re_exp = re.compile(r"^\w+([-+.']\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$")
def __init__(self, length=255, *args, **kwargs):
super(EmailType, self).__init__(length=length, *args, **kwargs)
def _validate(self, value):
if not self.re_exp.match(value):
raise ValueError('invalid email format %s' % value)
return value
def process_bind_param(self, value, dialect):
if value is not None:
return self._validate(value.lower())
return value
@property
def python_type(self):
return self.impl.type.python_type
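# Illustrative usage sketch, not part of the module above: a hypothetical declarative model that
# stores an email column with EmailType. Values are lowercased on bind and invalid addresses
# raise ValueError; comparisons are case-insensitive via the comparator_factory.
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)
    email = sa.Column(EmailType(length=255), unique=True)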
| 25.388889
| 74
| 0.645514
|
8c6fd1f8fc3a608784ac80c6dd954239a00d94d4
| 242
|
py
|
Python
|
exercises/pyfiles/ex311_hypotenuse.py
|
TUDelft-AE-Python/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | 1
|
2021-10-05T04:49:54.000Z
|
2021-10-05T04:49:54.000Z
|
exercises/pyfiles/ex311_hypotenuse.py
|
TUDelft-AE1205/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | null | null | null |
exercises/pyfiles/ex311_hypotenuse.py
|
TUDelft-AE1205/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | null | null | null |
from math import sqrt
a = float(input("Enter the length of right side a: "))
b = float(input("Enter the length of right side b: "))
# Option 1
c = sqrt(a * a + b * b)
# Option 2
c = (a**2 + b**2)**0.5
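# Option 3 (equivalent, added for reference): the standard library also provides math.hypot.
from math import hypot
c = hypot(a, b)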
print("The length hypotenuse c is", c)
| 22
| 54
| 0.619835
|